Example #1
File: kalman.py Project: Karlos7692/Thesis
    def __m__(self, C1_ts, C2_ts, P_ts, P_t_tm1s, P_tm1s, G_ts):

        # As in the E-step, the initial observation is used to compute the prior.
        n = self.n_observations() + 1

        C1_sum = np.sum(C1_ts, axis=2)
        P_t_sum = np.sum(P_ts, axis=2)
        P_t_tm1_sum_1tT = np.sum(P_t_tm1s[:, :, :], axis=2)
        P_tm1_sum_1tT = np.sum(P_tm1s[:, :, :], axis=2)

        # Output matrix fit.
        C = C1_sum @ pinv(P_t_sum)

        # Observation covariance fit.
        R = np.zeros((self.observations_size, self.observations_size))
        for t in range(n):
            R += C2_ts[:, :, t] - C @ G_ts[:, :, t]
        R *= 1.0/n

        # State dynamics
        A = P_t_tm1_sum_1tT @ pinv(P_tm1_sum_1tT)

        # Hidden Noise
        Q = 1.0/(n-1) * (np.sum(P_ts[:, :, 1:], axis=2) - A @ P_t_tm1_sum_1tT)

        # Control signal
        B = self.Bs[:, :, 0]
        D = self.Ds[:, :, 0]

        # Initial state
        init_mu = self.mus[:, :, 0]
        init_V = P_ts[:, :, 0] - init_mu @ init_mu.T

        return (A, B, C, D, Q, R), init_mu, init_V
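
A side note on the pinv pattern above (a standalone sketch, not from the project, plain NumPy only): the output-matrix fit C = C1_sum @ pinv(P_t_sum) is the least-squares solution of C @ P ≈ C1, as this small synthetic check illustrates.

import numpy as np
from numpy.linalg import pinv

rng = np.random.default_rng(0)
P = rng.standard_normal((4, 4))
P = P @ P.T                       # symmetric, full-rank "second moment" matrix
C_true = rng.standard_normal((3, 4))
C1 = C_true @ P                   # plays the role of the summed cross-moments
C_fit = C1 @ pinv(P)              # same pattern as the output-matrix fit above
assert np.allclose(C_fit, C_true)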
Example #2
File: test_pinv.py Project: jrs65/scalapy
def test_pinv_Z_alt():
    ## Test pseudo-inverse computation of a complex double precision distributed matrix
    m, n = 7, 4

    gA = np.random.standard_normal((m, n)).astype(np.float64)
    gA = gA + 1.0J * np.random.standard_normal((m, n)).astype(np.float64)
    gA = np.dot(gA, gA.T.conj())
    assert np.linalg.matrix_rank(gA) < gA.shape[0]  # not full rank
    gA = np.asfortranarray(gA)

    m, n = gA.shape

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)

    pinvA = rt.pinv(dA)
    gpinvA = pinvA.to_global_array()
    gpinvA = gpinvA[:n, :]

    if rank == 0:
        assert not allclose(gA, np.dot(gA, np.dot(gpinvA, gA)))
        assert not allclose(gpinvA, np.dot(gpinvA, np.dot(gA, gpinvA)))
        spinvA = la.pinv(gA)
        assert allclose(gA, np.dot(gA, np.dot(spinvA, gA)))
        assert allclose(spinvA, np.dot(spinvA, np.dot(gA, spinvA)))
        assert not allclose(gpinvA, la.pinv(gA)) # compare with scipy result
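
For context, the scipy reference used in this test satisfies the Moore-Penrose conditions even for the rank-deficient matrix built above. A minimal single-process sketch of those conditions, assuming only NumPy/SciPy (no scalapy):

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(1)
A = rng.standard_normal((7, 4)) + 1.0J * rng.standard_normal((7, 4))
A = np.dot(A, A.T.conj())            # 7x7 Hermitian matrix of rank <= 4
assert np.linalg.matrix_rank(A) < A.shape[0]
A_pinv = la.pinv(A)
assert np.allclose(A, np.dot(A, np.dot(A_pinv, A)))
assert np.allclose(A_pinv, np.dot(A_pinv, np.dot(A, A_pinv)))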
Example #3
File: csp.py Project: rajul/mne-python
    def _fit(self, cov_a, cov_b):
        """Aux Function (modifies cov_a and cov_b in-place)."""
        cov_a /= np.trace(cov_a)
        cov_b /= np.trace(cov_b)
        # compute the eigenvalues
        lambda_, u = linalg.eigh(cov_a + cov_b)
        # sort them
        ind = np.argsort(lambda_)[::-1]
        lambda2_ = lambda_[ind]

        u = u[:, ind]
        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

        # Form the generalized eigenvalue problem
        w_a = np.dot(np.dot(p, cov_a), p.T)
        w_b = np.dot(np.dot(p, cov_b), p.T)
        # and solve it
        vals, vecs = linalg.eigh(w_a, w_b)
        # sort vectors by discriminative power using the eigenvalues
        ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
        vecs = vecs[:, ind]
        # and project
        w = np.dot(vecs.T, p)

        self.filters_ = w
        self.patterns_ = linalg.pinv(w).T
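
The pinv call above inverts a diagonal eigenvalue matrix, so it behaves like a plain inverse while tolerating near-zero eigenvalues. A hedged sketch on synthetic covariances, showing that the resulting p whitens cov_a + cov_b:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(2)
xa = rng.standard_normal((500, 6))
xb = rng.standard_normal((500, 6))
cov_a = np.cov(xa.T)
cov_a /= np.trace(cov_a)
cov_b = np.cov(xb.T)
cov_b /= np.trace(cov_b)
lambda_, u = linalg.eigh(cov_a + cov_b)
p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda_))), u.T)
# whitening property: p (cov_a + cov_b) p.T is (numerically) the identity
assert np.allclose(p @ (cov_a + cov_b) @ p.T, np.eye(6))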
Example #4
    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
Example #5
    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in range(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in range(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                    - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in range(self.n_components):
                diff = X - self.means_[k]
                self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = linalg.pinv(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = linalg.pinv(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
                                                            self.scale_[k],
                                                            self.det_scale_[k],
                                                           n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
Example #6
def bayesian_regression(X, Y, K):
    d = K.shape[0]
    alpha = model_evidence(X, Y, K)
    V1_inv = np.eye(d)
    V2_inv = K
    # beta = (V1inv+alpha*V2inv)\(V1inv*Y'*X)/(X'*X)
    YTX = np.dot(Y.T, X)    
    XTX = np.dot(X.T, X)
    beta = np.dot(np.dot(linalg.pinv(V1_inv + alpha * V2_inv), 
                         np.dot(V1_inv, YTX)), linalg.pinv(XTX))
    return beta
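
A hypothetical usage sketch for the function above. model_evidence() is not shown in this example, so the sketch fixes alpha instead of estimating it; only the shapes and the pinv-based algebra are illustrated.

import numpy as np
from scipy import linalg

def bayesian_regression_sketch(X, Y, K, alpha=1.0):
    # same algebra as above, but with a fixed alpha instead of model_evidence()
    d = K.shape[0]
    V1_inv = np.eye(d)
    V2_inv = K
    YTX = np.dot(Y.T, X)
    XTX = np.dot(X.T, X)
    return np.dot(np.dot(linalg.pinv(V1_inv + alpha * V2_inv),
                         np.dot(V1_inv, YTX)), linalg.pinv(XTX))

X = np.random.randn(50, 3)           # n samples, p predictors
Y = np.random.randn(50, 4)           # n samples, d responses
K = np.eye(4)                        # d x d prior matrix
print(bayesian_regression_sketch(X, Y, K).shape)   # (4, 3)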
Example #7
def ICC_rep_anova(Y):

    # the data Y are entered as a 'table' ie subjects are in rows and repeated
    # measures in columns
    # flag = 1 returns design figure + question


    # ------------------------------------------------------------------------------------------
    #                   One Sample Repeated measure ANOVA
    #                   Y = XB + E with X = [FaTor / SubjeT]
    # ------------------------------------------------------------------------------------------

    [nb_subjects, nb_conditions] = Y.shape
    #print nb_subjects, nb_conditions
    df = nb_conditions - 1
    dfe = nb_subjects*nb_conditions - nb_subjects - df
    dfmodel = nb_subjects - df

    # create the design matrix for the different levels
    # ------------------------------------------------

    x = kron(eye(nb_conditions), ones((nb_subjects, 1)))# effect
    x0 = tile(eye(nb_subjects), (nb_conditions, 1))# subjeT
    X = hstack([x, x0])

    # Compute the repeated measure effect
    # ------------------------------------
    Y = Y.flatten('F')  # column-major, to match the kron-based design matrix

    # Sum Square Total
    SST = dot((Y.reshape(-1,1) - tile(mean(Y), (Y.shape[0], 1))).T, (Y.reshape(-1,1) - tile(mean(Y), (Y.shape[0], 1))))

    # Sum Square SubjeT (error in the ANOVA model)
    M = dot(dot(X, pinv(dot(X.T,X))), X.T)
    R = eye(Y.shape[0]) - M
    SSS = dot(dot(Y.T,R),Y)
    MSS = SSS / dfe

    # Sum square effect (repeated measure)
    Betas = dot(pinv(x),Y)# compute without cst/subjects
    yhat = dot(x,Betas)
    SSE = diag(dot((yhat.reshape(-1,1) - tile(mean(yhat), (yhat.shape[0], 1))).T, (yhat.reshape(-1,1) - tile(mean(yhat), (yhat.shape[0], 1)))))
    # MSE    = SSE / df;

    # Sum Square error
    SSError = SST - SSS - SSE
    MSError = SSError / dfmodel

    # ICC(3,1) = (mean square subjeT - mean square error) / (mean square subjeT + (k-1)*mean square error)
    return -((MSS - MSError) / (MSS + df * MSError))
    

#Y = array([[1,2],[2,3.5]])
#print ICC_rep_anova(Y)
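
Expanding on the commented-out call, a hedged usage sketch. The function relies on NumPy names being available at module level (array, kron, eye, ones, tile, dot, pinv, diag, mean, hstack), which is assumed here via explicit imports.

from numpy import array, kron, eye, ones, tile, dot, diag, mean, hstack
from numpy.linalg import pinv

# 5 subjects x 3 repeated measures
Y = array([[9., 2., 5.],
           [6., 1., 3.],
           [8., 4., 6.],
           [7., 1., 2.],
           [10., 5., 6.]])
print(ICC_rep_anova(Y))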
Example #8
def _cp3(X, n_components, tol, max_iter, init_type, random_state=None):
    """
    3 dimensional CANDECOMP/PARAFAC decomposition.

    This code is meant to be a tutorial/testing example... in general _cpN
    should be more compact and equivalent mathematically.
    """

    if len(X.shape) != 3:
        raise ValueError("CP3 decomposition only supports 3 dimensions!")

    if init_type == "random":
        A, B, C = _random_init(X, n_components, random_state)
    elif init_type == "hosvd":
        A, B, C = _hosvd_init(X, n_components)
    grams = [np.dot(arr.T, arr) for arr in (A, B, C)]
    err = 1E10

    for itr in range(max_iter):
        err_old = err
        A = matricize(X, 0).dot(kr(C, B)).dot(linalg.pinv(grams[1] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((A ** 2).sum(axis=0))
        else:
            normalization = A.max(axis=0)
            normalization[normalization < 1] = 1
        A /= normalization
        grams[0] = np.dot(A.T, A)

        B = matricize(X, 1).dot(kr(C, A)).dot(linalg.pinv(grams[0] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((B ** 2).sum(axis=0))
        else:
            normalization = B.max(axis=0)
            normalization[normalization < 1] = 1
        B /= normalization
        grams[1] = np.dot(B.T, B)

        C = matricize(X, 2).dot(kr(B, A)).dot(linalg.pinv(grams[0] * grams[1]))
        if itr == 0:
            normalization = np.sqrt((C ** 2).sum(axis=0))
        else:
            normalization = C.max(axis=0)
            normalization[normalization < 1] = 1
        C /= normalization
        grams[2] = np.dot(C.T, C)

        err = linalg.norm(matricize(X, 0) - np.dot(A, kr(C, B).T)) ** 2
        thresh = np.abs(err - err_old) / err_old
        if (thresh < tol) or (itr > max_iter):
            break

    return A, B, C
Example #9
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y.  See PLS for the meaning of the parameters.  It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of a X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)   # compute once pinv(X)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)    # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        ## 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        ## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
Example #10
    def _update_precisions(self):
        """Update the variational distributions for the precisions"""
        if self.cvtype == "spherical":
            self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
            for k in range(self.n_components):
                # XXX: how to avoid this huge temporary matrix in memory
                dif = self._X - self._means[k]
                self._b[k] = 1.0
                d = np.sum(dif * dif, axis=1)
                self._b[k] += 0.5 * np.sum(self._z.T[k] * (d + self.n_features))
                self._bound_prec[k] = 0.5 * self.n_features * (digamma(self._a[k]) - np.log(self._b[k]))
            self._precs = self._a / self._b

        elif self.cvtype == "diag":
            for k in range(self.n_components):
                self._a[k].fill(1.0 + 0.5 * np.sum(self._z.T[k], axis=0))
                ddif = self._X - self._means[k]  # see comment above
                for d in range(self.n_features):
                    self._b[k, d] = 1.0
                    dd = ddif.T[d] * ddif.T[d]
                    self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
                self._precs[k] = self._a[k] / self._b[k]
                self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k]) - np.log(self._b[k]))
                self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])

        elif self.cvtype == "tied":
            self._a = 2 + self._X.shape[0] + self.n_features
            self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
            for i in range(self._X.shape[0]):
                for k in range(self.n_components):
                    dif = self._X[i] - self._means[k]
                    self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)), dif.reshape((1, -1)))
            self._B = linalg.pinv(self._B)
            self._precs = self._a * self._B
            self._detB = linalg.det(self._B)
            self._bound_prec = 0.5 * detlog_wishart(self._a, self._B, self._detB, self.n_features)
            self._bound_prec -= 0.5 * self._a * np.trace(self._B)

        elif self.cvtype == "full":
            for k in range(self.n_components):
                T = np.sum(self._z.T[k])
                self._a[k] = 2 + T + self.n_features
                self._B[k] = (T + 1) * np.identity(self.n_features)
                for i in range(self._X.shape[0]):
                    dif = self._X[i] - self._means[k]
                    self._B[k] += self._z[i, k] * np.dot(dif.reshape((-1, 1)), dif.reshape((1, -1)))
                self._B[k] = linalg.pinv(self._B[k])
                self._precs[k] = self._a[k] * self._B[k]
                self._detB[k] = linalg.det(self._B[k])
                self._bound_prec[k] = 0.5 * detlog_wishart(self._a[k], self._B[k], self._detB[k], self.n_features)
                self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
Example #11
    def __init__(self, X, Y, rank, reg=None):
        if size(shape(X)) == 1:
            X = reshape(X, (-1, 1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1, 1))
        if reg is None:
            reg = 0
        self.rank = rank

        CXX = dot(X.T, X) + reg * eye(size(X, 1))
        CXY = dot(X.T, Y)
        _U, _S, V = svd(dot(CXY.T, dot(pinv(CXX), CXY)))
        self.W = V[0:rank, :].T
        self.A = dot(pinv(CXX), dot(CXY, self.W)).T
Example #12
def preProcess(u,y,NumDict):
    
    NumInputs = u.shape[0]
    NumOutputs = y.shape[0]
    NumRows = NumDict['Rows']
    NumCols = NumDict['Columns']
    NSig = NumDict['Dimension']
    UPast,UFuture = getHankelMatrices(u,NumRows,NumCols)
    YPast,YFuture = getHankelMatrices(y,NumRows,NumCols)
    Data = np.vstack((UPast,UFuture,YPast))
    L = la.lstsq(Data.T,YFuture.T)[0].T
    Z = np.dot(L,Data)
    DataShift = np.vstack((UPast,UFuture[NumInputs:],YPast))
    LShift = la.lstsq(DataShift.T,YFuture[NumOutputs:].T)[0].T
    ZShift = np.dot(LShift,DataShift)

    L1 = L[:,:NumInputs*NumRows]
    L3 = L[:,2*NumInputs*NumRows:]

    LPast = np.hstack((L1,L3))
    DataPast = np.vstack((UPast,YPast))

    U, S, Vt = la.svd(np.dot(LPast,DataPast))
    
    Sig = np.diag(S[:NSig])
    SigRt = np.diag(np.sqrt(S[:NSig]))
    Gamma = np.dot(U[:,:NSig],SigRt)
    GammaLess = Gamma[:-NumOutputs]

    GammaPinv = la.pinv(Gamma)
    GammaLessPinv = la.pinv(GammaLess)

    GamShiftSolve = la.lstsq(GammaLess,ZShift)[0]


    GamSolve = la.lstsq(Gamma,Z)[0]
    GamData = np.vstack((GamSolve,UFuture))

    GamYData = np.vstack((GamShiftSolve,YFuture[:NumOutputs]))
    # Should probably move to a better output structure
    # One that doesn't depend so heavily on ordering

    GammaDict = {'Data':GamData,
                 'DataLess':GammaLess,
                 'DataY':GamYData,
                 'Pinv': GammaPinv,
                 'LessPinv': GammaLessPinv}
    return GammaDict,S
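
A side note on the two solver styles mixed above (a plain NumPy/SciPy sketch): la.pinv(Gamma) applied to a right-hand side and la.lstsq(Gamma, ...) give the same least-squares solution when Gamma has full column rank, which is why the snippet can use either.

import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(3)
Gamma = rng.standard_normal((20, 5))      # tall, full column rank
Z = rng.standard_normal((20, 7))
x_pinv = np.dot(la.pinv(Gamma), Z)
x_lstsq = la.lstsq(Gamma, Z)[0]
assert np.allclose(x_pinv, x_lstsq)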
Example #13
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06):
    """Inner loop of the iterative NIPALS algorithm. provide an alternative
    of the svd(X'Y) ie. return the first left and rigth singular vectors of X'Y
    See PLS for the meaning of the parameters.
    It is similar to the Power method for determining the eigenvectors and
    eigenvalues of a X'Y
    """
    y_score = Y[:, [0]]
    u_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode is "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)   # compute once pinv(X)
            u = np.dot(X_pinv, y_score)
        else:  # mode A
        # Mode A regress each X column on y_score
            u = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        u /= np.sqrt(np.dot(u.T, u))
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, u)

        # 2.1 Update v: the Y weights
        if mode is "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)    # compute once pinv(Y)
            v = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each X column on y_score
            v = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize v
        v /= np.sqrt(np.dot(v.T, v))
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, v)

        u_diff = u - u_old
        if np.dot(u_diff.T, u_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        u_old = u
        ite += 1
    return u, v
Example #14
File: xdawn.py Project: vwyart/mne-python
def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
    """Least square estimation of evoked response from data.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
        The data from which to estimate the evoked responses.
    events : ndarray, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    event_id : dict
        The id of the events to consider
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds_data : dict of ndarray
        A dict of evoked data for each event type in event_id.
    toeplitz : dict of ndarray
        A dict of Toeplitz matrices, one for each event type in event_id.
    """
    nmin = int(tmin * sfreq)
    nmax = int(tmax * sfreq)

    window = nmax - nmin
    n_samples = data.shape[1]
    toeplitz_mat = dict()
    full_toep = list()
    for eid in event_id:
        # select events by type
        ix_ev = events[:, -1] == event_id[eid]

        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[ix_ev, 0]) + nmin
        trig[ix_trig] = 1
        toep_mat = linalg.toeplitz(trig[0:window], trig)
        toeplitz_mat[eid] = toep_mat
        full_toep.append(toep_mat)

    # Concatenate toeplitz
    full_toep = np.concatenate(full_toep)

    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
    all_evokeds = np.dot(predictor, data.T)
    all_evokeds = np.vsplit(all_evokeds, len(event_id))

    # parse evoked response
    evoked_data = dict()
    for idx, eid in enumerate(event_id):
        evoked_data[eid] = all_evokeds[idx].T

    return evoked_data, toeplitz_mat
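
A hedged toy version of the same estimator (synthetic data, no MNE): a Toeplitz design plus the pinv-based normal equations recovers a single-channel evoked response.

import numpy as np
from scipy import linalg

window, n_samples = 5, 50
true_evoked = np.array([0., 1., 2., 1., 0.])
trig = np.zeros((n_samples, 1))
trig[[10, 25, 40]] = 1                            # three event onsets
toep = linalg.toeplitz(trig[0:window], trig)      # shape (window, n_samples)
data = true_evoked @ toep                         # one channel built from the response at each onset
predictor = np.dot(linalg.pinv(np.dot(toep, toep.T)), toep)
estimate = np.dot(predictor, data.T)
assert np.allclose(estimate, true_evoked)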
Example #15
    def calculateGradient(self):
        # normalize rewards
        # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

        # initialize variables
        R = ones((self.dataset.getNumSequences(), 1), float)
        X = ones((self.dataset.getNumSequences(), self.loglh.getDimension('loglh') + 1), float)

        # collect sufficient statistics
        print(self.dataset.getNumSequences())
        for n in range(self.dataset.getNumSequences()):
            _state, _action, reward = self.dataset.getSequence(n)
            seqidx = ravel(self.dataset['sequence_index'])
            if n == self.dataset.getNumSequences() - 1:
                # last sequence until end of dataset
                loglh = self.loglh['loglh'][seqidx[n]:, :]
            else:
                loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]

            X[n, :-1] = sum(loglh, 0)
            R[n, 0] = sum(reward, 0)

        # linear regression
        beta = dot(pinv(X), R)
        return beta[:-1]
Example #16
	def _initParams_fast(self):
		""" 
		initialize the gp parameters
			1) project Y on the known factor X0 -> Y0
				average variance of Y0 is used to initialize the variance explained by X0
			2) consider the residual Y1 = Y-Y0 (this is equivalent to regressing out X0)
			3) perform PCA on cov(Y1) and keep the first k PCs to initialize X
			4) the variance of all other PCs is used to initialize the noise
			5) the variance explained by interaction is set to a small random number 
		"""
		Xd = LA.pinv(self.X0)
		Y0 = self.X0.dot(Xd.dot(self.Y))
		Y1 = self.Y-Y0
		YY = SP.cov(Y1)
		S,U = LA.eigh(YY)
		X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
		a = SP.array([SP.sqrt(Y0.var(0).mean())])
		b = 1e-3*SP.randn(1)
		c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
		else:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
		params['lik'] = c
		return params
Example #17
File: misc.py Project: PMBio/scLVM
def regressOut(Y,X):
	"""
	regresses out X from Y
	"""
	Xd = LA.pinv(X)
	Y_out = Y-X.dot(Xd.dot(Y))
	return Y_out
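
A hedged usage sketch for regressOut. LA is assumed here to stand for numpy.linalg; in the original module it is whatever linear-algebra alias misc.py imports.

import numpy as np
import numpy.linalg as LA     # assumption: stands in for the module's LA alias

rng = np.random.default_rng(4)
X = rng.standard_normal((100, 2))                       # known covariates
Y = X @ rng.standard_normal((2, 5)) + 0.1 * rng.standard_normal((100, 5))
Y_out = regressOut(Y, X)
# the residual is orthogonal to the regressed-out covariates
assert np.allclose(X.T @ Y_out, 0.0, atol=1e-8)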
Example #18
    def testPI(self, level=1):
        """ test TRAIN_PI with zero input and feedback """

        # init network
        self.net.setSimAlgorithm(SIM_STD)
        self.net.setTrainAlgorithm(TRAIN_PI)
        self.net.init()

        # train network
        washout = 2
        # test with zero input:
        indata = N.zeros((self.ins,self.train_size),self.dtype)
        outdata = N.random.rand(self.outs,self.train_size) * 2 - 1
        indata = N.asfarray( indata, self.dtype )
        outdata = N.asfarray( outdata, self.dtype )
        self.net.train( indata, outdata, washout )
        wout_target = self.net.getWout().copy()

        # teacher forcing, collect states
        X = self._teacherForcing(indata,outdata)

        # restructure data
        M = N.r_[X,indata]
        M = M[:,washout:self.train_size].T
        T = outdata[:,washout:self.train_size].T

        # calc pseudo inverse: wout = pinv(M) * T
        wout = ( N.dot(pinv(M),T) ).T

        # normalize result for comparison
        wout = wout / abs(wout).max()
        wout_target = wout_target / abs(wout_target).max()
        assert_array_almost_equal(wout_target,wout,2)
Example #19
File: test_basic.py Project: 7924102/scipy
 def test_simple_complex(self):
     a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
          + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
     a_pinv = pinv(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
     a_pinv = pinv2(a)
     assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
Example #20
def _least_square_evoked(epochs_data, events, tmin, sfreq):
    """Least square estimation of evoked response from epochs data.

    Parameters
    ----------
    epochs_data : array, shape (n_channels, n_times)
        The epochs data to estimate evoked.
    events : array, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    tmin : float
        Start time before event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds : array, shape (n_class, n_components, n_times)
        A concatenated array of evoked data, one slice per event type.
    toeplitz : array, shape (n_class * n_components, n_channels)
        A concatenated array of Toeplitz matrices, one per event type.
    """

    n_epochs, n_channels, n_times = epochs_data.shape
    tmax = tmin + n_times / float(sfreq)

    # Deal with shuffled epochs
    events = events.copy()
    events[:, 0] -= events[0, 0] + int(tmin * sfreq)

    # Construct the raw signal
    raw = _construct_signal_from_epochs(epochs_data, events, sfreq, tmin)

    # Compute the independent evoked responses per condition, while correcting
    # for event overlaps.
    n_min, n_max = int(tmin * sfreq), int(tmax * sfreq)
    window = n_max - n_min
    n_samples = raw.shape[1]
    toeplitz = list()
    classes = np.unique(events[:, 2])
    for ii, this_class in enumerate(classes):
        # select events by type
        sel = events[:, 2] == this_class

        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[sel, 0]) + n_min
        trig[ix_trig] = 1
        toeplitz.append(linalg.toeplitz(trig[0:window], trig))

    # Concatenate toeplitz
    toeplitz = np.array(toeplitz)
    X = np.concatenate(toeplitz)

    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(X, X.T)), X)
    evokeds = np.dot(predictor, raw.T)
    evokeds = np.transpose(np.vsplit(evokeds, len(classes)), (0, 2, 1))
    return evokeds, toeplitz
Example #21
    def _randomized_dpca(self,X,mXs,pinvX=None):
        """ Solves the dPCA minimization problem analytically by using a randomized SVD solver from sklearn.

            Returns
            -------
            P : dict mapping strings to array-like,
                Holds encoding matrices for each term in variance decompositions (used to transform data
                to low-dimensional space).

            D : dict mapping strings to array-like,
                Holds decoding matrices for each term in variance decompositions (used in inverse_transform
                to map from low-dimensional representation back to original data space).

        """

        n_features = X.shape[0]
        rX = X.reshape((n_features,-1))
        pinvX = pinv(rX) if pinvX is None else pinvX

        P, D = {}, {}

        for key in list(mXs.keys()):
            mX = mXs[key].reshape((n_features,-1)) # called X_phi in paper
            C = np.dot(mX,pinvX)

            if isinstance(self.n_components,dict):
                U,s,V = randomized_svd(np.dot(C,rX),n_components=self.n_components[key],n_iter=self.n_iter,random_state=np.random.randint(10e5))
            else:
                U,s,V = randomized_svd(np.dot(C,rX),n_components=self.n_components,n_iter=self.n_iter,random_state=np.random.randint(10e5))

            P[key] = U
            D[key] = np.dot(U.T,C).T

        return P, D
Example #22
    def score(self, X_test, assume_centered=False):
        """Computes the log-likelihood of a Gaussian data set with
        `self.covariance_` as an estimator of its covariance matrix.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
          Test data of which we compute the likelihood,
          where n_samples is the number of samples and n_features is
          the number of features.

        Returns
        -------
        res: float
          The likelihood of the data set with self.covariance_ as an estimator
          of its covariance matrix.

        """
        # compute empirical covariance of the test set
        test_cov = empirical_covariance(X_test, assume_centered=assume_centered)
        # compute log likelihood
        if self.store_precision:
            res = log_likelihood(test_cov, self.precision_)
        else:
            res = log_likelihood(test_cov, linalg.pinv(self.covariance_))

        return res
Example #23
    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

        Parameters
        ----------
        data: array-like, shape (n_samples, n_features)
          The data matrix, with p features and n samples.
          The data set must be the one which was used to compute
          the raw estimates.

        Returns
        -------
        covariance_corrected: array-like, shape (n_features, n_features)
          Corrected robust covariance estimate.

        """
        X_centered = data - self.raw_location_
        dist = np.sum(
            np.dot(X_centered, linalg.pinv(self.raw_covariance_)) * X_centered,
            1)
        correction = np.median(dist) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        self._set_estimates(covariance_corrected)
        return covariance_corrected
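
The distance computed above is a squared Mahalanobis distance under the pseudo-inverted raw covariance. A standalone hedged sketch of that step and of the chi-square correction factor, which should be close to 1 for clean Gaussian data:

import numpy as np
from scipy import linalg
from scipy.stats import chi2

rng = np.random.default_rng(5)
data = rng.standard_normal((2000, 3))
location = data.mean(axis=0)
covariance = np.cov(data.T)
X_centered = data - location
dist = np.sum(np.dot(X_centered, linalg.pinv(covariance)) * X_centered, 1)
correction = np.median(dist) / chi2(data.shape[1]).isf(0.5)
print(correction)          # roughly 1.0 for well-behaved Gaussian samples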
Example #24
    def train(self, dataset, targets, lamda=0):
        """ dataset: matrix of dimensions n x input
            targets: column vector of dimension n x output """

        # Choose random center vectors from training set
        self.centers = random.permutation(dataset)[:self.hidden_length]

        # Calculate data variance
        self.variance = np.var(dataset)

        # Calculate activations of RBFs
        green_matrix = self.calc_activation(dataset)

        # Calculate output weights
        if lamda == 0:
            self.W = dot(pinv(green_matrix), targets)  # With pseudoinverse
        else:
            green_matrix_transpose = np.transpose(green_matrix)
            # With operator lambda
            self.W = dot(inv(dot(green_matrix_transpose, green_matrix) + lamda * np.identity(self.hidden_length)),
                         dot(green_matrix_transpose, targets))

        # Get error
        result = self.test(dataset)
        error = self.cost_function(targets, result)
        return error
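
A hedged comparison of the two branches above on toy activations: the plain pseudo-inverse solution and the lamda-regularized normal-equation solution coincide as lamda goes to 0.

import numpy as np
from numpy.linalg import pinv, inv

rng = np.random.default_rng(6)
green_matrix = rng.standard_normal((30, 8))       # toy RBF activations (n x hidden)
targets = rng.standard_normal((30, 1))
W_pinv = np.dot(pinv(green_matrix), targets)
lamda = 1e-8
W_ridge = np.dot(inv(np.dot(green_matrix.T, green_matrix) + lamda * np.identity(8)),
                 np.dot(green_matrix.T, targets))
print(np.max(np.abs(W_pinv - W_ridge)))           # tiny for small lamda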
Example #25
def dist_commute_time(X):
    n_samples = X.shape[0]
    #n_features = X.shape[1]
    
    E = dist_euclidean(X)
    Estd = E.std()
    A = exp(-E**2 / Estd**2);
    D = zeros((n_samples,n_samples))
    for i in range(0,n_samples):
        A[i,i] = 0
        D[i,i] = A[i,:].sum(dtype=float)
    
    V = A.sum(dtype=float)
    print "\tGraph volume = " + str(V)
    
    #D = diag(A.sum(axis=1));
    L = D - A;
    Lp = linalg.pinv(L);
    
    CTD = zeros((n_samples,n_samples))
    for i in range(0,n_samples):
        for j in range(0,n_samples):
            CTD[i,j] = V * (Lp[i,i] + Lp[j,j] - 2*Lp[i,j])
    
    #CTD = CTD / CTD.max()
    #E = E / E.max()
    
    return CTD, E
Example #26
def precompute_D_step(z_hat, size_z, rho):
    """Precompute and cache Zhat^T and (Zhat^T * Zhat + rho*I)^-1 as used in the algorithm."""
    
    n = size_z[0]
    k = size_z[1]
    
    zhat_mat = np.transpose(z_hat.transpose(0,1,3,2).reshape(n, k, -1), [2, 0, 1])    
    zhat_inv_mat = np.zeros((zhat_mat.shape[0], k, k), dtype=imaginary_type)
    inv_rho_z_hat_z_hat_t = np.zeros((zhat_mat.shape[0], n, n), dtype=imaginary_type)
    z_hat_mat_t = np.transpose(np.ma.conjugate(zhat_mat), [0, 2, 1])
    
    #Compute Z_hat * Z_hat^T for each pixel
    z_hat_z_hat_t = np.einsum('knm,kmj->knj',zhat_mat, z_hat_mat_t)
    
    for i in range(zhat_mat.shape[0]):
        z_hat_z_hat_t_plus_rho = z_hat_z_hat_t[i]
        z_hat_z_hat_t_plus_rho.flat[::n + 1] += rho
        inv_rho_z_hat_z_hat_t[i] = linalg.pinv(z_hat_z_hat_t_plus_rho)
                         
    zhat_inv_mat = 1.0/rho * (np.eye(k) - 
                              np.einsum('knm,kmj->knj',                              
                                        np.einsum('knm,kmj->knj',
                                                  z_hat_mat_t,
                                                  inv_rho_z_hat_z_hat_t),
                                        zhat_mat))
    
    print ('Done precomputing for D')                                
    return [zhat_mat, zhat_inv_mat]
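
The per-pixel computation above relies on a Woodbury-style identity, so only an n x n system has to be inverted in place of the k x k one. A hedged numerical check of that identity:

import numpy as np
from numpy import linalg

rng = np.random.default_rng(7)
n, k, rho = 3, 6, 0.5
Z = rng.standard_normal((n, k)) + 1j * rng.standard_normal((n, k))
lhs = linalg.inv(np.dot(Z.conj().T, Z) + rho * np.eye(k))
rhs = (1.0 / rho) * (np.eye(k) -
                     np.dot(Z.conj().T,
                            np.dot(linalg.inv(np.dot(Z, Z.conj().T) + rho * np.eye(n)), Z)))
assert np.allclose(lhs, rhs)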
Example #27
File: rbfn.py Project: fakedrake/beethoven
 def _calc_weights(self, dataset_input, dataset_output):
     """
     Returns:
         weights (n_centroids+1, n_categories)
     """
     hidden_layer_output = self._calc_hidden_layer_output(dataset_input)
     self.weights = scipy.dot(pinv(hidden_layer_output.T), dataset_output.T)
Example #28
def test_yule_walker_R():
    # Test YW implementation against R results
    Y = np.array([1,3,4,5,8,9,10])
    N = len(Y)
    X = np.ones((N, 2))
    X[:,0] = np.arange(1,8)
    pX = spl.pinv(X)
    betas = np.dot(pX, Y)
    Yhat = Y - np.dot(X, betas)
    # R results obtained from:
    # >>> np.savetxt('yhat.csv', Yhat)
    # > yhat = read.table('yhat.csv')
    # > ar.yw(yhat$V1, aic=FALSE, order.max=2)
    def r_fudge(sigma, order):
        # Reverse fudge in ar.R calculation labeled as splus compatibility fix
        return sigma **2 * N / (N-order-1)
    rhos, sd = yule_walker(Yhat, 1, 'mle')
    assert_array_almost_equal(rhos, [-0.3004], 4)
    assert_array_almost_equal(r_fudge(sd, 1), 0.2534, 4)
    rhos, sd = yule_walker(Yhat, 2, 'mle')
    assert_array_almost_equal(rhos, [-0.5113, -0.7021], 4)
    assert_array_almost_equal(r_fudge(sd, 2), 0.1606, 4)
    rhos, sd = yule_walker(Yhat, 3, 'mle')
    assert_array_almost_equal(rhos, [-0.6737, -0.8204, -0.2313], 4)
    assert_array_almost_equal(r_fudge(sd, 3), 0.2027, 4)
Example #29
    def invmodes_m(self, mi, threshold=None):
        """Get the inverse modes.

        If the true inverse has been cached, return the modes for the current
        `threshold`. Otherwise generate the Moore-Penrose pseudo-inverse.

        Parameters
        ----------
        mi : integer
            m-mode to generate for.
        threshold : scalar
            S/N threshold to use.

        Returns
        -------
        invmodes : np.ndarray
        """

        evals = self.evals_m(mi, threshold)

        with h5py.File(self._evfile % mi, 'r') as f:
            if 'evinv' in f:
                inv = f['evinv'][:]

                if threshold is not None:
                    nevals = evals.size
                    inv = inv[(-nevals):]

                return inv.T

            else:
                print "Inverse not cached, generating pseudo-inverse."
                return la.pinv(self.modes_m(mi, threshold)[1])
Example #30
def _pseudo_inverse_dense(L, rhoss, method='direct', **pseudo_args):
    """
    Internal function for computing the pseudo inverse of an Liouvillian using
    dense matrix methods. See pseudo_inverse for details.
    """
    if method == 'direct':
        rho_vec = np.transpose(mat2vec(rhoss.full()))

        tr_mat = tensor([identity(n) for n in L.dims[0][0]])
        tr_vec = np.transpose(mat2vec(tr_mat.full()))

        N = np.prod(L.dims[0][0])
        I = np.identity(N * N)
        P = np.kron(np.transpose(rho_vec), tr_vec)
        Q = I - P
        LIQ = np.linalg.solve(L.full(), Q)
        R = np.dot(Q, LIQ)

        return Qobj(R, dims=L.dims)

    elif method == 'numpy':
        return Qobj(np.linalg.pinv(L.full()), dims=L.dims)

    elif method == 'scipy':
        return Qobj(la.pinv(L.full()), dims=L.dims)

    elif method == 'scipy2':
        return Qobj(la.pinv2(L.full()), dims=L.dims)

    else:
        raise ValueError("Unsupported method '%s'. Use 'direct' or 'numpy'" %
                         method)
Example #31
def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
                label=None, picks=None, pick_ori=None, verbose=None):
    """Dynamic Imaging of Coherent Sources (DICS).

    Calculate the DICS spatial filter based on a given cross-spectral
    density object and return estimates of source activity based on given data.

    Parameters
    ----------
    data : array or list / iterable
        Sensor space data. If data.ndim == 2 a single observation is assumed
        and a single stc is returned. If data.ndim == 3 or if data is
        a list / iterable, a list of stc's is returned.
    info : dict
        Measurement info.
    tmin : float
        Time of first sample.
    forward : dict
        Forward operator.
    noise_csd : instance of CrossSpectralDensity
        The noise cross-spectral density.
    data_csd : instance of CrossSpectralDensity
        The data cross-spectral density.
    reg : float
        The regularization for the cross-spectral density.
    label : Label | None
        Restricts the solution to a given label.
    picks : array-like of int | None
        Indices (in info) of data channels. If None, MEG and EEG data channels
        (without bad channels) will be used.
    pick_ori : None | 'normal'
        If 'normal', rather than pooling the orientations by taking the norm,
        only the radial component is kept.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc : SourceEstimate (or list of SourceEstimate)
        Source time courses.
    """

    is_free_ori, picks, _, proj, vertno, G =\
        _prepare_beamformer_input(info, forward, label, picks, pick_ori)

    Cm = data_csd.data

    # Calculating regularized inverse, equivalent to an inverse operation after
    # regularization: Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
    Cm_inv = linalg.pinv(Cm, reg)

    # Compute spatial filters
    W = np.dot(G.T, Cm_inv)
    n_orient = 3 if is_free_ori else 1
    n_sources = G.shape[1] // n_orient

    for k in range(n_sources):
        Wk = W[n_orient * k: n_orient * k + n_orient]
        Gk = G[:, n_orient * k: n_orient * k + n_orient]
        Ck = np.dot(Wk, Gk)

        # TODO: max-power is not implemented yet, however DICS does employ
        # orientation picking when one eigenvalue is much larger than the
        # other

        if is_free_ori:
            # Free source orientation
            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
        else:
            # Fixed source orientation
            Wk /= Ck

        # Noise normalization
        noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
        noise_norm = np.abs(noise_norm).trace()
        Wk /= np.sqrt(noise_norm)

    # Pick source orientation normal to cortical surface
    if pick_ori == 'normal':
        W = W[2::3]
        is_free_ori = False

    if isinstance(data, np.ndarray) and data.ndim == 2:
        data = [data]
        return_single = True
    else:
        return_single = False

    subject = _subject_from_forward(forward)
    for i, M in enumerate(data):
        if len(M) != len(picks):
            raise ValueError('data and picks must have the same length')

        if not return_single:
            logger.info("Processing epoch : %d" % (i + 1))

        # Apply SSPs
        if info['projs']:
            M = np.dot(proj, M)

        # project to source space using beamformer weights
        if is_free_ori:
            sol = np.dot(W, M)
            logger.info('combining the current components...')
            sol = combine_xyz(sol)
        else:
            # Linear inverse: do not delay computation due to non-linear abs
            sol = np.dot(W, M)

        tstep = 1.0 / info['sfreq']
        if np.iscomplexobj(sol):
            sol = np.abs(sol)  # XXX : STC cannot contain (yet?) complex values
        yield SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
                             subject=subject)

    logger.info('[done]')
Example #32
    def _linear_inverse_function(model, state, **kwargs):
        model_matrix = model.matrix(**kwargs)
        inv_model_matrix = pinv(model_matrix)

        return inv_model_matrix @ state.state_vector
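
A standalone hedged sketch of the same idea with a hand-built observation matrix (plain arrays rather than the framework's model/state objects): applying pinv of the model matrix yields the minimum-norm state consistent with the measurement.

import numpy as np
from numpy.linalg import pinv

H = np.array([[1., 0., 0.],
              [0., 1., 0.]])            # observes the first two state components
state_vector = np.array([[2.], [3.], [5.]])
measurement = H @ state_vector
recovered = pinv(H) @ measurement
print(recovered.ravel())                # [2. 3. 0.] -- the minimum-norm solution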
Example #33
    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples in the number of samples and
            n_features is the number of predictors.

        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples in the number of samples and
            n_targets is the number of response variables.
        """

        # copy since this will contains the residuals (deflated) matrices
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y[:, None]

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        if self.n_components < 1 or self.n_components > p:
            raise ValueError('invalid number of components')
        if self.algorithm not in ("svd", "nipals"):
            raise ValueError("Got algorithm %s when only 'svd' "
                             "and 'nipals' are known" % self.algorithm)
        if self.algorithm == "svd" and self.mode == "B":
            raise ValueError('Incompatible configuration: mode B is not '
                             'implemented with svd algorithm')
        if self.deflation_mode not in ["canonical", "regression"]:
            raise ValueError('The deflation mode is unknown')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
            = _center_scale_xy(X, Y, self.scale)
        # Residuals (deflated) matrices
        Xk = X
        Yk = Y
        # Results matrices
        self.x_scores_ = np.zeros((n, self.n_components))
        self.y_scores_ = np.zeros((n, self.n_components))
        self.x_weights_ = np.zeros((p, self.n_components))
        self.y_weights_ = np.zeros((q, self.n_components))
        self.x_loadings_ = np.zeros((p, self.n_components))
        self.y_loadings_ = np.zeros((q, self.n_components))
        self.n_iter_ = []

        # NIPALS algo: outer loop, over components
        for k in range(self.n_components):
            #1) weights estimation (inner loop)
            # -----------------------------------
            if self.algorithm == "nipals":
                x_weights, y_weights, n_iter_ = \
                    _nipals_twoblocks_inner_loop(
                        X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
                        tol=self.tol, norm_y_weights=self.norm_y_weights)
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
            # compute scores
            x_scores = np.dot(Xk, x_weights)
            if self.norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights.T, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss
            # test for null variance
            if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
                warnings.warn('X scores are null at iteration %s' % k)
            #2) Deflation (in place)
            # ----------------------
            # Possible memory footprint reduction may done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted to Xk, we suggest
            # to perform a column-wise deflation.
            #
            # - regress Xk's on x_score
            x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
            # - subtract rank-one approximations to obtain remainder matrix
            Xk -= np.dot(x_scores, x_loadings.T)
            if self.deflation_mode == "canonical":
                # - regress Yk's on y_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, y_scores) /
                              np.dot(y_scores.T, y_scores))
                Yk -= np.dot(y_scores, y_loadings.T)
            if self.deflation_mode == "regression":
                # - regress Yk's on x_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, x_scores) /
                              np.dot(x_scores.T, x_scores))
                Yk -= np.dot(x_scores, y_loadings.T)
            # 3) Store weights, scores and loadings # Notation:
            self.x_scores_[:, k] = x_scores.ravel()  # T
            self.y_scores_[:, k] = y_scores.ravel()  # U
            self.x_weights_[:, k] = x_weights.ravel()  # W
            self.y_weights_[:, k] = y_weights.ravel()  # C
            self.x_loadings_[:, k] = x_loadings.ravel()  # P
            self.y_loadings_[:, k] = y_loadings.ravel()  # Q
        # Such that: X = TP' + Err and Y = UQ' + Err

        # 4) rotations from input space to transformed space (scores)
        # T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
        self.x_rotations_ = np.dot(
            self.x_weights_,
            linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
        if Y.shape[1] > 1:
            self.y_rotations_ = np.dot(
                self.y_weights_,
                linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
        else:
            self.y_rotations_ = np.ones(1)

        if True or self.deflation_mode == "regression":
            # Estimate regression coefficient
            # Regress Y on T
            # Y = TQ' + Err,
            # Then express in function of X
            # Y = X W(P'W)^-1Q' + Err = XB + Err
            # => B = W*Q' (p x q)
            self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
            self.coefs = (1. / self.x_std_.reshape(
                (p, 1)) * self.coefs * self.y_std_)
        return self
Example #34
    def train(self,
              inputs,
              teachers,
              wash_nr_time_step,
              reset_state=True,
              float32=False,
              verbose=False):
        #TODO float32 : use float32 precision for training the reservoir instead of the default float64 precision
        #TODO: add a 'speed mode' where all asserts, prints and saved values are minimal
        #TODO: add option to enable direct connection from input to output to be learned
        # need to remember the input at this stage
        """
        Dimensions:
        Inputs:
            - inputs: list of numpy array item with dimension (nr_time_step, input_dimension)
            - teachers: list of numpy array item with dimension (nr_time_step, output_dimension)
        Outputs:
            - all_int_states: list of numpy array item with dimension
                - during the execution of this method : (N, nr_time_step)
                - returned dim (nr_time_step, N)

        - TODO float32 : use float32 precision for training the reservoir instead of the default float64 precision
        - TODO: add option to enable direct connection from input to output to be learned
            # need to remember the input at this stage
        - TODO: add a 'speed mode' where all asserts, prints and saved values are minimal
        """
        if verbose:
            print "len(inputs)", len(inputs)
            print "len(teachers)", len(teachers)
            print "self.N", self.N
            print "self.W.shape", self.W.shape
            print "self.Win.shape", self.Win.shape
        self.autocheck_io(inputs=inputs, outputs=teachers)

        # 'pre-allocated' memory for the list that will collect the states
        all_int_states = [None] * len(inputs)
        x = np.zeros((self.N, 1))  # current internal state initialized at zero
        # 'pre-allocated' memory for the list that will collect the teachers (desired outputs)
        all_teachers = [None] * len(teachers)

        # change of variable for conveniance in equation
        di = self.dim_inp

        if float32:
            #TODO: FINISH TO PUT ALL USEFUL VARIABLES IN float32
            inputs = [aa.astype('float32') for aa in inputs]
            teachers = [aa.astype('float32') for aa in teachers]
            raise Exception("TODO: float32 option not finished yet!")

        # run reservoir over the inputs, to save the internal states and the teachers (desired outputs) in lists
        for (j, (inp, tea)) in enumerate(zip(inputs, teachers)):
            if verbose:
                print "j:", j
                print "inp.shape", inp.shape
                print "tea.shape", tea.shape

            if self.in_bias:
                u = np.column_stack((np.ones((inp.shape[0], 1)), inp))
            else:
                u = inp
            # reset the states or not, that is the question
            if reset_state:
                x = np.zeros(
                    (self.N, 1))  # current internal state initialized at zero
                if self.Wfb is not None:
                    y = np.zeros((self.dim_out, 1))
            else:
                # keep the previously runned state
                pass

            all_int_states[j] = np.zeros(
                (self.N, inp.shape[0] - wash_nr_time_step))
            all_teachers[j] = np.zeros(
                (tea.shape[1], inp.shape[0] - wash_nr_time_step))

            for t in range(inp.shape[0]):  # for each time step in the input
                # u = data[t]
                # u = np.atleast_2d(inp[t,:])
                if verbose:
                    print "inp[t,:].shape", inp[t, :].shape
                    print "tea[t,:].shape", tea[t, :].shape
                    print "u.shape", u.shape
                    print "inp[t,:]", inp[t, :]
                    print "u[t].shape", u[t].shape
                    print "u[t,:].shape", u[t, :].shape
                    print "di", di
                    print "np.atleast_2d(u[t,:]).shape", np.atleast_2d(
                        u[t, :]).shape
                    # print "np.atleast_2d(u[t,:]).reshape(di,1).shape", np.atleast_2d(u[t,:]).reshape(di,1).shape
                    # print "u[t,:].reshape(di,1).shape", u[t,:].reshape(di,1).shape
                    # print "u[t,:].reshape(di,1).shape", u[t,:].reshape(di,-1).shape
                    # print "y", y
                    print "self.dim_out", self.dim_out
                    # print "tea[t,:].reshape(self.dim_out,1).T", tea[t,:].reshape(self.dim_out,1).T
                    # print "y.T", y.T
                    # print "np.atleast_2d(u[t,:]).shape", np.atleast_2d(u[t,:]).shape
                    # print "np.atleast_2d(u[t,:].T).shape", np.atleast_2d(u[t,:].T).shape
                    # print "np.atleast_2d(u[t]).T", np.atleast_2d(u[t]).T.shape
                    # print "np.atleast_2d(u[t,:]).T", np.atleast_2d(u[t,:]).T.shape
                # x = (1-self.lr) * x  +  self.lr * np.tanh( np.dot( self.Win, np.atleast_2d(u[t]).T ) + np.dot( self.W, x ) )
                # TODO: this one is equivalent, but don't know which one is faster #TODO have to be tested
                if self.Wfb is None:
                    if verbose:
                        print "u", u.shape
                        print "x", x.shape
                        print "self.Win", self.Win.shape
                        print "self.W", self.W.shape
                        print "u[t,:]", u[t, :].shape
                        print "atleast_2d(u)[t,:]", np.atleast_2d(u)[
                            t, :].shape
                        print "u[t,:].reshape(di,1)", u[t, :].reshape(di,
                                                                      1).shape
                        print "x", x
                        print "DEBUG BEFORE"
                        print "self.W", self.W
                        print "(1-self.lr) * x", (1 - self.lr) * x
                        print "np.dot( self.Win, u[t,:].reshape(di,1) )", np.dot(
                            self.Win, u[t, :].reshape(di, 1))
                        print "np.dot( self.W, x )", np.dot(self.W, x)
                    x = (1 - self.lr) * x + self.lr * np.tanh(
                        np.dot(self.Win, u[t, :].reshape(di, 1)) +
                        np.dot(self.W, x))
                    if verbose:
                        print "DEBUG AFTER"
                        print "x.shape", x.shape
                        print "x", x
                    # raw_input()
                else:
                    x = (1 - self.lr) * x + self.lr * np.tanh(
                        np.dot(self.Win, u[t, :].reshape(di, 1)) +
                        np.dot(self.W, x) + np.dot(self.Wfb, self.fbfunc(y)))
                    y = tea[t, :].reshape(self.dim_out, 1)

                if t >= wash_nr_time_step:
                    # X[:,t-initLen] = np.vstack((1,u,x))[:,0]
                    if verbose:
                        print "x.shape", x.shape
                        print "x", x
                        print "x.reshape(-1,).shape", x.reshape(-1, ).shape
                        print "all_int_states[j][:,t-wash_nr_time_step].shape", all_int_states[
                            j][:, t - wash_nr_time_step].shape
                        # raw_input()
                        if self.Wfb is not None:
                            print "y.shape", y.shape
                            print "y.reshape(-1,).shape", y.reshape(-1, ).shape
                            print "tea[t,:].shape", tea[t, :].shape
                            print "tea[t,:].reshape(-1,).shape", tea[
                                t, :].reshape(-1, ).shape
                            print "tea[t,:].reshape(-1,).T", tea[t, :].reshape(
                                -1, ).T
                            print "y.T", y.T
                            print "(y.reshape(-1,) == tea[t,:].reshape(-1,))", (
                                y.reshape(-1, ).shape == tea[t, :].reshape(
                                    -1, ))
                    if self.Wfb is not None:
                        assert all(y.reshape(-1, ) == tea[t, :].reshape(-1, ))
                    if verbose:
                        print "x", x
                        print "x.reshape(-1,)", x.reshape(-1, )
                    #TODO: add option to enable direct connection from input to output to be learned
                    # need to remember the input at this stage
                    all_int_states[j][:,
                                      t - wash_nr_time_step] = x.reshape(-1, )
                    all_teachers[j][:,
                                    t - wash_nr_time_step] = tea[t, :].reshape(
                                        -1, )

        if verbose:
            print "all_int_states", all_int_states
            print "len(all_int_states)", len(all_int_states)
            print "all_int_states[0].shape", all_int_states[0].shape
            print "all_int_states[0][:5,:15] (5 neurons on 15 time steps)", all_int_states[
                0][:5, :15]
            print "all_int_states.count(None)", all_int_states.count(None)
        # TODO: change the 2 following lines according to this error:
        # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
        # assert all_int_states.count(None) == 0 # check if some input/teacher pass was not done
        # assert all_teachers.count(None) == 0 # check if some input/teacher pass was not done
        # self.check_values(array_or_list=all_int_states, value=None)
        # self.check_values(array_or_list=all_teachers, value=None)

        # concatenate the lists
        X = np.hstack(all_int_states)
        Y = np.hstack(all_teachers)
        if verbose:
            print "X.shape", X.shape
            print "Y.shape", Y.shape
        # Adding ones for regression with bias b in (y = a*x + b)
        X = np.vstack((np.ones((1, X.shape[1])), X))
        if verbose:
            print "X.shape", X.shape
        # raw_input()

        # train the output
        X_T = X.T  # dim of X_T (nr of time steps, nr of neurons)
        # Yt = Y.T # dim of Y_T (output_dim, nr_of_time_steps)
        if verbose:
            print "X_T.shape", X_T.shape
            print "Y.shape", Y.shape
        if self.ridge is not None:
            # use ridge regression (linear regression with regularization)
            if verbose:
                print "USING RIDGE REGRESSION"
                print "X", X.shape
                print "X_T", X_T.shape
                print "Y", Y.shape
                print "N", self.N
            # Wout = np.dot(np.dot(Yt,X_T), linalg.inv(np.dot(X,X_T) + \
            Wout = np.dot(np.dot(Y,X_T), linalg.inv(np.dot(X,X_T) + \
                    self.ridge*np.eye(1+self.N) ) )
            # self.ridge*np.eye(1+inSize+resSize) ) )

            ### Only if you want to compare scipy.linalg and numpy.linalg, which do not give the same results
            ### For more info, see https://www.scipy.org/scipylib/faq.html#why-both-numpy-linalg-and-scipy-linalg-what-s-the-difference
        #    np_Wout = np.dot(np.dot(Yt,X_T), np.linalg.inv(np.dot(X,X_T) + \
        #        reg*np.eye(1+inSize+resSize) ) )
        #    print "Difference between scipy and numpy .inv() method:\n\tscipy_mean_Wout="+\
        #        str(np.mean(Wout))+"\n\tnumpy_mean_Wout="+str(np.mean(np_Wout))
        else:
            # use pseudo inverse
            if verbose:
                print "USING PSEUDO INVERSE"
            # Wout = np.dot( Yt, linalg.pinv(X) )
            Wout = np.dot(Y, linalg.pinv(X))

        # saving the output matrix in the ESN object for later use
        self.Wout = Wout
        # saving the last state of the reservoir, in case we want to run the reservoir from the last state on
        self.x = x
        y = all_teachers[-1][:, -1]  #the last time step of the last teacher
        self.y = y  #useful when we will have feedback

        if verbose:
            print "Wout.shape", Wout.shape
            print "all_int_states[0].shape", all_int_states[0].shape

        # return all_int_states
        return [st.T for st in all_int_states]
예제 #35
0
def als(X, rank, **kwargs):
    """
    Alternating least-squares algorithm to compute the CP decomposition.

    Parameters
    ----------
    X : tensor_mixin
        The tensor to be decomposed.
    rank : int
        Tensor rank of the decomposition.
    init : {'random', 'nvecs'}, optional
        The initialization method to use.
            - random : Factor matrices are initialized randomly.
            - nvecs : Factor matrices are initialized via HOSVD.
        (default 'nvecs')
    max_iter : int, optional
        Maximum number of iterations of the ALS algorithm.
        (default 500)
    fit_method : {'full', None}
        The method to compute the fit of the factorization
            - 'full' : Compute least-squares fit of the dense approximation of
                       X and X.
            - None : Do not compute the fit of the factorization, but iterate
                     until ``max_iter`` (Useful for large-scale tensors).
        (default 'full')
    conv : float
        Convergence tolerance on difference of fit between iterations
        (default 1e-5)

    Returns
    -------
    P : ktensor
        Rank ``rank`` factorization of X. ``P.U[i]`` corresponds to the factor
    matrix for the i-th mode. ``P.lmbda[i]`` corresponds to the weight
        of the i-th mode.
    fit : float
        Fit of the factorization compared to ``X``
    itr : int
        Number of iterations that were needed until convergence

    Examples
    --------
    Create random dense tensor

    >>> from sktensor import dtensor, ktensor
    >>> U = [np.random.rand(i,3) for i in (20, 10, 14)]
    >>> T = dtensor(ktensor(U).toarray())

    Compute rank-3 CP decomposition of ``T`` with ALS

    >>> P, fit, itr = als(T, 3)

    Result is a decomposed tensor stored as a Kruskal operator

    >>> type(P)
    <class 'sktensor.ktensor.ktensor'>

    Factorization should be close to original data

    >>> np.allclose(T, P.totensor())
    False

    References
    ----------
    .. [1] Kolda, T. G. & Bader, B. W.
           Tensor Decompositions and Applications.
           SIAM Rev. 51, 455–500 (2009).
    .. [2] Harshman, R. A.
           Foundations of the PARAFAC procedure: models and conditions for an 'explanatory' multimodal factor analysis.
           UCLA Working Papers in Phonetics 16, (1970).
    .. [3] Carroll, J. D.,  Chang, J. J.
           Analysis of individual differences in multidimensional scaling via an N-way generalization of 'Eckart-Young' decomposition.
           Psychometrika 35, 283–319 (1970).
    """

    # init options
    ainit = kwargs.pop('init', _DEF_INIT)
    maxiter = kwargs.pop('max_iter', _DEF_MAXITER)
    fit_method = kwargs.pop('fit_method', _DEF_FIT_METHOD)
    conv = kwargs.pop('conv', _DEF_CONV)
    dtype = kwargs.pop('dtype', _DEF_TYPE)
    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    N = X.ndim
    normX = norm(X)

    U = _init(ainit, X, N, rank, dtype)
    fit = 0
    for itr in range(maxiter):
        fitold = fit

        for n in range(N):
            Unew = X.uttkrp(U, n)
            Y = np.ones((rank, rank), dtype=dtype)
            for i in (list(range(n)) + list(range(n + 1, N))):
                Y = Y * np.dot(U[i].T, U[i])
            Unew = Unew.dot(pinv(Y))
            # Normalize
            if itr == 0:
                lmbda = np.sqrt((Unew**2).sum(axis=0))
            else:
                lmbda = Unew.max(axis=0)
                lmbda[lmbda < 1] = 1
            U[n] = Unew / lmbda

        P = ktensor(U, lmbda)
        if fit_method == 'full':
            normresidual = normX**2 + P.norm()**2 - 2 * P.innerprod(X)
            fit = 1 - (normresidual / normX**2)
        else:
            fit = itr
        fitchange = abs(fitold - fit)
        _log.debug('[%3d] fit: %.5f | delta: %7.1e' % (itr, fit, fitchange))
        if itr > 0 and fitchange < conv:
            break

    return P, fit, itr
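The heart of each ALS sweep above is the mode-n update ``Unew = X.uttkrp(U, n) @ pinv(Y)``, where ``Y`` is the Hadamard product of the other factors' Gram matrices. A plain-NumPy sketch of that single update for a small dense 3-way tensor (hypothetical shapes; no sktensor dependency, with ``uttkrp`` expanded into an unfolding times a Khatri-Rao product):

import numpy as np
from numpy.linalg import pinv

def unfold(T, mode):
    # Mode-n unfolding: move `mode` to the front and flatten the remaining axes (C order).
    return np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)

def khatri_rao(A, B):
    # Column-wise Kronecker product of two factor matrices.
    r = A.shape[1]
    return np.einsum('ir,jr->ijr', A, B).reshape(-1, r)

rng = np.random.RandomState(0)
shape, rank = (6, 5, 4), 3
T = rng.rand(*shape)
U = [rng.rand(s, rank) for s in shape]

n = 0                                         # update the factor of mode 0
others = [i for i in range(T.ndim) if i != n]
KR = khatri_rao(U[others[0]], U[others[1]])   # ordering matches the C-order unfolding above
Y = np.ones((rank, rank))
for i in others:
    Y *= U[i].T @ U[i]                        # Hadamard product of Gram matrices
U_new = unfold(T, n) @ KR @ pinv(Y)           # plays the role of Unew.dot(pinv(Y)) above
print(U_new.shape)                            # (6, 3)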
예제 #36
0
    def apply(self, raw_data, method='least_squares'):
        """Apply the calibration matrix to results.

        Args:
            raw_data: The data to be corrected. Can be in a number of forms:
                 * Form1: a counts dictionary from results.get_counts
                 * Form2: a list of counts of length==len(state_labels)
                 * Form3: a list of counts of length==M*len(state_labels) where
                 M is an integer (e.g. for use with the tomography data)
                 * Form4: a qiskit Result

            method (str): fitting method. If None, then least_squares is used.
                ``pseudo_inverse``: direct inversion of the A matrix
                ``least_squares``: constrained to have physical probabilities

        Returns:
            The corrected data in the same form as raw_data

        .. code-block::

            calcircuits, state_labels = complete_measurement_calibration(
                qiskit.QuantumRegister(5))
            job = qiskit.execute(calcircuits)
            meas_fitter = CompleteMeasFitter(job.results(),
                                            state_labels)
            meas_filter = MeasurementFilter(meas_fitter.cal_matrix)

            job2 = qiskit.execute(my_circuits)
            result2 = job2.results()

            error_mitigated_counts = meas_filter.apply(
                result2.get_counts('circ1'))
        """

        # check forms of raw_data
        if isinstance(raw_data, dict):
            # counts dictionary
            data_format = 0
            # convert to form2
            raw_data2 = [np.zeros(len(self._state_labels), dtype=float)]
            for stateidx, state in enumerate(self._state_labels):
                raw_data2[0][stateidx] = raw_data.get(state, 0)

        elif isinstance(raw_data, list):
            size_ratio = len(raw_data) / len(self._state_labels)
            if len(raw_data) == len(self._state_labels):
                data_format = 1
                raw_data2 = [raw_data]
            elif int(size_ratio) == size_ratio:
                data_format = 2
                size_ratio = int(size_ratio)
                # make the list into chunks the size of state_labels for easier
                # processing
                raw_data2 = np.zeros([size_ratio, len(self._state_labels)])
                for i in range(size_ratio):
                    raw_data2[i][:] = raw_data[i *
                                               len(self._state_labels):(i +
                                                                        1) *
                                               len(self._state_labels)]
            else:
                raise QiskitError("Data list is not an integer multiple "
                                  "of the number of calibrated states")

        elif isinstance(raw_data, qiskit.result.result.Result):

            # extract out all the counts, re-call the function with the
            # counts and push back into the new result
            new_result = deepcopy(raw_data)

            new_counts_list = parallel_map(
                self._apply_correction,
                [resultidx for resultidx, _ in enumerate(raw_data.results)],
                task_args=(raw_data, method))

            for resultidx, new_counts in new_counts_list:
                new_result.results[resultidx].data.counts = \
                    Obj(**new_counts)

            return new_result

        else:
            raise QiskitError("Unrecognized type for raw_data.")

        if method == 'pseudo_inverse':
            pinv_cal_mat = la.pinv(self._cal_matrix)

        # Apply the correction
        for data_idx, _ in enumerate(raw_data2):

            if method == 'pseudo_inverse':
                raw_data2[data_idx] = np.dot(pinv_cal_mat, raw_data2[data_idx])

            elif method == 'least_squares':
                nshots = sum(raw_data2[data_idx])

                def fun(x):
                    return sum(
                        (raw_data2[data_idx] - np.dot(self._cal_matrix, x))**2)

                x0 = np.random.rand(len(self._state_labels))
                x0 = x0 / sum(x0)
                cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
                bnds = tuple((0, nshots) for x in x0)
                res = minimize(fun,
                               x0,
                               method='SLSQP',
                               constraints=cons,
                               bounds=bnds,
                               tol=1e-6)
                raw_data2[data_idx] = res.x

            else:
                raise QiskitError("Unrecognized method.")

        if data_format == 2:
            # flatten back out the list
            raw_data2 = raw_data2.flatten()

        elif data_format == 0:
            # convert back into a counts dictionary
            new_count_dict = {}
            for stateidx, state in enumerate(self._state_labels):
                if raw_data2[0][stateidx] != 0:
                    new_count_dict[state] = raw_data2[0][stateidx]

            raw_data2 = new_count_dict
        else:
            # TODO: should probably change to:
            # raw_data2 = raw_data2[0].tolist()
            raw_data2 = raw_data2[0]
        return raw_data2
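A hedged sketch of the ``pseudo_inverse`` branch above: if ``cal_matrix[i, j]`` is the (hypothetical) probability of measuring state ``i`` when state ``j`` was prepared, the correction is a single matrix-vector product with its pseudo-inverse. The ``least_squares`` branch exists because this can yield slightly negative or unnormalized counts.

import numpy as np
from scipy import linalg

# Hypothetical 2-state calibration matrix (columns sum to 1)
cal_matrix = np.array([[0.95, 0.08],
                       [0.05, 0.92]])
raw_counts = np.array([480., 520.])           # noisy counts for states '0' and '1'

mitigated = linalg.pinv(cal_matrix) @ raw_counts
print(mitigated, mitigated.sum())             # corrected counts; total shots preserved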
예제 #37
0
if __name__ == "__main__":
    machEps = macheps()
    print "Python eps =", machEps
    #print "FORTRAN eps =", dlamch('E')
    #print "FORTRAN safe min =", dlamch('S')
    #print "FORTRAN base of the machine =", dlamch('B')
    #print "FORTRAN eps*base =", dlamch('P')
    #print "FORTRAN number of (base) digits in the mantissa =", dlamch('N')
    #print "FORTRAN rmax = ", dlamch('O')

    N = 3
    M = 3
    A0 = triu(rand(M, N) + 1.j * rand(M, N))
    A1 = copy.deepcopy(A0)
    A2 = zeros((M, N), 'D', 2)
    A2[:] = A0
    print A0
    #for j in range(A1.shape[0]):
    #A2[j] = A0[j]
    LIB_G2C = 'g2c'  # for gcc >= 4.3.0, LIB_G2C = 'gfortran'
    from scipy import linalg
    X1 = linalg.pinv(A1)  # the pseudo inverse
    X2 = computeMyPinvCC(A2, LIB_G2C)
    X3 = computeTriangleUpSolve(A1, LIB_G2C)
    print
    print "X1 - X2 = "
    print X1 - X2
    print "X1 - X3 = "
    print X1 - X3
예제 #38
0
 def solver(X, Y):
     return fast_dot(linalg.pinv(X.T.dot(X).todense()),
                     X.T.dot(Y.T)).T
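The one-liner above is the normal equation ``B = (X^T X)^+ X^T Y^T`` with a sparse design densified before inversion. A dense-NumPy equivalent with hypothetical shapes (``fast_dot`` is just a matrix product here):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.rand(100, 8)     # design matrix (n_samples, n_features)
Y = rng.rand(3, 100)     # targets stored row-wise (n_targets, n_samples)

B = (linalg.pinv(X.T @ X) @ (X.T @ Y.T)).T   # (n_targets, n_features)
print(B.shape)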
예제 #39
0
    def fit(self, X, y):
        """Fit a receptive field model.

        Parameters
        ----------
        X : array, shape (n_times[, n_epochs], n_features)
            The input features for the model.
        y : array, shape (n_times[, n_epochs][, n_outputs])
            The output features for the model.

        Returns
        -------
        self : instance
            The instance so you can chain operations.
        """
        if self.scoring not in _SCORERS.keys():
            raise ValueError('scoring must be one of %s, got'
                             '%s ' % (sorted(_SCORERS.keys()), self.scoring))
        from sklearn.base import clone
        X, y, _, self._y_dim = self._check_dimensions(X, y)

        if self.tmin > self.tmax:
            raise ValueError('tmin (%s) must be at most tmax (%s)' %
                             (self.tmin, self.tmax))
        # Initialize delays
        self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq)

        # Define the slice that we should use in the middle
        self.valid_samples_ = _delays_to_slice(self.delays_)

        if isinstance(self.estimator, numbers.Real):
            if self.fit_intercept is None:
                self.fit_intercept = True
            estimator = TimeDelayingRidge(self.tmin,
                                          self.tmax,
                                          self.sfreq,
                                          alpha=self.estimator,
                                          fit_intercept=self.fit_intercept,
                                          n_jobs=self.n_jobs,
                                          edge_correction=self.edge_correction)
        elif is_regressor(self.estimator):
            estimator = clone(self.estimator)
            if self.fit_intercept is not None and \
                    estimator.fit_intercept != self.fit_intercept:
                raise ValueError(
                    'Estimator fit_intercept (%s) != initialization '
                    'fit_intercept (%s), initialize ReceptiveField with the '
                    'same fit_intercept value or use fit_intercept=None' %
                    (estimator.fit_intercept, self.fit_intercept))
            self.fit_intercept = estimator.fit_intercept
        else:
            raise ValueError('`estimator` must be a float or an instance'
                             ' of `BaseEstimator`,'
                             ' got type %s.' % type(self.estimator))
        self.estimator_ = estimator
        del estimator
        _check_estimator(self.estimator_)

        # Create input features
        n_times, n_epochs, n_feats = X.shape
        n_outputs = y.shape[-1]
        n_delays = len(self.delays_)

        # Update feature names if we have none
        if ((self.feature_names is not None)
                and (len(self.feature_names) != n_feats)):
            raise ValueError('n_features in X does not match feature names '
                             '(%s != %s)' % (n_feats, len(self.feature_names)))

        # Create input features
        X, y = self._delay_and_reshape(X, y)

        self.estimator_.fit(X, y)
        coef = get_coef(self.estimator_, 'coef_')  # (n_targets, n_features)
        shape = [n_feats, n_delays]
        if self._y_dim > 1:
            shape.insert(0, -1)
        self.coef_ = coef.reshape(shape)

        # Inverse-transform model weights
        if self.patterns:
            if isinstance(self.estimator_, TimeDelayingRidge):
                cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1)
                y = y.reshape(-1, y.shape[-1], order='F')
            else:
                X = X - X.mean(0, keepdims=True)
                cov_ = np.cov(X.T)
            del X

            # Inverse output covariance
            if y.ndim == 2 and y.shape[1] != 1:
                y = y - y.mean(0, keepdims=True)
                inv_Y = linalg.pinv(np.cov(y.T))
            else:
                inv_Y = 1. / float(n_times * n_epochs - 1)
            del y

            # Inverse coef according to Haufe's method
            # patterns has shape (n_feats * n_delays, n_outputs)
            coef = np.reshape(self.coef_, (n_feats * n_delays, n_outputs))
            patterns = cov_.dot(coef.dot(inv_Y))
            self.patterns_ = patterns.reshape(shape)

        return self
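The pattern computation above follows Haufe et al. (2014): multiplying the backward-model weights by the input covariance and the (pseudo-)inverse of the output covariance turns filters into forward-model activation patterns. A small sketch with made-up data, assuming weights ``W`` have already been fit:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_times, n_feats, n_out = 500, 6, 2
X = rng.randn(n_times, n_feats)
W = rng.randn(n_feats, n_out)                 # stand-in for fitted filter weights
y = X @ W + 0.1 * rng.randn(n_times, n_out)

cov_X = np.cov((X - X.mean(0)).T)
inv_cov_y = linalg.pinv(np.cov((y - y.mean(0)).T))
patterns = cov_X @ W @ inv_cov_y              # activation patterns, shape (n_feats, n_out)
print(patterns.shape)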
예제 #40
0
        os.path.join(BASE_DIR, subList[sub], "gcafMRI", "tc_fs_parcel500.mat"))
    Y = tc_task["tc"]
    # Normalize task data
    Y = Y - np.mean(Y, axis=0)
    Y = Y / np.std(Y, axis=0)
    # Load regressors for task data
    regressors = io.loadmat(
        os.path.join(BASE_DIR, subList[sub], "gcafMRI", "gcaSPM.mat"))
    regressors = regressors['SPM'][0, 0].xX[
        0, 0].X  # Contains task and SHIFTED versions of motion regressors
    X = regressors[:, 0:n_conds]
    # Normalize regressors
    X = X - np.mean(X, axis=0)
    X = X / np.std(X, axis=0)
    # beta estimation (n_conds x n_rois x n_subs)
    if method == 1:  # OLS
        beta[:, :, sub] = np.dot(linalg.pinv(X), Y)
    print 'Subject ' + subList[sub] + ' beta computed'

# Max-t permutation test for activation inference
contrast_list = io.loadmat(os.path.join(BASE_DIR, "group/contrastList.mat"))
contrast_list = contrast_list['contrastList']
n_perm = 10000
sig = max_t_perm_test(beta, contrast_list, thresh, n_perm)

#    tc_rest = io.loadmat(os.path.join(BASE_DIR, sub, "restfMRI", "tc_rest_parcel500.mat"))
#    tc_rest = tc_rest["tc_parcel"]
#    S, _ = oas(tc_rest)
#    K = linalg.inv(S)
#    beta = bayesian_regression(X, Y, K)
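The OLS branch above is simply ``beta = pinv(X) @ Y``, the least-squares solution of ``Y = X @ beta`` applied to every ROI at once. A minimal check against ``np.linalg.lstsq`` on synthetic data with hypothetical sizes:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_scans, n_conds, n_rois = 200, 4, 10
X = rng.randn(n_scans, n_conds)                       # task regressors
Y = X @ rng.randn(n_conds, n_rois) + rng.randn(n_scans, n_rois)

beta_pinv = linalg.pinv(X) @ Y
beta_lstsq = np.linalg.lstsq(X, Y, rcond=None)[0]
print(np.allclose(beta_pinv, beta_lstsq))             # True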
예제 #41
0
def reach(target, stepsize, mu, spread, w, nrHidden):
    # Check whether the object is to the left or right of the body's midline.
    # This assumes the robot has already fixated on the target object; it can later be made independent of gaze direction.
    if(motion_p.getAngles('HeadYaw') > 0):
        side = 'left'
    else:
        side = 'right'
    
    
    if(side == "left"):
        jointList = ["HeadYaw", "HeadPitch", "LShoulderRoll", "LShoulderPitch"]
    elif(side == "right"):
        jointList = ["HeadYaw", "HeadPitch", "RShoulderRoll", "RShoulderPitch"]
    
    jointVector = motion_p.getAngles(jointList) # this should be the initial position of the joints
    
    if(side == "left"):
        #indices = np.where(jointList == "HeadYaw") or np.where(jointList == "RShoulderPitch") or np.where(jointList == "RShoulderRoll") 
        jointVector[1:] = -jointVector[1:]
    

    #location = useRBF(jointVector, w, mu, spread, nrHidden)     # current location of the hand
    import matlab.engine
    eng = matlab.engine.start_matlab()
    location = eng.sim(net, jointVector, nargout = nrDimensions)

    # If at the left side, the x coordinate should switch as well, for both the target and the location.
    if(side == "left"):
        locationSwitched = location
        locationSwitched[0] = 640 - location[0]

        targetSwitched = target
        targetSwitched[0] = 640 - target[0]
    
    counter  = 0    # counts how many movement steps are made
    alfa = 0  

    # move till correct location is reached or 25 movements are made 
    while(alfa != 1 and counter <= 25):
        counter += 1
        if(euclidean(target, location)> stepsize):
            alfa = stepsize/euclidean(target, location)
        else: 
            alfa = 1
     
        jacobian = calculateJacobian(DOF, nrHidden, jointVector, mu, spread)
        invJacobian = pinv(jacobian)
        difLocation = alfa*(target-location)
        jointVector = jointVector + np.dot(difLocation,invJacobian)
         
        jointVector = jointVector[0,:]

        if(side == "right"):
            motion_p.setAngles(jointList, jointVector, 0.1)

        elif(side == "left"):
            jointVector[1:] = -jointVector[1:] # change back to left-values.
            motion_p.setAngles(jointList, jointVector, 0.1)
            
        location = useRBF(jointVector, w, mu, spread, nrHidden)  
        if(side == "left"):
            locationSwitched = location
            locationSwitched[0] = 640 - location[0]
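The loop above is resolved-rate inverse kinematics: each iteration maps a small Cartesian error back to joint space through the pseudo-inverse of the Jacobian. A toy planar 2-link version with hypothetical link lengths and an analytic Jacobian (no NAO, RBF network, or MATLAB engine involved):

import numpy as np
from numpy.linalg import pinv, norm

def fk(q, l1=1.0, l2=1.0):
    # Forward kinematics of a planar 2-link arm.
    return np.array([l1 * np.cos(q[0]) + l2 * np.cos(q[0] + q[1]),
                     l1 * np.sin(q[0]) + l2 * np.sin(q[0] + q[1])])

def jacobian(q, l1=1.0, l2=1.0):
    s1, c1 = np.sin(q[0]), np.cos(q[0])
    s12, c12 = np.sin(q[0] + q[1]), np.cos(q[0] + q[1])
    return np.array([[-l1 * s1 - l2 * s12, -l2 * s12],
                     [ l1 * c1 + l2 * c12,  l2 * c12]])

q = np.array([0.3, 0.5])              # initial joint angles
target = np.array([1.2, 0.8])         # reachable Cartesian target
for _ in range(25):                   # same 25-step cap as above
    err = target - fk(q)
    if norm(err) < 1e-6:
        break
    q = q + pinv(jacobian(q)) @ (0.2 * err)   # small step along the error direction
print(fk(q), target)                  # end effector approaches the target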
예제 #42
0
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)

        Returns
        -------
        self : returns an instance of self.
        """

        X = np.asanyarray(X, dtype=np.float)
        y = np.asanyarray(y, dtype=np.float)

        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)

        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.overwrite_X)

        ### Launch the convergence loop
        keep_lambda = np.ones(n_features, dtype=bool)

        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)

        self.scores_ = list()
        coef_old_ = None

        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            sigma_ = linalg.pinv(
                np.eye(n_samples) / alpha_ + np.dot(
                    X[:, keep_lambda] *
                    np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                    X[:, keep_lambda].T))
            sigma_ = np.dot(
                sigma_, X[:, keep_lambda] *
                np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = -np.dot(
                np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += \
                          1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_))**2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = (gamma_ + 2. * lambda_1) \
                            / ((coef_[keep_lambda]) ** 2 + 2. * lambda_2)
            alpha_ = (n_samples - gamma_.sum() + 2. * alpha_1) \
                            / (rmse_ + 2. * alpha_2)

            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[keep_lambda == False] = 0

            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
                            np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum())
                self.scores_.append(s)

            ### Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print "Converged after %s iterations" % iter_
                break
            coef_old_ = np.copy(coef_)

        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_

        self._set_intercept(X_mean, y_mean, X_std)
        return self
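The ``sigma_`` computation above relies on the Woodbury matrix identity so that only an ``n_samples``-sized system is (pseudo-)inverted instead of an ``n_features``-sized one. A small numerical check of that identity on random data (not the ARD updates themselves):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_samples, n_features = 20, 50
X = rng.randn(n_samples, n_features)
alpha = 2.0
lam = rng.uniform(0.5, 2.0, n_features)

# Direct inverse of the posterior precision (n_features x n_features)
direct = linalg.inv(np.diag(lam) + alpha * X.T @ X)

# Woodbury route, as in the code above: only an n_samples x n_samples pinv is needed
inner = linalg.pinv(np.eye(n_samples) / alpha + (X / lam) @ X.T)
woodbury = np.diag(1.0 / lam) - (X / lam).T @ inner @ (X / lam)

print(np.allclose(direct, woodbury))   # True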
예제 #43
0
def _setup_hpi_amplitude_fitting(info,
                                 t_window,
                                 remove_aliased=False,
                                 ext_order=1,
                                 verbose=None):
    """Generate HPI structure for HPI localization."""
    # grab basic info.
    hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(info)
    _validate_type(t_window, (str, 'numeric'), 't_window')
    if isinstance(t_window, str):
        if t_window != 'auto':
            raise ValueError('t_window must be "auto" if a string, got %r' %
                             (t_window, ))
        t_window = max(5. / min(hpi_freqs), 1. / np.diff(hpi_freqs).min())
    t_window = float(t_window)
    if t_window <= 0:
        raise ValueError('t_window (%s) must be > 0' % (t_window, ))
    logger.info('Using time window: %0.1f ms' % (1000 * t_window, ))
    model_n_window = int(round(float(t_window) * info['sfreq']))
    # worry about resampled/filtered data.
    # What to do e.g. if Raw has been resampled and some of our
    # HPI freqs would now be aliased
    highest = info.get('lowpass')
    highest = info['sfreq'] / 2. if highest is None else highest
    keepers = hpi_freqs <= highest
    if remove_aliased:
        hpi_freqs = hpi_freqs[keepers]
        hpi_ons = hpi_ons[keepers]
    elif not keepers.all():
        raise RuntimeError('Found HPI frequencies %s above the lowpass '
                           '(or Nyquist) frequency %0.1f' %
                           (hpi_freqs[~keepers].tolist(), highest))
    if info['line_freq'] is not None:
        line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3.,
                               info['line_freq'])
    else:
        line_freqs = np.zeros([0])
    logger.info('Line interference frequencies: %s Hz' %
                ' '.join(['%d' % l for l in line_freqs]))

    # build model to extract sinusoidal amplitudes.
    slope = np.linspace(-0.5, 0.5, model_n_window)[:, np.newaxis]
    rps = np.arange(model_n_window)[:, np.newaxis].astype(float)
    rps *= 2 * np.pi / info['sfreq']  # radians/sec
    f_t = hpi_freqs[np.newaxis, :] * rps
    l_t = line_freqs[np.newaxis, :] * rps
    model = [np.sin(f_t), np.cos(f_t)]  # hpi freqs
    model += [np.sin(l_t), np.cos(l_t)]  # line freqs
    model += [slope, np.ones(slope.shape)]
    model = np.concatenate(model, axis=1)
    inv_model = linalg.pinv(model)
    inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs))
    proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order)

    # Set up magnetic dipole fits
    hpi = dict(meg_picks=meg_picks,
               hpi_pick=hpi_pick,
               model=model,
               inv_model=inv_model,
               t_window=t_window,
               inv_model_reord=inv_model_reord,
               on=hpi_ons,
               n_window=model_n_window,
               proj=proj,
               proj_op=proj_op,
               freqs=hpi_freqs,
               line_freqs=line_freqs)
    return hpi
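The amplitude extraction this model enables is ordinary least squares against a design of sines, cosines, a linear slope, and a constant: multiplying ``inv_model`` by a window of data yields the per-frequency amplitudes. A stripped-down sketch with one hypothetical cHPI frequency and a simulated channel:

import numpy as np
from scipy import linalg

sfreq, t_window = 1000.0, 0.2
n_window = int(round(t_window * sfreq))
t = np.arange(n_window) / sfreq

hpi_freq = 83.0                                   # hypothetical cHPI frequency (Hz)
model = np.column_stack([np.sin(2 * np.pi * hpi_freq * t),
                         np.cos(2 * np.pi * hpi_freq * t),
                         np.linspace(-0.5, 0.5, n_window),   # slope
                         np.ones(n_window)])                 # offset
inv_model = linalg.pinv(model)

# Simulated channel: known sine/cosine amplitudes plus drift and noise
data = 3.0 * model[:, 0] - 1.5 * model[:, 1] + 0.2 * model[:, 2] \
    + 0.01 * np.random.randn(n_window)
coeffs = inv_model @ data
print(coeffs[:2])                                 # close to [3.0, -1.5]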
예제 #44
0
def contrast_from_cols_or_rows(L, D, pseudo=None):
    """ Construct a contrast matrix from a design matrix D

    (possibly with its pseudo inverse already computed)
    and a matrix L that either specifies something in
    the column space of D or the row space of D.

    Parameters
    ----------
    L : ndarray
       Matrix used to try and construct a contrast.
    D : ndarray
       Design matrix used to create the contrast.
    pseudo : None or array-like, optional
       If not None, gives pseudo-inverse of `D`.  Allows you to pass
       this if it is already calculated.

    Returns
    -------
    C : ndarray
       Matrix with C.shape[1] == D.shape[1] representing an estimable
       contrast.

    Notes
    -----
    From an n x p design matrix D and a matrix L, tries to determine a p
    x q contrast matrix C which determines a contrast of full rank,
    i.e. the n x q matrix

    dot(transpose(C), pinv(D))

    is full rank.

    L must satisfy either L.shape[0] == n or L.shape[1] == p.

    If L.shape[0] == n, then L is thought of as representing
    columns in the column space of D.

    If L.shape[1] == p, then L is thought of as what is known
    as a contrast matrix. In this case, this function returns an estimable
    contrast corresponding to the dot(D, L.T)

    This always produces a meaningful contrast, not always
    with the intended properties because q is always non-zero unless
    L is identically 0. That is, it produces a contrast that spans
    the column space of L (after projection onto the column space of D).
    """
    L = np.asarray(L)
    D = np.asarray(D)
    n, p = D.shape
    if L.shape[0] != n and L.shape[1] != p:
        raise ValueError('shape of L and D mismatched')
    if pseudo is None:
        pseudo = pinv(D)
    if L.shape[0] == n:
        C = np.dot(pseudo, L).T
    else:
        C = np.dot(pseudo, np.dot(D, L.T)).T
    Lp = np.dot(D, C.T)
    if len(Lp.shape) == 1:
        Lp.shape = (n, 1)
    Lp_rank = matrix_rank(Lp)
    if Lp_rank != Lp.shape[1]:
        Lp = full_rank(Lp, Lp_rank)
        C = np.dot(pseudo, Lp).T
    return np.squeeze(C)
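A brief sketch of the two branches above, with hypothetical sizes: if ``L`` is given as columns in the column space of ``D`` (shape n x q) the contrast is ``pinv(D) @ L`` transposed; if it is a contrast-style matrix (shape q x p) it is first mapped through ``D @ L.T``:

import numpy as np
from numpy.linalg import pinv, matrix_rank

rng = np.random.RandomState(0)
n, p = 30, 4
D = rng.randn(n, p)                  # design matrix

# Case 1: L given as columns in the column space of D (n x q)
L_cols = D[:, :2]
C1 = np.dot(pinv(D), L_cols).T       # (q, p) estimable contrast

# Case 2: L given as a contrast matrix (q x p)
L_rows = np.array([[1., -1., 0., 0.]])
C2 = np.dot(pinv(D), np.dot(D, L_rows.T)).T

print(C1.shape, C2, matrix_rank(np.dot(D, C2.T)))   # estimable contrast of rank 1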
예제 #45
0
def filter_chpi(raw,
                include_line=True,
                t_step=0.01,
                t_window=None,
                ext_order=1,
                verbose=None):
    """Remove cHPI and line noise from data.

    .. note:: This function will only work properly if cHPI was on
              during the recording.

    Parameters
    ----------
    raw : instance of Raw
        Raw data with cHPI information. Must be preloaded. Operates in-place.
    include_line : bool
        If True, also filter line noise.
    t_step : float
        Time step to use for estimation, default is 0.01 (10 ms).
    %(chpi_t_window)s
    %(chpi_ext_order)s
    %(verbose)s

    Returns
    -------
    raw : instance of Raw
        The raw data.

    Notes
    -----
    cHPI signals are in general not stationary, because head movements act
    like amplitude modulators on cHPI signals. Thus it is recommended to
    use this procedure, which uses an iterative fitting method, to
    remove cHPI signals, as opposed to notch filtering.

    .. versionadded:: 0.12
    """
    if not raw.preload:
        raise RuntimeError('raw data must be preloaded')
    if t_window is None:
        warn(
            'The default for t_window is 0.2 in MNE 0.20 but will change '
            'to "auto" in 0.21, set it explicitly to avoid this warning',
            DeprecationWarning)
        t_window = 0.2
    t_step = float(t_step)
    if t_step <= 0:
        raise ValueError('t_step (%s) must be > 0' % (t_step, ))
    n_step = int(np.ceil(t_step * raw.info['sfreq']))
    hpi = _setup_hpi_amplitude_fitting(raw.info,
                                       t_window,
                                       remove_aliased=True,
                                       ext_order=ext_order,
                                       verbose=False)

    fit_idxs = np.arange(0, len(raw.times) + hpi['n_window'] // 2, n_step)
    n_freqs = len(hpi['freqs'])
    n_remove = 2 * n_freqs
    meg_picks = pick_types(raw.info, meg=True, exclude=())  # filter all chs
    n_times = len(raw.times)

    msg = 'Removing %s cHPI' % n_freqs
    if include_line:
        n_remove += 2 * len(hpi['line_freqs'])
        msg += ' and %s line harmonic' % len(hpi['line_freqs'])
    msg += ' frequencies from %s MEG channels' % len(meg_picks)

    recon = np.dot(hpi['model'][:, :n_remove], hpi['inv_model'][:n_remove]).T
    logger.info(msg)
    chunks = list()  # the chunks to subtract
    last_endpt = 0
    pb = ProgressBar(fit_idxs, mesg='Filtering')
    for ii, midpt in enumerate(pb):
        left_edge = midpt - hpi['n_window'] // 2
        time_sl = slice(max(left_edge, 0),
                        min(left_edge + hpi['n_window'], len(raw.times)))
        this_len = time_sl.stop - time_sl.start
        if this_len == hpi['n_window']:
            this_recon = recon
        else:  # first or last window
            model = hpi['model'][:this_len]
            inv_model = linalg.pinv(model)
            this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T
        this_data = raw._data[meg_picks, time_sl]
        subt_pt = min(midpt + n_step, n_times)
        if last_endpt != subt_pt:
            fit_left_edge = left_edge - time_sl.start + hpi['n_window'] // 2
            fit_sl = slice(fit_left_edge,
                           fit_left_edge + (subt_pt - last_endpt))
            chunks.append((subt_pt, np.dot(this_data, this_recon[:, fit_sl])))
        last_endpt = subt_pt

        # Consume (trailing) chunks that are now safe to remove because
        # our windows will no longer touch them
        if ii < len(fit_idxs) - 1:
            next_left_edge = fit_idxs[ii + 1] - hpi['n_window'] // 2
        else:
            next_left_edge = np.inf
        while len(chunks) > 0 and chunks[0][0] <= next_left_edge:
            right_edge, chunk = chunks.pop(0)
            raw._data[meg_picks,
                      right_edge - chunk.shape[1]:right_edge] -= chunk
    pb.done()
    return raw
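The subtraction above amounts to projecting each window onto the span of the cHPI (and line) regressors and removing that part: ``model[:, :n_remove] @ inv_model[:n_remove]`` is the corresponding rank-``n_remove`` reconstruction matrix. A toy sketch on one simulated channel, with hypothetical frequencies:

import numpy as np
from scipy import linalg

sfreq = 1000.0
t = np.arange(2000) / sfreq
hpi_freq, signal_freq = 83.0, 10.0

model = np.column_stack([np.sin(2 * np.pi * hpi_freq * t),
                         np.cos(2 * np.pi * hpi_freq * t),
                         np.ones_like(t)])
inv_model = linalg.pinv(model)
n_remove = 2                                          # remove only the sine/cosine columns

data = np.sin(2 * np.pi * signal_freq * t) + 5.0 * np.sin(2 * np.pi * hpi_freq * t + 0.3)

recon = model[:, :n_remove] @ inv_model[:n_remove]    # reconstruction of the cHPI part
cleaned = data - recon @ data
print(np.abs(cleaned - np.sin(2 * np.pi * signal_freq * t)).max())   # small residual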
예제 #46
0
    def reservoir_computing(self, train_X, train_y, test_X, test_y, reservoir_size=100, a=0.8, per_time_step=False, gridsearch=True, gridsearch_training_frac=0.7, error='accuracy'):
        # Inspired by http://minds.jacobs-university.de/mantas/code

        if gridsearch:
            reservoir_size, a = self.gridsearch_reservoir_computing(train_X, train_y, test_X, test_y, per_time_step=per_time_step, gridsearch_training_frac=gridsearch_training_frac, error=error)

        # We assume these parameters as fixed, but feel free to change them as well.
        washout_period = 10

        # Create a numerical dataset without categorical attributes.
        new_train_X, new_test_X = self.create_numerical_multiple_dataset(train_X, test_X)
        if test_y is None:
            new_train_y = self.create_numerical_single_dataset(train_y)
            new_test_y = None
        else:
            new_train_y, new_test_y = self.create_numerical_multiple_dataset(train_y, test_y)

        # We normalize the input.....
        new_train_X, new_test_X, min_X, max_X = self.normalize(new_train_X, new_test_X, 0, 1)
        new_train_y, new_test_y, min_y, max_y = self.normalize(new_train_y, new_test_y, -0.9, 0.9)

        inputs = len(new_train_X.columns)
        outputs = len(new_train_y.columns)

        # Randomly initialize our weight vectors.
        Win, W, Wback = self.initialize_echo_state_network(inputs, outputs, reservoir_size)


        # Allocate memory for our result matrices.
        X = np.zeros((len(train_X.index)-washout_period, 1+inputs+reservoir_size))
        Yt = new_train_y.ix[washout_period:len(new_train_y.index),:].as_matrix()
        Yt = np.arctanh( Yt )
        x = np.zeros((reservoir_size,1))

        # Train over all time points.
        for t in range(0, len(new_train_X.index)):

            # Set the inputs according to the values seen in the training set.
            u = new_train_X.ix[t,:].as_matrix()

            # Set the previous target value to the real value if available.
            if t > 0:
                y_prev= new_train_y.ix[t-1,:].as_matrix()
            else:
                y_prev = np.array([0]*outputs)

            # Determine the activation of the reservoir.
            x = (1-a)*x + a*np.tanh( np.dot( Win, np.vstack(np.insert(u,0,1)) ) + np.dot( W, x ) + np.dot( Wback, np.vstack(y_prev) ))

            # And store the values obtained after the washout period.
            if t >= washout_period:
                X[t-washout_period,:] = np.hstack(np.insert(np.insert(x, 0, u), 0, 1))


        # Train Wout.
        X_p = linalg.pinv(X)
        Wout = np.transpose(np.dot( X_p, Yt ))

        # And predict for both training and test set.


        pred_train_y, pred_train_y_prob = self.predict_values_echo_state_network(Win, W, Wback, Wout, a, reservoir_size, new_train_X, new_train_y, new_train_y.columns, per_time_step)
        pred_test_y, pred_test_y_prob = self.predict_values_echo_state_network(Win, W, Wback, Wout, a, reservoir_size, new_test_X, new_test_y, new_train_y.columns, per_time_step)

        pred_train_y_prob = self.denormalize(pred_train_y_prob, min_y, max_y, -0.9, 0.9)
        pred_test_y_prob = self.denormalize(pred_test_y_prob, min_y, max_y, -0.9, 0.9)

        return pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob
예제 #47
0
def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
                      label=None, pick_ori=None, verbose=None):
    """Dynamic Imaging of Coherent Sources (DICS).

    Calculate source power in time and frequency windows specified in the
    calculation of the data cross-spectral density matrix or matrices. Source
    power is normalized by noise power.

    NOTE : This implementation has not been heavily tested so please
    report any issues or suggestions.

    Parameters
    ----------
    info : dict
        Measurement info, e.g. epochs.info.
    forward : dict
        Forward operator.
    noise_csds : instance or list of instances of CrossSpectralDensity
        The noise cross-spectral density matrix for a single frequency or a
        list of matrices for multiple frequencies.
    data_csds : instance or list of instances of CrossSpectralDensity
        The data cross-spectral density matrix for a single frequency or a list
        of matrices for multiple frequencies.
    reg : float
        The regularization for the cross-spectral density.
    label : Label | None
        Restricts the solution to a given label.
    pick_ori : None | 'normal'
        If 'normal', rather than pooling the orientations by taking the norm,
        only the radial component is kept.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc : SourceEstimate
        Source power with frequency instead of time.

    Notes
    -----
    The original reference is:
    Gross et al. Dynamic imaging of coherent sources: Studying neural
    interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
    """

    if isinstance(data_csds, CrossSpectralDensity):
        data_csds = [data_csds]

    if isinstance(noise_csds, CrossSpectralDensity):
        noise_csds = [noise_csds]

    csd_shapes = lambda x: tuple(c.data.shape for c in x)
    if (csd_shapes(data_csds) != csd_shapes(noise_csds) or
       any([len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds]])):
        raise ValueError('One noise CSD matrix should be provided for each '
                         'data CSD matrix and vice versa. All CSD matrices '
                         'should have identical shape.')

    frequencies = []
    for data_csd, noise_csd in zip(data_csds, noise_csds):
        if not np.allclose(data_csd.frequencies, noise_csd.frequencies):
            raise ValueError('Data and noise CSDs should be calculated at '
                             'identical frequencies')

        # If CSD is summed over multiple frequencies, take the average
        # frequency
        if(len(data_csd.frequencies) > 1):
            frequencies.append(np.mean(data_csd.frequencies))
        else:
            frequencies.append(data_csd.frequencies[0])
    fmin = frequencies[0]

    if len(frequencies) > 2:
        fstep = []
        for i in range(len(frequencies) - 1):
            fstep.append(frequencies[i+1] - frequencies[i])
        if not np.allclose(fstep, np.mean(fstep), 1e-5):
            warnings.warn('Uneven frequency spacing in CSD object, '
                          'frequencies in the resulting stc file will be '
                          'inaccurate.')
        fstep = fstep[0]
    elif len(frequencies) > 1:
        fstep = frequencies[1] - frequencies[0]
    else:
        fstep = 1  # dummy value

    is_free_ori, picks, _, proj, vertno, G =\
        _prepare_beamformer_input(info, forward, label, picks=None,
                                  pick_ori=pick_ori)

    n_orient = 3 if is_free_ori else 1
    n_sources = G.shape[1] // n_orient
    source_power = np.zeros((n_sources, len(data_csds)))
    n_csds = len(data_csds)

    logger.info('Computing DICS source power...')
    for i, (data_csd, noise_csd) in enumerate(zip(data_csds, noise_csds)):
        if n_csds > 1:
            logger.info('    computing DICS spatial filter %d out of %d' %
                        (i + 1, n_csds))

        Cm = data_csd.data

        # Calculating regularized inverse, equivalent to an inverse operation
        # after the following regularization:
        # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
        Cm_inv = linalg.pinv(Cm, reg)

        # Compute spatial filters
        W = np.dot(G.T, Cm_inv)
        for k in range(n_sources):
            Wk = W[n_orient * k: n_orient * k + n_orient]
            Gk = G[:, n_orient * k: n_orient * k + n_orient]
            Ck = np.dot(Wk, Gk)

            if is_free_ori:
                # Free source orientation
                Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
            else:
                # Fixed source orientation
                Wk /= Ck

            # Noise normalization
            noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
            noise_norm = np.abs(noise_norm).trace()

            # Calculating source power
            sp_temp = np.dot(np.dot(Wk.conj(), data_csd.data), Wk.T)
            sp_temp /= max(noise_norm, 1e-40)  # Avoid division by 0

            if pick_ori == 'normal':
                source_power[k, i] = np.abs(sp_temp)[2, 2]
            else:
                source_power[k, i] = np.abs(sp_temp).trace()

    logger.info('[done]')

    subject = _subject_from_forward(forward)
    return SourceEstimate(source_power, vertices=vertno, tmin=fmin / 1000.,
                          tstep=fstep / 1000., subject=subject)
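A condensed, self-contained sketch of the per-source filter construction above: a truncated pseudo-inverse of the data CSD standing in for the regularized inverse, ``W = G.T @ Cm_inv``, per-source normalization through ``pinv(Wk @ Gk)``, and power as the trace of ``Wk Cm Wk^H``. The gain matrix and CSD are random stand-ins, not a real forward model:

import numpy as np

rng = np.random.RandomState(0)
n_chan, n_sources, n_orient = 20, 5, 3
G = rng.randn(n_chan, n_sources * n_orient)           # toy gain (forward) matrix

A = rng.randn(n_chan, n_chan) + 1j * rng.randn(n_chan, n_chan)
Cm = A @ A.conj().T                                   # toy data cross-spectral density
Cm_inv = np.linalg.pinv(Cm, rcond=0.01)               # truncated pinv, mimicking reg above

W = G.T @ Cm_inv
power = np.zeros(n_sources)
for k in range(n_sources):
    Wk = W[n_orient * k:n_orient * (k + 1)]
    Gk = G[:, n_orient * k:n_orient * (k + 1)]
    Wk = np.linalg.pinv(Wk @ Gk, rcond=0.1) @ Wk      # free-orientation normalization
    power[k] = np.abs(Wk.conj() @ Cm @ Wk.T).trace()
print(power)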
예제 #48
0
 def GetHessian(self):
     try:
         return inv(self.M)
     except LinAlgError:
         print 'Warning, using pseudoinverse'
         return pinv(self.M)
예제 #49
0
 def inverse(self, A):
     A = np.nan_to_num(A)
     return la.pinv(A)
예제 #50
0
    def _fit(self, X, trialX=None, mXs=None, center=True, SVD=None, optimize=True):
        """ Fit the model on X

        Parameters
        ----------
            X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
                Training data, where n_samples in the number of samples
                and n_features_j is the number of the j-features (where the axis correspond
                to different parameters).

            trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
                Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
                with different trials. If different combinations of features have different number
                of trials, then set n_samples to the maximum number of trials and fill unoccupied data
                points with NaN.

            mXs: dict with values in the shape of X
                Marginalized data, should be the result of dpca._marginalize

            center: bool
                Centers data if center = True

            SVD: list of arrays
                Singular-value decomposition of the data. Don't provide!

            optimize: bool
                Flag to turn automatic optimization of regularization parameter on or off. Needed
                internally.
        """

        def flat2d(A):
            ''' Flattens all but the first axis of an ndarray, returns view. '''
            return A.reshape((A.shape[0],-1))

        # X = check_array(X)

        n_features = X.shape[0]

        # center data
        if center:
            X = X - np.mean(flat2d(X),1).reshape((n_features,) + len(self.labels)*(1,))

        # marginalize data
        if mXs is None:
            mXs = self._marginalize(X)

        # compute optimal regularization
        if self.opt_regularizer_flag and optimize:
            if self.debug > 0:
                print("Start optimizing regularization.")

            if trialX is None:
                raise ValueError('To optimize the regularization parameter, the trial-by-trial data trialX needs to be provided.')

            self._optimize_regularization(X,trialX)

        # add regularization
        if self.regularizer > 0:
            regX, regmXs, pregX = self._add_regularization(X,mXs,self.regularizer*np.sum(X**2),SVD=SVD)
        else:
            regX, regmXs, pregX = X, mXs, pinv(X.reshape((n_features,-1)))

        # compute closed-form solution
        self.P, self.D = self._randomized_dpca(regX,regmXs,pinvX=pregX)
예제 #51
0
def test_ress(target, n_trials, peak_width, neig_width, neig_freq, show=False):
    """Test RESS."""
    sfreq = 250
    n_keep = 1
    n_chans = 10
    n_times = 1000
    data, source = create_data(n_times=n_times,
                               n_trials=n_trials,
                               n_chans=n_chans,
                               freq=target,
                               sfreq=sfreq,
                               show=False)

    out = ress.RESS(data,
                    sfreq=sfreq,
                    peak_freq=target,
                    neig_freq=neig_freq,
                    peak_width=peak_width,
                    neig_width=neig_width,
                    n_keep=n_keep)

    nfft = 500
    bins, psd = ss.welch(out.squeeze(1),
                         sfreq,
                         window="boxcar",
                         nperseg=nfft / (peak_width * 2),
                         noverlap=0,
                         axis=0,
                         average='mean')
    # psd = np.abs(np.fft.fft(out, nfft, axis=0))
    # psd = psd[0:psd.shape[0] // 2 + 1]
    # bins = np.linspace(0, sfreq // 2, psd.shape[0])
    # print(psd.shape)
    # print(bins[:10])

    psd = psd.mean(axis=-1, keepdims=True)  # average over trials
    snr = snr_spectrum(psd + psd.max() / 20, bins, skipbins=1, n_avg=2)
    # snr = snr.mean(1)
    if show:
        f, ax = plt.subplots(2)
        ax[0].plot(bins, snr, ':o')
        ax[0].axhline(1, ls=':', c='grey', zorder=0)
        ax[0].axvline(target, ls=':', c='grey', zorder=0)
        ax[0].set_ylabel('SNR (a.u.)')
        ax[0].set_xlabel('Frequency (Hz)')
        ax[0].set_xlim([0, 40])
        ax[0].set_ylim([0, 10])
        ax[1].plot(bins, psd)
        ax[1].axvline(target, ls=':', c='grey', zorder=0)
        ax[1].set_ylabel('PSD')
        ax[1].set_xlabel('Frequency (Hz)')
        ax[1].set_xlim([0, 40])
        # plt.show()

    assert snr[bins == target] > 10
    assert (snr[(bins <= target - 2) | (bins >= target + 2)] < 2).all()

    # test multiple components
    out, fromress, toress = ress.RESS(data,
                                      sfreq=sfreq,
                                      peak_freq=target,
                                      neig_freq=neig_freq,
                                      peak_width=peak_width,
                                      neig_width=neig_width,
                                      n_keep=n_keep,
                                      return_maps=True)

    proj = matmul3d(out, fromress)
    assert proj.shape == (n_times, n_chans, n_trials)

    if show:
        f, ax = plt.subplots(data.shape[1], 2, sharey='col')
        for c in range(data.shape[1]):
            ax[c, 0].plot(data[:, c].mean(-1), lw=.5, label='data')
            ax[c, 1].plot(proj[:, c].mean(-1), lw=.5, label='projection')
            if c < data.shape[1]:
                ax[c, 0].set_xticks([])
                ax[c, 1].set_xticks([])

        ax[0, 0].set_title('Before')
        ax[0, 1].set_title('After')
        plt.legend()

    # 2 comps
    _ = ress.RESS(data, sfreq=sfreq, peak_freq=target, n_keep=2)

    # All comps
    out, fromress, toress = ress.RESS(data,
                                      sfreq=sfreq,
                                      peak_freq=target,
                                      n_keep=-1,
                                      return_maps=True)

    if show:
        # Inspect mixing/unmixing matrices
        combined_data = np.array([toress, fromress, pinv(toress)])
        _max = np.amax(combined_data)

        f, ax = plt.subplots(3)
        ax[0].imshow(toress, label='toRESS')
        ax[0].set_title('toRESS')
        ax[1].imshow(fromress, label='fromRESS', vmin=-_max, vmax=_max)
        ax[1].set_title('fromRESS')
        ax[2].imshow(pinv(toress), vmin=-_max, vmax=_max)
        ax[2].set_title('toRESS$^{-1}$')
        plt.tight_layout()
        plt.show()

    print(np.sum(np.abs(pinv(toress) - fromress) >= .1))
예제 #52
0
def normal_equation(X, y, lbd=0):
    m = X.shape[1]
    theta = np.zeros((m, 1))

    I = np.identity(m)
    theta = np.matmul(np.matmul(linalg.pinv(np.add(I * lbd, np.matmul(np.transpose(X), X))), np.transpose(X)), y)
    return theta
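The function above solves the regularized normal equation theta = (lbd * I + X^T X)^+ X^T y. A short usage sketch (assuming the version above that returns ``theta``), including a sanity check that regularization shrinks the coefficients:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
true_theta = np.array([1.5, -2.0, 0.5])
y = X @ true_theta + 0.1 * rng.randn(50)

theta_ols = linalg.pinv(X.T @ X) @ X.T @ y                        # lbd = 0: plain OLS
theta_reg = linalg.pinv(np.eye(3) * 10.0 + X.T @ X) @ X.T @ y     # lbd = 10: shrunken fit
print(theta_ols, theta_reg)
print(np.linalg.norm(theta_reg) < np.linalg.norm(theta_ols))      # True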
예제 #53
0
def pca(data,
        axis=0,
        mask=None,
        ncomp=None,
        standardize=True,
        design_keep=None,
        design_resid='mean',
        tol_ratio=0.01):
    """Compute the SVD PCA of an array-like thing over `axis`.

    Parameters
    ----------
    data : ndarray-like (np.float)
       The array on which to perform PCA over axis `axis` (below)
    axis : int, optional
       The axis over which to perform PCA (axis identifying
       observations).  Default is 0 (first)
    mask : ndarray-like (np.bool), optional
       An optional mask, should have shape given by data axes, with
       `axis` removed, i.e.: ``s = data.shape; s.pop(axis);
       msk_shape=s``
    ncomp : {None, int}, optional
       How many component basis projections to return. If ncomp is None
       (the default) then the number of components is given by the
       calculated rank of the data, after applying `design_keep`,
       `design_resid` and `tol_ratio` below.  We always return all the
       basis vectors and percent variance for each component; `ncomp`
       refers only to the number of basis_projections returned.
    standardize : bool, optional
       If True, standardize so each time series (after application of
       `design_keep` and `design_resid`) has the same standard
       deviation, as calculated by the ``np.std`` function.
    design_keep : None or ndarray, optional
       Data is projected onto the column span of design_keep.
       None (default) equivalent to ``np.identity(data.shape[axis])``
    design_resid : str or None or ndarray, optional
       After projecting onto the column span of design_keep, data is
       projected perpendicular to the column span of this matrix.  If
       None, we do no such second projection.  If a string 'mean', then
       the mean of the data is removed, equivalent to passing a column
       vector matrix of 1s.
    tol_ratio : float, optional
       If ``XZ`` is the vector of singular values of the projection
       matrix from `design_keep` and `design_resid`, and S are the
       singular values of ``XZ``, then `tol_ratio` is the value used to
       calculate the effective rank of the projection of the design, as
       in ``rank = ((S / S.max) > tol_ratio).sum()``

    Returns
    -------
    results : dict
        $G$ is the number of non-trivial components found after applying
       `tol_ratio` to the projections of `design_keep` and
       `design_resid`.

       `results` has keys:

       * ``basis_vectors``: series over `axis`, shape (data.shape[axis], G) -
          the eigenvectors of the PCA
       * ``pcnt_var``: percent variance explained by component, shape
          (G,)
       * ``basis_projections``: PCA components, with components varying
          over axis `axis`; thus shape given by: ``s = list(data.shape);
          s[axis] = ncomp``
       * ``axis``: axis over which PCA has been performed.

    Notes
    -----
    See ``pca_image.m`` from ``fmristat`` for Keith Worsley's code on
    which some of this is based.

    See: http://en.wikipedia.org/wiki/Principal_component_analysis for
    some inspiration for naming - particularly 'basis_vectors' and
    'basis_projections'

    Examples
    --------
    >>> arr = np.random.normal(size=(17, 10, 12, 14))
    >>> msk = np.all(arr > -2, axis=0)
    >>> res = pca(arr, mask=msk, ncomp=9)

    Basis vectors are columns.  There is one column for each component.  The
    number of components is the calculated rank of the data matrix after
    applying the various projections listed in the parameters.  In this case we
    are only removing the mean, so the number of components is one less than the
    axis over which we do the PCA (here axis=0 by default).

    >>> res['basis_vectors'].shape
    (17, 16)

    Basis projections are arrays with components in the dimension over which we
    have done the PCA (axis=0 by default).  Because we set `ncomp` above, we
    only retain `ncomp` components.

    >>> res['basis_projections'].shape
    (9, 10, 12, 14)
    """
    data = np.asarray(data)
    # We roll the PCA axis to be first, for convenience
    if axis is None:
        raise ValueError('axis cannot be None')
    data = np.rollaxis(data, axis)
    if mask is not None:
        mask = np.asarray(mask)
        if not data.shape[1:] == mask.shape:
            raise ValueError('Mask should match dimensions of data other than '
                             'the axis over which to do the PCA')
    if design_resid == 'mean':
        # equivalent to: design_resid = np.ones((data.shape[0], 1))
        def project_resid(Y):
            return Y - Y.mean(0)[None, ...]
    elif design_resid is None:

        def project_resid(Y):
            return Y
    else:  # matrix passed, we hope
        projector = np.dot(design_resid, spl.pinv(design_resid))

        def project_resid(Y):
            return Y - np.dot(projector, Y)

    if standardize:

        def rmse_scales_func(std_source):
            # modifies array in place
            resid = project_resid(std_source)
            # root mean square of the residual
            rmse = np.sqrt(np.square(resid).sum(axis=0) / resid.shape[0])
            # positive 1/rmse
            return np.where(rmse <= 0, 0, 1. / rmse)
    else:
        rmse_scales_func = None
    """
    Perform the computations needed for the PCA.  This stores the
    covariance/correlation matrix of the data in the attribute 'C'.  The
    components are stored as the attributes 'components', for an fMRI
    image these are the time series explaining the most variance.

    Now, we compute projection matrices. First, data is projected onto
    the columnspace of design_keep, then it is projected perpendicular
    to column space of design_resid.
    """
    if design_keep is None:
        X = np.eye(data.shape[0])
    else:
        X = np.dot(design_keep, spl.pinv(design_keep))
    XZ = project_resid(X)
    UX, SX, VX = spl.svd(XZ, full_matrices=0)
    # The matrix UX has orthonormal columns and represents the
    # final "column space" that the data will be projected onto.
    rank = (SX / SX.max() > tol_ratio).sum()
    UX = UX[:, :rank].T
    # calculate covariance matrix in full-rank column space.  The returned
    # array is roughly: YX = dot(UX, data); C = dot(YX, YX.T), perhaps where the
    # data has been standardized, perhaps summed over slices
    C_full_rank = _get_covariance(data, UX, rmse_scales_func, mask)
    # find the eigenvalues D and eigenvectors Vs of the covariance
    # matrix
    D, Vs = spl.eigh(C_full_rank)
    # Compute basis vectors in original column space
    basis_vectors = np.dot(UX.T, Vs).T
    # sort both in descending order of eigenvalues
    order = np.argsort(-D)
    D = D[order]
    basis_vectors = basis_vectors[order]
    pcntvar = D * 100 / D.sum()
    """
    Output the component basis_projections
    """
    if ncomp is None:
        ncomp = rank
    subVX = basis_vectors[:ncomp]
    out = _get_basis_projections(data, subVX, rmse_scales_func)
    # Roll PCA image axis back to original position in data array
    if axis < 0:
        axis += data.ndim
    out = np.rollaxis(out, 0, axis + 1)
    return {
        'basis_vectors': basis_vectors.T,
        'pcnt_var': pcntvar,
        'basis_projections': out,
        'axis': axis
    }
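
As a quick check of the projection step used above, a matrix of the form ``np.dot(M, spl.pinv(M))`` is an orthogonal projector onto the column space of ``M``.  A minimal numpy sketch with an arbitrary toy design matrix (illustrative only, not part of the original code):

import numpy as np
from scipy import linalg as spl

# Toy full-column-rank design matrix.
design_keep = np.random.standard_normal((5, 2))
P = np.dot(design_keep, spl.pinv(design_keep))

# An orthogonal projector is idempotent and symmetric ...
assert np.allclose(np.dot(P, P), P)
assert np.allclose(P, P.T)
# ... and leaves the columns of design_keep unchanged.
assert np.allclose(np.dot(P, design_keep), design_keep)
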
예제 #54
0
def ridge(C,D,b,E,af,bf,abs_tol=1e-7,verbose=0):
    '''
    Computes all the ridges of a facet in the projection.

    Input:
    `C,D,b`: Original polytope data
    `E,af,bf`: Equality set and affine hull of a facet in the projection

    Output:
    `ridge_list`: A list containing all the ridges of the facet as Ridge objects
    '''

    d = C.shape[1]
    k = D.shape[1]

    Er_list = []

    q = C.shape[0]

    E_c = np.setdiff1d(range(q),E)

    C_E = C[E,:]
    D_E = D[E,:]
    b_E = b[E,:]

    C_Ec = C[E_c,:]
    D_Ec = D[E_c,:]
    b_Ec = b[E_c]

    S = C_Ec - np.dot( np.dot(D_Ec,linalg.pinv(D_E)) , C_E)
    L = np.dot(D_Ec,null_space(D_E))
    t = b_Ec - np.dot(D_Ec , np.dot(linalg.pinv(D_E) ,  b_E) )
    if rank( np.hstack([C_E, D_E]) ) < k+1:
        if verbose > 1:
            print("Doing recursive ESP call")
        u,s,v = linalg.svd(np.array([af]), full_matrices=1)
        sigma = s[0]
        v = v.T * u[0,0]    # Correct sign

        V_hat = v[:,[0]]
        V_tilde = v[:,range(1,v.shape[1])]
        Cnew = np.dot(S,V_tilde)
        Dnew = L
        bnew = t - np.dot(S,V_hat).flatten() * bf / sigma
        Anew = np.hstack([Cnew,Dnew])
        xc2,yc2,cen2 = cheby_center(Cnew,Dnew,bnew)
        bnew = bnew - np.dot(Cnew,xc2).flatten() - np.dot(Dnew,yc2).flatten()
        Gt,gt,E_t = esp(Cnew, Dnew, bnew, centered=True,abs_tol=abs_tol,verbose=0)
        if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
            raise Exception("ridge: recursive call did not return any equality sets")
        for i in range(len(E_t)):
            E_f = E_t[i]
            er = np.sort( np.hstack([E, E_c[E_f]]) )
            ar = np.dot(Gt[i,:],V_tilde.T).flatten()
            br0 = gt[i].flatten()

            # Make orthogonal to facet
            ar = ar - af*np.dot(af.flatten(),ar.flatten())
            br = br0 - bf*np.dot(af.flatten(),ar.flatten())

            # Normalize and make ridge equation point outwards
            norm = np.sqrt(np.sum(ar*ar))
            ar = ar*np.sign(br)/norm
            br = br*np.sign(br)/norm

            # Restore center
            br = br + np.dot(Gt[i,:],xc2)/norm

            if len(ar) > d:
                raise Exception("ridge: wrong length of new ridge!")
            Er_list.append(Ridge(er,ar,br))

    else:
        if verbose > 0:
            print("Doing direct calculation of ridges")
        X = np.arange(S.shape[0])
        while len(X) > 0:
            i = X[0]
            X = np.setdiff1d(X,i)
            if np.linalg.norm(S[i,:]) < abs_tol:
                continue
            Si = S[i,:]
            Si = Si / np.linalg.norm(Si)
            if np.linalg.norm(af - np.dot(Si,af)*Si) > abs_tol:

                test1 = null_space(
                    np.vstack([np.hstack([af, bf]),
                               np.hstack([S[i, :], t[i]])]),
                    nonempty=True)
                test2 = np.hstack([S, np.array([t]).T])
                test = np.dot(test1.T , test2.T)
                test = np.sum(np.abs(test), 0)
                Q_i = np.nonzero(test > abs_tol)[0]
                Q = np.nonzero(test < abs_tol)[0]

                X = np.setdiff1d(X,Q)

                # Have Q_i
                Sq = S[Q_i,:]
                tq = t[Q_i]

                c = np.zeros(d+1)
                c[0] = 1
                Gup = np.hstack([-np.ones([Sq.shape[0],1]),Sq])
                Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])
                G = np.vstack([Gup, Gdo])
                h = np.hstack([tq, 1])

                Al = np.zeros([2, 1])
                Ar = np.vstack([af,S[i,:]])
                A = np.hstack([Al,Ar])
                bb = np.hstack([bf,t[i]])

                solvers.options['show_progress'] = False
                solvers.options['LPX_K_MSGLEV'] = 0
                sol = solvers.lp(matrix(c), matrix(G), matrix(h), matrix(A),
                                 matrix(bb), lp_solver)
                if sol['status'] == 'optimal':
                    tau = sol['x'][0]
                    if tau < -abs_tol:
                        ar = np.array([S[i,:]]).flatten()
                        br = t[i].flatten()

                        # Make orthogonal to facet
                        ar = ar - af*np.dot(af.flatten(),ar.flatten())
                        br = br - bf*np.dot(af.flatten(),ar.flatten())

                        # Normalize and make ridge equation point outwards
                        norm = np.sqrt(np.sum(ar*ar))
                        ar = ar/norm
                        br = br/norm

                        Er_list.append(Ridge(np.sort(np.hstack([E,E_c[Q]])),ar,br))
    return Er_list
예제 #55
0
def _reg_pinv(x, reg):
    """Compute a regularized pseudoinverse of a square array."""
    # Add reg times the mean of the diagonal to each diagonal entry,
    # without allocating np.eye
    d = reg * np.trace(x) / len(x)
    x.flat[::x.shape[0] + 1] += d
    return linalg.pinv(x)
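
A small sanity-check sketch (illustrative values): the strided diagonal update above is equivalent to adding that multiple of the identity before taking the pseudoinverse.  Note that ``_reg_pinv`` modifies its argument in place, so a copy is passed here:

import numpy as np
from scipy import linalg

x = np.random.standard_normal((4, 4))
x = x @ x.T                      # symmetric test matrix
reg = 0.05

loaded = x + reg * np.trace(x) / len(x) * np.eye(len(x))
assert np.allclose(_reg_pinv(x.copy(), reg), linalg.pinv(loaded))
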
예제 #56
0
    def apply(self, raw_data, method='least_squares'):
        """Apply the calibration matrix to results.

        Args:
            raw_data (dict or list): The data to be corrected. Can be in a number of forms:

                 Form 1: a counts dictionary from results.get_counts

                 Form 2: a list of counts of `length==len(state_labels)`

                 Form 3: a list of counts of `length==M*len(state_labels)` where M is an
                 integer (e.g. for use with the tomography data)

                 Form 4: a qiskit Result

            method (str): fitting method. If `None`, then least_squares is used.

                ``pseudo_inverse``: direct inversion of the A matrix

                ``least_squares``: constrained to have physical probabilities

        Returns:
            dict or list: The corrected data in the same form as `raw_data`

        Raises:
            QiskitError: if `raw_data` is not an integer multiple
                of the number of calibrated states.

        """

        # check forms of raw_data
        if isinstance(raw_data, dict):
            # counts dictionary
            for data_label in raw_data.keys():
                if data_label not in self._state_labels:
                    raise QiskitError("Unexpected state label '" + data_label +
                                      "', verify the fitter's state labels "
                                      "correspond to the input data")
            data_format = 0
            # convert to form2
            raw_data2 = [np.zeros(len(self._state_labels), dtype=float)]
            for stateidx, state in enumerate(self._state_labels):
                raw_data2[0][stateidx] = raw_data.get(state, 0)

        elif isinstance(raw_data, list):
            size_ratio = len(raw_data) / len(self._state_labels)
            if len(raw_data) == len(self._state_labels):
                data_format = 1
                raw_data2 = [raw_data]
            elif int(size_ratio) == size_ratio:
                data_format = 2
                size_ratio = int(size_ratio)
                # make the list into chunks the size of state_labels for easier
                # processing
                raw_data2 = np.zeros([size_ratio, len(self._state_labels)])
                for i in range(size_ratio):
                    raw_data2[i][:] = raw_data[i *
                                               len(self._state_labels):(i +
                                                                        1) *
                                               len(self._state_labels)]
            else:
                raise QiskitError("Data list is not an integer multiple "
                                  "of the number of calibrated states")

        elif isinstance(raw_data, qiskit.result.result.Result):

            # extract out all the counts, re-call the function with the
            # counts and push back into the new result
            new_result = deepcopy(raw_data)

            new_counts_list = parallel_map(
                self._apply_correction,
                [resultidx for resultidx, _ in enumerate(raw_data.results)],
                task_args=(raw_data, method))

            for resultidx, new_counts in new_counts_list:
                new_result.results[resultidx].data.counts = new_counts

            return new_result

        else:
            raise QiskitError("Unrecognized type for raw_data.")

        if method == 'pseudo_inverse':
            pinv_cal_mat = la.pinv(self._cal_matrix)

        # Apply the correction
        for data_idx, _ in enumerate(raw_data2):

            if method == 'pseudo_inverse':
                raw_data2[data_idx] = np.dot(pinv_cal_mat, raw_data2[data_idx])

            elif method == 'least_squares':
                nshots = sum(raw_data2[data_idx])

                def fun(x):
                    return sum(
                        (raw_data2[data_idx] - np.dot(self._cal_matrix, x))**2)

                x0 = np.random.rand(len(self._state_labels))
                x0 = x0 / sum(x0)
                cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
                bnds = tuple((0, nshots) for x in x0)
                res = minimize(fun,
                               x0,
                               method='SLSQP',
                               constraints=cons,
                               bounds=bnds,
                               tol=1e-6)
                raw_data2[data_idx] = res.x

            else:
                raise QiskitError("Unrecognized method.")

        if data_format == 2:
            # flatten back out the list
            raw_data2 = raw_data2.flatten()

        elif data_format == 0:
            # convert back into a counts dictionary
            new_count_dict = {}
            for stateidx, state in enumerate(self._state_labels):
                if raw_data2[0][stateidx] != 0:
                    new_count_dict[state] = raw_data2[0][stateidx]

            raw_data2 = new_count_dict
        else:
            # TODO: should probably change to:
            # raw_data2 = raw_data2[0].tolist()
            raw_data2 = raw_data2[0]
        return raw_data2
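
A minimal usage sketch of the method above.  The fitter instance, here called ``meas_fitter``, is hypothetical and assumed to be calibrated on the state labels '00', '01', '10', '11'; both correction methods accept the same counts dictionary:

# Hypothetical noisy counts for a circuit that ideally yields only '00' and '11'.
raw_counts = {'00': 480, '01': 21, '10': 25, '11': 498}

# Direct inversion of the calibration matrix (may produce negative quasi-counts).
corrected_pinv = meas_fitter.apply(raw_counts, method='pseudo_inverse')

# Constrained least squares keeps the corrected counts non-negative.
corrected_lsq = meas_fitter.apply(raw_counts, method='least_squares')
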
예제 #57
0
def _lcmv_source_power(info,
                       forward,
                       noise_cov,
                       data_cov,
                       reg=0.05,
                       label=None,
                       picks=None,
                       pick_ori=None,
                       rank=None,
                       verbose=None):
    """Linearly Constrained Minimum Variance (LCMV) beamformer."""
    if picks is None:
        picks = pick_types(info,
                           meg=True,
                           eeg=True,
                           ref_meg=False,
                           exclude='bads')

    is_free_ori, ch_names, proj, vertno, G =\
        _prepare_beamformer_input(
            info, forward, label, picks, pick_ori)

    # Handle whitening
    info = pick_info(
        info,
        [info['ch_names'].index(k) for k in ch_names if k in info['ch_names']])
    whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)

    # whiten the leadfield
    G = np.dot(whitener, G)

    # Apply SSPs + whitener to data covariance
    data_cov = pick_channels_cov(data_cov, include=ch_names)
    Cm = data_cov['data']
    if info['projs']:
        Cm = np.dot(proj, np.dot(Cm, proj.T))
    Cm = np.dot(whitener, np.dot(Cm, whitener.T))

    # Tikhonov regularization using reg parameter to control for
    # trade-off between spatial resolution and noise sensitivity
    # This modifies Cm inplace, regularizing it
    Cm_inv = _reg_pinv(Cm, reg)

    # Compute spatial filters
    W = np.dot(G.T, Cm_inv)
    n_orient = 3 if is_free_ori else 1
    n_sources = G.shape[1] // n_orient
    source_power = np.zeros((n_sources, 1))
    for k in range(n_sources):
        Wk = W[n_orient * k:n_orient * k + n_orient]
        Gk = G[:, n_orient * k:n_orient * k + n_orient]
        Ck = np.dot(Wk, Gk)

        if is_free_ori:
            # Free source orientation
            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
        else:
            # Fixed source orientation
            Wk /= Ck

        # Noise normalization
        noise_norm = np.dot(Wk, Wk.T)
        noise_norm = noise_norm.trace()

        # Calculating source power
        sp_temp = np.dot(np.dot(Wk, Cm), Wk.T)
        sp_temp /= max(noise_norm, 1e-40)  # Avoid division by 0

        if pick_ori == 'normal':
            source_power[k, 0] = sp_temp[2, 2]
        else:
            source_power[k, 0] = sp_temp.trace()

    logger.info('[done]')

    subject = _subject_from_forward(forward)
    return SourceEstimate(source_power,
                          vertices=vertno,
                          tmin=1,
                          tstep=1,
                          subject=subject)
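
In the source-power loop above, each free-orientation filter is renormalized so that ``Wk @ Gk`` is (approximately) the identity.  A minimal numpy sketch of that unit-gain normalization, with toy shapes, the identity standing in for the regularized covariance inverse, and the 0.1 pinv cutoff used above omitted:

import numpy as np
from scipy import linalg

n_channels, n_orient = 10, 3
G_k = np.random.standard_normal((n_channels, n_orient))  # leadfield of one source
Cm_inv = np.eye(n_channels)   # stand-in for the regularized data-covariance inverse

W_k = G_k.T @ Cm_inv          # unnormalized filter, as in W = np.dot(G.T, Cm_inv)
C_k = W_k @ G_k
W_k = linalg.pinv(C_k) @ W_k  # the normalization applied inside the loop above

# The normalized filter has unit gain for its own source.
assert np.allclose(W_k @ G_k, np.eye(n_orient))
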
예제 #58
0
    def predict(self, X, mode='efficient', conf_intervals=False):

        N = self.__X.shape[0]
        Ntest = X.shape[0]
        T = self.__y.shape[1]

        # Get the basic kernel matrix over the training data and train-test data
        #K_basic = deepcopy(self.kernelX_)
        K_basic = deepcopy(self.__listGPs[0].kernel_)
        print('Kernel at test time')
        print(K_basic)
        gp = self.__listGPs[0]

        numKernelParam = len(K_basic.hyperparameters)
        if (numKernelParam > 0) and (K_basic.hyperparameters[0].name !=
                                     'sigma_0'):
            K_basic = K_basic.clone_with_theta(
                gp.kernel_.theta[:numKernelParam])
        K_tr = K_basic(self.__X)
        K_tr_test = K_basic(self.__X, X)
        K_test = K_basic(X)

        # Compute the intratask covariance matrix (signal and noise)
        PriorW = self._PriorW
        SigmaTT = self._SigmaTT

        if mode == 'efficient':
            y_tr = self.__y
            u_sigma, s_sigma, _ = np.linalg.svd(SigmaTT, hermitian=True)
            aux1 = (u_sigma * (np.divide(1, np.sqrt(s_sigma))))
            C2 = aux1.T @ PriorW @ aux1

            u_C2, s_C2, _ = np.linalg.svd(C2, hermitian=True)
            u_K, s_K, _ = np.linalg.svd(K_tr, hermitian=True)

            s_C2_K = np.kron(s_C2, s_K)

            y_tr2 = (y_tr @ u_sigma * np.divide(1, np.sqrt(s_sigma)))
            vect_y_tr_hat = np.divide(
                1, s_C2_K + 1) * (u_K.T @ y_tr2 @ u_C2).T.ravel()
            #m_star = np.kron(C,K_tr_test.T) @ ((u_K@vect_y_tr_hat.reshape((T,N)).T@u_C2.T@(u_sigma*np.divide(1,np.sqrt(s_sigma))).T).T.ravel())
            mpred = (K_tr_test.T @ (u_K @ vect_y_tr_hat.reshape(
                (T, N)).T @ u_C2.T @ (u_sigma * np.divide(
                    1, np.sqrt(s_sigma))).T) @ PriorW).T.ravel()
            mpred = mpred.reshape((Ntest, T), order='F')
        else:
            K_test = K_basic(X)

            # Compute kron products
            C_K_tr = np.kron(PriorW, K_tr)
            C_K_tr_test = np.kron(PriorW, K_tr_test)
            C_K_test = np.kron(PriorW, K_test)
            Sigma_I = np.kron(SigmaTT, np.eye(N))

            # Get matrix inverse: note, use pinv in case the matrix is
            # ill-conditioned, as happens in low-noise cases
            Inverse = np.linalg.pinv(C_K_tr + Sigma_I)

            # Compute mean and std of the MT-GP
            mpred = C_K_tr_test.T @ Inverse @ self.__y.T.ravel()
            mpred = mpred.reshape((Ntest, T), order='F')
            #y_var = C_K_test - np.einsum("ij,ij->i", np.dot(C_K_tr_test.T, Inverse), C_K_tr_test.T)
            #y_var = C_K_test - C_K_tr_test.T @  Inverse  @ C_K_tr_test

            # y_var_negative = y_var < 0
            # if np.any(y_var_negative):
            #     warnings.warn("Predicted variances smaller than 0. "
            #                   "Setting those variances to 0.")
            #     y_var[y_var_negative] = 0.0

        if conf_intervals:
            cov = np.zeros((Ntest, T))
            for t in range(T):
                C_t = PriorW[t, t]
                Sigma_t = SigmaTT[t, t]

                C_K_tr = np.kron(C_t, K_tr)
                C_K_tr_test = np.kron(C_t, K_tr_test)
                C_K_test = np.kron(C_t, K_test)
                Sigma_I = np.kron(Sigma_t, np.eye(N))
                Sigma_I_tst = np.kron(Sigma_t, np.eye(Ntest))
                Inverse = pinv(C_K_tr + Sigma_I)

                full_cov = C_K_test - C_K_tr_test.T @ Inverse @ C_K_tr_test + Sigma_I_tst
                cov[:, t] = np.diag(full_cov)

            lower = mpred - 2 * np.sqrt(cov)
            upper = mpred + 2 * np.sqrt(cov)

            return mpred, lower, upper

        else:
            return mpred
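
The 'efficient' branch above avoids forming the full N*T x N*T system of the else-branch by exploiting the Kronecker identity (B kron A) vec(X) = vec(A X B^T), so only the two smaller matrices need to be factorized.  A small numpy check of that identity (illustrative sizes, not part of the original code):

import numpy as np

N, T = 5, 3
A = np.random.standard_normal((N, N))   # plays the role of the kernel matrix K_tr
B = np.random.standard_normal((T, T))   # plays the role of the intertask covariance PriorW
X = np.random.standard_normal((N, T))

# vec() as column stacking corresponds to ravel(order='F').
lhs = np.kron(B, A) @ X.ravel(order='F')
rhs = (A @ X @ B.T).ravel(order='F')
assert np.allclose(lhs, rhs)
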
예제 #59
0
    def apply(self, raw_data, method='least_squares'):
        """
        Apply the calibration matrices to results.

        Args:
            raw_data: The data to be corrected. Can be in a number of forms:
                a counts dictionary from results.get_counts,
                or a qiskit Result

            method (str): fitting method. If None, then least_squares is used.
                'pseudo_inverse': direct inversion of the cal matrices.
                'least_squares': constrained to have physical probabilities.

        Returns:
            The corrected data in the same form as raw_data
        """

        all_states = count_keys(self.nqubits)
        num_of_states = 2**self.nqubits

        # check forms of raw_data
        if isinstance(raw_data, dict):
            # counts dictionary
            # convert to list
            raw_data2 = [np.zeros(num_of_states, dtype=float)]
            for state, count in raw_data.items():
                stateidx = int(state, 2)
                raw_data2[0][stateidx] = count

        elif isinstance(raw_data, qiskit.result.result.Result):

            # extract out all the counts, re-call the function with the
            # counts and push back into the new result
            new_result = deepcopy(raw_data)

            new_counts_list = parallel_map(
                self._apply_correction,
                [resultidx for resultidx, _ in enumerate(raw_data.results)],
                task_args=(raw_data, method))

            for resultidx, new_counts in new_counts_list:
                new_result.results[resultidx].data.counts = \
                    Obj(**new_counts)

            return new_result

        else:
            raise QiskitError("Unrecognized type for raw_data.")

        if method == 'pseudo_inverse':
            pinv_cal_matrices = []
            for cal_mat in self._cal_matrices:
                pinv_cal_matrices.append(la.pinv(cal_mat))

        # Apply the correction
        for data_idx, _ in enumerate(raw_data2):

            if method == 'pseudo_inverse':
                inv_mat_dot_raw = np.zeros([num_of_states], dtype=float)
                for state1_idx, state1 in enumerate(all_states):
                    for state2_idx, state2 in enumerate(all_states):
                        if raw_data2[data_idx][state2_idx] == 0:
                            continue

                        product = 1.
                        end_index = self.nqubits
                        for p_ind, pinv_mat in enumerate(pinv_cal_matrices):

                            start_index = end_index - \
                                self._qubit_list_sizes[p_ind]

                            state1_as_int = \
                                self._indices_list[p_ind][
                                    state1[start_index:end_index]]

                            state2_as_int = \
                                self._indices_list[p_ind][
                                    state2[start_index:end_index]]

                            end_index = start_index
                            product *= \
                                pinv_mat[state1_as_int][state2_as_int]
                            if product == 0:
                                break
                        inv_mat_dot_raw[state1_idx] += \
                            (product * raw_data2[data_idx][state2_idx])
                raw_data2[data_idx] = inv_mat_dot_raw

            elif method == 'least_squares':

                def fun(x):
                    mat_dot_x = np.zeros([num_of_states], dtype=float)
                    for state1_idx, state1 in enumerate(all_states):
                        mat_dot_x[state1_idx] = 0.
                        for state2_idx, state2 in enumerate(all_states):
                            if x[state2_idx] != 0:
                                product = 1.
                                end_index = self.nqubits
                                for c_ind, cal_mat in \
                                        enumerate(self._cal_matrices):

                                    start_index = end_index - \
                                        self._qubit_list_sizes[c_ind]

                                    state1_as_int = \
                                        self._indices_list[c_ind][
                                            state1[start_index:end_index]]

                                    state2_as_int = \
                                        self._indices_list[c_ind][
                                            state2[start_index:end_index]]

                                    end_index = start_index
                                    product *= \
                                        cal_mat[state1_as_int][state2_as_int]
                                    if product == 0:
                                        break
                                mat_dot_x[state1_idx] += \
                                    (product * x[state2_idx])
                    return sum((raw_data2[data_idx] - mat_dot_x)**2)

                x0 = np.random.rand(num_of_states)
                x0 = x0 / sum(x0)
                nshots = sum(raw_data2[data_idx])
                cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
                bnds = tuple((0, nshots) for x in x0)
                res = minimize(fun,
                               x0,
                               method='SLSQP',
                               constraints=cons,
                               bounds=bnds,
                               tol=1e-6)
                raw_data2[data_idx] = res.x

            else:
                raise QiskitError("Unrecognized method.")

        # convert back into a counts dictionary
        new_count_dict = {}
        for state_idx, state in enumerate(all_states):
            if raw_data2[0][state_idx] != 0:
                new_count_dict[state] = raw_data2[0][state_idx]

        return new_count_dict
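
The nested loop in the 'pseudo_inverse' branch multiplies the per-subset pinv entries, which is equivalent to one matrix-vector product with the Kronecker product of the pinv'd calibration matrices.  A hedged sketch of that equivalence for two hypothetical single-qubit calibration matrices, assuming counts are indexed by int(label, 2) with the label ordered 'q1 q0':

import numpy as np
from scipy import linalg as la

cal_q1 = np.array([[0.95, 0.08],
                   [0.05, 0.92]])
cal_q0 = np.array([[0.97, 0.04],
                   [0.03, 0.96]])
pinv_q1, pinv_q0 = la.pinv(cal_q1), la.pinv(cal_q0)

raw = np.array([480., 21., 25., 498.])   # counts for '00', '01', '10', '11'

# Entry-wise products over qubit subsets, as in the loop above ...
by_loop = np.zeros(4)
for i in range(4):
    for j in range(4):
        by_loop[i] += pinv_q1[i >> 1, j >> 1] * pinv_q0[i & 1, j & 1] * raw[j]

# ... equal a single product with the Kronecker product of the pinv'd matrices.
assert np.allclose(by_loop, np.kron(pinv_q1, pinv_q0) @ raw)
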
예제 #60
0
def calc_one_fisher(this_info):
    '''
    routine called by worker bee function to do all the heavy lifting.
    '''
    lam = this_info[0]
    cfg_file = this_info[1]
    cfg_resolution = this_info[2]
    min_resolution = this_info[3]
    cfg_las = this_info[4]
    beamfwhm = this_info[5]
    master_norm = this_info[6]
    typical_err = this_info[7]
    n_samps = this_info[8]
    do_constSurfBright = this_info[9]

    if do_constSurfBright:
        print("SURFACE BRIGHTNESS of components held fixed")
    else:
        print("TOTAL FLUX density of components held fixed")

    deltax = beamfwhm * 0.01
    parvec = sp.array(
        [master_norm, deltax, deltax, cfg_resolution, cfg_resolution, 10.0])
    # param order: norm, l0, m0, fwhm_1, fwhm_2, axis_angle
    # (axis_angle is in degrees, the others in radians)
    # initialize variables
    norm_snr = sp.zeros(n_samps)
    fwhm_snr = sp.zeros(n_samps)
    fwhm2_snr = sp.zeros(n_samps)
    pos_err0 = sp.zeros(n_samps)
    pos_err1 = sp.zeros(n_samps)
    angle_err = sp.zeros(n_samps)
    # scan component sizes from min_resolution * 0.05 up to cfg_las * 2.5
    #min_scale=cfg_resolution*0.1
    min_scale = min_resolution * 0.05
    max_scale = cfg_las * 2.5
    # compute range of models to consider-
    step_size = (max_scale - min_scale) / n_samps
    signal_fwhm = (sp.arange(n_samps) * step_size + step_size)
    # this will return the uv baselines in inverse radians-
    bl = tv.getbaselines(cfg_file, lam=lam)
    # loop over gaussian component sizes-
    print('***', step_size, max_scale, min_scale, cfg_file)
    for i in range(n_samps):
        parvec[3] = signal_fwhm[i] * 1.05
        parvec[4] = signal_fwhm[i] / 1.05
        if do_constSurfBright:
            # normalize total flux so that master_norm is the flux in mJy
            #  of a component with 1" FWHM - note signal_fwhm here
            #  is in radians
            parvec[0] = master_norm * (signal_fwhm[i] * 206264.8)**2
        # set default deltas for calculating the numerical derivative
        #  default to 1% for nonzero params; 0.1xsynth beam for positions;
        #  and 0.5 deg for the axis angle-
        default_par_delta = sp.copy(parvec * 0.01)
        default_par_delta[1] = cfg_resolution * 0.1
        default_par_delta[2] = cfg_resolution * 0.1
        default_par_delta[5] = 0.5
        # put the telescope gain scaling into the errors (which are in mJy)
        this_err = typical_err * (beamfwhm / 2.91e-4)**2
        f = tv.make_fisher_mx(bl,
                              this_err,
                              default_par_delta,
                              beamfwhm,
                              parvec,
                              brute_force=False,
                              flux_norm=True)
        # use SVD pseudo inverse instead of direct
        #  inverse for stability
        #finv=spla.inv(f)
        finv = spla.pinv(f)
        norm_snr[i] = parvec[0] / (finv[0, 0])**0.5
        # save position error = average 1D error-
        pos_err0[i] = (finv[1, 1])**0.5
        pos_err1[i] = (finv[2, 2])**0.5
        fwhm_snr[i] = parvec[3] / (finv[3, 3])**0.5
        fwhm2_snr[i] = parvec[4] / (finv[4, 4])**0.5
        angle_err[i] = (finv[5, 5])**0.5
        print(cfg_file, i, norm_snr[i], fwhm_snr[i], pos_err0[i])
        # save fisher mx i here
    # save (signal_fwhm,norm_snr, fwhm_snr) here
    if do_constSurfBright:
        mystring = '-constSB'
    else:
        mystring = '-constFlux'
    fh = open(cfg_file + mystring + '.parErrs.txt', 'w')
    for i in range(n_samps):
        outstr = '{0:.3e} {1:.3e} {2:.3e} {3:.4e} {4:.4e} {5:.3e} {6:.3e} {7:.3e}'.format(
            signal_fwhm[i], beamfwhm, norm_snr[i], fwhm_snr[i], fwhm2_snr[i],
            angle_err[i], pos_err0[i], pos_err1[i])
        fh.write(outstr + '\n')
    fh.close()
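
The routine above reads 1-sigma parameter uncertainties off the diagonal of the (pseudo-)inverted Fisher matrix.  A minimal sketch of that relation for a hypothetical two-parameter fit; the matrix and parameter values are made up for illustration:

import numpy as np
from scipy import linalg as spla

# Hypothetical 2x2 Fisher information matrix for parameters (norm, fwhm).
F = np.array([[400.0, 30.0],
              [30.0, 25.0]])
finv = spla.pinv(F)                 # SVD pseudo-inverse, as in the routine above

norm, fwhm = 10.0, 2.0e-5           # hypothetical best-fit parameter values
norm_err = np.sqrt(finv[0, 0])      # 1-sigma error on the normalization
fwhm_err = np.sqrt(finv[1, 1])      # 1-sigma error on the FWHM
norm_snr = norm / norm_err          # per-parameter SNR, as computed above
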