Example #1
import numpy as np
import scipy.linalg as sl

def interpolation(interP,knots=None):
    """
    Interpolates the given points and returns an object of the Spline class 
    Arguments:
    
        * interP: interpolation points, (L x 2) matrix
        * knots: knot points, (L+4 x 1) matrix
        
            * default: equidistant on [0,1]
                
    """
    nip=len(interP)
    ctrlP=np.zeros((nip,2))
    if knots is not None:
        knots = np.array(knots,dtype='float')
        if len(ctrlP) + 4 != len(knots):
            raise ValueError('Knots is of the wrong size')

    else:
        knots = np.hstack((np.zeros(3), np.linspace(0,1,len(ctrlP)-2),
                           np.ones(3)))
    xi=(knots[1:-3]+knots[2:-2]+knots[3:-1])/3
    nMatrix=np.zeros((len(xi),len(xi)))
    for i in range(len(xi)):
        fun=basisFunction(i,knots)
        nMatrix[:,i]=fun(xi,3)

    ctrlP[:,0]=sl.solve(nMatrix,interP[:,0])
    ctrlP[:,1]=sl.solve(nMatrix,interP[:,1])
    
    return Spline(ctrlP)
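
The collocation sites xi above are the Greville abscissae of the cubic knot vector, xi_i = (t_{i+1} + t_{i+2} + t_{i+3})/3, giving one site per control point. A quick standalone look at them for the default knots (a sketch, independent of the Spline and basisFunction helpers used above):

import numpy as np

nip = 6  # number of interpolation points
knots = np.hstack((np.zeros(3), np.linspace(0, 1, nip - 2), np.ones(3)))
xi = (knots[1:-3] + knots[2:-2] + knots[3:-1]) / 3
# len(xi) == nip: one collocation site per control point,
# clustered toward the interval ends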
Example #2
    def fit(self, X, y, **params):
        """
        Fit Ridge regression model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        self._set_params(**params)

        X = np.asanyarray(X, dtype=np.float64)
        y = np.asanyarray(y, dtype=np.float64)

        n_samples, n_features = X.shape

        X, y, Xmean, ymean = self._center_data(X, y)

        if n_samples > n_features:
            # w = inv(X^t X + alpha*Id) * X.T y
            self.coef_ = linalg.solve(np.dot(X.T, X) + self.alpha * np.eye(n_features), np.dot(X.T, y))
        else:
            # w = X.T * inv(X X^t + alpha*Id) y
            self.coef_ = np.dot(X.T, linalg.solve(np.dot(X, X.T) + self.alpha * np.eye(n_samples), y))

        self._set_intercept(Xmean, ymean)
        return self
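
The two branches rely on the identity (X^T X + alpha*I)^{-1} X^T y = X^T (X X^T + alpha*I)^{-1} y, so the solver can factor whichever Gram matrix is smaller. A quick numeric check of that identity (a standalone sketch, not part of the original class):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
X = rng.standard_normal((6, 3))
y = rng.standard_normal(6)
alpha = 0.5

w_primal = linalg.solve(X.T @ X + alpha * np.eye(3), X.T @ y)  # n_features system
w_dual = X.T @ linalg.solve(X @ X.T + alpha * np.eye(6), y)    # n_samples system
assert np.allclose(w_primal, w_dual)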
Example #3
import numpy as np
from scipy.linalg import solve  # the bare solve() used below

def get_inner_circle(A, B, C):
	xa, ya = A[0], A[1]
	xb, yb = B[0], B[1]
	xc, yc = C[0], C[1]

	ka = (yb - ya) / (xb - xa) if xb != xa else None
	kb = (yc - yb) / (xc - xb) if xc != xb else None
	
	alpha = np.arctan(ka) if ka is not None else np.pi / 2
	beta  = np.arctan(kb) if kb is not None else np.pi / 2

	a = np.sqrt((xb - xc)**2 + (yb - yc)**2)
	b = np.sqrt((xa - xc)**2 + (ya - yc)**2)
	c = np.sqrt((xa - xb)**2 + (ya - yb)**2)

	ang_a = np.arccos((b**2 + c**2 - a**2) / (2 * b * c))
	ang_b = np.arccos((a**2 + c**2 - b**2) / (2 * a * c))

	# slopes of the two angle bisectors
	k1 = np.tan(alpha + ang_a / 2)
	k2 = np.tan(beta + ang_b / 2)
	kv = np.tan(alpha + np.pi / 2)
	
	# find the circle center
	y, x = solve([[1.0, -k1], [1.0, -k2]], [ya - k1 * xa, yb - k2 * xb])
	ym, xm = solve([[1.0, -ka], [1.0, -kv]], [ya - ka * xa, y - kv * x])
	r1 = np.sqrt((x - xm)**2 + (y - ym)**2)

	return(x, y, r1)
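
A quick sanity check, assuming the function above is in scope: for the 3-4-5 right triangle with vertices (0,0), (4,0) and (0,3), the inscribed circle has center (1, 1) and radius 1.

x, y, r = get_inner_circle((0, 0), (4, 0), (0, 3))
# x, y and r all come out close to 1.0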
Example #4
def debias_nz(prob, reg=1e-3):
    (w, X, y, lam, a_i, c_i, ipro) = prob
    nd = X.shape[0]
    #This is a fancy way of pruning zeros. You could also do this with loops without too much worry. 
    #You could also use numpy.nonzero to do this.
    stn = array([0] + [i for i in range(1, len(w)) if w[i] != 0], dtype=int)
    Hn = ipro[:, stn]
    Hn = Hn[stn, :].copy()
    #
    nz = len(stn)
    #
    Xty = zeros(nz)
    Xty[0] = y.sum()
    for i in range(1, nz):
        if sp.issparse(X):
            Xty[i] = X[:, stn[i] - 1].T.dot(y)[0]
        else:
            Xty[i] = X[:, stn[i] - 1].dot(y)
    Xty *= 2
    #
    try:
        wdb = la.solve(Hn, Xty, sym_pos=True)
    except la.LinAlgError:
        print("Oh no! Matrix is Singular. Trying again using regularization.")
        Hn[range(nz), range(nz)] += 2 * reg
        wdb = la.solve(Hn, Xty, sym_pos=True)
    
    #Update c_i
    c_i -= ipro[:, stn].dot(wdb - w[stn])
    c_i[stn] += a_i[stn] * (wdb - w[stn])

    w[stn] = wdb

    return
Example #5
def _solve_cholesky(X, y, alpha, sample_weight=None):
    # w = inv(X^t X + alpha*Id) * X.T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]

    has_sw = sample_weight is not None

    if has_sw:
        sample_weight = sample_weight * np.ones(n_samples)
        sample_weight_matrix = sparse.dia_matrix((sample_weight, 0),
            shape=(n_samples, n_samples))
        weighted_X = safe_sparse_dot(sample_weight_matrix, X)
        A = safe_sparse_dot(weighted_X.T, X, dense_output=True)
        Xy = safe_sparse_dot(weighted_X.T, y, dense_output=True)
    else:
        A = safe_sparse_dot(X.T, X, dense_output=True)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if one_alpha:
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features])
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            A.flat[::n_features + 1] -= current_alpha
        return coefs
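
The A.flat[::n_features + 1] indexing steps through the flattened array with stride n_features + 1, which visits exactly the diagonal entries, so the ridge penalty alpha*I is added (and later subtracted) in place without allocating an identity matrix. A minimal illustration of the trick:

import numpy as np

A = np.zeros((3, 3))
A.flat[::3 + 1] += 2.0          # add 2 to each diagonal entry
assert np.allclose(A, 2.0 * np.eye(3))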
Example #6
def prob6(N=10):
    """Time regular and sparse linear system solvers. Plot the system size
    versus the execution times. As always, use log scales where appropriate.
    """
    domain = 2**np.arange(2,N+1)
    solve_times, spsolve_times = [], []  # avoid shadowing la.solve / spla.spsolve

    for n in domain:
        A = prob5(n).tocsr()
        b = np.random.random(n)

        start = time()
        spla.spsolve(A, b)
        spsolve_times.append(time()-start)

        A = A.toarray()
        start = time()
        la.solve(A, b)
        solve_times.append(time()-start)

    plt.subplot(121)
    plt.plot(domain, spsolve_times, '.-', lw=2, label="spla.spsolve()")
    plt.plot(domain, solve_times, '.-', lw=2, label="la.solve()")
    plt.xlabel("n"); plt.ylabel("Seconds")
    plt.legend(loc="upper left")

    plt.subplot(122)
    plt.loglog(domain, spsolve_times, '.-', basex=2, basey=2, lw=2)
    plt.loglog(domain, solve_times, '.-', basex=2, basey=2, lw=2)
    plt.xlabel("n")

    plt.suptitle("Problem 6 Solution")
    plt.show()
Example #7
    def planeintersect(self, other):
        """ returns line of intersection of this plane and another
            None is returned if planes are parallel
            20110207 NEW VERSION: M. Krauss
        """
        N1 = self.N
        N2 = other.N
        D1 = self.D
        D2 = other.D
        if (N1.cross(N2)).abs() == 0:
            # Planes are parallel
            return None
        else:
            v = N1.cross(N2)
            b = np.array([[D1], [D2]])
            try:
                # intersection with the plane x=0
                A = np.array([[N1.y, N1.z], [N2.y, N2.z]])
                x = la.solve(A, b)
                p = Point(0, x[0], x[1])
                return Line(p, v)
            except la.LinAlgError:
                try:
                    # intersection with the plane y=0
                    A = np.array([[N1.x, N1.z], [N2.x, N2.z]])
                    x = la.solve(A, b)
                    p = Point(x[0], 0, x[1])
                    return Line(p, v)
                except la.LinAlgError:
                    # intersection with the plane z=0
                    A = np.array([[N1.x, N1.y], [N2.x, N2.y]])
                    x = la.solve(A, b)
                    p = Point(x[0], x[1], 0)
                    return Line(p, v)
Example #8
    def update_values(self):
        """
        This method is for updating in the finite horizon case.  It shifts the
        current value function 

            V_t(x) = x' P_t x + d_t
        
        and the optimal policy F_t one step *back* in time, replacing the pair
        P_t and d_t with P_{t-1} and d_{t-1}, and F_t with F_{t-1}  
        """
        # === Simplify notation === #
        Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C
        P, d = self.P, self.d
        # == Some useful matrices == #
        S1 = Q + self.beta * dot(B.T, dot(P, B))   
        S2 = self.beta * dot(B.T, dot(P, A))
        S3 = self.beta * dot(A.T, dot(P, A))
        # == Compute F as (Q + B'PB)^{-1} (beta B'PA) == #
        self.F = solve(S1, S2)  
        # === Shift P back in time one step == #
        new_P = R - dot(S2.T, solve(S1, S2)) + S3  
        # == Recalling that trace(AB) = trace(BA) == #
        new_d = self.beta * (d + np.trace(dot(P, dot(C, C.T))))  
        # == Set new state == #
        self.P, self.d = new_P, new_d
Example #9
def matlabLDiv(a, b):
    '''Implements the matlab \ operator on arrays (maybe works
    on matrices also).
    Solves the "left divide" equation: a * x = b for x
    Inputs:
    a,b arrays
    Returns: a \ b
    Results depend on dimensions of matrices. See documentation for
    matlab's MLDIVIDE operator \ .
    Theory is on the Numpy for Matlab user's page.
    http://wiki.scipy.org/NumPy_for_Matlab_Users
    See also this site, but beware of caveats
    http://stackoverflow.com/questions/1001634/array-division-translating-from-matlab-to-python
    '''
    import scipy.linalg as LA
    # Check if a is square.
    try:
        (r,c) = np.shape(a)
    except ValueError: # In case a is of dim=1, cannot unpack two vals.
        return LA.solve(a,b)
    else:
        if (r == c): # Square
            return LA.solve(a,b)
        else:
            return LA.lstsq(a,b)[0]  # lstsq returns (solution, residues, rank, sv)
Example #10
    def predict(self, X_p):
        '''
        Predict the values of the GP at each position x_p
        
        From Rasmussen & Williams "Gaussian Processes for Machine Learning",
        pg. 19, algorithm 2.1
        '''
        (X, Y) = (self.X, self.Y)
        sigma_n = self.hyper_params[0]
        K = self.kernel.value(self.hyper_params[1:], X, X)
        K_p = self.kernel.value(self.hyper_params[1:], X, X_p)
        K_p_p = self.kernel.value(self.hyper_params[1:], X_p, X_p)

        (self.K, self.K_p, self.K_p_p) = K, K_p, K_p_p

        # since the kernel is (should be) pos. def. and symmetric,
        # we can solve in a quick/stable way using the cholesky decomposition
        L = np.matrix(linalg.cholesky(K + sigma_n**2 * np.eye(len(X)),
                                      lower = True))
        alpha = np.matrix(linalg.solve(L.T, linalg.solve(L, Y)))
        f_p_mean = K_p.T * alpha.T
        v = np.matrix(linalg.solve(L, K_p))
        f_p_covariance = K_p_p - v.T*v
        log_marginal = (-0.5*np.matrix(Y)*alpha.T - sum(np.log(np.diag(L))) -
                        len(X)/2.0*np.log(2*np.pi))

        return (np.array(f_p_mean).flatten(),
                f_p_covariance,
                np.array(log_marginal).flatten())
Example #11
def matrix_normal_density(X, M, U, V):
    """Log-density of a matrix normal distribution"""
    norm = - 0.5*np.log(la.det(2*np.pi*U)) - 0.5*np.log(la.det(2*np.pi*V))
    XM = X-M
    pptn = -0.5*np.trace( np.dot(la.solve(U,XM),la.solve(V,XM.T)) )
    pdf = norm + pptn
    return pdf
Example #12
def get_absorption_variance(P, plain, absorbing):
    """
    Get expected times to absorption.
    Note that if an index is indicated as absorbing by its presence
    in the sequence of absorbing state indices,
    then it will be treated as absorbing
    even if the transition matrix P indicates otherwise.
    @param P: transition matrix
    @param plain: sequence of plain state indices
    @param absorbing: sequence of absorbing state indices
    @return: variance of times to absorption or 0 from absorbing states
    """
    # check that P is really a transition matrix
    MatrixUtil.assert_transition_matrix(P)
    # define some state lists
    states = np.hstack((plain, absorbing))
    # check that the index sequences match the size of P
    if sorted(states) != list(range(len(P))):
        raise ValueError('P is not conformant with the index sequences')
    # compute the time to absorption
    Q = P[plain, :][:, plain]
    c = np.ones_like(plain)
    I = np.eye(len(plain))
    t = linalg.solve(I - Q, c)
    # compute the variance
    vplain = 2*linalg.solve(I - Q, t) - t*(t+1)
    v = np.hstack((vplain, np.zeros_like(absorbing)))
    return v[inverse_permutation(states)]
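
The core step is the fundamental-matrix identity t = (I - Q)^{-1} 1 for expected absorption times, with Q the sub-stochastic transition matrix among plain states. A self-contained sketch for a 3-state chain whose last state is absorbing (independent of the MatrixUtil helpers above):

import numpy as np
from scipy import linalg

P = np.array([[0.5, 0.3, 0.2],
              [0.2, 0.5, 0.3],
              [0.0, 0.0, 1.0]])
Q = P[:2, :2]                                # transitions among plain states
t = linalg.solve(np.eye(2) - Q, np.ones(2))  # expected steps to absorption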
Example #13
    def test_care_g(self):
        A = matrix([[-2, -1],[-1, -1]])
        Q = matrix([[0, 0],[0, 1]])
        B = matrix([[1, 0],[0, 4]])
        R = matrix([[2, 0],[0, 1]])
        S = matrix([[0, 0],[0, 0]])
        E = matrix([[2, 1],[1, 2]])

        X,L,G = care(A,B,Q,R,S,E)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * E + E.T * X * A -
            (E.T * X * B + S) * solve(R, B.T * X * E + S.T)  + Q, zeros((2,2)))
        assert_array_almost_equal(solve(R, B.T * X * E + S.T), G)

        A = matrix([[-2, -1],[-1, -1]])
        Q = matrix([[0, 0],[0, 1]])
        B = matrix([[1],[0]])
        R = 1
        S = matrix([[1],[0]])
        E = matrix([[2, 1],[1, 2]])

        X,L,G = care(A,B,Q,R,S,E)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * E + E.T * X * A -
            (E.T * X * B + S) / R * (B.T * X * E + S.T) + Q , zeros((2,2)))
        assert_array_almost_equal(dot( 1/R , dot(B.T,dot(X,E)) + S.T) , G)
Example #14
    def amp_int_one(self):
        """
        Compute transition amplitude of wave function by one particle operator 
        integration form. 
        .    A = 2k^(-1) <F, [E-H0]\psi>
        """
        v1 = self.gtos.calc_mat_sto(self.v1)
        (s, t, v0)    = self.gtos.calc_mat_stv(1)
        (sH, tH, v0H) = self.gtos_cc.calc_mat_stv(self.gtos, 1)
        m = self.gtos.calc_vec_sto(self.s)
        
        # solve driven eq
        c0 = la.solve(self.energy*s-self.z*v0-t,    m)
        c1 = la.solve(self.energy*s-self.z*v0-v1-t, m)

        # compute amplitude 
        k = np.sqrt(2.0*self.energy)
        plmx_y0p = np.dot(m, c0)
        j_plmx = np.sqrt(-k*plmx_y0p.imag)

        # compute amplitude (E-H0)
        y0p_esmh0_yp = np.dot(c0.conj(), np.dot(sH*self.energy-tH-v0H, c1))
        y0m_esmh0_yp = np.dot(c0       , np.dot(s *self.energy-t -v0 , c1))
        amp2 = (y0p_esmh0_yp - y0m_esmh0_yp)/(2.0j*j_plmx)
        return 2.0/k*amp2        
Example #15
def fit(d):
    title("polynomial model (dim=%d)" % d)
    xlabel('x')
    ylabel('y')
    xlim(0,pi)
    ylim(0,1)
    scatter(xs, ys)
    # plain least squares
    X = array([xs**k for k in range(d+1)]).T
    a = LA.solve(X.T.dot(X), X.T.dot(ys))

    # generate the plot
    x = linspace(0, pi, 100)
    y = a.dot(array([x**k for k in range(d+1)]))
    plot(x, y, label="linear regression")

    # prior mean of the parameters and the inverse of the prior covariance
    a0 = zeros(d+1)
    s0inv = LA.inv(diag([2**(-k) for k in range(d+1)]))
    # variance of the error distribution; really this should be estimated somehow, but set it to 1 for now
    s = 1
    # Bayesian linear regression
    a = LA.solve(X.T.dot(X)/s**2 + s0inv, X.T.dot(ys)/s**2 + s0inv.dot(a0))
    x = linspace(0, pi, 100)
    y = a.dot(array([x**k for k in range(d+1)]))
    plot(x, y, label="bayesian linear regression")
    legend(loc=3)
    savefig("fig3-13-%d.png" % d)
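
With a zero prior mean (a0 = 0) and an isotropic prior S0^{-1} = lam*I, the posterior mean above reduces to ridge regression, i.e. the same linear system as in the earlier ridge examples. A minimal check of that equivalence (synthetic data, not from the original script):

import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 4))
ys = X @ np.array([1.0, -2.0, 0.5, 0.0]) + 0.1 * rng.standard_normal(20)
lam, s = 0.3, 1.0

a_bayes = LA.solve(X.T @ X / s**2 + lam * np.eye(4), X.T @ ys / s**2)
a_ridge = LA.solve(X.T @ X + lam * s**2 * np.eye(4), X.T @ ys)
assert np.allclose(a_bayes, a_ridge)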
Example #16
    def amp_int_two(self):
        """
        Compute transition amplitude of wave function by two particle operator form.
        .    A = 2k^(-1)<F, s + v1psi> 
        In this function, F is evaluated by TIWP method and CBF method.
        TIWP is based on F is proportional to the solution of H0 driven equation.
        We put driven term as s.
        .    (E-H0)psi0 = s
        .    F = Im[psi0]/Sqrt(k Im<s, psi0>)
        """
        
        v1 = self.gtos.calc_mat_sto(self.v1)
        v1H = self.gtos_cc.calc_mat_sto(self.gtos, self.v1)
        (s, t, v0) = self.gtos.calc_mat_stv(1)
        v0 = self.z * v0
        m = self.gtos.calc_vec_sto(self.s)

        # solve driven eq
        c0 = la.solve(self.energy*s-v0-t,    m)
        c1 = la.solve(self.energy*s-v0-v1-t, m)

        # compute amplitude (V2psi + muphi)
        # plmx = -m (wave packet)
        k = np.sqrt(2.0*self.energy)
        plmx_y0p = np.dot(m, c0)
        y0p_muphi = plmx_y0p.conj()
        y0p_v_yp = np.dot(c0.conj(), np.dot(v1H,c1))
        y0m_v_yp = np.dot(c0,        np.dot(v1, c1))
        imy0p_v_yp = (y0p_v_yp - y0m_v_yp)/(2.0j)
        j_plmx = np.sqrt(-np.pi*plmx_y0p.imag)
        amp = (y0p_muphi.imag + imy0p_v_yp)/j_plmx
        return amp        
Example #17
    def _H(self):
        r"""Continuous-time linear time invariant system.

        This method is used to create a Continuous-time linear
        time invariant system for the mdof system.
        From this system we can obtain poles, impulse response,
        generate a bode, etc.


        """
        Z = np.zeros((self.n, self.n))
        I = np.eye(self.n)

        # x' = Ax + Bu
        B2 = I
        A = self.A()
        B = np.vstack([Z, la.solve(self.M, B2)])

        # y = Cx + Du
        # Observation matrices
        Cd = I
        Cv = Z
        Ca = Z

        C = np.hstack((Cd - Ca @ la.solve(self.M, self.K), Cv - Ca @ la.solve(self.M, self.C)))
        D = Ca @ la.solve(self.M, B2)

        sys = signal.lti(A, B, C, D)

        return sys
Example #18
    def A(self):
        """State space matrix"""
        Z = np.zeros((self.n, self.n))
        I = np.eye(self.n)

        A = np.vstack([np.hstack([Z, I]), np.hstack([la.solve(-self.M, self.K), la.solve(-self.M, self.C)])])
        return A
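
For a single degree of freedom, m*x'' + c*x' + k*x = u, the block construction above reduces to the familiar companion form [[0, 1], [-k/m, -c/m]]. A minimal sketch with made-up scalar values:

import numpy as np
from scipy import linalg as la

M = np.array([[2.0]])   # mass
C = np.array([[0.5]])   # damping
K = np.array([[8.0]])   # stiffness
Z, I = np.zeros((1, 1)), np.eye(1)

A = np.vstack([np.hstack([Z, I]),
               np.hstack([la.solve(-M, K), la.solve(-M, C)])])
# A == [[0, 1], [-4, -0.25]]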
Example #19
def _pade(A, m):
    n = np.shape(A)[0]
    c = _padecoeff(m)
    if m != 13:
        apows = [[] for jj in range(int(np.ceil((m + 1) / 2)))]
        apows[0] = sp.eye(n, n, format='csr')
        apows[1] = A * A
        for jj in range(2, int(np.ceil((m + 1) / 2))):
            apows[jj] = apows[jj - 1] * apows[1]
        U = sp.lil_matrix((n, n)).tocsr()
        V = sp.lil_matrix((n, n)).tocsr()
        for jj in range(m, 0, -2):
            U = U + c[jj] * apows[jj // 2]
        U = A * U
        for jj in range(m - 1, -1, -2):
            V = V + c[jj] * apows[(jj + 1) // 2]
        F = la.solve((-U + V).todense(), (U + V).todense())
        return sp.lil_matrix(F).tocsr()
    elif m == 13:
        A2 = A * A
        A4 = A2 * A2
        A6 = A2 * A4
        U = A * (A6 * (c[13] * A6 + c[11] * A4 + c[9] * A2) +
                 c[7] * A6 + c[5] * A4 + c[3] * A2 +
                 c[1] * sp.eye(n, n).tocsr())
        V = A6 * (c[12] * A6 + c[10] * A4 + c[8] * A2) + c[6] * A6 + c[4] * \
            A4 + c[2] * A2 + c[0] * sp.eye(n, n).tocsr()
        F = la.solve((-U + V).todense(), (U + V).todense())
        return sp.csr_matrix(F)
Example #20
    def test_trsv(self):
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 15
            A = (rand(n, n)+eye(n)).astype(dtype)
            x = rand(n).astype(dtype)
            func, = get_blas_funcs(('trsv',), dtype=dtype)

            y1 = func(a=A, x=x)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, lower=1)
            y2 = solve(tril(A), x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, diag=1, trans=1)
            y2 = solve(triu(A).T, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(a=A, x=x, diag=1, trans=2)
            y2 = solve(triu(A).conj().T, x)
            assert_array_almost_equal(y1, y2)
Example #21
    def test_dare(self):
        A = matrix([[-0.6, 0],[-0.1, -0.4]])
        Q = matrix([[2, 1],[1, 0]])
        B = matrix([[2, 1],[0, 1]])
        R = matrix([[1, 0],[0, 1]])

        X,L,G = dare(A,B,Q,R)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * A - X -
            A.T * X * B * solve(B.T * X * B + R, B.T * X * A) + Q, zeros((2,2)))
        assert_array_almost_equal(solve(B.T * X * B + R, B.T * X * A), G)
        # check for stable closed loop
        lam = eigvals(A - B * G)
        assert_array_less(abs(lam), 1.0)

        A = matrix([[1, 0],[-1, 1]])
        Q = matrix([[0, 1],[1, 1]])
        B = matrix([[1],[0]])
        R = 2

        X,L,G = dare(A,B,Q,R)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * A - X -
            A.T * X * B * solve(B.T *  X * B + R, B.T * X * A) + Q, zeros((2,2)))
        assert_array_almost_equal(B.T * X * A / (B.T * X * B + R), G)
        # check for stable closed loop
        lam = eigvals(A - B * G)
        assert_array_less(abs(lam), 1.0)
Example #22
    def test_tpsv(self):
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            x = rand(n).astype(dtype)
            # Upper triangular array
            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
            A += eye(n)
            # Form the packed storage
            c, r = tril_indices(n)
            Ap = A[r, c]
            func, = get_blas_funcs(('tpsv',), dtype=dtype)

            y1 = func(n=n, ap=Ap, x=x)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(n=n, ap=Ap, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
            y2 = solve(A.T, x)
            assert_array_almost_equal(y1, y2)

            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
            y2 = solve(A.conj().T, x)
            assert_array_almost_equal(y1, y2)
Example #23
   def conditional(self,xb):
      """         conditional(self,xb)  
         Calculates the mean and covariance of the conditional distribution
         when the variables xb are observed.
         Input: xb - The observed variables. It is assumed that the observed variables occupy the 
                     last positions in the array of random variables, i.e. if x is the random variable
                     associated with the object, then it is partitioned as x = [xa,xb]^T.
         Output: mean_a_given_b - mean of the conditional distribution
                 cov_a_given_b  - covariance of the conditional distribution
      """
      xb         = np.array(xb,ndmin=1)
      nb         = len(xb)
      n_rand_var = len(self.mean)
      if nb >= n_rand_var:
         raise ValueError('The conditional vector should be smaller than the random variable!')

      # Partition the mean and covariance  
      na     = n_rand_var - nb
      mean_a = self.mean[:na]
      mean_b = self.mean[na:]
      cov_a  = self.cov[:na,:na]
      cov_b  = self.cov[na:,na:]
      cov_ab = self.cov[:na,na:]
      
      #Calculate the conditional mean and covariance
      mean_a_given_b = mean_a.flatten() + np.dot(cov_ab,solve(cov_b,xb.flatten()-mean_b.flatten()))
      cov_a_given_b  = cov_a - np.dot(cov_ab,solve(cov_b,cov_ab.transpose()))

      return mean_a_given_b, cov_a_given_b
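
For a bivariate normal the partitioned formulas reduce to mean_a_given_b = mu_a + cov_ab * cov_b^{-1} * (xb - mu_b) and cov_a_given_b = cov_a - cov_ab * cov_b^{-1} * cov_ba, which is easy to check by hand. A standalone sketch with scalar blocks:

import numpy as np
from scipy.linalg import solve

mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])
xb = np.array([2.0])    # observed value of the second variable

cov_ab, cov_b = cov[:1, 1:], cov[1:, 1:]
mean_a_given_b = mean[:1] + cov_ab @ solve(cov_b, xb - mean[1:])
cov_a_given_b = cov[:1, :1] - cov_ab @ solve(cov_b, cov_ab.T)
# mean_a_given_b == [0.6], cov_a_given_b == [[1.64]]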
Example #24
def HODLRDirectSolver( K, F, minSize, tol ):
    n = K.shape[0]
    
    # Base Case
    if (n <= minSize):
        return li.solve(K, F)
    else:
        p = n // 2  # integer split point (n/2 would be a float under Python 3)
        U1,V1 = lowRankApprox(K[:p,p:], -1, tol)
        U2,V2 = lowRankApprox(K[p:,:p], -1, tol)
        
        r1 = U1.shape[1]
        r2 = U2.shape[1]
        
        d1c1 = HODLRDirectSolver(K[:p,:p], np.concatenate((U1, F[:p,:]),1), minSize, tol)
        d2c2 = HODLRDirectSolver(K[p:,p:], np.concatenate((U2, F[p:,:]),1), minSize, tol)
        
        d1 = d1c1[:,:r1]
        c1 = d1c1[:,r1:]
        d2 = d2c2[:,:r2]
        c2 = d2c2[:,r2:]
        
        S1 = np.concatenate((np.eye(r2), np.dot(V2, d1)),1)
        S2 = np.concatenate((np.dot(V1,d2), np.eye(r1)),1)
        S = np.concatenate((S1,S2),0)
        
        y = li.solve(S, np.concatenate((np.dot(V2,c1), np.dot(V1,c2)),0))
        
        y1 = y[:r2,:]
        y2 = y[r2:,:]
        
        x1 = c1 - np.dot(d1,y2)
        x2 = c2 - np.dot(d2,y1)
        
        return np.concatenate((x1, x2), 0)
Example #25
	def computeProjectionVectors( self, P, L, U ) :	
		eK = matrix( identity( self.dim, float64 )[ 0: ,( self.dim - 1 ) ] ).T
		U = matrix(U, float64)
		U[ self.dim - 1, self.dim - 1 ] = 1.0
		# Sergio: I added this exception because in rare cases, the matrix
		# U is singular, which gives rise to a LinAlgError.
		try: 
			x1 = matrix( solve( U, eK ), float64 )
		except LinAlgError:
			print "Matrix U was singular, so we input a fake x1\n"
			print "U: ", U
			x1 = matrix(ones(self.dim))

		#print "x1", x1
		del U

		LT = matrix( L, float64, copy=False ).T
		PT = matrix( P, float64, copy=False ).T

		x2 = matrix( solve( LT*PT, eK ), float64 )
		del L
		del P
		del LT
		del PT
		del eK

		return ( x1, x2 )
Example #26
    def computeHgg(self,f,g,Rg,Rgy,Rgyz):
        # set up self.Hgg with the corresponding matrix
        # also returns the other two derivatives
        D = np.exp(g)*self.D0
        gp = np.gradient(g,self.dy)
        jumpsq = np.zeros(self.bins)
        driftsq = np.zeros(self.bins)
        driftsum = np.zeros(self.bins)
        for k in np.arange(self.bins):
            jumpsq[k] = sum(self.jumps[self.binpos==k]**2)
            driftsq[k] = sum( (D[k]*(f[k]+self.m[self.binpos==k])+gp[k]*D[k])**2)*self.dt*self.dt
            driftsum[k] = sum( D[k]*(f[k]+self.m[self.binpos==k])+gp[k]*D[k])*self.dt
        a1 = (jumpsq + driftsq)/D/4.0/self.dt
        a2 = driftsum/2.
        a4 = D*self.dt/2.
        A1 = np.tile(a1,(self.bins,1))*Rg+np.tile(a2,(self.bins,1))*-Rgy
        A2 = np.tile(a2,(self.bins,1))*Rg+np.tile(a4,(self.bins,1))*-Rgy
        A3 = np.tile(a1,(self.bins,1))*Rgy+np.tile(a2,(self.bins,1))*Rgyz
        A4 = np.tile(a2,(self.bins,1))*Rgy+np.tile(a4,(self.bins,1))*Rgyz

        A5 = np.tile(a1,(self.bins,1))*Rgy+np.tile(a2,(self.bins,1))*Rgyz
        A6 = np.tile(a2,(self.bins,1))*Rgy+np.tile(a4,(self.bins,1))*Rgyz
        M = np.vstack((Rg,Rgy))
        A = np.vstack((np.hstack((A1,A2)) ,np.hstack((A3,A4)) ))
        Lambda = solve(np.eye(self.bins*2)+A.astype(np.float128),M)
        Hggy = Lambda[self.bins:,:]
        Hgg = Lambda[:self.bins,:]

        Hggyz = solve(np.eye(self.bins)+A6,Rgyz-A5.dot(Hggy.T))
        return 0.5*(Hgg+Hgg.T),Hggy,0.5*(Hggyz+Hggyz.T) # first and third should be symmetric. get rid of small errors
Example #27
def invert_low_rank(Ainv, U, C, V, diag=False):
    """
    Invert the matrix (A+UCV) where A^{-1} is known and C is lower rank than A

    Let N be rank of A and K be rank of C where K << N

    Then we can write the inverse,
    (A+UCV)^{-1} = A^{-1} - A^{-1}U (C^{-1}+VA^{-1}U)^{-1} VA^{-1}

    :param Ainv: NxN matrix A^{-1}
    :param U: NxK matrix
    :param C: KxK invertible matrix
    :param V: KxN matrix
    :return:
    """
    N,K = U.shape
    Cinv = inv(C)
    if diag:
        assert Ainv.shape == (N,)
        tmp1 = einsum('ij,j,jk->ik', V, Ainv, U)
        tmp2 = einsum('ij,j->ij', V, Ainv)
        tmp3 = solve(Cinv + tmp1, tmp2)
        # tmp4 = -U.dot(tmp3)
        tmp4 = -einsum('ij,jk->ik', U, tmp3)
        tmp4[diag_indices(N)] += 1
        return einsum('i,ij->ij', Ainv, tmp4)

    else:
        tmp = solve(Cinv + V.dot(Ainv).dot(U), V.dot(Ainv))
        return Ainv - Ainv.dot(U).dot(tmp)
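
A quick numeric check of the Woodbury identity the function implements, assuming invert_low_rank above is in scope:

import numpy as np
from numpy.linalg import inv

N, K = 5, 2
rng = np.random.default_rng(0)
A = np.diag(rng.uniform(1.0, 2.0, N))   # well-conditioned base matrix
U = rng.standard_normal((N, K))
C = np.eye(K)
V = rng.standard_normal((K, N))

assert np.allclose(inv(A + U @ C @ V),
                   invert_low_rank(inv(A), U, C, V))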
Example #28
    def b_operator(self, P):
        r"""
        The B operator, mapping P into

        .. math::

            B(P) := R - beta^2 A'PB(Q + beta B'PB)^{-1}B'PA + beta A'PA

        and also returning

        .. math::

            F := (Q + beta B'PB)^{-1} beta B'PA

        Parameters
        ----------
        P : array_like(float, ndim=2)
            A matrix that should be n x n

        Returns
        -------
        F : array_like(float, ndim=2)
            The F matrix as defined above
        new_p : array_like(float, ndim=2)
            The matrix P after applying the B operator

        """
        A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
        S1 = Q + beta * dot(B.T, dot(P, B))
        S2 = beta * dot(B.T, dot(P, A))
        S3 = beta * dot(A.T, dot(P, A))
        F = solve(S1, S2)
        new_P = R - dot(S2.T, solve(S1, S2)) + S3

        return F, new_P
Example #29
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    if copy:
        K = K.copy()

    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]

        try:
            # Note: we must use overwrite_a=False in order to be able to
            #       use the fall-back solution below in case a LinAlgError
            #       is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]

        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]

        if has_sw:
            dual_coef *= sw[:, np.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples])

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            K.flat[::n_samples + 1] += current_alpha

            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()

            K.flat[::n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[np.newaxis, :]

        return dual_coefs.T
Example #30
def LU_solve(A,rhs,p):
   # Solve the regularized normal equations (A^T A + p I) x = rhs via LU
   (M,N) = A.shape
   C = dot(A.T,A) + p*eye(N)
   P,L,U = sp.linalg.lu(C)
   y = linalg.solve(dot(P,L),rhs)
   x = linalg.solve(U,y)
   return x
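
When the same matrix is reused for several right-hand sides, SciPy's lu_factor/lu_solve pair avoids refactorizing. A sketch of the same regularized normal-equations solve in that style (an alternative, not the original author's code):

import numpy as np
from scipy.linalg import lu_factor, lu_solve

def LU_solve_factored(A, rhs, p):
    M, N = A.shape
    C = A.T @ A + p * np.eye(N)
    lu, piv = lu_factor(C)            # factor once ...
    return lu_solve((lu, piv), rhs)   # ... then solve cheaply per rhs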
Example #31
def _woodbury_algorithm(A, ur, ll, b, k):
    '''
    Solve a cyclic banded linear system with upper right
    and lower blocks of size ``(k-1) / 2`` using
    the Woodbury formula
    
    Parameters
    ----------
    A : 2-D array, shape(k, n)
        Matrix of diagonals of original matrix(see 
        ``solve_banded`` documentation).
    ur : 2-D array, shape(bs, bs)
        Upper right block matrix.
    ll : 2-D array, shape(bs, bs)
        Lower left block matrix.
    b : 1-D array, shape(n,)
        Vector of constant terms of the system of linear equations.
    k : int
        B-spline degree.
        
    Returns
    -------
    c : 1-D array, shape(n,)
        Solution of the original system of linear equations.
        
    Notes
    -----
    This algorithm works only for systems with banded matrix A plus
    a correction term U @ V.T, where the matrix U @ V.T gives upper right
    and lower left block of A
    The system is solved with the following steps:
        1.  New systems of linear equations are constructed:
            A @ z_i = u_i,
            u_i - column vector of U,
            i = 1, ..., k - 1
        2.  Matrix Z is formed from vectors z_i:
            Z = [ z_1 | z_2 | ... | z_{k - 1} ]
        3.  Matrix H = (I + V.T @ Z)^{-1}
        4.  The system A' @ y = b is solved
        5.  x = y - Z @ (H @ V.T @ y)
    Also, ``n`` should be greater than ``k``, otherwise corner block
    elements will intersect with diagonals.

    Examples
    --------
    Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).
    The matrix of a system:       U:          V:
      x  x  x  *  *  a  b         a b 0 0     0 0 1 0
      x  x  x  x  *  *  c         0 c 0 0     0 0 0 1
      x  x  x  x  x  *  *         0 0 0 0     0 0 0 0
      *  x  x  x  x  x  *         0 0 0 0     0 0 0 0
      *  *  x  x  x  x  x         0 0 0 0     0 0 0 0
      d  *  *  x  x  x  x         0 0 d 0     1 0 0 0
      e  f  *  *  x  x  x         0 0 e f     0 1 0 0

    References
    ----------
    .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling
           and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3

    '''
    k_mod = k - k % 2
    bs = int((k - 1) / 2) + (k + 1) % 2

    n = A.shape[1] + 1
    U = np.zeros((n - 1, k_mod))
    VT = np.zeros((k_mod, n - 1))  # V transpose

    # upper right block
    U[:bs, :bs] = ur
    VT[np.arange(bs), np.arange(bs) - bs] = 1

    # lower left block
    U[-bs:, -bs:] = ll
    VT[np.arange(bs) - bs, np.arange(bs)] = 1

    Z = solve_banded((bs, bs), A, U)

    H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))

    y = solve_banded((bs, bs), A, b)
    c = y - Z @ (H @ (VT @ y))

    return c
Example #32
    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
Example #33
def cheby_bvp_test(xa,
                   xb,
                   pts,
                   aderiv,
                   ideriv,
                   bderiv,
                   va,
                   vb,
                   F,
                   S,
                   xextra=None):
    """Test the convergence of the chebyshev spectral solver for a boundary
    value problem
    args:
        xa - float, value of left (lower) domain boundary
        xb - float, value of right (higher) domain boundary
        pts - iterable, different numbers of points to use
        aderiv - int, order of derivative for left boundary condition
        ideriv - int, order of derivative for internal nodes
                 (must be 1 or 2)
        bderiv - int, order of derivative for right boundary condition
        va - value for the left boundary
        vb - value for the right boundary
        F - solution function
        S - source function (whichever derivative of F)
    optional args:
        xextra - extra points within the domain to include as
                 collocation points
    returns:
        err - maximum relative error for each of the numerical solutions
        xs - array of grid points for each solution
        qexs - array of exact solution values for each solution
        qsps - array of numerical solution values for each solution"""

    #number of resolutions to test
    L = len(pts)
    #exact and spectral solution arrays
    qexs, qsps = [], []
    #grids
    xs = []
    #max relative error
    err = np.empty((L, ))
    #do the tests
    for i in range(L):
        #set up the grid/solver
        xhat, theta, x, A = cheby_bvp_setup(xa,
                                            xb,
                                            pts[i],
                                            aderiv,
                                            ideriv,
                                            bderiv,
                                            xextra=xextra)
        #create the source array
        b = np.empty((len(x), ))
        b[0] = va
        b[1:-1] = np.array([S(_x) for _x in x[1:-1]])
        b[-1] = vb
        #solve for the coefficients of the Cheb expansion
        coef = solve(A, b)
        #evaluate the expansion
        qsp = cheby_hat_sum(xhat, coef)
        #evaluate the exact solution
        qex = np.array([F(_x) for _x in x])
        #store the error
        err[i] = np.max(np.abs(qsp - qex) / np.abs(qex.max()))
        #store the solution stuff
        qexs.append(qex)
        qsps.append(qsp)
        xs.append(x)

    return (err, xs, qexs, qsps)
Example #34
def test_cantilever_beam(plot=False):
    n = 100

    L = 3  # total size of the beam along x

    # Material Lastrobe Lescalloy
    E = 203.e9  # Pa
    rho = 7.83e3  # kg/m3

    x = np.linspace(0, L, n)
    # path
    y = np.ones_like(x)
    # tapered properties
    b_root = 0.05  # m
    b_tip = b_root  # m
    h_root = 0.05  # m
    h_tip = h_root  # m
    A_root = h_root * b_root
    A_tip = h_tip * b_tip
    Izz_root = b_root * h_root**3 / 12
    Izz_tip = b_tip * h_tip**3 / 12

    # getting nodes
    ncoords = np.vstack((x, y)).T
    nids = 1 + np.arange(ncoords.shape[0])
    nid_pos = dict(zip(nids, np.arange(len(nids))))

    n1s = nids[0:-1]
    n2s = nids[1:]

    K = np.zeros((3 * n, 3 * n))
    M = np.zeros((3 * n, 3 * n))
    beams = []
    for n1, n2 in zip(n1s, n2s):
        pos1 = nid_pos[n1]
        pos2 = nid_pos[n2]
        x1, y1 = ncoords[pos1]
        x2, y2 = ncoords[pos2]
        A1 = A_root + (A_tip - A_root) * x1 * A_tip / (L - 0)
        A2 = A_root + (A_tip - A_root) * x2 * A_tip / (L - 0)
        Izz1 = Izz_root + (Izz_tip - Izz_root) * x1 * Izz_tip / (L - 0)
        Izz2 = Izz_root + (Izz_tip - Izz_root) * x2 * Izz_tip / (L - 0)
        beam = Beam2D()
        beam.n1 = n1
        beam.n2 = n2
        beam.E = E
        beam.rho = rho
        beam.A1, beam.A2 = A1, A2
        beam.Izz1, beam.Izz2 = Izz1, Izz2
        update_K(beam, nid_pos, ncoords, K)
        update_M(beam, nid_pos, M)
        beams.append(beam)

    # applying boundary conditions
    bk = np.zeros(K.shape[0], dtype=bool)  #array to store known DOFs
    check = np.isclose(x, 0.)  # locating node at root
    # clamping at root
    for i in range(DOF):
        bk[i::DOF] = check
    bu = ~bk  # same as np.logical_not, defining unknown DOFs

    # sub-matrices corresponding to unknown DOFs
    Kuu = K[bu, :][:, bu]
    Muu = M[bu, :][:, bu]

    # test
    Fy = 700
    f = np.zeros(K.shape[0])
    f[-2] = Fy
    fu = f[bu]

    # solving
    uu = solve(Kuu, fu)

    # vector u containing displacements for all DOFs
    u = np.zeros(K.shape[0], dtype=float)
    u[bu] = uu

    u = u.reshape(n, -1)

    a = L
    deflection = Fy * a**3 / (3 * E * Izz_root) * (1 + 3 * (L - a) / 2 * a)
    print('Theoretical deflection', deflection)
    print('Numerical deflection', u[-1, 1])
    assert np.isclose(deflection, u[-1, 1])

    if plot:
        # plotting
        import matplotlib
        matplotlib.use('TkAgg')
        import matplotlib.pyplot as plt

        plt.plot(x, y, '-k')
        plt.plot(x + u[:, 0], y + u[:, 1], '--r')
        plt.show()
Example #35
def subsolv(m,n,epsimin,low,upp,alfa,beta,p0,q0,P,Q,a0,a,b,c,d):
    
    """
    This function subsolv solves the MMA subproblem:
             
    minimize SUM[p0j/(uppj-xj) +q0j/(xj-lowj)]+a0*z+SUM[ci*yi+0.5*di*(yi)^2],
    
    subject to SUM[pij/(uppj-xj) + qij/(xj-lowj)] - ai*z - yi <= bi,
        alfaj <=  xj <=  betaj,  yi >= 0,  z >= 0.
           
    Input:  m, n, low, upp, alfa, beta, p0, q0, P, Q, a0, a, b, c, d.
    Output: xmma,ymma,zmma, slack variables and Lagrange multipliers.
    """
    
    een = np.ones((n,1))
    eem = np.ones((m,1))
    epsi = 1
    epsvecn = epsi*een
    epsvecm = epsi*eem
    x = 0.5*(alfa+beta)
    y = eem.copy()
    z = np.array([[1.0]])
    lam = eem.copy()
    xsi = een/(x-alfa)
    xsi = np.maximum(xsi,een)
    eta = een/(beta-x)
    eta = np.maximum(eta,een)
    mu = np.maximum(eem,0.5*c)
    zet = np.array([[1.0]])
    s = eem.copy()
    itera = 0
    # Start while epsi>epsimin
    while epsi > epsimin:
        epsvecn = epsi*een
        epsvecm = epsi*eem
        ux1 = upp-x
        xl1 = x-low
        ux2 = ux1*ux1
        xl2 = xl1*xl1
        uxinv1 = een/ux1
        xlinv1 = een/xl1
        plam = p0+np.dot(P.T,lam)
        qlam = q0+np.dot(Q.T,lam)
        gvec = np.dot(P,uxinv1)+np.dot(Q,xlinv1)
        dpsidx = plam/ux2-qlam/xl2
        rex = dpsidx-xsi+eta
        rey = c+d*y-mu-lam
        rez = a0-zet-np.dot(a.T,lam)
        relam = gvec-a*z-y+s-b
        rexsi = xsi*(x-alfa)-epsvecn
        reeta = eta*(beta-x)-epsvecn
        remu = mu*y-epsvecm
        rezet = zet*z-epsi
        res = lam*s-epsvecm
        residu1 = np.concatenate((rex, rey, rez), axis = 0)
        residu2 = np.concatenate((relam, rexsi, reeta, remu, rezet, res), axis = 0)
        residu = np.concatenate((residu1, residu2), axis = 0)
        residunorm = np.sqrt((np.dot(residu.T,residu)).item())
        residumax = np.max(np.abs(residu))
        ittt = 0
        # Start while (residumax>0.9*epsi) and (ittt<200)
        while (residumax > 0.9*epsi) and (ittt < 200):
            ittt = ittt+1
            itera = itera+1
            ux1 = upp-x
            xl1 = x-low
            ux2 = ux1*ux1
            xl2 = xl1*xl1
            ux3 = ux1*ux2
            xl3 = xl1*xl2
            uxinv1 = een/ux1
            xlinv1 = een/xl1
            uxinv2 = een/ux2
            xlinv2 = een/xl2
            plam = p0+np.dot(P.T,lam)
            qlam = q0+np.dot(Q.T,lam)
            gvec = np.dot(P,uxinv1)+np.dot(Q,xlinv1)
            GG = (diags(uxinv2.flatten(),0).dot(P.T)).T-(diags\
                                     (xlinv2.flatten(),0).dot(Q.T)).T 		
            dpsidx = plam/ux2-qlam/xl2
            delx = dpsidx-epsvecn/(x-alfa)+epsvecn/(beta-x)
            dely = c+d*y-lam-epsvecm/y
            delz = a0-np.dot(a.T,lam)-epsi/z
            dellam = gvec-a*z-y-b+epsvecm/lam
            diagx = plam/ux3+qlam/xl3
            diagx = 2*diagx+xsi/(x-alfa)+eta/(beta-x)
            diagxinv = een/diagx
            diagy = d+mu/y
            diagyinv = eem/diagy
            diaglam = s/lam
            diaglamyi = diaglam+diagyinv
            # Start if m<n
            if m < n:
                blam = dellam+dely/diagy-np.dot(GG,(delx/diagx))
                bb = np.concatenate((blam,delz),axis = 0)
                Alam = np.asarray(diags(diaglamyi.flatten(),0) \
                    +(diags(diagxinv.flatten(),0).dot(GG.T).T).dot(GG.T))
                AAr1 = np.concatenate((Alam,a),axis = 1)
                AAr2 = np.concatenate((a,-zet/z),axis = 0).T
                AA = np.concatenate((AAr1,AAr2),axis = 0)
                solut = solve(AA,bb)
                dlam = solut[0:m]
                dz = solut[m:m+1]
                dx = -delx/diagx-np.dot(GG.T,dlam)/diagx
            else:
                diaglamyiinv = eem/diaglamyi
                dellamyi = dellam+dely/diagy
                Axx = np.asarray(diags(diagx.flatten(),0) \
                    +(diags(diaglamyiinv.flatten(),0).dot(GG).T).dot(GG)) 
                azz = zet/z+np.dot(a.T,(a/diaglamyi))
                axz = np.dot(-GG.T,(a/diaglamyi))
                bx = delx+np.dot(GG.T,(dellamyi/diaglamyi))
                bz = delz-np.dot(a.T,(dellamyi/diaglamyi))
                AAr1 = np.concatenate((Axx,axz),axis = 1)
                AAr2 = np.concatenate((axz.T,azz),axis = 1)
                AA = np.concatenate((AAr1,AAr2),axis = 0)
                bb = np.concatenate((-bx,-bz),axis = 0)
                solut = solve(AA,bb)
                dx = solut[0:n]
                dz = solut[n:n+1]
                dlam = np.dot(GG,dx)/diaglamyi-dz*(a/diaglamyi)\
                    +dellamyi/diaglamyi
                # End if m<n
            dy = -dely/diagy+dlam/diagy
            dxsi = -xsi+epsvecn/(x-alfa)-(xsi*dx)/(x-alfa)
            deta = -eta+epsvecn/(beta-x)+(eta*dx)/(beta-x)
            dmu = -mu+epsvecm/y-(mu*dy)/y
            dzet = -zet+epsi/z-zet*dz/z
            ds = -s+epsvecm/lam-(s*dlam)/lam
            xx = np.concatenate((y,z,lam,xsi,eta,mu,zet,s),axis = 0)
            dxx = np.concatenate((dy,dz,dlam,dxsi,deta,dmu,dzet,ds),axis = 0)
            #
            stepxx = -1.01*dxx/xx
            stmxx = np.max(stepxx)
            stepalfa = -1.01*dx/(x-alfa)
            stmalfa = np.max(stepalfa)
            stepbeta = 1.01*dx/(beta-x)
            stmbeta = np.max(stepbeta)
            stmalbe = max(stmalfa,stmbeta)
            stmalbexx = max(stmalbe,stmxx)
            stminv = max(stmalbexx,1.0)
            steg = 1.0/stminv
            #
            xold = x.copy()
            yold = y.copy()
            zold = z.copy()
            lamold = lam.copy()
            xsiold = xsi.copy()
            etaold = eta.copy()
            muold = mu.copy()
            zetold = zet.copy()
            sold = s.copy()
            #
            itto = 0
            resinew = 2*residunorm
            # Start: while (resinew>residunorm) and (itto<50)
            while (resinew > residunorm) and (itto < 50):
                itto = itto+1
                x = xold+steg*dx
                y = yold+steg*dy
                z = zold+steg*dz
                lam = lamold+steg*dlam
                xsi = xsiold+steg*dxsi
                eta = etaold+steg*deta
                mu = muold+steg*dmu
                zet = zetold+steg*dzet
                s = sold+steg*ds
                ux1 = upp-x
                xl1 = x-low
                ux2 = ux1*ux1
                xl2 = xl1*xl1
                uxinv1 = een/ux1
                xlinv1 = een/xl1
                plam = p0+np.dot(P.T,lam) 
                qlam = q0+np.dot(Q.T,lam)
                gvec = np.dot(P,uxinv1)+np.dot(Q,xlinv1)
                dpsidx = plam/ux2-qlam/xl2 
                rex = dpsidx-xsi+eta
                rey = c+d*y-mu-lam
                rez = a0-zet-np.dot(a.T,lam)
                relam = gvec-np.dot(a,z)-y+s-b
                rexsi = xsi*(x-alfa)-epsvecn
                reeta = eta*(beta-x)-epsvecn
                remu = mu*y-epsvecm
                rezet = np.dot(zet,z)-epsi
                res = lam*s-epsvecm
                residu1 = np.concatenate((rex,rey,rez),axis = 0)
                residu2 = np.concatenate((relam,rexsi,reeta,remu,rezet,res), \
                                         axis = 0)
                residu = np.concatenate((residu1,residu2),axis = 0)
                resinew = np.sqrt(np.dot(residu.T,residu))
                steg = steg/2
                # End: while (resinew>residunorm) and (itto<50)
            residunorm = resinew.copy()
            residumax = max(abs(residu))
            steg = 2*steg
            # End: while (residumax>0.9*epsi) and (ittt<200)
        epsi = 0.1*epsi
        # End: while epsi>epsimin
    xmma = x.copy()
    ymma = y.copy()
    zmma = z.copy()
    lamma = lam
    xsimma = xsi
    etamma = eta
    mumma = mu
    zetmma = zet
    smma = s
    # Return values
    return xmma,ymma,zmma,lamma,xsimma,etamma,mumma,zetmma,smma
Example #36
def fdm_poisson_1d(N, bcs, pm):
    '''The simplest 1D diffusion implementation.'''

    # Gridsize
    h = 1.0/N

    # Define grid points
    x = np.linspace(0, 1, N+1)

    # Define zero matrix A of right size and insert
    # non zero entries
    A = np.zeros((N+1, N+1))
    
    eps = pm['eps']
    # Define tridiagonal part of A
    for i in range(1, N):
        A[i, i-1] = eps + h
        A[i, i] = -2*eps - h
        A[i, i+1] = eps
    
    print("Hello Again")
    # Compute rhs for f = sin(2 pi x)
    # F = -h**2*np.sin(2*np.pi*x)
    F = -h**2*x

    # Now adapt matrix and rhs according to bc data
    # Left boundary
    bc0 = bcs[0]
    if bc0[0] == "D":
        A[0, 0] = 1
        F[0] = bc0[1]
    elif bc0[0] == "N":
        # Apply a first order difference operator
        A[0, 0] = 1
        A[0, 1] = -1
        F[0] = h*bc0[1]
        # Should we add an improved variant?

    # Right boundary
    bc1 = bcs[1]
    if bc1[0] == "D":
        A[N, N] = 1
        F[N] = bc1[1]
    elif bc1[0] == "N":
        # Apply a first order difference operator
        A[N, N] = 1
        A[N, N-1] = -1
        F[N] = h*bc1[1]

    # Solve AU = F
    # (We will introduce a sparse solver when we look at 2D problems)
    U = la.solve(A, F)

    # Compute real solution and error at grid points
    x_hr = np.linspace(0, 1, N+1)
    u = 1/(2*np.pi)**2*np.sin(2*np.pi*x_hr)

    err = np.abs(u - U)
    print("Error |U - u|")
    print(err)
    print("Error max |U - u| ")
    print(err.max())

    # Clear figure first
    plt.clf()

    # Plot solution on a high resolution grid
    # plt.plot(x_hr, u, "+-b")

    # Plot discrete solution on chosen discretization grid
    plt.plot(x, U, "x-r")

    # Show figure (for non inline plotting)
    plt.show()
Example #37
    def time_solve(self, size, contig, module):
        if module == 'numpy':
            nl.solve(self.a, self.b)
        else:
            sl.solve(self.a, self.b)
Example #38
import numpy as np
from scipy import linalg

A = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
b = np.array([2, 4, -1])

x = linalg.solve(A, b)

print(x)
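
The result (here x = [2, -2, 9]) can be verified by substituting back into the system:

assert np.allclose(A @ x, b)   # residual A @ x - b is ~0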
Example #39
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        return J()
    elif isinstance(J, np.ndarray):
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')

        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v: solve(J, v),
                        rsolve=lambda v: solve(J.conj().T, v),
                        dtype=J.dtype,
                        shape=J.shape)
    elif scipy.sparse.isspmatrix(J):
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J * v,
                        rmatvec=lambda v: J.conj().T * v,
                        solve=lambda v: spsolve(J, v),
                        rsolve=lambda v: spsolve(J.conj().T, v),
                        dtype=J.dtype,
                        shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            def update(self, x, F):
                self.x = x

            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")

            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return m * v
                else:
                    raise ValueError("Unknown matrix type")

            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")

            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return m.conj().T * v
                else:
                    raise ValueError("Unknown matrix type")

        return Jac()
    elif isinstance(J, str):
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
Example #40
def _update_code_slow(X, subset, alpha, learning_rate,
                      offset,
                      Q, stat,
                      impute, debug):
    """Compute code for a mini-batch and update algorithm statistics accordingly

    Parameters
    ----------
    X: ndarray, (batch_size, len_subset)
        Mini-batch of masked data to perform the update from
    subset: ndarray (len_subset),
        Mask used on X
    alpha: float,
        Regularization of the code (ridge penalty)
    learning_rate: float in [0.5, 1],
        Controls the sequence of weights in
         the update of the surrogate function
    offset: float,
        Offset in the sequence of weights in
         the update of the surrogate function
    Q: ndarray (n_components, n_features):
        Dictionary to perform ridge regression
    stat: DictMFStats,
        Statistics kept by the algorithm, to be updated by the function
    impute: boolean,
        Online update of the Gram matrix (Experimental)
    debug: boolean,
        Keeps track of the surrogate loss function
    Returns
    -------
    P: ndarray,
        Code for the mini-batch X
    """
    batch_size, n_cols = X.shape
    n_components = Q.shape[0]

    Q_subset = Q[:, subset]

    w_A, w_B = _get_weights(subset, stat.counter, batch_size,
                            learning_rate, offset)

    stat.counter[0] += batch_size
    stat.counter[subset + 1] += batch_size

    if impute:
        stat.T[:, 0] -= stat.T[:, subset + 1].sum(axis=1)
        Qx = stat.T[:, 0][:, np.newaxis]
        Qx += Q_subset.dot(X.T)
        stat.T[:, subset + 1] = Q_subset * X.mean(axis=0)
        stat.T[:, 0] += stat.T[:, subset + 1].sum(axis=1)
        G = stat.G.copy()  # assume stat.G holds the running Gram matrix; copy since solve() below overwrites A
        G.flat[::n_components + 1] += alpha
    else:
        Qx = np.dot(Q_subset, X.T)
        G = np.dot(Q_subset, Q_subset.T)
        G.flat[::n_components + 1] += alpha
    P = linalg.solve(G, Qx, sym_pos=True, overwrite_a=True, check_finite=False)

    if debug:
        dict_loss = .5 * np.sum(Q.dot(Q.T) * stat.A) - np.sum(Q * stat.B)
        stat.loss_indep *= (1 - w_A)
        stat.loss_indep += (.5 * np.sum(X ** 2) +
                            alpha * np.sum(P ** 2)) * w_A / batch_size
        stat.loss[stat.n_iter] = stat.loss_indep + dict_loss

    stat.A *= 1 - w_A
    stat.A += P.dot(P.T) * w_A / batch_size
    stat.B[:, subset] *= 1 - w_B
    stat.B[:, subset] += P.dot(X) * w_B / batch_size

    return P.T
Example #41
    def fit(self, X, t):
        Xtil = np.c_[np.ones(X.shape[0]), X]
        A = np.dot(Xtil.T, Xtil)
        b = np.dot(Xtil.T, t)
        self.w_ = linalg.solve(A, b)
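
This fit solves the normal equations Xtil^T Xtil w = Xtil^T t for a bias-augmented design matrix. A standalone sketch of the same computation on synthetic data (made-up values, using scipy.linalg as in the method):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
X = rng.uniform(0, 1, (50, 1))
t = 3.0 * X[:, 0] + 1.0 + 0.1 * rng.standard_normal(50)

Xtil = np.c_[np.ones(X.shape[0]), X]   # prepend a bias column
w = linalg.solve(Xtil.T @ Xtil, Xtil.T @ t)
# w comes out close to [1.0, 3.0]  (intercept, slope)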
Example #42
def train(modelState, X, W, plan):
    '''
    Creates a new query state object for a topic model based on side-information. 
    This contains all those estimated parameters that are specific to the actual
    data being queried - this must be used in conjunction with a model state.
    
    The parameters are
    
    modelState - the model state with all the model parameters
    X          - the D x F matrix of side information vectors
    W          - the D x V matrix of word **count** vectors.
    iterations - how long to iterate for
    epsilon    - currently ignored, in future, allows us to stop early.
    logInterval  - the interval between iterations where we calculate and display
                   the log-likelihood bound
    plotInterval - the interval between iterations where we display the log-likelihood
                   bound values calculated at each log-interval
    fastButInaccurate - if true, we may use a pseudo-inverse instead of an inverse
                        when solving for Y when the true inverse is unavailable.
    
    This returns a tuple of new model-state and query-state. The latter object will
    contain X and W and also
    
    s      - A D-dimensional vector describing the offset in our bound on the true value of ln sum_k e^theta_dk 
    lxi    - A DxK matrix used in the above bound, containing the negative Jakkola function applied to the 
             quadratic term xi
    lambda - the topics we've inferred for the current batch of documents
    nu     - the variance of topics we've inferred (independent)
    '''
    # Unpack the model state tuple for ease of use and maybe speed improvements
    K, Q, F, P, T, A, varA, Y, omY, sigY, sigT, U, V, vocab, sigmaSq, alphaSq, kappaSq, tauSq = modelState.K, modelState.Q, modelState.F, modelState.P, modelState.T, modelState.A, modelState.varA, modelState.Y, modelState.omY, modelState.sigY, modelState.sigT, modelState.U, modelState.V, modelState.vocab, modelState.topicVar, modelState.featVar, modelState.lowTopicVar, modelState.lowFeatVar
    iterations, epsilon, logCount, plot, plotFile, plotIncremental, fastButInaccurate = plan.iterations, plan.epsilon, plan.logFrequency, plan.plot, plan.plotFile, plan.plotIncremental, plan.fastButInaccurate

    mu0 = 0.0001

    if W.dtype.kind == 'i':  # for the sparseScalorQuotientOfDot() method to work
        W = W.astype(DTYPE)

    # Get ready to plot the evolution of the likelihood, with multiplicative updates (e.g. 1, 2, 4, 8, 16, 32, ...)
    if logCount > 0:
        multiStepSize = np.power(iterations, 1. / logCount)
        logIter = 1
        elbos = []
        likes = []
        iters = []
    else:
        logIter = iterations + 1
    lastVarBoundValue = -sys.float_info.max

    # We'll need the total word count per doc, and total count of docs
    docLen = np.squeeze(
        np.asarray(W.sum(axis=1))
    )  # Force to a one-dimensional array for np.newaxis trick to work
    D = len(docLen)

    # No need to recompute this every time
    if X.dtype != DTYPE:
        X = X.astype(DTYPE)
    XTX = X.T.dot(X)

    # Identity matrices that occur
    I_P = ssp.eye(P, P, 0, DTYPE)
    I_Q = ssp.eye(Q, Q, 0, DTYPE)
    I_QP = ssp.eye(Q * P, Q * P, 0, DTYPE)
    I_F = ssp.eye(
        F, F, 0, DTYPE, "csc"
    )  # X is CSR, XTX is consequently CSC, sparse inverse requires CSC
    T_QP = sp_vec_trans_matrix(Y.shape)

    # Assign initial values to the query parameters
    expLmda = np.exp(rd.random((D, K)).astype(DTYPE))
    nu = np.ones((D, K), DTYPE)
    s = np.zeros((D, ), DTYPE)
    lxi = negJakkola(np.ones((D, K), DTYPE))

    # If we don't bother optimising either tau or sigma we can just do all this here once only
    tsq = tauSq
    ssq = sigmaSq
    overTsq = 1. / tsq
    overSsq = 1. / ssq
    overTsqSsq = 1. / (tsq * ssq)

    # TODO the inverse being almost always dense means that it might
    # be faster to convert to dense and use the normal solver, despite
    # the size constraints.
    #    varA = 1./K * sla.inv (overTsq * I_F + overSsq * XTX)
    tI_sXTX = (overTsq * I_F + overSsq * XTX).todense()
    omA = la.inv(tI_sXTX)
    scaledWordCounts = W.copy()

    for iteration in range(iterations):

        # =============================================================
        # E-Step
        #   Model dists are q(Theta|A;Lambda;nu) q(A|Y) q(Y) and q(Z)....
        #   Where lambda is the posterior mean of theta.
        # =============================================================

        # Y, sigY, omY
        #
        # If U'U is invertible, use inverse to convert Y to a Sylvester eqn
        # which has a much, much faster solver. Recall update for Y is of the form
        #   Y + AYB = C where A = U'U, B = V'V and C=U'AV
        #
        VTV = V.T.dot(V)
        UTU = U.T.dot(U)

        sigY = la.inv(I_QP + overTsqSsq * np.kron(VTV, UTU))
        _quickPrintElbo("E-Step: q(Y) [sigY]", iteration, X, W, K, Q, F, P, T,
                        A, omA, Y, omY, sigY, sigT, U, V, vocab, tau, sigma,
                        expLmda, nu, lxi, s, docLen)

        Y = mu0 + np.reshape(overTsqSsq * sigY.dot(vec(U.T.dot(A).dot(V))),
                             (Q, P),
                             order='F')
        _quickPrintElbo("E-Step: q(Y) [Mean]", iteration, X, W, K, Q, F, P, T,
                        A, omA, Y, omY, sigY, sigT, U, V, vocab, tau, sigma,
                        expLmda, nu, lxi, s, docLen)

        # A
        #
        # So it's normally A = (UYV' + L'X) omA with omA = inv(t*I_F + s*XTX)
        #   so A inv(omA) = UYV' + L'X
        #   so inv(omA)' A' = VY'U' + X'L
        # at which point we can use a built-in solve
        #
        #       A = (overTsq * U.dot(Y).dot(V.T) + X.T.dot(expLmda).T).dot(omA)
        lmda = np.log(expLmda, out=expLmda)
        A = la.solve(tI_sXTX, X.T.dot(lmda) + V.dot(Y.T).dot(U.T)).T
        np.exp(expLmda, out=expLmda)
        _quickPrintElbo("E-Step: q(A)", iteration, X, W, K, Q, F, P, T, A, omA,
                        Y, omY, sigY, sigT, U, V, vocab, tau, sigma, expLmda,
                        nu, lxi, s, docLen)

        # lmda_dk, nu_dk, s_d, and xi_dk
        #
        XAT = X.dot(A.T)
        query (VbSideTopicModelState (K, Q, F, P, T, A, omA, Y, omY, sigY, sigT, U, V, vocab, tauSq, sigmaSq), \
               X, W, \
               VbSideTopicQueryState(expLmda, nu, lxi, s, docLen), \
               scaledWordCounts=scaledWordCounts, \
               XAT = XAT, \
               iterations=10, \
               logInterval = 0, plotInterval = 0)

        # =============================================================
        # M-Step
        #    Parameters for the softmax bound: lxi and s
        #    The projection used for A: U and V
        #    The vocabulary : vocab
        #    The variances: tau, sigma
        # =============================================================

        # U
        #
        try:
            U = A.dot(V).dot(Y.T).dot (la.inv( \
                    Y.dot(V.T).dot(V).dot(Y.T) \
                    + (vec_transpose_csr(T_QP, P).T.dot(np.kron(I_QP, VTV)).dot(vec_transpose(T_QP.dot(sigY), P))).T
            ))
        except np.linalg.linalg.LinAlgError as e:
            print(str(e))
            print("Ruh-ro")

        # order of last line above reversed to handle numpy bug preventing dot product from dense to sparse
        _quickPrintElbo("M-Step: U", iteration, X, W, K, Q, F, P, T, A, omA, Y,
                        omY, sigY, sigT, U, V, vocab, tau, sigma, expLmda, nu,
                        lxi, s, docLen)

        # V
        #
        # Temporarily this requires that we re-order sigY until I've implemented a fortran order
        # vec transpose in Cython
        sigY = sigY.T.copy()
        V = A.T.dot(U).dot(Y).dot (la.inv ( \
            Y.T.dot(U.T).dot(U).dot(Y) \
            + vec_transpose (sigY, Q).T.dot(np.kron(I_QP, UTU).dot(vec_transpose(I_QP, Q))) \
        ))
        _quickPrintElbo("M-Step: V", iteration, X, W, K, Q, F, P, T, A, omA, Y,
                        omY, sigY, sigT, U, V, vocab, tau, sigma, expLmda, nu,
                        lxi, s, docLen)

        # vocab
        #
        factor = (scaledWordCounts.T.dot(expLmda)
                  ).T  # Gets materialized as a dense matrix...
        vocab *= factor
        normalizerows_ip(vocab)
        _quickPrintElbo("M-Step: \u03A6", iteration, X, W, K, Q, F, P, T, A,
                        omA, Y, omY, sigY, sigT, U, V, vocab, tau, sigma,
                        expLmda, nu, lxi, s, docLen)

        # =============================================================
        # Handle logging of variational bound, likelihood, etc.
        # =============================================================
        if iteration == logIter:
            modelState = VbSideTopicModelState(K, Q, F, P, T, A, omA, Y, omY,
                                               sigY, sigT, U, V, vocab,
                                               sigmaSq, alphaSq, kappaSq,
                                               tauSq)
            queryState = VbSideTopicQueryState(expLmda, nu, lxi, s, docLen)

            elbo = varBound(modelState, queryState, X, W, None, XAT, XTX)
            likely = log_likelihood(
                modelState, X, W,
                queryState)  #recons_error(modelState, X, W, queryState)

            elbos.append(elbo)
            iters.append(iteration)
            likes.append(likely)
            print("Iteration %5d  ELBO %15f   Log-Likelihood %15f" %
                  (iteration, elbo, likely))

            logIter = min(np.ceil(logIter * multiStepSize), iterations - 1)

            if elbo - lastVarBoundValue < epsilon:
                break
            else:
                lastVarBoundValue = elbo

            if plot and plotIncremental:
                plot_bound(plotFile + "-iter-" + str(iteration),
                           np.array(iters), np.array(elbos), np.array(likes))

    # Right before we end, plot the evolution of the bound and likelihood
    # if we've been asked to do so.
    if plot:
        plot_bound(plotFile, iters, elbos, likes)


    return VbSideTopicModelState (K, Q, F, P, T, A, omA, Y, omY, sigY, sigT, U, V, vocab, sigmaSq, alphaSq, kappaSq, tauSq), \
           VbSideTopicQueryState (expLmda, nu, lxi, s, docLen)
Beispiel #43
0
def ode2es(L, rho0):
    """Creates an exponential series that describes the time evolution for the
    initial density matrix (or state vector) `rho0`, given the Liouvillian
    (or Hamiltonian) `L`.

    Parameters
    ----------
    L : qobj
        Liouvillian of the system.

    rho0 : qobj
        Initial state vector or density matrix.

    Returns
    -------
    eseries : :class:`qutip.eseries`
        ``eseries`` representation of the system dynamics.

    """
    if issuper(L):
        # check initial state
        if isket(rho0):
            # Got a wave function as initial state: convert to density matrix.
            rho0 = rho0 * rho0.dag()
        # check if state is below error threshold
        if abs(rho0.full()).sum() < 1e-10 + 1e-24:
            # enforce zero operator
            return eseries(qzero(rho0.dims[0]))
        w, v = L.eigenstates()
        v = np.hstack([ket.full() for ket in v])
        # w[i]   = eigenvalue i
        # v[:,i] = eigenvector i

        rlen = np.prod(rho0.shape)
        r0 = mat2vec(rho0.full())
        v0 = la.solve(v, r0)
        vv = v * sp.spdiags(v0.T, 0, rlen, rlen)

        out = None
        for i in range(rlen):
            qo = Qobj(vec2mat(vv[:, i]), dims=rho0.dims, shape=rho0.shape)
            if out:
                out += eseries(qo, w[i])
            else:
                out = eseries(qo, w[i])

    elif isoper(L):

        if not isket(rho0):
            raise TypeError('Second argument must be a ket if first ' +
                            'is a Hamiltonian.')

        # check if state is below error threshold
        if abs(rho0.full()).sum() < 1e-5 + 1e-20:
            # enforce zero operator
            dims = rho0.dims
            return eseries(
                Qobj(sp.csr_matrix((dims[0][0], dims[1][0]), dtype=complex)))

        w, v = L.eigenstates()
        v = np.hstack([ket.full() for ket in v])
        # w[i]   = eigenvalue i
        # v[:,i] = eigenvector i

        rlen = np.prod(rho0.shape)
        r0 = rho0.full()
        v0 = la.solve(v, r0)
        vv = v * sp.spdiags(v0.T, 0, rlen, rlen)

        out = None
        for i in range(rlen):
            qo = Qobj(np.matrix(vv[:, i]).T, dims=rho0.dims, shape=rho0.shape)
            if out:
                out += eseries(qo, -1.0j * w[i])
            else:
                out = eseries(qo, -1.0j * w[i])

    else:
        raise TypeError('First argument must be a Hamiltonian or Liouvillian.')

    return estidy(out)
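The heart of ode2es is spectral: expand the initial state in the eigenbasis of the generator, after which every coefficient evolves as a pure exponential. A NumPy-only sketch of that idea with a toy 2x2 generator (not the qutip API):

import numpy as np
from scipy import linalg as la

L = np.array([[-1.0, 0.5],
              [1.0, -0.5]])     # toy generator: d/dt r = L r (columns sum to 0)
r0 = np.array([1.0, 0.0])

w, v = np.linalg.eig(L)         # L v[:, i] = w[i] v[:, i]
c0 = la.solve(v, r0)            # coefficients of r0 in the eigenbasis

def r(t):
    # r(t) = sum_i c0[i] * exp(w[i] * t) * v[:, i]
    return (v * (c0 * np.exp(w * t))).sum(axis=1)

print(r(0.0))    # recovers r0
print(r(50.0))   # relaxes onto the stationary (w = 0) eigenvector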
Beispiel #44
0
import numpy as np
from scipy.linalg import solve
import matplotlib.pyplot as plt

X = np.array([0, 1, 2, 3, 4])
Y = np.array([0.98, -3.01, -6.99, -11.01, -15.0])

# Compute the entries of the normal-equation matrices
a11 = np.sum(X**2)
a12 = np.sum(X)
a22 = len(X)
b1 = np.sum(X * Y)
b2 = np.sum(Y)

# Assemble and solve the system
A = np.array([[a11, a12], [a12, a22]])

B = np.array([b1, b2])

a = solve(A, B)
print(a)

# define the function g(x) for plotting
g = lambda x: a[0] * x + a[1]

# create (x, y) points along the fitted line
Xr = np.arange(-1, 5, 0.5)

# Plot the data points and the fitted line
plt.plot(X, Y, ".", Xr, g(Xr), "-")
plt.grid()
plt.show()
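The normal-equation solution can be cross-checked against NumPy's built-in least squares, reusing the X and Y defined above; both should agree to rounding:

coef = np.polyfit(X, Y, 1)   # highest degree first: [slope, intercept]
print(coef)                  # should match a from solve(A, B)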
Beispiel #45
0
    def get_fractional_coords(self, cartesian_coords):
        # Fractional coordinates f satisfy uc.T @ f.T = r.T for Cartesian r.
        return la.solve(self.uc.T, np.array(cartesian_coords).T).T
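A round-trip sketch of the same solve with an explicit, made-up unit-cell matrix uc whose rows are the lattice vectors, so Cartesian = frac @ uc and hence uc.T @ frac.T = cart.T:

import numpy as np
from scipy import linalg as la

uc = np.array([[2.0, 0.0, 0.0],
               [0.0, 3.0, 0.0],
               [1.0, 0.0, 4.0]])    # rows are lattice vectors (made up)

cart = np.array([[3.0, 1.5, 2.0]])
frac = la.solve(uc.T, cart.T).T     # same solve as get_fractional_coords
print(frac @ uc)                    # recovers cart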
Beispiel #46
0
    [901, 387, 738, 965, 972, 906],
    [1063, 386, 655, 911, 969, 936],
    [1089, 389, 679, 723, 268, 860],
])
n = []
for i in range(0, 6):
    n.append(sum(m[i]))
s = [[], [], [], [], [], []]
for i in range(0, 6):
    s[i].append(n[i] * a[i])
s = np.array(s)

s1 = np.array([[51775785], [58598322], [48705107], [62332394], [62399316],
               [51069435]])
# print(s)
x = solve(m, s)
t = np.array([[11321], [12223], [15707], [14070], [12152], [11502]])
y = solve(m, s1)

for i in range(0, 6):
    y[i] = trans(str(y[i][0]))
# print(x)
print(y)

plt.ylabel('时段内的平均等待时间/分钟', size=15)  # "average waiting time in the period / minutes"
plt.xlabel('时间段', size=15)  # "time period"
print(m.dot(t))
for i in range(0, 6):
    print(s1[i][0] / sum(m[i]), end=',')

xnew = np.linspace(
Beispiel #47
0
import time as tm
import scipy.linalg as scl

def method_solve(A, b):
    # Solve Ax = b with the dense LAPACK solver, timing the call.
    t51 = tm.time()
    x5 = scl.solve(A, b)
    t52 = tm.time() - t51
    return x5, t52
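A usage sketch on a random, diagonally dominant (hence well-conditioned) system, assuming the tm/scl aliases above:

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((500, 500)) + 500 * np.eye(500)
b = rng.standard_normal(500)
x, elapsed = method_solve(A, b)
print(elapsed, np.allclose(A @ x, b))   # small time, True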
Beispiel #48
0
    def test_condition_pointswise(self):
        """
        Generate samples and a random field by conditioning on pointwise data
        """
        #
        # Initialize Gaussian Random Field
        #
        # Resolution
        max_res = 10
        n = 2**max_res + 1  # size

        # Hurst parameter
        H = 0.5  # Hurst parameter in [0.5,1]

        # Form covariance and precision matrices
        x = np.arange(1, n)
        X, Y = np.meshgrid(x, x)
        K = fbm_cov(X, Y, H)

        # Compute the precision matrix
        I = np.identity(n - 1)
        Q = linalg.solve(K, I)

        # Define mean
        mean = np.random.rand(n - 1, 1)

        # Define Gaussian field
        u_cov = GaussianField(n - 1, mean=mean, K=K, mode='covariance')
        u_prc = GaussianField(n - 1, mean=mean, K=Q, mode='precision')

        # Define generating white noise
        z = u_cov.iid_gauss(n_samples=10)

        u_obs = u_cov.sample(z=z)

        # Index of measured observations
        A = np.arange(0, n - 1, 2)

        # observed quantities
        e = u_obs[A, 0][:, None]
        #print('e shape', e.shape)

        # change A into matrix
        k = len(A)
        rows = np.arange(k)
        cols = A
        vals = np.ones(k)
        AA = sp.coo_matrix((vals, (rows, cols)), shape=(k, n - 1)).toarray()

        AKAt = AA.dot(K.dot(AA.T))
        KAt = K.dot(AA.T)

        U, S, Vt = linalg.svd(AA)
        #print(U)
        #print(S)
        #print(Vt)

        #print(AA.dot(u_obs)-e)

        k = e.shape[0]
        Ko = 0.01 * np.identity(k)

        # Debug
        K = u_cov.covariance()
        #U_spp = u_cov.support()
        #A_cmp = A.dot(U_spp)

        u_cond = u_cov.condition(A, e, Ko=Ko, n_samples=100)
        """
Beispiel #49
0
import numpy as np
from scipy.linalg import solve, eigh
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cdist

def locally_linear_embedding(X, k_neighbors, t_dimensions, reg_factor=1e-3):
    """
    Parameters
    ----------
    X : numpy array
        input data, shape [n_samples, n_features], dtype must be numpy.float64.
    k_neighbors : integer
        number of nearest neighbors to consider for each point.
    t_dimensions : integer
        number of dimensions in the output data.
    reg_factor : float
        regularization factor, for the case k_neighbors > n_features.

    Returns
    -------
    Y : numpy array
        dimension-reduced data, shape [n_samples, t_dimensions].
    """

    # check X data: must be a 2-D numpy array, must be np.float64
    if not isinstance(X, np.ndarray) or X.ndim != 2:
        raise TypeError("Your input data is NOT a 2-D numpy array")
    if X.dtype != np.float64:
        raise TypeError("Your input data is NOT type: numpy.float64")

    n_samples, n_features = X.shape

    # check Parameters
    if t_dimensions > n_features or t_dimensions < 1:
        raise ValueError(
            "Your input does NOT satisfy: 1 <= output dimension <= input dimension"
        )
    if k_neighbors >= n_samples or k_neighbors <= 0:
        raise ValueError(
            "Your input does NOT satisfy: 0 < k_neighbors < n_samples")

    print("#### LLE algorithm started! ####")

    k_take = k_neighbors + 1

    # step 1, compute the k nearest neighbors of each point
    print("\tStage 1: compute distance and find k-nearest neighbor")
    idx = np.argpartition(cdist(X, X), (1, k_take), axis=0)[1:k_take].T

    # step 1, compute the k-nn of each point (using scikit-learn)
    # knn = NearestNeighbors(k_take).fit(X)
    # idx = knn.kneighbors(X, return_distance=False)[:, 1:]

    Z = X[idx].transpose(0, 2, 1)  # own implementation

    # step 2, compute co-variance matrix and then the weights
    print("\tStage 2: construct the Weight matrix")
    # the Matrix to contain the Weights:
    Weights = np.empty((n_samples, k_neighbors), dtype=X.dtype)
    # the ALL-ONE vector:
    Ones = np.ones(k_neighbors, dtype=X.dtype)

    for i, P in enumerate(Z):

        # each neighbors - this point
        D = P.T - X[i]

        # Cov is the local covariance matrix
        Cov = np.dot(D, D.T)

        # regularization
        # Cov = Cov + eye(K,K) * factor * (Cov.trace > 0 ? Cov.trace : 1)
        r = reg_factor
        trace = np.trace(Cov)
        if trace > 0:
            r *= trace
        Cov.flat[::
                 k_take] += r  # add the reg factor to the main diagonal of Cov

        # find the weights of each neighbors
        w = solve(Cov, Ones, overwrite_a=True, assume_a='pos')

        # make sum(w) = 1
        Weights[i, :] = w / np.sum(w)

    # put the Weights in to a sparse matrix
    W = csr_matrix((Weights.ravel(), idx.ravel(),
                    np.arange(0, n_samples * k_neighbors + 1, k_neighbors)),
                   shape=(n_samples, n_samples))

    # Step 3 compute M = (I-W)'(I-W)
    M = (W.T * W - W - W.T).toarray()
    M.flat[::n_samples + 1] += 1

    # Step 4 compute the eigen_values and eigen_vectors of M
    print("\tStage 3: compute the eigenvectors and output")
    eigen_values, eigen_vectors = eigh(M,
                                       eigvals=(1, t_dimensions),
                                       overwrite_a=True)

    # Step 5 the 2nd to the d+1'th eigen_vectors is the output
    print("#### LLE algorithm ended! ####")
    return eigen_vectors[:, np.argsort(np.abs(eigen_values))]
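A smoke test of the function above on random data; it only checks shapes, since random points have no manifold structure to recover. Note that on newer SciPy the eigvals argument of eigh has been replaced by subset_by_index, so the call above may need that tweak:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))   # 200 samples, 5 features, float64
Y = locally_linear_embedding(X, k_neighbors=10, t_dimensions=2)
print(Y.shape)                      # (200, 2)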
Beispiel #50
0
    def test_condition_ptwise(self):
        #
        # Initialize Gaussian Random Field
        #
        # Resolution
        l_max = 9
        n = 2**l_max + 1  # size

        # Hurst parameter
        H = 0.5  # Hurst parameter in [0.5,1]

        # Form covariance and precision matrices
        x = np.arange(1, n + 1)
        X, Y = np.meshgrid(x, x)
        K = fbm_cov(X, Y, H)

        # Compute the precision matrix
        I = np.identity(n)
        Q = linalg.solve(K, I)

        # Plot meshes
        fig, ax = plt.subplots(1, 1)
        n = 2**l_max + 1
        for l in range(l_max):
            nl = 2**l + 1
            i_spp = [i * 2**(l_max - l) for i in range(nl)]
            ax.plot(x[i_spp], l * np.ones(nl), '.')
        #ax.plot(x,'.', markersize=0.1)
        #plt.show()

        # Plot conditioned field
        fig, ax = plt.subplots(3, 3)

        # Define original field
        u = []
        n = 2**(l_max) + 1
        for l in range(l_max):
            nl = 2**l + 1
            i_spp = [i * 2**(l_max - l) for i in range(nl)]
            V_spp = I[:, i_spp]
            if l == 0:
                u_fne = GaussianField(n, K=K, mode='covariance',\
                                      support=V_spp)
                u_obs = u_fne.sample()
                i_obs = np.array(i_spp)
            else:
                u_fne = GaussianField(n, K=K, mode='covariance',\
                                      support=V_spp)
                u_cnd = u_fne.condition(i_obs, u_obs[i_obs], output='field')
                u_obs = u_cnd.sample()
                i_obs = np.array(i_spp)
            u.append(u_obs)

            # Plot
            for ll in range(l, l_max):
                i, j = np.unravel_index(ll, (3, 3))
                if ll == l:
                    ax[i, j].plot(x[i_spp],
                                  5 * np.exp(0.01 * u_obs[i_spp]),
                                  linewidth=0.5)
                else:
                    ax[i, j].plot(x[i_spp],
                                  5 * np.exp(0.01 * u_obs[i_spp]),
                                  'g',
                                  linewidth=0.1,
                                  alpha=0.1)
            fig.savefig('successive_conditioning.pdf')
Beispiel #51
0
    def calculate_lambda_ir(self):
        # Solve p.T @ lambda = -lambda_0 for each rate matrix in p_r,
        # after setting its diagonal to -1.
        lambd = []
        for i, p in enumerate(self.p_r):
            np.fill_diagonal(p, -1)
            lambd.append(linalg.solve(p.T, -self.lambda_0_ir[:, i]))
        return np.array(lambd).T
Beispiel #52
0
    j = np.argmin((x[i] - x)**2 + (y[i] + dx - y)**2)
    if i != j: G[i, j] = vy[i] / (2 * dx)
    j = np.argmin((x[i] - x)**2 + (y[i] - dx - y)**2)
    if i != j: G[i, j] = -vy[i] / (2 * dx)

# Form operator
A = L - G

# Boundary conditions
b = np.zeros(n)
for i in range(n):
    if (x[i] == 0 or x[i] == 1 or y[i] == 0 or y[i] == 1):
        A[i, :] = 0
        A[i, i] = 1

    if x[i] == 0:
        b[i] = np.exp(-10 * (y[i] - 0.3)**2)

# Solve
from scipy.linalg import solve
u = solve(A, b)

# Plot
U = to_matrix(u)
plt.imshow(U, extent=(min(x), max(x), max(y), min(y)))
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Temperature distribution of plate')
plt.show()
Beispiel #53
0
        if Triangles[i][0] < 14 * kr and Triangles[i][
                1] < 14 * kr and Triangles[i][2] < 14 * kr:
            continue
        Triangles2.append([Triangles[i][0], Triangles[i][1], Triangles[i][2]])
    Triangles = np.array(Triangles2)
    A, B = sestaviMatriki(Points, Triangles, 14 * kr)
    #print(Points.shape)
    #print(Triangles.shape)
    #print(Points[30:])
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches((10, 5))
    kot = minkot(Triangles, Points)
    ax1.triplot(Points[:, 0], Points[:, 1], Triangles)
    #ax1.set_aspect(1)
    #x = np.abs(SOR(A,B,1.5))
    x = np.abs(lin.solve(A, B))
    #x = np.abs(lin.lstsq(A,B)[0])
    C = poisKoef(x, Triangles, Points, 14 * kr)
    cs = ax2.tricontourf(Points[:, 0],
                         Points[:, 1],
                         np.concatenate((np.zeros(14 * kr), x.flatten())),
                         levels=np.linspace(0, x[np.argmax(x)], 50),
                         cmap=plt.get_cmap("hot"))
    plt.colorbar(cs)
    #ax2.set_aspect(1)
    # Slovenian: "Random, the smallest angle is {} degrees, the Poisson coeff. is {}"
    plt.suptitle(
        "Random, Najmanjši kot je {} stopinj, Pois. koef. je {}".format(
            round(kot, 2), round(C, 4)))
    plt.savefig("druga/random5.pdf")
if 0:
    kr = 30
Beispiel #54
0
def _make_interp_per_full_matr(x, y, t, k):
    '''
    Returns a solution of a system for B-spline interpolation with periodic
    boundary conditions. The first ``k - 1`` rows of the matrix are conditions of
    periodicity (continuity of ``k - 1`` derivatives at the boundary points).
    Last ``n`` rows are interpolation conditions.
    RHS is ``k - 1`` zeros and ``n`` ordinates in this case.

    Parameters
    ----------
    x : 1-D array, shape (n,)
        Values of x - coordinate of a given set of points.
    y : 1-D array, shape (n,)
        Values of y - coordinate of a given set of points.
    t : 1-D array, shape (n + 2*k,)
        Vector of knots.
    k : int
        The maximum degree of spline

    Returns
    -------
    c : 1-D array, shape (n+k-1,)
        B-spline coefficients

    Notes
    -----
    ``t`` is supposed to be taken on circle.

    '''

    x, y, t = map(np.asarray, (x, y, t))

    n = x.size
    # LHS: the collocation matrix + derivatives at edges
    matr = np.zeros((n + k - 1, n + k - 1))

    # derivatives at x[0] and x[-1]:
    for i in range(k - 1):
        bb = _bspl.evaluate_all_bspl(t, k, x[0], k, nu=i + 1)
        matr[i, :k + 1] += bb
        bb = _bspl.evaluate_all_bspl(t, k, x[-1], n + k - 1, nu=i + 1)[:-1]
        matr[i, -k:] -= bb

    # collocation matrix
    for i in range(n):
        xval = x[i]
        # find interval
        if xval == t[k]:
            left = k
        else:
            left = np.searchsorted(t, xval) - 1

        # fill a row
        bb = _bspl.evaluate_all_bspl(t, k, xval, left)
        matr[i + k - 1, left - k:left + 1] = bb

    # RHS
    b = np.r_[[0] * (k - 1), y]

    c = solve(matr, b)
    return c
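For everyday use, the public SciPy entry point for periodic B-spline interpolation wraps this machinery; a minimal usage sketch:

import numpy as np
from scipy.interpolate import make_interp_spline

x = np.linspace(0, 2 * np.pi, 10)
y = np.sin(x)
y[-1] = y[0]                        # periodic data must match at the ends
spl = make_interp_spline(x, y, k=3, bc_type='periodic')
print(spl(np.pi / 2))               # close to 1.0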
Beispiel #55
0
    def calculate_pi_alpha(self):
        """
        Computes the equilibrium probability quantities "pi_alpha" used
        in MMVT theory. The value self.pi_alpha gets set by this 
        function.
        """
        if self.k_alpha_beta is None:
            raise Exception("Unable to call calculate_pi_alpha(): "\
                            "No statistics present in Data Sample.")
        # First, determine if there is a bulk anchor, and if so, use it as
        # the "pivot", if not, make our own pivot.
        # The "pivot" is the entry in the flux_matrix that
        bulk_index = None
        for alpha, anchor in enumerate(self.model.anchors):
            if anchor.bulkstate:
                assert bulk_index is None, "Only one bulk state is allowed "\
                    "in model"
                bulk_index = alpha

        if bulk_index is None:
            # Then we have a model without a bulk anchor: we need to make our
            # own pivot
            pivot_index = self.model.num_anchors
            flux_matrix_dimension = self.model.num_anchors + 1
        else:
            # If a bulk anchor exists, we can use it as the pivot
            assert bulk_index == self.model.num_anchors - 1, "The bulk "\
                "anchor must be the last one in the model."
            pivot_index = bulk_index
            flux_matrix_dimension = self.model.num_anchors

        self.pi_alpha = np.zeros(flux_matrix_dimension)
        flux_matrix = np.zeros((flux_matrix_dimension, flux_matrix_dimension))
        column_sum = np.zeros(flux_matrix_dimension)
        flux_matrix[pivot_index, pivot_index] = 1.0
        for alpha, anchor1 in enumerate(self.model.anchors):
            flux_matrix[alpha, pivot_index] = 1.0
            if anchor1.bulkstate:
                continue
            id_alias = anchor1.alias_from_neighbor_id(pivot_index)
            flux_matrix[pivot_index, alpha] = 0.0
            dead_end_anchor = False
            if len(anchor1.milestones) == 1:
                dead_end_anchor = True
            for beta, anchor2 in enumerate(self.model.anchors):
                if beta == pivot_index:
                    continue
                if alpha == beta:
                    pass
                else:
                    id_alias = anchor1.alias_from_neighbor_id(anchor2.index)
                    if id_alias is None:
                        flux_matrix[alpha, beta] = 0.0
                    else:
                        if dead_end_anchor:
                            # This line was supposed to work for a 1D
                            # Smoluchowski system, but with a 3D
                            # spherical system, the 2.0 needs to be 1.0.
                            #flux_matrix[alpha, beta] = 2.0 *\
                            #     self.k_alpha_beta[(alpha, beta)]
                            flux_matrix[alpha, beta] = 1.0 *\
                                 self.k_alpha_beta[(alpha, beta)]
                        else:
                            flux_matrix[alpha, beta] = \
                                self.k_alpha_beta[(alpha, beta)]
                        column_sum[alpha] += flux_matrix[alpha, beta]

            flux_matrix[alpha, alpha] = -column_sum[alpha]

        flux_matrix[pivot_index, pivot_index - 1] = HIGH_FLUX
        prob_equil = np.zeros((flux_matrix_dimension, 1))
        prob_equil[pivot_index] = 1.0
        self.pi_alpha = abs(la.solve(flux_matrix.T, prob_equil))

        # refine pi_alpha
        pi_alpha_slice = np.zeros((flux_matrix_dimension - 1, 1))
        for i in range(flux_matrix_dimension - 1):
            pi_alpha_slice[i, 0] = -self.pi_alpha[i, 0] * flux_matrix[i, i]

        K = flux_matrix_to_K(flux_matrix)
        K_inf = np.linalg.matrix_power(K, FLUX_MATRIX_K_EXPONENT)
        stationary_dist = K_inf.T @ pi_alpha_slice
        for i in range(flux_matrix_dimension - 1):
            self.pi_alpha[i, 0] = -stationary_dist[i, 0] / flux_matrix[i, i]
        self.pi_alpha[-1, 0] = 0.0
        self.pi_alpha = self.pi_alpha / np.sum(self.pi_alpha)
        return
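The pivot construction above is one way to make a singular rate-matrix system solvable. A generic alternative, sketched on a made-up 3-state rate matrix Q whose rows sum to zero: replace one balance equation of Q.T pi = 0 with the normalization constraint sum(pi) = 1.

import numpy as np
from scipy import linalg as la

Q = np.array([[-0.3, 0.2, 0.1],
              [0.1, -0.4, 0.3],
              [0.2, 0.2, -0.4]])   # rows sum to zero (made up)

A = Q.T.copy()
A[-1, :] = 1.0                     # replace last equation by sum(pi) = 1
b = np.zeros(3)
b[-1] = 1.0
pi = la.solve(A, b)
print(pi, pi @ Q)                  # pi @ Q is ~0: stationary distribution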
Beispiel #56
0
 def rsolve(self, v, tol=0):
     """Evaluate w = M^-H v"""
     if self.collapsed is not None:
         return solve(self.collapsed.T.conj(), v)
     return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
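The collapsed branch is simply a conjugate-transpose solve, w = M^-H v. A standalone check of that identity with a small made-up complex matrix:

import numpy as np
from scipy.linalg import solve

M = np.array([[2.0 + 1j, 0.5],
              [0.0, 1.0 - 2j]])
v = np.array([1.0, 1j])
w = solve(M.conj().T, v)                 # w = M^-H v
print(np.allclose(M.conj().T @ w, v))    # True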
Beispiel #57
0
        # v = v0 # set initial eigenvector guess

        # print('\n Eigenvectors: ', eigvecs_truth)
        # print('\nStarting guess v: ', v)
        # print(' ')

        print('\nEigenvalues: ', eigvals_truth)
        print('\nStarting guess lambda: ', lambda0)
        print(' ')

        # print(f'\ni = 0, lambda = {lambda0}')

        for i in range(1, num_iterations):
            B = A_symm - lam * np.eye(m)
            try:
                omega = la.solve(B, v)
            except la.LinAlgError:
                print("Matrix is singular! Converged solution")
                break

            v = omega / la.norm(omega, 2)
            lam = v.T @ (A_symm @ v)
            # print(f'i = {i}, lambda = {lam}')
            eigval_guesses.append(lam)
            eigvec_guesses.append(v)
            error = np.abs(eigval_guesses[-1] - eigval_guesses[-2])

        rqiter_eigval_approx.append(np.round(np.abs(eigval_guesses[-1]), 8))
        print('\nEigenvalue approximation: ', eigval_guesses[-1])

        if allUnique(rqiter_eigval_approx) == False:
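Since the surrounding driver is truncated, here is a self-contained sketch of the same Rayleigh quotient iteration loop on a small symmetric test matrix; the try/except mirrors the singularity check above:

import numpy as np
from numpy import linalg as la

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
A_symm = (A + A.T) / 2

v = rng.standard_normal(5)
v /= la.norm(v)
lam = v @ (A_symm @ v)          # initial Rayleigh quotient

for _ in range(20):
    try:
        omega = la.solve(A_symm - lam * np.eye(5), v)
    except la.LinAlgError:
        break                    # shifted matrix singular: converged
    v = omega / la.norm(omega)
    lam = v @ (A_symm @ v)

print(lam)                       # matches one entry of la.eigvalsh(A_symm)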
Beispiel #58
0
def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
               maxupdate, mast, pivot):
    """
    The purpose of phase one is to find an initial basic feasible solution
    (BFS) to the original problem.

    Generates an auxiliary problem with a trivial BFS and an objective that
    minimizes infeasibility of the original problem. Solves the auxiliary
    problem using the main simplex routine (phase two). This either yields
    a BFS to the original problem or determines that the original problem is
    infeasible. If feasible, phase one detects redundant rows in the original
    constraint matrix and removes them, then chooses additional indices as
    necessary to complete a basis/BFS for the original problem.
    """

    m, n = A.shape
    status = 0

    # generate auxiliary problem to get initial BFS
    A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)

    if status == 6:
        residual = c.dot(x)
        iter_k = 0
        return x, basis, A, b, residual, status, iter_k

    # solve auxiliary problem
    phase_one_n = n
    iter_k = 0
    x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
                                          postsolve_args,
                                          maxiter, tol, disp,
                                          maxupdate, mast, pivot,
                                          iter_k, phase_one_n)

    # check for infeasibility
    residual = c.dot(x)
    if status == 0 and residual > tol:
        status = 2

    # drive artificial variables out of basis
    # TODO: test redundant row removal better
    # TODO: make solve more efficient with BGLU? This could take a while.
    keep_rows = np.ones(m, dtype=bool)
    for basis_column in basis[basis >= n]:
        B = A[:, basis]
        try:
            basis_finder = np.abs(solve(B, A))  # inefficient
            pertinent_row = np.argmax(basis_finder[:, basis_column])
            eligible_columns = np.ones(n, dtype=bool)
            eligible_columns[basis[basis < n]] = 0
            eligible_column_indices = np.where(eligible_columns)[0]
            index = np.argmax(basis_finder[:, :n]
                              [pertinent_row, eligible_columns])
            new_basis_column = eligible_column_indices[index]
            if basis_finder[pertinent_row, new_basis_column] < tol:
                keep_rows[pertinent_row] = False
            else:
                basis[basis == basis_column] = new_basis_column
        except LinAlgError:
            status = 4

    # form solution to original problem
    A = A[keep_rows, :n]
    basis = basis[keep_rows]
    x = x[:n]
    m = A.shape[0]
    return x, basis, A, b, residual, status, iter_k
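This phase-one routine is internal to scipy.optimize.linprog; end users would normally call the public API instead, e.g.:

import numpy as np
from scipy.optimize import linprog

c = np.array([1.0, 2.0])                  # minimize x + 2y
A_eq = np.array([[1.0, 1.0]])
b_eq = np.array([1.0])                    # subject to x + y = 1, x, y >= 0
res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=[(0, None), (0, None)])
print(res.x)                              # [1., 0.]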
Beispiel #59
0
def get_freq_modes_over_f(power_mat,
                          window_function,
                          frequency,
                          n_modes,
                          plots=False):
    """Fines the most correlated frequency modes and fits thier noise."""

    n_f = len(frequency)
    d_f = sp.mean(sp.diff(frequency))
    dt = 1. / 2. / frequency[-1]
    n_chan = power_mat.shape[-1]
    n_time = window_function.shape[0]
    # The threshold for assuming there isn't enough data to measure anything.
    no_data_thres = 10. / n_time
    # Initialize the dictionary that will hold all the parameters.
    output_params = {}
    # First take the low frequency part of the spectrum matrix and average over
    # enough bins to get a well conditioned matrix.
    low_f_mat = sp.mean(power_mat[:4 * n_chan, :, :].real, 0)
    # Factor the matrix to get the most correlated modes.
    e, v = linalg.eigh(low_f_mat)
    # Make sure they are sorted.
    if not sp.alltrue(sp.diff(e) >= 0):
        raise RuntimeError("Eigenvalues not sorted")
    # Power matrix stripped of the biggest modes.
    reduced_power = sp.copy(power_mat)
    mode_list = []
    # Solve for the spectra of these modes.
    for ii in range(n_modes):
        this_mode_params = {}
        # Get power spectrum and window function for this mode.
        mode = v[:, -1 - ii]
        mode_power = sp.sum(mode * power_mat.real, -1)
        mode_power = sp.sum(mode * mode_power, -1)
        mode_window = sp.sum(mode[:, None]**2 * window_function, 1)
        mode_window = sp.sum(mode_window * mode[None, :]**2, 1)
        # Protect against no data.
        if sp.mean(mode_window).real < no_data_thres:
            this_mode_params['amplitude'] = 0.
            this_mode_params['index'] = 0.
            this_mode_params['f_0'] = 1.
            this_mode_params['thermal'] = T_infinity**2 * dt
        else:
            # Fit the spectrum.
            p = fit_overf_const(mode_power, mode_window, frequency)
            # Put all the parameters we measured into the output.
            this_mode_params['amplitude'] = p[0]
            this_mode_params['index'] = p[1]
            this_mode_params['f_0'] = p[2]
            this_mode_params['thermal'] = p[3]
        this_mode_params['mode'] = mode
        output_params['over_f_mode_' + str(ii)] = this_mode_params
        # Remove the mode from the power matrix.
        tmp_amp = sp.sum(reduced_power * mode, -1)
        tmp_amp2 = sp.sum(reduced_power * mode[:, None], -2)
        tmp_amp3 = sp.sum(tmp_amp2 * mode, -1)
        reduced_power -= tmp_amp[:, :, None] * mode
        reduced_power -= tmp_amp2[:, None, :] * mode[:, None]
        reduced_power += tmp_amp3[:, None, None] * mode[:, None] * mode
        mode_list.append(mode)
    # Initialize the compensation matrix, that will be used to restore thermal
    # noise that gets subtracted out.  See Jan 29, Feb 17th, 2012 of Kiyo's
    # notes.
    compensation = sp.eye(n_chan, dtype=float)
    for mode1 in mode_list:
        compensation.flat[::n_chan + 1] -= 2 * mode1**2
        for mode2 in mode_list:
            mode_prod = mode1 * mode2
            compensation += mode_prod[:, None] * mode_prod[None, :]
    # Now that we've stripped the noisiest modes, measure the auto power
    # spectrum, averaged over channels.
    auto_spec_mean = reduced_power.view()
    auto_spec_mean.shape = (n_f, n_chan**2)
    auto_spec_mean = auto_spec_mean[:, ::n_chan + 1].real
    auto_spec_mean = sp.mean(auto_spec_mean, -1)
    diag_window = window_function.view()
    diag_window.shape = (n_time, n_chan**2)
    diag_window = diag_window[:, ::n_chan + 1]
    auto_spec_window = sp.mean(diag_window, -1)
    if sp.mean(auto_spec_window).real < no_data_thres:
        auto_cross_over = 0.
        auto_index = 0.
        auto_thermal = 0
    else:
        auto_spec_params = fit_overf_const(auto_spec_mean, auto_spec_window,
                                           frequency)
        auto_thermal = auto_spec_params[3]
        if (auto_spec_params[0] <= 0 or auto_spec_params[3] <= 0
                or auto_spec_params[1] > -0.599):
            auto_cross_over = 0.
            auto_index = 0.
        else:
            auto_index = auto_spec_params[1]
            auto_cross_over = auto_spec_params[2] * (
                auto_spec_params[0] / auto_spec_params[3])**(-1. / auto_index)
            #if auto_cross_over < d_f:
            #    auto_index = 0.
            #    auto_cross_over = 0.
    # Plot the mean auto spectrum if desired.
    if plots:
        h = plt.gcf()
        a = h.add_subplot(*h.current_subplot)
        norm = sp.mean(auto_spec_window).real
        auto_plot = auto_spec_mean / norm
        plotable = auto_plot > 0
        lines = a.loglog(frequency[plotable], auto_plot[plotable])
        c = lines[-1].get_color()
        # And plot the fit in a light color.
        if auto_cross_over > d_f / 4.:
            spec = npow.overf_power_spectrum(auto_thermal, auto_index,
                                             auto_cross_over, dt, n_time)
        else:
            spec = sp.zeros(n_time, dtype=float)
        spec += auto_thermal
        spec[0] = 0
        spec = npow.convolve_power(spec, auto_spec_window)
        spec = npow.prune_power(spec)
        spec = spec[1:].real
        if norm > no_data_thres:
            spec /= norm
        plotable = spec > 0
        a.loglog(frequency[plotable],
                 spec[plotable],
                 c=c,
                 alpha=0.4,
                 linestyle=':')
    output_params['all_channel_index'] = auto_index
    output_params['all_channel_corner_f'] = auto_cross_over
    # Finally measure the thermal part of the noise in each channel.
    cross_over_ind = sp.digitize([auto_cross_over * 4], frequency)[0]
    cross_over_ind = max(cross_over_ind, n_f // 2)
    cross_over_ind = min(cross_over_ind, int(9. * n_f / 10.))
    thermal = reduced_power[cross_over_ind:, :, :].real
    n_high_f = thermal.shape[0]
    thermal.shape = (n_high_f, n_chan**2)
    thermal = sp.mean(thermal[:, ::n_chan + 1], 0)
    thermal_norms = sp.mean(diag_window, 0).real
    bad_inds = thermal_norms < no_data_thres
    thermal_norms[bad_inds] = 1.
    # Compensate for power lost in mode subtraction.
    compensation[:, bad_inds] = 0
    compensation[bad_inds, :] = 0
    for ii in xrange(n_chan):
        if bad_inds[ii]:
            compensation[ii, ii] = 1.
    thermal = linalg.solve(compensation, thermal)
    # Normalize
    thermal /= thermal_norms
    thermal[bad_inds] = T_infinity**2 * dt
    # Occasionally the compensation fails horribly on a few channels.
    # When this happens, zero out the offending indices.
    thermal[thermal < 0] = 0
    output_params['thermal'] = thermal
    # Now that we know what thermal is, we can subtract it out of the modes we
    # already measured.
    for ii in range(n_modes):
        mode_params = output_params['over_f_mode_' + str(ii)]
        thermal_contribution = sp.sum(mode_params['mode']**2 * thermal)
        # Subtract a maximum of 90% of the white noise to keep things positive
        # definite.
        new_white = max(mode_params['thermal'] - thermal_contribution,
                        0.1 * mode_params['thermal'])
        if mode_params['thermal'] < 0.5 * T_infinity**2 * dt:
            mode_params['thermal'] = new_white
    return output_params
Beispiel #60
0
def dirac_recon_joint_alg(G,
                          measurement,
                          num_dirac,
                          shape_b,
                          flatten_order='F',
                          num_band=1,
                          noise_level=0,
                          max_ini=100,
                          stop_cri='mse',
                          max_inner_iter=20,
                          max_num_same_x=1,
                          max_num_same_y=1):
    """
    ALGORITHM that reconstructs 2D Dirac deltas jointly
        min     |a - Gb|^2
        s.t.    c_1 * b = 0
                c_2 * b = 0

    This is the exact form that we have in the paper, without any alterations for
    performance considerations, e.g., reusing intermediate results, etc.

    The new formulation exploits the fact that c_1 and c_2 are linearly independent.
    Hence, the effective number of unknowns is less than the total size of the two filters.

    :param G: the linear mapping that links the unknown uniformly sampled
            sinusoids to the given measurements
    :param measurement: the given measurements of the 2D Dirac deltas
    :param num_dirac: number of Dirac deltas
    :param shape_b: shape of the (2D) uniformly sampled sinusoids
    :param flatten_order: flatten order to be used. This is related to how G is built.
            If dimension 0 of G is 'C' ordered, then flatten_order = 'C';
            otherwise, flatten_order = 'F'.
    :param noise_level: noise level present in the given measurements
    :param max_ini: maximum number of random initializations
    :param stop_cri: stopping criterion, either 'mse' or 'max_iter'
    :param max_inner_iter: maximum number of inner iterations for each random initialization
    :param max_num_same_x: maximum number of Dirac deltas that have the same horizontal locations.
            This will impose the minimum dimension of the annihilating filter used.
    :param max_num_same_y: maximum number of Dirac deltas that have the same vertical locations.
            This will impose the minimum dimension of the annihilating filter used.
    :return:
    """
    compute_mse = (stop_cri == 'mse')
    measurement = measurement.flatten(flatten_order)
    num_non_zero = num_dirac + 2

    # choose the shapes of the 2D annihilating filters (as square as possible)
    # total number of entries should be at least num_dirac + 1
    shape_c_0 = int(np.ceil(np.sqrt(num_non_zero)))
    shape_c_1 = int(np.ceil(num_non_zero / shape_c_0))
    if shape_c_0 > shape_c_1:
        shape_c_1, shape_c_0 = shape_c_0, shape_c_1

    # sanity check
    assert shape_c_0 * shape_c_1 >= num_non_zero

    # in case of common roots, the filter has to satisfy a certain minimum dimension
    shape_c_1 = max(shape_c_1, max_num_same_y + 1)
    shape_c_0 = int(np.ceil(num_non_zero / shape_c_1))
    shape_c_0 = max(shape_c_0, max_num_same_x + 1)

    shape_c = (shape_c_0, shape_c_1)

    # total number of coefficients in c1 and c2
    num_coef = shape_c_0 * shape_c_1

    if num_band > 1:

        def func_build_R(coef1, coef2, shape_in):
            R_mtx_band = R_mtx_joint(coef1, coef2, shape_in)
            return linalg.block_diag(*[R_mtx_band for _ in range(num_band)])
    else:

        def func_build_R(coef1, coef2, shape_in):
            return R_mtx_joint(coef1, coef2, shape_in)

    # determine the effective row rank of the joint annihilation right-dual matrix
    c1_test = np.random.randn(shape_c_0, shape_c_1) + \
              1j * np.random.randn(shape_c_0, shape_c_1)
    c2_test = np.random.randn(shape_c_0, shape_c_1) + \
              1j * np.random.randn(shape_c_0, shape_c_1)
    R_test = func_build_R(c1_test, c2_test, shape_b)
    try:
        s_test = linalg.svd(R_test, compute_uv=False)
        shape_Tb0_effective = min(
            R_test.shape) - np.where(np.abs(s_test) < 1e-12)[0].size
    except ValueError:
        # the effective number of equations as predicted by the derivation
        shape_Tb0_effective = \
            min(max(np.prod(shape_b) - compute_effective_num_eq_2d(shape_c, shape_c),
                    num_coef - 1 + num_coef - 1),
                R_test.shape[0])

    # sizes of various matrices / vectors
    sz_coef = num_coef * 2 - 1  # -1 because of linear independence

    sz_S0 = num_coef * 2 - 2 * num_non_zero
    sz_R1 = np.prod(shape_b) * num_band

    # a few indices that are fixed
    idx_bg0_Tb = sz_coef
    idx_end0_Tb = sz_coef + shape_Tb0_effective
    idx_bg1_Tb = 0
    idx_end1_Tb = sz_coef

    idx_bg0_TbH = 0
    idx_end0_TbH = sz_coef
    idx_bg1_TbH = sz_coef
    idx_end1_TbH = sz_coef + shape_Tb0_effective

    idx_bg0_Rc = sz_coef
    idx_end0_Rc = sz_coef + shape_Tb0_effective
    idx_bg1_Rc = sz_coef + shape_Tb0_effective
    idx_end1_Rc = sz_coef + shape_Tb0_effective + sz_R1

    idx_bg0_RcH = sz_coef + shape_Tb0_effective
    idx_end0_RcH = sz_coef + shape_Tb0_effective + sz_R1
    idx_bg1_RcH = sz_coef
    idx_end1_RcH = sz_coef + shape_Tb0_effective

    # pre-compute a few things
    GtG = np.dot(G.conj().T, G)
    Gt_a = np.dot(G.conj().T, measurement)
    try:
        beta = linalg.lstsq(G, measurement)[0]
    except np.linalg.linalg.LinAlgError:
        beta = linalg.solve(GtG, Gt_a)
    beta_reshaped = np.reshape(beta, (shape_b[0], shape_b[1], num_band),
                               order='F')
    Tbeta0 = np.vstack([
        T_mtx_joint(beta_reshaped[:, :, band_count], shape_c, shape_c)
        for band_count in range(num_band)
    ])
    # QR-decomposition of Tbeta0.T
    Tbeta_band = np.vstack([
        convmtx2_valid(beta_reshaped[:, :, band_count], shape_c[0], shape_c[1])
        for band_count in range(num_band)
    ])
    Qtilde_full = linalg.qr(Tbeta_band.conj().T,
                            mode='economic',
                            pivoting=False)[0]
    Qtilde1 = Qtilde_full
    Qtilde2 = Qtilde_full[:, 1:]
    Qtilde_mtx = linalg.block_diag(Qtilde1, Qtilde2)
    Tbeta0_Qtilde = np.dot(Tbeta0, Qtilde_mtx)

    # initializations
    min_error = np.inf
    rhs = np.concatenate(
        (np.zeros(sz_coef + shape_Tb0_effective + sz_R1 + sz_S0,
                  dtype=float), np.append(np.ones(2, dtype=float), 0)))

    rhs_bl = np.concatenate(
        (Gt_a, np.zeros(shape_Tb0_effective, dtype=Gt_a.dtype)))
    c1_opt = None
    c2_opt = None

    # iterations over different random initializations of the annihilating filter coefficients
    ini = 0
    while ini < max_ini:
        ini += 1
        c1 = np.random.randn(shape_c_0, shape_c_1) + \
             1j * np.random.randn(shape_c_0, shape_c_1)
        c2 = np.random.randn(shape_c_0, shape_c_1) + \
             1j * np.random.randn(shape_c_0, shape_c_1)

        # build a selection matrix that chooses a subset of c1 and c2 to ZERO OUT
        S = np.dot(
            planar_sel_coef_subset((shape_c_0, shape_c_1),
                                   (shape_c_0, shape_c_1),
                                   num_non_zero=num_non_zero,
                                   max_num_same_x=max_num_same_x,
                                   max_num_same_y=max_num_same_y), Qtilde_mtx)
        S_H = S.conj().T

        # the initializations of the annihilating filter coefficients
        c0 = np.column_stack(
            (linalg.block_diag(
                np.dot(Qtilde1.T, c1.flatten('F'))[:, np.newaxis],
                np.dot(Qtilde2.T, c2.flatten('F'))[:, np.newaxis]),
             np.concatenate((np.dot(Qtilde1.T, c2.flatten('F')),
                             np.dot(Qtilde2.T, c1.flatten('F'))))[:,
                                                                  np.newaxis]))

        mtx_S_row = np.hstack(
            (S, np.zeros((sz_S0, shape_Tb0_effective + sz_R1 + sz_S0 + 3))))

        # last row in mtx_loop
        mtx_last_row = np.hstack(
            (c0.T, np.zeros((3, shape_Tb0_effective + sz_R1 + sz_S0 + 3))))

        R_loop = func_build_R(c1, c2, shape_b)
        # use QR decomposition to extract effective lines of equations
        Q_H = linalg.qr(R_loop, mode='economic',
                        pivoting=False)[0][:, :shape_Tb0_effective].conj().T
        R_loop = np.dot(Q_H, R_loop)
        Tbeta_loop = np.dot(Q_H, Tbeta0_Qtilde)

        # inner loop for each random initialization
        for inner in range(max_inner_iter):
            if inner == 0:
                mtx_loop = np.vstack(
                    (np.hstack((np.zeros(
                        (sz_coef, sz_coef)), Tbeta_loop.conj().T,
                                np.zeros((sz_coef, sz_R1)), S_H, c0.conj())),
                     np.hstack(
                         (Tbeta_loop,
                          np.zeros((shape_Tb0_effective, shape_Tb0_effective)),
                          -R_loop, np.zeros(
                              (shape_Tb0_effective, 3 + sz_S0)))),
                     np.hstack(
                         (np.zeros((sz_R1, sz_coef)), -R_loop.conj().T, GtG,
                          np.zeros(
                              (sz_R1, 3 + sz_S0)))), mtx_S_row, mtx_last_row))
            else:
                mtx_loop[idx_bg0_Tb:idx_end0_Tb,
                         idx_bg1_Tb:idx_end1_Tb] = Tbeta_loop
                mtx_loop[idx_bg0_TbH:idx_end0_TbH,
                         idx_bg1_TbH:idx_end1_TbH] = Tbeta_loop.conj().T
                mtx_loop[idx_bg0_Rc:idx_end0_Rc,
                         idx_bg1_Rc:idx_end1_Rc] = -R_loop
                mtx_loop[idx_bg0_RcH:idx_end0_RcH,
                         idx_bg1_RcH:idx_end1_RcH] = -R_loop.conj().T

            # solve annihilating filter coefficients
            try:
                coef = np.dot(Qtilde_mtx,
                              linalg.solve(mtx_loop, rhs)[:sz_coef])
            except linalg.LinAlgError:
                break
            c1 = np.reshape(coef[:num_coef], shape_c, order='F')
            c2 = np.reshape(coef[num_coef:], shape_c, order='F')

            # update the right-dual matrix R and T based on the new coefficients
            R_loop = func_build_R(c1, c2, shape_b)
            # use QR decomposition to extract effective lines of equations
            Q_H = linalg.qr(
                R_loop, mode='economic',
                pivoting=False)[0][:, :shape_Tb0_effective].conj().T
            R_loop = np.dot(Q_H, R_loop)
            Tbeta_loop = np.dot(Q_H, Tbeta0_Qtilde)

            # reconstruct b
            if inner == 0:
                mtx_brecon = np.vstack(
                    (np.hstack((GtG, R_loop.conj().T)),
                     np.hstack(
                         (R_loop,
                          np.zeros(
                              (shape_Tb0_effective, shape_Tb0_effective))))))
            else:
                mtx_brecon[:sz_R1, sz_R1:] = R_loop.conj().T
                mtx_brecon[sz_R1:, :sz_R1] = R_loop

            try:
                b_recon = linalg.solve(mtx_brecon, rhs_bl)[:sz_R1]
            except linalg.LinAlgError:
                break

            # compute fitting error
            error_loop = linalg.norm(measurement - np.dot(G, b_recon))

            if 0 <= error_loop < min_error:
                # check that the number of non-zero entries are
                # indeed num_dirac + 1 (could be less)
                c1[np.abs(c1) < 1e-2 * np.max(np.abs(c1))] = 0
                c2[np.abs(c2) < 1e-2 * np.max(np.abs(c2))] = 0
                nnz_cond = \
                    np.sum(1 - np.isclose(np.abs(c1), 0).astype(int)) == num_non_zero and \
                    np.sum(1 - np.isclose(np.abs(c2), 0).astype(int)) == num_non_zero
                cord1_0, cord1_1 = np.nonzero(c1)
                cord2_0, cord2_1 = np.nonzero(c2)
                min_order_cond = \
                    (np.max(cord2_0) - np.min(cord2_0) + 1 >= max_num_same_x + 1) and \
                    (np.max(cord1_1) - np.min(cord1_1) + 1 >= max_num_same_y + 1)
                if nnz_cond and min_order_cond:
                    min_error = error_loop
                    b_opt = b_recon
                    c1_opt = c1
                    c2_opt = c2

            if compute_mse and min_error < noise_level:
                break

        if compute_mse and min_error < noise_level:
            break

        if c1_opt is None or c2_opt is None:
            max_ini += 1

    print('fitting SNR {:.2f}'.format(
        20 * np.log10(linalg.norm(measurement) / min_error)))
    return c1_opt, c2_opt, min_error, b_opt, ini