import numpy as np
import cv2
from scipy.sparse import spdiags
from scipy.sparse.linalg import cg


def get_toneMap(img, P):
    # natural_histogram_matching is defined elsewhere in this project
    P = np.float64(P) / 255.0
    J = natural_histogram_matching(img)
    J = cv2.blur(J, (10, 10))
    theta = 0.2

    height, width = img.shape

    P = cv2.resize(P, (width, height))  # cv2.resize takes dsize as (width, height)
    P = P.reshape((1, height * width))
    logP = np.log(P)
    logP = spdiags(logP, 0, width * height, width * height)


    J = cv2.resize(J, (width, height))
    J = J.reshape(height * width)
    logJ = np.log(J)

    e = np.ones(width * height)

    Dx = spdiags([-e, e], np.array([0, height]), width * height, width * height)
    Dy = spdiags([-e, e], np.array([0, 1]), width * height, width * height)


    A = theta * (Dx.dot(Dx.transpose()) + Dy.dot(Dy.transpose())) + logP.dot(logP.transpose())

    b = logP.transpose().dot(logJ)
    beta, info = cg(A, b, tol=1e-6, maxiter=60)

    beta = beta.reshape((height, width))
    P = P.reshape((height, width))
    T = np.power(P, beta)

    return T
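The spdiags calls above build forward-difference operators on the flattened image. A minimal standalone sketch of what Dx and Dy compute (assumes only NumPy and SciPy):

import numpy as np
from scipy.sparse import spdiags

height, width = 3, 3                  # tiny square "image"
n = width * height
e = np.ones(n)
# same construction as in get_toneMap above
Dx = spdiags([-e, e], np.array([0, height]), n, n)
Dy = spdiags([-e, e], np.array([0, 1]), n, n)

x = np.arange(n, dtype=float)         # flattened image 0..8
print(Dx.dot(x))   # x[i+height] - x[i] on interior rows, -x[i] on the last `height` rows
print(Dy.dot(x))   # x[i+1] - x[i] on interior rows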
Example 2
    def makeFD(self, pmc):
        '''A routine that will define the following operators:
            d1: derivative mapping from on grid to half grid
            d2: derivative mapping from half grid to on grid
            po: pml scaling for on grid
            ph: pml scaling for half grid
        '''
            
        hi = np.zeros(self.nx+1);
    
        hi[:self.npml] = (pmc)*np.linspace(1,0,self.npml);
        hi[(self.nx-self.npml+1):(self.nx+1)] = \
                  (pmc)*np.linspace(0,1,self.npml);

        h = self.dx*np.ones(self.nx+1,dtype='complex128') + 1j*hi
        # h = self.dx*np.ones(self.nx+1,dtype='complex128')
        # matOut.savemat('Hout', {'h':h})
        
        opr = np.ones((2,self.nx+1))
        opr[0,:] = -1
        
        self.d1 = sparse.spdiags(opr, [-1, 0], self.nx+1, self.nx);
        self.d2 = sparse.spdiags(opr, [0,  1], self.nx, self.nx+1);

        # averaging operator for getting to the in-between grid points
        avg_n_c = sparse.spdiags(0.5*np.ones((2,self.nx+1)), [0, 1], self.nx,self.nx+1);

        #creating the half and non-half space stretchers.
        self.ph = sparse.spdiags(1/h, 0, self.nx+1,self.nx+1);
        self.po = sparse.spdiags(1/(avg_n_c*h), 0, self.nx,self.nx);
Example 3
def build_hallway(N,g,p):
    
        P = sps.spdiags((1-p)*np.ones(N),0,N,N) + sps.spdiags(p*np.ones(N),-1,N,N)
        P = P.tolil()
        P[0,-1] = p

        Q = sps.spdiags((1-p)*np.ones(N),0,N,N) + sps.spdiags(p*np.ones(N),1,N,N)
        Q = Q.tolil()
        Q[-1,0] = p
        
        I = sps.eye(N)
        B = I - g * P
        C = I - g*Q
        M = sps.bmat([[None,B,C],
                      [-B,None,None],
                      [-C,None,None]])

        c = np.ones(N)
        c[int(N/2)] = 0
        
        q = np.empty(3*N)
        q[:N] = -np.ones(N) / N
        q[N:(2*N)] = c
        q[(2*N):] = c

        return (M,q)
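As a quick sanity check of the construction above (a minimal sketch with made-up N and p), the spdiags-plus-corner-entry pattern yields a row-stochastic circulant matrix:

import numpy as np
import scipy.sparse as sps

N, p = 5, 0.3
P = sps.spdiags((1 - p) * np.ones(N), 0, N, N) + sps.spdiags(p * np.ones(N), -1, N, N)
P = P.tolil()                       # LIL format allows cheap single-entry edits
P[0, -1] = p                        # wrap-around entry closes the hallway
print(P.toarray().sum(axis=1))      # every row sums to 1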
Example 4
def get_cotan_Laplacian(tris,points):
    v1s = points[tris[:,0],:]
    v2s = points[tris[:,1],:]
    v3s = points[tris[:,2],:]

    e1s = normalize_vectors(v2s-v1s)
    e2s = normalize_vectors(v3s-v2s)
    e3s = normalize_vectors(v1s-v3s)



    alphas = np.arccos((e1s*(-e3s)).sum(axis=1))
    betas = np.arccos((e2s*(-e1s)).sum(axis=1))
    gammas = np.arccos((e3s*(-e2s)).sum(axis=1))

    data = 0.5*cotangent([gammas,alphas,betas]).flatten()
    row_ind = np.array([tris[:,0],tris[:,1],tris[:,2]]).flatten()
    col_ind = np.array([tris[:,1],tris[:,2],tris[:,0]]).flatten()
    M = points.shape[0]
    N = points.shape[0]
    Cotan = sparse.csr_matrix((data, (row_ind, col_ind)), shape=(M, N))
    Cotan = 0.5*(Cotan+Cotan.T)
    diagonal = Cotan.sum(axis=1).A.flatten()
    Cotan = Cotan-sparse.spdiags(diagonal,0,M,N)
    vertex_areas = get_vertex_area_matrix(tris, points)
    M = sparse.spdiags(vertex_areas,0,M,N)
    return M, Cotan
Example 5
    def fit(self, X, y):
        
        n_samples, n_features = X.shape

        # Masks for positive and negative samples
        pos_samples = sp.spdiags(y, 0, n_samples, n_samples)
        neg_samples = sp.spdiags(1-y, 0, n_samples, n_samples)

        # Extract positive and negative samples
        X_pos = pos_samples*X
        X_neg = neg_samples*X

        # tp: number of positive samples that contain given term
        # fp: number of positive samples that do not contain given term
        # fn: number of negative samples that contain given term
        # tn: number of negative samples that do not contain given term
        tp = np.bincount(X_pos.indices, minlength=n_features)
        fp = np.sum(y)-tp
        fn = np.bincount(X_neg.indices, minlength=n_features)
        tn = np.sum(1-y)-fn

        # Smooth document frequencies
        self._tp = tp + 1.0
        self._fp = fp + 1.0
        self._fn = fn + 1.0
        self._tn = tn + 1.0

        self._n_samples = n_samples
        self._n_features = n_features

        return self
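The row-masking trick in fit is worth a tiny worked example. A sketch with a made-up 3x3 term-count matrix (assumes scipy.sparse as sp):

import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[1, 0, 1],
                            [0, 1, 0],
                            [1, 1, 0]]))
y = np.array([1, 0, 1])                              # samples 0 and 2 are positive
n_samples, n_features = X.shape
X_pos = sp.spdiags(y, 0, n_samples, n_samples) * X   # zeroes out the negative rows
tp = np.bincount(X_pos.indices, minlength=n_features)
print(tp)                                            # [2 1 1]: term 0 occurs in both positive samples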
def generateTimeMatrix(v0,t,lengthOfMatrix):
	row=1.0
	tau=1.0
	sigma=1.0
	g=980.0
	gamma=(row*tau*v0)/sigma
	w=gamma*math.sqrt(v0**2 +2*g*t)
	wprime=gamma/(math.sqrt(v0**2+2*g*t))
	Nx=lengthOfMatrix
	if(t==0):
		r=(k**2)/(h**2)      # k (time step) and h (grid spacing) are module-level constants
		rr=(k**2)/(2*h)
		iden=linspace(1,1,Nx)
		d_main = 2.0*(-1.0-(r*(1.0/(w-1.0))))*iden  # values that will go on main diagonal
		d_sub = (1.0/((w-1.0)))*(r+(rr*wprime))*iden     # values that will go on subdiagonal
		d_super = (1.0/((w-1.0)))*(r-(rr*wprime))*iden      # values that will go on superdiagonal
		data = [d_sub, d_main, d_super]   # list of all the data
		diags = [-1,0,1]     
		             # which diagonal each vector goes into
		A = l.spdiags(data,diags,Nx,Nx,format='csc')  # create the matrix
	else:
		r=(k**2)/(h**2)
		rr=(k**2)/(2*h)
		iden=linspace(1,1,Nx)
		d_main = (-1.0-(r*(1.0/(w-1.0))))*iden  # values that will go on main diagonal
		d_sub = (1.0/((w-1.0)))*(r+(rr*wprime))*iden     # values that will go on subdiagonal
		d_super = (1.0/((w-1.0)))*(r-(rr*wprime))*iden      # values that will go on superdiagonal
		data = [d_sub, d_main, d_super]   # list of all the data
		diags = [-1,0,1]                  # which diagonal each vector goes into
		A = l.spdiags(data,diags,Nx,Nx,format='csc')  # create the matrix

	print(A.todense())
	return A.todense()
Example 7
def order_components(A, C):
    """Order components based on their maximum temporal value and size

    Parameters:
    -----------
    A:   sparse matrix (d x K)
         spatial components

    C:   matrix or np.ndarray (K x T)
         temporal components

    Returns:
    -------
    A_or:  np.ndarray
        ordered spatial components

    C_or:  np.ndarray
        ordered temporal components

    srt:   np.ndarray
        sorting mapping

    """
    A = np.array(A.todense())
    nA2 = np.sqrt(np.sum(A**2, axis=0))
    K = len(nA2)
    A = np.array(np.matrix(A) * spdiags(old_div(1, nA2), 0, K, K))
    nA4 = np.sum(A**4, axis=0)**0.25
    C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
    mC = np.ndarray.max(np.array(C), axis=1)
    srt = np.argsort(nA4 * mC)[::-1]
    A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
    C_or = spdiags(old_div(1., nA2[srt]), 0, K, K) * (C[srt, :])

    return A_or, C_or, srt
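A minimal usage sketch for order_components (the toy A and C are made up; old_div above comes from the python-future package, which must be installed):

import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix(np.array([[1., 0.],
                         [0., 3.],
                         [1., 0.]]))   # two spatial components over 3 pixels
C = np.array([[1., 2.],
              [5., 1.]])               # their temporal traces
A_or, C_or, srt = order_components(A, C)
print(srt)                             # [1 0]: the brighter, larger component comes first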
    def fit(self, raw_documents, y=None):
        """Learn vocabulary and log-entropy from training set.
        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects
        Returns
        -------
        self : LogEntropyVectorizer
        """
        X = super(LogEntropyVectorizer, self).fit_transform(raw_documents)

        n_samples, n_features = X.shape
        gf = np.ravel(X.sum(axis=0)) # count total number of each words

        if self.smooth_idf:
            n_samples += int(self.smooth_idf)
            gf += int(self.smooth_idf)

        P = (X * sp.spdiags(1./gf, diags=0, m=n_features, n=n_features)) # probability of word occurence
        p = P.data
        P.data = (p * np.log2(p) / np.log2(n_samples))
        g = 1 + np.ravel(P.sum(axis=0))
        f = np.log2(1 + X.data)
        X.data = f
        # global weights
        self._G = sp.spdiags(g, diags=0, m=n_features, n=n_features)
        return self
Example 9
def smooth2a(arrayin, nr, nc):

    # Building matrices that will compute running sums.  The left-matrix, eL,
    # smooths along the rows.  The right-matrix, eR, smooths along the
    # columns.  You end up replacing element "i" by the mean of a (2*Nr+1)-by- 
    # (2*Nc+1) rectangle centered on element "i".


    row = arrayin.shape[0]
    col = arrayin.shape[1]

    el = spdiags(np.ones((2*nr+1, row)), range(-nr, nr+1), row, row).todense()
    er = spdiags(np.ones((2*nc+1, col)), range(-nc, nc+1), col, col).todense()

    # Setting all "nan" elements of "arrayin" to zero so that these will not
    # affect the summation.  (If this isn't done, any sum that includes a nan
    # will also become nan.)

    a = np.isnan(arrayin)
    arrayin[a] = 0.

    # For each element, we have to count how many non-nan elements went into
    # the sums.  This is so we can divide by that number to get a mean.  We use
    # the same matrices to do this (ie, "el" and "er").

    nrmlize = el.dot((~a).dot(er))
    nrmlize[a] = None

    # Actually taking the mean.

    arrayout = el.dot(arrayin.dot(er))
    arrayout = arrayout/nrmlize

    return arrayout
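Usage is straightforward; a quick sketch on made-up 5x5 data (assumes smooth2a and its imports, numpy as np and scipy.sparse.spdiags, are in scope):

import numpy as np

a = np.arange(25, dtype=float).reshape(5, 5)
a[2, 2] = np.nan
out = smooth2a(a, 1, 1)   # 3x3 NaN-aware running mean
print(out[2, 2])          # nan: positions that were missing stay marked
print(out[0, 0])          # finite: mean of the valid entries in its 2x2 corner window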
def problem2(l):
    '''
    print the solution to the second problem in Beam Buckling
    Inputs:
        l -- length of beam in feet
    '''
    # initialize constants, unit conversions
    r = 1.0
    E1 = r*12**2*10**7
    E2 = 4.35*r*12**2*10**6
    E3 = 5*r*12**2*10**5
    L = 20.0
    I = np.pi*r**4/4
    n = 100
    h = L/n
    
    # build the sparse matrix B
    b_diag = np.ones(n)
    b_diag[0:n//3] = E1*I/h**2
    b_diag[n//3:2*n//3] = E2*I/h**2
    b_diag[2*n//3:] = E3*I/h**2
    B = spar.spdiags(b_diag, np.array([0]), n, n, format='csc')

    # build the sparse matrix A
    diag = -2*np.ones(n)
    odiag = np.ones(n)
    A = spar.spdiags(np.vstack((-odiag, -odiag, -diag)),
                     np.array([-1,1,0]), n, n, format='csc')
    
    # calculate and print the smallest eigenvalue                 
    evals = sparla.eigs(B.dot(A), which='SM')
    print(evals[0].min())
Example 11
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""

        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100

        dat = ones(n)
        A = spdiags([-2*dat, 4*dat, -dat], [-1, 0, 1], n, n)
        b = arange(n,dtype='d')

        L = spdiags([-dat/2, dat], [-1,0], n, n)
        U = spdiags([4*dat, -dat], [ 0,1], n, n)

        L_solver = splu(L)
        U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)
        def U_solve(b):
            return U_solver.solve(b)
        def LT_solve(b):
            return L_solver.solve(b,'T')
        def UT_solve(b):
            return U_solver.solve(b,'T')

        M1 = LinearOperator( (n,n), matvec=L_solve, rmatvec=LT_solve )
        M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve )

        x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

        assert_equal(info,0)
        assert_normclose(A*x, b, tol=1e-8)
Example 12
def BoundaryIndices(mesh):

    
    dim = mesh.geometry().dim()

    if dim == 3:
        EdgeBoundary = BoundaryEdge(mesh)
        EdgeBoundary = numpy.sort(EdgeBoundary)[::2]
    else:
        B = BoundaryMesh(mesh,"exterior",False)
        EdgeBoundary = B.entity_map(1).array()

    MagneticBoundary = np.ones(mesh.num_edges())
    MagneticBoundary[EdgeBoundary] = 0
    Magnetic = spdiags(MagneticBoundary,0,mesh.num_edges(),mesh.num_edges())

    B = BoundaryMesh(mesh,"exterior",False)
    NodalBoundary = B.entity_map(0).array()#.astype("int","C")
    LagrangeBoundary = np.ones(mesh.num_vertices())
    LagrangeBoundary[NodalBoundary] = 0
    Lagrange = spdiags(LagrangeBoundary,0,mesh.num_vertices(),mesh.num_vertices())
    
    if dim == 3:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary,LagrangeBoundary))
    else:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary))
    Velocity = spdiags(VelocityBoundary,0,dim*mesh.num_vertices(),dim*mesh.num_vertices())

    return [Velocity, Magnetic, Lagrange]
Example 13
    def _compute_u(self, p, D, dydx, dx, dx1, n):
        if p is None or p != 0:
            data = [dx[1:n - 1], 2 * (dx[:n - 2] + dx[1:n - 1]), dx[:n - 2]]
            R = sparse.spdiags(data, [-1, 0, 1], n - 2, n - 2)

        if p is None or p < 1:
            Q = sparse.spdiags(
                [dx1[:n - 2], -(dx1[:n - 2] + dx1[1:n - 1]), dx1[1:n - 1]],
                [0, -1, -2], n, n - 2)
            QDQ = (Q.T * D * Q)
            if p is None or p < 0:
                # Estimate p
                p = 1. / \
                    (1. + QDQ.diagonal().sum() /
                     (100. * R.diagonal().sum() ** 2))

            if p == 0:
                QQ = 6 * QDQ
            else:
                QQ = (6 * (1 - p)) * (QDQ) + p * R
        else:
            QQ = R

        # Make sure it uses symmetric matrix solver
        ddydx = diff(dydx, axis=0)
        # sp.linalg.use_solver(useUmfpack=True)
        u = 2 * sparse.linalg.spsolve((QQ + QQ.T), ddydx)  # @UndefinedVariable
        return u.reshape(n - 2, -1), p
Example 14
def _make_TV_operators(n):
    # these are the sparse horizontal and vertical differencing
    # operators for calculating total variation (TV)

    # these are constructed under the assumption that the 2D image
    # has been flattened from row-major storage -- i.e., two adjacent
    # pixels in a row can be found at locations x[i] and x[i+1],
    # and two adjacent pixels in a column can be found at locations
    # x[k] and x[k+ncol]

    # horizontal difference
    # want a pattern of [-1]*n-1 + [0] repeated n times on the main diagonal
    k0 = np.concatenate( (-np.ones((n,n-1)), np.zeros((n,1))), axis=1).ravel()
    k1 = np.concatenate( (np.zeros((n,1)), np.ones((n,n-1))), axis=1).ravel()
    Dh = sparse.spdiags(np.array([k0,k1]), (0,1), n*n, n*n)

    # vertical difference
    # want a pattern of [-1]*n*(n-1) + [0]*n on the diagonal
    # and a pattern of 1s on the nth super-diagonal
    k0 = np.zeros(n*n)
    k0[:n*n-n] = -1
    k1 = np.ones(n*n)
    k1[:n] = 0
    Dv = sparse.spdiags(np.array([k0,k1]), (0,n), n*n, n*n)

    return Dh, Dv
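To see the operators act, a small check on a 3x3 image (a sketch; assumes _make_TV_operators and its imports are in scope):

import numpy as np

Dh, Dv = _make_TV_operators(3)
x = np.arange(9, dtype=float)   # 3x3 image, flattened row-major
print(Dh.dot(x))                # [1. 1. 0. 1. 1. 0. 1. 1. 0.] -- +1 inside each row, 0 at row ends
print(Dv.dot(x))                # [3. 3. 3. 3. 3. 3. 0. 0. 0.] -- +3 between rows, 0 in the last row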
Example 15
def heat_Crank_Nicolson(init_conditions,x_subintervals,t_subintervals,x_interval=[-10,10],T=1.,flag3d="off",nu=1.):
	'''
	Parameters
	nu: diffusive constant
	L, T: Solve on the Cartesian rectangle (x,t) in x_interval x [0,T]
	x_subintervals: Number of subintervals in spatial dimension
	t_subintervals: Number of subintervals in time dimension
	a, b = x_interval[0], x_interval[1]
	'''
	a, b = x_interval[0], x_interval[1]
	delta_x, delta_t = (b-a)/x_subintervals, T/t_subintervals
		
	K = .5*nu*delta_t/delta_x**2.
		
	D0,D1,diags = (1-2.*K)*np.ones((1,(x_subintervals-1))), K*np.ones((1,(x_subintervals-1))), np.array([0,-1,1])
	data = np.concatenate((D0,D1,D1),axis=0) # This stacks up rows
	A=spdiags(data,diags,(x_subintervals-1),(x_subintervals-1)).asformat('csr') 
	# print K
	# print A.todense()
	D0,D1,diags = (1.+2.*K)*np.ones((1,(x_subintervals-1))), -K*np.ones((1,(x_subintervals-1))), np.array([0,-1,1])
	data = np.concatenate((D0,D1,D1),axis=0) # This stacks up rows
	B=spdiags(data,diags,(x_subintervals-1),(x_subintervals-1)).asformat('csr')
	
	U = np.zeros((x_subintervals+1,t_subintervals+1))
	U[:,0] = init_conditions 
	for j in range(0,int(t_subintervals)+1):
		if j>0: U[1:-1,j] =  spsolve(B,A*U[1:-1,j-1] )
	return np.linspace(a,b,x_subintervals+1), U
Example 16
def MK_EQSYSTEM (A , X , Y ):
    # Total no of internal lattice sites
    sites = X *( Y - 2)
    #print "sites:", sites
    # Allocate space for the nonzero upper diagonals
    main_diag = zeros(sites)
    upper_diag1 = zeros(sites - 1)
    upper_diag2 = zeros(sites - X)
    # Calculates the nonzero upper diagonals
    #print A
    main_diag = A[X:X*(Y-1), 0] + A[X:X*(Y-1), 1] + A[0:X*(Y-2), 1] + A[X-1:X*(Y-1)-1, 0]
    upper_diag1 = A[X:X*(Y-1)-1, 0]
    upper_diag2 = A[X:X*(Y-2), 1]
    main_diag[where(main_diag == 0)] = 1
    # Constructing B, which is symmetric (lower = upper diagonals).
    B = dia_matrix((sites, sites))  # B*u = t
    B = -spdiags(upper_diag1, -1, sites, sites)
    B = B - spdiags(upper_diag2, -X, sites, sites)
    B = B + B.T + spdiags(main_diag, 0, sites, sites)
    # Constructing C
    C = zeros(sites)
    #    C = dia_matrix ( (sites , 1) )
    C[0:X] = A[0:X, 1]
    C[-1-X+1:-1] = 0*A[-1-2*X+1:-1-X, 1]
    return B , C
def get_preconditioner():
    """Compute the preconditioner M"""
    diags_x = zeros((3, nx))
    diags_x[0, :] = 1 / hx / hx
    diags_x[1, :] = -2 / hx / hx
    diags_x[2, :] = 1 / hx / hx
    Lx = spdiags(diags_x, [-1, 0, 1], nx, nx)

    diags_y = zeros((3, ny))
    diags_y[0, :] = 1 / hy / hy
    diags_y[1, :] = -2 / hy / hy
    diags_y[2, :] = 1 / hy / hy
    Ly = spdiags(diags_y, [-1, 0, 1], ny, ny)

    J1 = spkron(Lx, eye(ny)) + spkron(eye(nx), Ly)

    # Now we have the matrix `J_1`. We need to find its inverse `M` --
    # however, since an approximate inverse is enough, we can use
    # the *incomplete LU* decomposition

    J1_ilu = spilu(J1)

    # This returns an object with a method .solve() that evaluates
    # the corresponding matrix-vector product. We need to wrap it into
    # a LinearOperator before it can be passed to the Krylov methods:

    M = LinearOperator(shape=(nx * ny, nx * ny), matvec=J1_ilu.solve)
    return M
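get_preconditioner reads nx, ny, hx, hy plus spkron, spilu, and LinearOperator from the surrounding script. The spdiags call at its core is the classic 1-D second-difference stencil; a standalone sketch:

import numpy as np
from scipy.sparse import spdiags

nx, hx = 5, 1.0
diags_x = np.zeros((3, nx))
diags_x[0, :] = 1 / hx / hx
diags_x[1, :] = -2 / hx / hx
diags_x[2, :] = 1 / hx / hx
Lx = spdiags(diags_x, [-1, 0, 1], nx, nx)
print(Lx.toarray())   # tridiagonal [1, -2, 1] pattern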
Example 18
def compute_loss_laplacian(z, mask=None):
    """ mask and ground_truth have shape: (nlabel x npixel)"""

    nlabel = len(z)
    binz = np.argmax(z, axis=0) == np.c_[np.arange(nlabel)]
    # binz = z

    size = z[0].size
    if mask is None:
        gt = binz
        npix = size
    else:
        npix = np.sum(mask[0])
        gt = binz * mask

    weight = 1.0 / float((nlabel - 1) * npix)

    A_blocks = []
    for l2 in range(nlabel):
        A_blocks_row = []
        for l11 in range(l2):
            A_blocks_row.append(sparse.coo_matrix((size, size)))
        for l12 in range(l2, nlabel):
            A_blocks_row.append(sparse.spdiags(1.0 * np.logical_xor(gt[l12], gt[l2]), 0, size, size))
        A_blocks.append(A_blocks_row)
    A_loss = sparse.bmat(A_blocks)

    A_loss = A_loss + A_loss.T
    D_loss = np.asarray(A_loss.sum(axis=0)).ravel()
    L_loss = sparse.spdiags(D_loss, 0, *A_loss.shape) - A_loss

    return -weight * L_loss.tocsr()
Example 19
 def L2_norm_row(self, X):  # apply L2 normalization to the row of X
     if sps.issparse(X):
         diag = sparse.spdiags((1. / (np.sqrt(X.multiply(X).sum(1)) + eps)).T, 0, X.shape[0],
                               X.shape[0])  # in case X is sparse
     else:
         diag = sparse.spdiags(1. / (np.sqrt(np.sum(np.multiply(X, X), axis=1)) + eps).T, 0, X.shape[0], X.shape[0])
     return diag.dot(X)  # sparse-aware product (np.dot does not handle scipy.sparse operands)
def segment(img):
    '''
    Compute two segments of the image as described in the text.
    Use your adjacency function to calculate W and D.
    Compute L, the laplacian matrix.
    Then compute D^(-1/2)LD^(-1/2), and find the eigenvector
    corresponding to the second smallest eigenvalue.
    Use this eigenvector to calculate a mask that will be used
    to extract the segments of the image.
    Inputs:
        img -- image array of shape (n,m)
    Returns:
        seg1 -- an array the same size as img, but with 0's
                for each pixel not included in the positive
                segment (which corresponds to the positive
                entries of the computed eigenvector)
        seg2 -- an array the same size as img, but with 0's
                for each pixel not included in the negative
                segment.
    '''
    # call the function adjacency to obtain the adjacency matrix W
    # and the degree array D. 
    W,D = adjacency(img)
    
    # calculate D^(-1/2)
    Dsq = np.sqrt(1.0/D)
    
    # convert D and D^(-1/2) into diagonal sparse matrices (format = 'csc')
    Ds = spar.spdiags(D, 0, D.shape[1], D.shape[1], format = 'csc')
    Dsqs = spar.spdiags(Dsq, 0, D.shape[1], D.shape[1], format = 'csc')
    
    # calculate the Laplacian, L
    L = Ds - W
    
    # calculate the matrix whose eigenvalues we will compute, D^(-1/2)LD^(-1/2)
    # np.dot will not work on sparse arrays. Instead, if P and Q are sparse
    # matrices that we would like to multiply, use P.dot(Q)
    P = Dsqs.dot(L.dot(Dsqs))
    
    # calculate the eigenvector. Use the eigs function in sparla. 
    # Be sure to set the appropriate keyword argument so that you 
    # compute the two eigenvalues with the smallest real part.
    e = sparla.eigs(P, k=2, which="SR")
    eigvec = e[1][:,1]
    
    # create a mask array that is True wherever the eigenvector is positive.
    # reshape it to be the size of img.
    mask = (eigvec>0).reshape(img.shape)
    
    # create the positive segment by masking out the pixels in img 
    # belonging to the negative segment.
    pos = img*mask
    
    # create the negative segment by masking out the pixels in img 
    # belonging to the positive segment.
    neg = img*~mask
    
    # return the two segments (positive first)
    return pos, neg
Example 21
def controlled(U, ctrl=(1,), dim=None):
    r"""Controlled gate.

    Returns the (t+1)-qudit controlled-U gate, where t == length(ctrl).
    U has to be a square matrix.

    ctrl is an integer vector defining the control nodes. It has one entry k per
    control qudit, denoting the required computational basis state :math:`|k\rangle`
    for that particular qudit. Value k == -1 denotes no control.

    dim is the dimensions vector for the control qudits. If not given, all controls
    are assumed to be qubits.

    Examples:

      controlled(NOT, [1]) gives the standard CNOT gate.
      controlled(NOT, [1, 1]) gives the Toffoli gate.
    """
    # Ville Bergholm 2009-2011

    # TODO generalization, uniformly controlled gates?
    if isscalar(dim): dim = (dim,)  # scalar into a tuple
    t = len(ctrl)
    if dim is None:
        dim = qubits(t) # qubits by default

    if t != len(dim):
        raise ValueError('ctrl and dim vectors have unequal lengths.')

    if any(array(ctrl) >= array(dim)):
        raise ValueError('Control on non-existent state.')

    yes = 1  # just the diagonal
    for k in range(t):
        if ctrl[k] >= 0:
            temp = zeros(dim[k])
            temp[ctrl[k]] = 1  # control on k
            yes = kron(yes, temp) 
        else:
            yes = kron(yes, ones(dim[k])) # no control on this qudit

    no = 1 - yes
    T = prod(dim)
    dim = list(dim)

    if isinstance(U, lmap):
        d1 = dim + list(U.dim[0])
        d2 = dim + list(U.dim[1])
        U = U.data
    else:
        d1 = dim + [U.shape[0]]
        d2 = dim + [U.shape[1]]

    # controlled gates only make sense for square matrices U (we need an identity transformation for the 'no' cases!)
    U_dim = U.shape[0]
    S = U_dim * T

    out = sparse.spdiags(kron(no, ones(U_dim)), 0, S, S) + sparse.kron(sparse.spdiags(yes, 0, T, T), U)
    return lmap(out, (d1, d2))
Example 22
File: rotate.py Project: natl/bg
 def stepRK4(self):
   '''
   Perform a timestep using the RK4 Interaction Picture
   This routine also considers real/imaginary parts
   '''
   n2 = self.npt ** 2  # keep integer: n2 is used as a matrix dimension and reshape size below
   sq = [ self.npt, self.npt ]
   
   psi   = self.psi.reshape( n2 )
   
   #Linear operator parts
   psiI = self.intpic( self.psi ).reshape( n2 )
   
   #Nonlinear operator parts
   grav = spdiags( +1.j * self.gravity().reshape( n2 ), 0, n2, n2 )
   harm = spdiags( -1.j * self.V.reshape( n2 ), 0, n2, n2 )
   sint = spdiags( -1.j * self.g * abs( self.psi.reshape( n2 ) ) ** 2., 0, n2, n2 )
   
   nonlin = self.Lz + grav + harm + sint 
   
   #timesteps - note, we need to keep the pointer to self.psi the same
   #as this is requried if FFTW is ever implemented. We still need to update
   #self.psi at each step though to calculate self.gravity()
   ############################################################################
   k1 = self.intpic( ( nonlin.dot( psi ) ).reshape( sq ) )
   #self.psi[:] = self.intpic( psiI.reshape( sq ) + k1 / 2. )
   
   #grav = spdiags( +1.j * self.gravity().reshape( n2 ), 0, n2, n2 )
   #sint = spdiags( -1.j * self.g * abs( self.psi.reshape( n2 ) ) ** 2., 0, n2, n2 )
   #nonlin = self.Lz + grav + harm + sint 
   ############################################################################
   k2 = ( nonlin.dot( psiI + self.dt * k1.reshape( n2 ) / 2. ) ).reshape( sq )
   #self.psi[:] = self.intpic( psiI.reshape( sq ) + k2 / 2. )
   
   #grav = spdiags( +1.j * self.gravity().reshape( n2 ), 0, n2, n2 )
   #sint = spdiags( -1.j * self.g * abs( self.psi.reshape( n2 ) ) ** 2., 0, n2, n2 )
   #nonlin = self.Lz + grav + harm + sint 
   ############################################################################
   k3 = ( nonlin.dot( psiI + self.dt * k2.reshape( n2 ) / 2. ) ).reshape( sq )
   #self.psi[:] = self.intpic( psiI.reshape( sq ) + k3 )
   
   #grav = spdiags( +1.j * self.gravity().reshape( n2 ), 0, n2, n2 )
   #sint = spdiags( -1.j * self.g * abs( self.psi.reshape( n2 ) ) ** 2., 0, n2, n2 )
   #nonlin = self.Lz + grav + harm + sint 
   ############################################################################
   
   k4 = nonlin.dot( 
              self.intpic( psiI.reshape( sq ) + self.dt * k3 ).reshape( n2 ) 
                  ).reshape( sq )
   
   ############################################################################
   
   
   # standard RK4(-IP) combination: transform the (k1 + 2*k2 + 2*k3)/6 part back
   # out of the interaction picture, then add the dt*k4/6 contribution
   self.psi[:] = self.intpic( psiI.reshape(sq) +
                       self.dt * ( k1 + 2. * ( k2 + k3 ) ) / 6. ) + self.dt * k4 / 6.
   if self.wick: self.normalise()
   print(id(self.psi))
   return None
Example 23
    def laplacian(G): 
        """Returns Laplacian of graph."""  
        n = G.shape[0]

        # FIXME: Potential for memory savings, if assignment is used
        G = G - sparse.spdiags(G.diagonal(), 0, n, n)
        G = -G + sparse.spdiags(G.sum(0), 0, n, n)

        return G
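The two spdiags lines implement L = D - A after stripping self-loops. A self-contained check on a made-up 3-node graph:

import numpy as np
from scipy import sparse

G = sparse.csr_matrix(np.array([[1., 1., 0.],
                                [1., 0., 1.],
                                [0., 1., 0.]]))   # adjacency with one self-loop
n = G.shape[0]
G = G - sparse.spdiags(G.diagonal(), 0, n, n)     # remove the self-loop
L = -G + sparse.spdiags(G.sum(0), 0, n, n)        # L = D - A
print(L.toarray())                                # each row sums to 0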
Example 24
def compute_mesh_laplacian(verts, tris):
    """
    computes a sparse matrix representing the discretized laplace-beltrami operator of the mesh
    given by n vertex positions ("verts") and m triangles ("tris")
    
    verts: (n, 3) array (float)
    tris: (m, 3) array (int) - indices into the verts array

    computes the conformal weights ("cotangent weights") for the mesh, ie:
    w_ij = - .5 * (cot \alpha + cot \beta)

    See:
        Olga Sorkine, "Laplacian Mesh Processing"
        and for theoretical comparison of different discretizations, see 
        Max Wardetzky et al., "Discrete Laplace operators: No free lunch"

    returns matrix L that computes the laplacian coordinates, e.g. L * x = delta
    """
    n = len(verts)
    W_ij = np.empty(0)
    I = np.empty(0, np.int32)
    J = np.empty(0, np.int32)
    for i1, i2, i3 in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]: # for edge i2 --> i3 facing vertex i1
        vi1 = tris[:,i1] # vertex index of i1
        vi2 = tris[:,i2]
        vi3 = tris[:,i3]
        # vertex vi1 faces the edge between vi2--vi3
        # compute the angle at v1
        # add cotangent angle at v1 to opposite edge v2--v3
        # the cotangent weights are symmetric
        u = verts[vi2] - verts[vi1]
        v = verts[vi3] - verts[vi1]
        cotan = (u * v).sum(axis=1) / veclen(np.cross(u, v))
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi2)
        J = np.append(J, vi3)
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi3)
        J = np.append(J, vi2)
    L = sparse.csr_matrix((W_ij, (I, J)), shape=(n, n))
    # compute diagonal entries
    L = L - sparse.spdiags(L * np.ones(n), 0, n, n)
    L = L.tocsr()
    # area matrix
    e1 = verts[tris[:,1]] - verts[tris[:,0]]
    e2 = verts[tris[:,2]] - verts[tris[:,0]]
    n = np.cross(e1, e2)
    triangle_area = .5 * veclen(n)
    # compute per-vertex area
    vertex_area = np.zeros(len(verts))
    ta3 = triangle_area / 3
    for i in range(tris.shape[1]):
        bc = np.bincount(tris[:,i].astype(int), ta3)
        vertex_area[:len(bc)] += bc
    VA = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
    return L, VA
Example 25
def HiptmairBCsetup(C, P, mesh, Func):
    tic()
    W = Func[0]*Func[1]
    def boundary(x, on_boundary):
        return on_boundary
    bcW = DirichletBC(W.sub(0), Expression(("1.0","1.0","1.0")), boundary)

    bcuW = DirichletBC(W.sub(1), Expression(("1.0")), boundary)
    # Wv,Wq=TestFunctions(W)
    # Wu,Wp=TrialFunctions(W)


    dim = mesh.geometry().dim()
    tic()
    if dim == 3:
        EdgeBoundary = BoundaryEdge(mesh)
    else:
        B = BoundaryMesh(mesh,"exterior",False)
        EdgeBoundary = numpy.sort(B.entity_map(1).array().astype("int","C"))


    B = BoundaryMesh(mesh,"exterior",False)
    NodalBoundary = B.entity_map(0).array()#.astype("int","C")
    onelagrange = numpy.ones(mesh.num_vertices())
    onelagrange[NodalBoundary] = 0
    Diaglagrange = spdiags(onelagrange,0,mesh.num_vertices(),mesh.num_vertices())

    onemagnetic = numpy.ones(mesh.num_edges())
    onemagnetic[EdgeBoundary.astype("int","C")] = 0
    Diagmagnetic = spdiags(onemagnetic,0,mesh.num_edges(),mesh.num_edges())


    print ("{:40}").format("Work out boundary matrices, time: "), " ==>  ",("{:4f}").format(toc())

    tic()
    C = Diagmagnetic*C*Diaglagrange
    G = PETSc.Mat().createAIJ(size=C.shape,csr=(C.indptr, C.indices, C.data))
    print ("{:40}").format("BC applied to gradient, time: "), " ==>  ",("{:4f}").format(toc())

    if dim == 2:
        tic()
        Px = Diagmagnetic*P[0]*Diaglagrange
        Py = Diagmagnetic*P[1]*Diaglagrange
        print ("{:40}").format("BC applied to Prolongation, time: "), " ==>  ",("{:4f}").format(toc())
        P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data))]
    else:
        tic()
        Px = Diagmagnetic*P[0]*Diaglagrange
        Py = Diagmagnetic*P[1]*Diaglagrange
        Pz = Diagmagnetic*P[2]*Diaglagrange
        print ("{:40}").format("BC applied to Prolongation, time: "), " ==>  ",("{:4f}").format(toc())
        P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data)),PETSc.Mat().createAIJ(size=Pz.shape,csr=(Pz.indptr, Pz.indices, Pz.data))]

    return  G, P
Example 26
def HiptmairBCsetupBoundary(C, P, mesh):

    dim = mesh.geometry().dim()
    tic()
    if dim == 3:
        EdgeBoundary = BoundaryEdge(mesh)
    else:
        B = BoundaryMesh(mesh,"exterior",False)
        EdgeBoundary = numpy.sort(B.entity_map(1).array().astype("int","C"))


    B = BoundaryMesh(mesh,"exterior",False)
    NodalBoundary = B.entity_map(0).array()#.astype("int","C")
    onelagrange = numpy.ones(mesh.num_vertices())
    onelagrange[NodalBoundary] = 0
    Diaglagrange = spdiags(onelagrange,0,mesh.num_vertices(),mesh.num_vertices())

    onemagnetic = numpy.ones(mesh.num_edges())
    onemagnetic[EdgeBoundary.astype("int","C")] = 0
    Diagmagnetic = spdiags(onemagnetic,0,mesh.num_edges(),mesh.num_edges())

    del mesh
    tic()
    C = Diagmagnetic*C*Diaglagrange
    # C = C
    G = PETSc.Mat().createAIJ(size=C.shape,csr=(C.indptr, C.indices, C.data))
    end = toc()
    MO.StrTimePrint("BC applied to gradient, time: ",end)

    if dim == 2:
        tic()
        # Px = P[0]
        # Py = P[1]
        Px = Diagmagnetic*P[0]*Diaglagrange
        Py = Diagmagnetic*P[1]*Diaglagrange
        end = toc()
        MO.StrTimePrint("BC applied to Prolongation, time: ",end)
        P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data))]
    else:
        tic()
        # Px = P[0]
        # Py = P[1]
        # Pz = P[2]
        Px = Diagmagnetic*P[0]*Diaglagrange
        Py = Diagmagnetic*P[1]*Diaglagrange
        Pz = Diagmagnetic*P[2]*Diaglagrange
        end = toc()
        MO.StrTimePrint("BC applied to Prolongation, time: ",end)
        P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data)),PETSc.Mat().createAIJ(size=Pz.shape,csr=(Pz.indptr, Pz.indices, Pz.data))]
    del Px, Py, Diaglagrange
    return  G, P
Example 27
def diff1fd24(n,order=2,h=1):
    """
    D = diff1fd24(n,order,grid-spacing)
    
    Notes:
        * Local (i.e. uses neighboring grid points) differentiation matrix
        * Select either 2nd or 4th order accuracy
        * Uses centered-differences for the interior
        * Uses one-sided differencing for the boundaries

    Input:
        n -- number of grid points
        order -- 2nd or 4th order
        h -- grid spacing
        
    Output:
        D -- global differentiation matrix
        
    Edits:
        02 JUL 2015:
        First working version diff1fd24 (KJW)
        Unit test added, f(x) = exp(sin(pi*x)) (KJW)
    """ 
    from scipy.sparse import spdiags, lil_matrix
    from numpy import ones, array
    e = ones(n)
    if order == 2:
        #Construct stencil weights
        stencil = array([-e/2.,e/2.])
        #Centered difference stencils are located on the off-diagonals
        diag_array = array([-1,1])
        D = spdiags(stencil,diag_array,n,n)
        D = lil_matrix(D)
        #Differentiation at the boundary is approximated using a one-sided stencil
        D[0,0:3] = array([-3./2,2.,-1./2])
        D[-1,-3:] = array([1/2.,-2.,3./2])
    elif order == 4:
        #Construct stencil weights        
        stencil = array([e/12., -2*e/3.,2*e/3.,-e/12.])
        #Centered difference stencils are located on the off-diagonals
        diag_array = array([-2,-1,1,2])
        D = spdiags(stencil,diag_array,n,n)
        D = lil_matrix(D)
        #Differentiation at the boundary is approximated using a one-sided stencil
        D[1,0:6] = array([0.,-25./12,4.,-3.,4./3,-1./4])
        D[-2,-6:] = array([1./4,-4./3,3.,-4.,25./12,0.])
        D[0,0:5] = array([-25./12,4.,-3.,4./3,-1./4])
        D[-1,-5:] = array([1./4,-4./3,3.,-4.,25./12])
    else:
        print("*** Only 2nd or 4th order approximaitons are considered.")
    return D/h
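A quick accuracy check in the spirit of the unit test mentioned in the docstring (a sketch; sin/cos instead of exp(sin(pi*x)) for brevity):

import numpy as np

n = 50
x = np.linspace(0, 2 * np.pi, n)
D = diff1fd24(n, order=2, h=x[1] - x[0])
err = np.abs(D.dot(np.sin(x)) - np.cos(x)).max()
print(err)   # second-order accurate: the error drops ~4x when n is doubled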
Example 28
def get_implicit_directed_adjacency_matrix(stationary_distribution, rw_transition):
    number_of_nodes = rw_transition.shape[0]

    sqrtp = sp.sqrt(stationary_distribution)
    Q = spsp.spdiags(sqrtp, [0], number_of_nodes, number_of_nodes) * rw_transition * spsp.spdiags(1.0/sqrtp, [0], number_of_nodes, number_of_nodes)

    effective_adjacency_matrix = (Q + Q.T) /2.0

    effective_adjacency_matrix = spsp.coo_matrix(spsp.csr_matrix(effective_adjacency_matrix))
    effective_adjacency_matrix.data = np.real(effective_adjacency_matrix.data)

    effective_adjacency_matrix = spsp.csr_matrix(effective_adjacency_matrix)

    return effective_adjacency_matrix, np.ones(number_of_nodes, dtype=np.float64)
Example 29
    def aggregatorSemiParallel(self,S, comm):
        ''' Do the aggregation step in parallel whoop! '''
        ''' oh this is going to get trickier when we return to 3d & multivariates'''
        n = S[0].fwd.getPSize()
        nX = S[0].fwd.getXSize()
        
        ''' matrix method, i.e. TM, TE3D required '''
        uL = sparse.lil_matrix((n,n),dtype='complex128')
        bL = np.zeros(n,dtype='complex128')
        
        for L in S:
            M = L.s*(sparse.spdiags(L.fwd.x2u.T*(L.ub+L.us),0,nX,nX))*self.fwd.p2x
            uL = uL + M.T.conj()*M
            bL = bL + M.T.conj()*(L.X + L.Z)
            
        U = sparse.lil_matrix((n,n),dtype='complex128')
        B = np.zeros(n,dtype='complex128')

        U = comm.allreduce(uL,U,op=MPI.SUM)
        B = comm.allreduce(bL,B,op=MPI.SUM)
        ''' interesting: += isn't implemented?'''
        U = U + sparse.spdiags((self.lmb/self.rho)*np.ones(n), 0, n, n)
        
        P = lin.spsolve(U,B)
        self.pL.append(lin.spsolve(uL,bL))
        
        ''' matrix free method '''
#        uL = np.zeros(n,dtype='complex128')
#        bL = np.zeros(n,dtype='complex128')
#        for L in S:
#            uL += self.s*self.fwd.Md*(self.us + self.ub)
#            bL += self.X + self.Z

#        U = np.zeros(n,dtype='complex128')
#        B = np.zeros(n,dtype='complex128')
#        
#        U = comm.allreduce(uL,U,op=MPI.SUM)
#        B = comm.allreduce(bL,B,op=MPI.SUM)
#        
#        num = self.rho*(U.real*B.real + U.imag*B.imag)
#        den = self.rho*(U.conj()*U) + self.lmb
#        
#        P = (num/den)
#        self.pL.append((uL.real*bL.real + uL.imag*bL.imag)/(uL.conj()*uL))        
        
        ''' finish off '''
        P = P.real
        P = np.maximum(P,0)
        P = np.minimum(P,self.upperBound)
        return P
Example 30
def Boundary(Space,BoundaryMarkers):
    key = list(BoundaryMarkers.keys())  # list() so the keys can be indexed under Python 3
    BC = zeros(0)
    for i in range(len(key)):
        BC = append(BC,int(str(key[i])))
    Boundary = ones(Space.dim())
    Boundary[array(BC).astype('int')] = 0
    BCmarkers = spdiags(Boundary,0,Space.dim(),Space.dim())

    Boundary = zeros(Space.dim())
    Boundary[array(BC).astype('int')] = 1
    BC = spdiags(Boundary,0,Space.dim(),Space.dim())

    return BCmarkers, BC
Example 31
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import spdiags

np.random.seed(1)
D = 150  # total number of samples
obs = 10  # number of observed samples
xs = np.linspace(0, 1, D)  # x axis

perm = np.arange(D)
np.random.shuffle(perm)

obsNdx = perm[:obs]  # indices of the observed samples
hidNdx = np.array(list(set(np.arange(D)) - set(obsNdx)))  # indices of the missing data
xobs = np.random.randn(obs).reshape(-1, 1)  # draw the observed values

L = spdiags((np.ones(shape=(D, 1)) * np.array([-1, 2, -1])).T, [0, 1, 2],
            D - 2, D).toarray()  # build the second-difference matrix L
lam = 30  # lambda controlling the prior strength
L = lam * L

L1 = L[:, hidNdx]
L2 = L[:, obsNdx]
lam11 = np.dot(L1.T, L1)
lam12 = np.dot(L1.T, L2)
posterior_Sigma = np.linalg.pinv(lam11)
posterior_mu = -np.dot(np.dot(np.linalg.pinv(lam11), lam12), xobs).reshape(
    -1, 1)

# plot
figure = plt.figure(1)
ax1 = plt.subplot(1, 2, 1)
ax1.plot(xs[hidNdx], posterior_mu.ravel(), linewidth=2)
def directed_laplacian_matrix(G,
                              nodelist=None,
                              weight='weight',
                              walk_type=None,
                              alpha=0.95):
    r"""Return the directed Laplacian matrix of G.

    The graph directed Laplacian is the matrix

    .. math::

        L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2

    where `I` is the identity matrix, `P` is the transition matrix of the
    graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
    zeros elsewhere.

    Depending on the value of walk_type, `P` can be the transition matrix
    induced by a random walk, a lazy random walk, or a random walk with
    teleportation (PageRank).

    Parameters
    ----------
    G : DiGraph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
       If None, `P` is selected depending on the properties of the
       graph. Otherwise is one of 'random', 'lazy', or 'pagerank'

    alpha : real
       (1 - alpha) is the teleportation probability used with pagerank

    Returns
    -------
    L : NumPy array
      Normalized Laplacian of G.

    Raises
    ------
    NetworkXError
        If NumPy cannot be imported

    NetworkXNotImplemented
        If G is not a DiGraph

    Notes
    -----
    Only implemented for DiGraphs

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung (2005).
       Laplacians and the Cheeger inequality for directed graphs.
       Annals of Combinatorics, 9(1), 2005
    """
    import scipy as sp
    from scipy.sparse import identity, spdiags, linalg
    if walk_type is None:
        if nx.is_strongly_connected(G):
            if nx.is_aperiodic(G):
                walk_type = "random"
            else:
                walk_type = "lazy"
        else:
            walk_type = "pagerank"

    M = nx.to_scipy_sparse_matrix(G,
                                  nodelist=nodelist,
                                  weight=weight,
                                  dtype=float)
    n, m = M.shape
    if walk_type in ["random", "lazy"]:
        DI = spdiags(1.0 / sp.array(M.sum(axis=1).flat), [0], n, n)
        if walk_type == "random":
            P = DI * M
        else:
            I = identity(n)
            P = (I + DI * M) / 2.0

    elif walk_type == "pagerank":
        if not (0 < alpha < 1):
            raise nx.NetworkXError('alpha must be between 0 and 1')
        # this is using a dense representation
        M = M.todense()
        # add constant to dangling nodes' row
        dangling = sp.where(M.sum(axis=1) == 0)
        for d in dangling[0]:
            M[d] = 1.0 / n
        # normalize
        M = M / M.sum(axis=1)
        P = alpha * M + (1 - alpha) / n
    else:
        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")

    evals, evecs = linalg.eigs(P.T, k=1)
    v = evecs.flatten().real
    p = v / v.sum()
    sqrtp = sp.sqrt(p)
    Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0 / sqrtp, [0], n, n)
    I = sp.identity(len(G))

    return I - (Q + Q.T) / 2.0
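A minimal usage sketch (requires the older NetworkX/SciPy APIs the function relies on, e.g. nx.to_scipy_sparse_matrix and sp.array):

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 0), (1, 2), (2, 0)])   # strongly connected and aperiodic -> random walk
L = directed_laplacian_matrix(G)
print(L)                                           # symmetric 3x3 normalized directed Laplacian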
Example 33
def Stokes(V, Q, BC, f, params, FS, InitialTol, Neumann=None, A=0, b=0):
    Split = 'No'  # defined up front: the solver setup below tests it even when A is passed in
    if A == 0:
        # parameters['linear_algebra_backend'] = 'uBLAS'

        # parameters = CP.ParameterSetup()
        # parameters["form_compiler"]["quadrature_degree"] = 6
        parameters['reorder_dofs_serial'] = False
        if Split == 'No':
            W = V * Q

            (u, p) = TrialFunctions(W)
            (v, q) = TestFunctions(W)
            print(FS)

            def boundary(x, on_boundary):
                return on_boundary

            bcu = DirichletBC(W.sub(0), BC, boundary)
            u_k = Function(V)

            if FS == "DG":
                h = CellSize(V.mesh())
                h_avg = avg(h)
                a11 = inner(grad(v), grad(u)) * dx
                a12 = div(v) * p * dx
                a21 = div(u) * q * dx
                a22 = 0.1 * h_avg * jump(p) * jump(q) * dS
                L1 = inner(v, f) * dx
                a = params[2] * a11 - a12 - a21 - a22
            else:
                if W.sub(0).__str__().find("Bubble") == -1 and W.sub(
                        0).__str__().find("CG1") != -1:

                    print "Bubble Bubble Bubble Bubble Bubble"
                    a11 = inner(grad(v), grad(u)) * dx
                    a12 = div(v) * p * dx
                    a21 = div(u) * q * dx
                    h = CellSize(V.mesh())
                    beta = 0.2
                    delta = beta * h * h
                    a22 = delta * inner(grad(p), grad(q)) * dx
                    a = params[2] * a11 - a12 - a21 - a22

                    L1 = inner(v - delta * grad(q), f) * dx
                else:
                    a11 = inner(grad(v), grad(u)) * dx
                    a12 = div(v) * p * dx
                    a21 = div(u) * q * dx
                    L1 = inner(v, f) * dx
                    a = params[2] * a11 - a12 - a21

            tic()
            AA, bb = assemble_system(a, L1, bcu)
            A, b = CP.Assemble(AA, bb)
            del AA
            x = b.duplicate()

            pp = params[2] * inner(
                grad(v), grad(u)) * dx + (1. / params[2]) * p * q * dx
            PP, Pb = assemble_system(pp, L1, bcu)
            P = CP.Assemble(PP)
            del PP
        else:
            u = TrialFunction(V)
            v = TestFunction(V)
            p = TrialFunction(Q)
            q = TestFunction(Q)

            def boundary(x, on_boundary):
                return on_boundary

            W = V * Q
            bcu = DirichletBC(V, BC, boundary)
            u_k = Function(V)

            if FS == "DG":
                h = CellSize(V.mesh())
                h_avg = avg(h)
                a11 = inner(grad(v), grad(u)) * dx
                a12 = div(v) * p * dx
                a21 = div(u) * q * dx
                a22 = 0.1 * h_avg * jump(p) * jump(q) * dS
                L1 = inner(v, f) * dx
            else:
                if V.__str__().find(
                        "Bubble") == -1 and V.__str__().find("CG1") != -1:
                    a11 = params[2] * inner(grad(v), grad(u)) * dx
                    a12 = -div(v) * p * dx
                    a21 = div(u) * q * dx
                    L1 = inner(v, f) * dx
                    h = CellSize(V.mesh())
                    beta = 0.2
                    delta = beta * h * h
                    a22 = delta * inner(grad(p), grad(q)) * dx
                else:
                    a11 = params[2] * inner(grad(v), grad(u)) * dx
                    a12 = -div(v) * p * dx
                    a21 = div(u) * q * dx
                    L1 = inner(v, f) * dx

            AA = assemble(a11)
            bcu.apply(AA)
            b = assemble(L1)
            bcu.apply(b)

            AA, b = assemble_system(a11, L1, bcu)
            BB = assemble(a12)
            AAA = AA
            AA = AA.sparray()
            mesh = V.mesh()
            dim = mesh.geometry().dim()
            LagrangeBoundary = np.zeros(dim * mesh.num_vertices())
            Row = (AA.sum(0)[0, :dim * mesh.num_vertices()] -
                   AA.diagonal()[:dim * mesh.num_vertices()])
            VelocityBoundary = np.abs(Row.A1) > 1e-4
            LagrangeBoundary[VelocityBoundary] = 1
            BubbleDOF = np.ones(dim * mesh.num_cells())
            VelocityBoundary = np.concatenate((LagrangeBoundary, BubbleDOF))
            BC = sp.spdiags(VelocityBoundary, 0, V.dim(), V.dim())
            B = BB.sparray().T * BC
            A = CP.Scipy2PETSc(sp.bmat([[AA, B.T], [B, None]]))
            io.savemat("A.mat", {"A": sp.bmat([[AA, B.T], [B, None]])})
            b = IO.arrayToVec(
                np.concatenate((b.array(), np.zeros(Q.dim())), axis=0))

            mass = (1. / params[2]) * p * q * dx
            mass1 = assemble(mass).sparray()
            mass2 = assemble(mass)
            W = V * Q
            io.savemat("P.mat", {"P": sp.bmat([[AA, None], [None, (mass1)]])})
            P = CP.Scipy2PETSc(sp.bmat([[AA, None], [None, (mass1)]]))
    else:
        W = V * Q

        (u, p) = TrialFunctions(W)
        (v, q) = TestFunctions(W)
        print(FS)

        def boundary(x, on_boundary):
            return on_boundary

        bcu = DirichletBC(W.sub(0), BC, boundary)
        u_k = Function(V)

        pp = params[2] * inner(grad(v),
                               grad(u)) * dx + (1. / params[2]) * p * q * dx
        P = assemble(pp)
        bcu.apply(P)
        P = CP.Assemble(P)

    ksp = PETSc.KSP().create()
    pc = ksp.getPC()

    ksp.setType(ksp.Type.MINRES)
    ksp.setTolerances(InitialTol)
    pc.setType(PETSc.PC.Type.PYTHON)

    if Split == "Yes":
        A = PETSc.Mat().createPython([W.dim(), W.dim()])
        A.setType('python')
        print(AAA)
        a = MM.Mat2x2multi(W, [CP.Assemble(AAA), CP.Scipy2PETSc(B)])
        A.setPythonContext(a)
        pc.setPythonContext(
            SP.ApproxSplit(W, CP.Assemble(AAA), CP.Assemble(mass2)))
    else:
        pc.setPythonContext(SP.Approx(W, P))

    ksp.setOperators(A, P)
    x = b.duplicate()
    tic()
    ksp.solve(b, x)
    print("{:40}").format("Stokes solve, time: "), " ==>  ", ("{:4f}").format(
        toc()), ("{:9}").format("   Its: "), ("{:4}").format(
            ksp.its), ("{:9}").format("   time: "), ("{:4}").format(
                time.strftime('%X %x %Z')[0:5])
    X = x.array
    print(np.linalg.norm(x))
    # print X
    ksp.destroy()
    x = X[0:V.dim()]
    p = X[V.dim():]
    # x =
    u = Function(V)
    u.vector()[:] = x
    # print u.vector().array()
    pp = Function(Q)
    n = p.shape
    pp.vector()[:] = p

    ones = Function(Q)
    ones.vector()[:] = (0 * ones.vector().array() + 1)
    pp.vector()[:] += -assemble(pp * dx) / assemble(ones * dx)

    #
    #    PP = io.loadmat('Ptrue.mat')["Ptrue"]
    #    PP = CP.Scipy2PETSc(PP)
    #    x = IO.arrayToVec(np.random.rand(W.dim()))
    #    y = x.duplicate()
    #    yy = x.duplicate()
    #    SP.ApproxFunc(W,PP,x,y)
    #    SP.ApproxSplitFunc(W,CP.Scipy2PETSc(AA),CP.Scipy2PETSc(mass),x,yy)
    #    print np.linalg.norm(y.array-yy.array)
    #    sssss

    return u, pp
Example 34
    def __init__(self):
        # list of tuples (solver, symmetric, positive_definite )
        solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres]
        sym_solvers = [minres, cg]
        posdef_solvers = [cg]
        real_solvers = [minres]

        self.solvers = solvers

        # list of tuples (A, symmetric, positive_definite )
        self.cases = []

        # Symmetric and Positive Definite
        N = 40
        data = ones((3, N))
        data[0, :] = 2
        data[1, :] = -1
        data[2, :] = -1
        Poisson1D = spdiags(data, [0, -1, 1], N, N, format='csr')
        self.Poisson1D = Case("poisson1d", Poisson1D)
        self.cases.append(Case("poisson1d", Poisson1D))
        # note: minres fails for single precision
        self.cases.append(
            Case("poisson1d", Poisson1D.astype('f'), skip=[minres]))

        # Symmetric and Negative Definite
        self.cases.append(
            Case("neg-poisson1d", -Poisson1D, skip=posdef_solvers))
        # note: minres fails for single precision
        self.cases.append(
            Case("neg-poisson1d", (-Poisson1D).astype('f'),
                 skip=posdef_solvers + [minres]))

        # Symmetric and Indefinite
        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]], dtype='d')
        RandDiag = spdiags(data, [0], 10, 10, format='csr')
        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
        self.cases.append(
            Case("rand-diag", RandDiag.astype('f'), skip=posdef_solvers))

        # Random real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        self.cases.append(Case("rand", data,
                               skip=posdef_solvers + sym_solvers))
        self.cases.append(
            Case("rand", data.astype('f'), skip=posdef_solvers + sym_solvers))

        # Random symmetric real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        data = data + data.T
        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
        self.cases.append(
            Case("rand-sym", data.astype('f'), skip=posdef_solvers))

        # Random pos-def symmetric real
        np.random.seed(1234)
        data = np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-sym-pd", data))
        # note: minres fails for single precision
        self.cases.append(Case("rand-sym-pd", data.astype('f'), skip=[minres]))

        # Random complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        self.cases.append(
            Case("rand-cmplx",
                 data,
                 skip=posdef_solvers + sym_solvers + real_solvers))
        self.cases.append(
            Case("rand-cmplx",
                 data.astype('F'),
                 skip=posdef_solvers + sym_solvers + real_solvers))

        # Random hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        data = data + data.T.conj()
        self.cases.append(
            Case("rand-cmplx-herm", data, skip=posdef_solvers + real_solvers))
        self.cases.append(
            Case("rand-cmplx-herm",
                 data.astype('F'),
                 skip=posdef_solvers + real_solvers))

        # Random pos-def hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(9, 9) + 1j * np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
        self.cases.append(
            Case("rand-cmplx-sym-pd", data.astype('F'), skip=real_solvers))

        # Non-symmetric and Positive Definite
        #
        # cgs, qmr, and bicg fail to converge on this one
        #   -- algorithmic limitation apparently
        data = ones((2, 10))
        data[0, :] = 2
        data[1, :] = -1
        A = spdiags(data, [0, -1], 10, 10, format='csr')
        self.cases.append(
            Case("nonsymposdef", A, skip=sym_solvers + [cgs, qmr, bicg]))
        self.cases.append(
            Case("nonsymposdef",
                 A.astype('F'),
                 skip=sym_solvers + [cgs, qmr, bicg]))
Example 35
def evolution_strength_of_connection(A,
                                     B='ones',
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil = numpy.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A, numpy.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius
    from pyamg.relaxation.chebyshev import chebyshev_polynomial_coefficients

    #====================================================================
    #Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    #====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix(
                (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
                shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    #Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()

    #Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(range(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde * Atilde

        JacobiStep = (I - (1.0 / rho_DinvA) * Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde * JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

        del mask

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

        del mask

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   Its not clear what to do where B == 0.  This is an
        #   an easy programming solution, that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        #Calculate Approximation ratio
        Atilde.data = Atilde.data / data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        #Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        #Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(range(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(
            Atilde.data, Atilde.indptr, Atilde.indices, Atilde.shape[0],
            np.ravel(np.asarray(B)),
            np.ravel(np.asarray((D_A * np.conjugate(B)).T)),
            np.ravel(np.asarray(BDB)), BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    #Apply drop tolerance
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)

    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        #Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix(
            (CSRdata, Atilde.indices, Atilde.indptr),
            shape=(Atilde.shape[0] // numPDEs, Atilde.shape[1] // numPDEs))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
def diag(array):
    n = len(array)
    return spdiags(array, 0, n, n)
Example 37
    def setUp(self):
        n = 40
        d = arange(n) + 1
        self.n = n
        self.A = spdiags((d, 2 * d, d[::-1]), (-3, 0, 5), n, n)
        random.seed(1234)
Example 38
def thermal_dm(N, n, method='operator'):
    """Density matrix for a thermal state of n particles

    Parameters
    ----------
    N : int
        Number of basis states in Hilbert space.

    n : float
        Expectation value for number of particles in thermal state.

    method : string {'operator', 'analytic'}
        ``string`` that sets the method used to generate the
        thermal state probabilities

    Returns
    -------
    dm : qobj
        Thermal state density matrix.

    Examples
    --------
    >>> thermal_dm(5, 1) # doctest: +SKIP
    Quantum object: dims = [[5], [5]], \
shape = [5, 5], type = oper, isHerm = True
    Qobj data =
    [[ 0.51612903  0.          0.          0.          0.        ]
     [ 0.          0.25806452  0.          0.          0.        ]
     [ 0.          0.          0.12903226  0.          0.        ]
     [ 0.          0.          0.          0.06451613  0.        ]
     [ 0.          0.          0.          0.          0.03225806]]


    >>> thermal_dm(5, 1, 'analytic') # doctest: +SKIP
    Quantum object: dims = [[5], [5]], \
shape = [5, 5], type = oper, isHerm = True
    Qobj data =
    [[ 0.5      0.       0.       0.       0.     ]
     [ 0.       0.25     0.       0.       0.     ]
     [ 0.       0.       0.125    0.       0.     ]
     [ 0.       0.       0.       0.0625   0.     ]
     [ 0.       0.       0.       0.       0.03125]]

    Notes
    -----
    The 'operator' method (default) generates
    the thermal state using the truncated number operator ``num(N)``. This
    is the method that should be used in computations. The
    'analytic' method uses the analytic coefficients derived in
    an infinite Hilbert space. The analytic form is not necessarily normalized,
    if truncated too aggressively.

    """
    if n == 0:
        return fock_dm(N, 0)
    else:
        i = arange(N)
        if method == 'operator':
            beta = np.log(1.0 / n + 1.0)
            diags = np.exp(-beta * i)
            diags = diags / np.sum(diags)
            # populates diagonal terms using truncated operator expression
            rm = sp.spdiags(diags, 0, N, N, format='csr')
        elif method == 'analytic':
            # populates diagonal terms using analytic values
            rm = sp.spdiags((1.0 + n)**(-1.0) * (n / (1.0 + n))**(i),
                            0,
                            N,
                            N,
                            format='csr')
        else:
            raise ValueError(
                "'method' keyword argument must be 'operator' or 'analytic'")
    return Qobj(rm)
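
A minimal, QuTiP-free sketch of the 'analytic' branch above (illustration values assumed): the diagonal of the thermal state is the geometric distribution p_i = (n/(n+1))^i / (n+1), and truncating it at N states leaves it slightly under-normalized, exactly as the docstring warns.

import numpy as np
from scipy import sparse as sp

# assumed illustration values, not part of the original snippet
N, n = 5, 1.0
i = np.arange(N)
p = (1.0 + n) ** (-1.0) * (n / (1.0 + n)) ** i      # geometric distribution
rm = sp.spdiags(p, 0, N, N, format='csr')
print(rm.diagonal())   # [0.5  0.25  0.125  0.0625  0.03125]
print(p.sum())         # 0.96875 < 1: the truncated analytic form is not normalized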
def ObtainDemographic(params=updated_Dpara()):
    """ lamb, gamma, Mortrate, and Matrate are arrays of size
        equal to the number of age groups """
    Pop0 = params['Pop0']
    Nage = params['Nage']
    AgeMort = params['Mortrate']
    Maturation = params['Matrate']
    Nstrata = params['Nstrata']
    AgeFrac = np.ones(Nage) * 1. / Nage
    Sources = np.zeros(Nage * Nstrata)
    """-----------------Obtain Infection Free equilibrium populations-------------------------------------"""
    lamb = np.zeros(Nage, dtype=float)
    gamma = np.zeros(Nage, dtype=float)

    AgeMort_vector = np.array(
        [elm * np.ones(Nstrata) for i, elm in enumerate(AgeMort)]).flatten()
    hstrata0 = np.array([
        np.append(elm * Pop0, np.zeros(Nstrata - 1))
        for i, elm in enumerate(AgeFrac)
    ]).flatten()
    Sources.itemset(0, np.sum(AgeMort_vector * hstrata0))
    dhstrata = -Sources  #this is b
    """ Getting Amatrix= matrix of coefficients to solve for x in Ax=b"""
    Amatrix = np.zeros((Nage * Nstrata, Nage * Nstrata))  #matrix of matrices
    L_diag = np.zeros(Nstrata)
    # CH: first age group (children)
    i = 0
    L_diag.fill(lamb[i // Nstrata])
    Main_diag = [
        -(lamb[i // Nstrata] + Maturation[i // Nstrata] + AgeMort[i // Nstrata] +
          float(j) * gamma[i // Nstrata]) for j in range(Nstrata)
    ]
    Main_diag = np.asarray(Main_diag)
    U_diag = [float(j) * gamma[i // Nstrata] for j in range(Nstrata)]
    U_diag = np.asarray(U_diag)
    data = np.vstack((L_diag, Main_diag, U_diag))
    diags = np.array([-1, 0, 1])
    Amatrix[i:i + Nstrata, i:i + Nstrata] = spdiags(data, diags, Nstrata,
                                                    Nstrata).toarray()

    #SA and Y or any ages between children and adults
    for i in range(Nstrata, (Nage - 1) * Nstrata, Nstrata):
        L_diag.fill(lamb[i // Nstrata])
        Main_diag = [
            -(lamb[i // Nstrata] + Maturation[i // Nstrata] +
              AgeMort[i // Nstrata] + float(j) * gamma[i // Nstrata])
            for j in range(Nstrata)
        ]
        Main_diag = np.asarray(
            Main_diag
        )  # -lamb_vector[i//Nstrata] -VRate_t[i//Nstrata]   #vaccine updates
        U_diag = [float(j) * gamma[i // Nstrata] for j in range(Nstrata)]
        U_diag = np.asarray(U_diag)
        data = np.vstack((L_diag, Main_diag, U_diag))
        diags = np.array([-1, 0, 1])
        Amatrix[i:i + Nstrata,
                i:i + Nstrata] = spdiags(data, diags, Nstrata,
                                         Nstrata).toarray()
        Amatrix[i:i + Nstrata, i -
                Nstrata:i] = Maturation[i // Nstrata - 1] * np.identity(Nstrata)

    #O
    i = (Nage - 1) * Nstrata
    L_diag.fill(lamb[i // Nstrata])
    Main_diag = [
        -(lamb[i // Nstrata] + AgeMort[i // Nstrata] +
          float(j) * gamma[i // Nstrata]) for j in range(Nstrata)
    ]
    Main_diag = np.asarray(
        Main_diag
    )  # -lamb_vector[i//Nstrata] -VRate_t[i//Nstrata]   #vaccine updates
    U_diag = [float(j) * gamma[i // Nstrata] for j in range(Nstrata)]
    U_diag = np.asarray(U_diag)
    data = np.vstack((L_diag, Main_diag, U_diag))
    diags = np.array([-1, 0, 1])
    Amatrix[i:i + Nstrata, i:i + Nstrata] = spdiags(data, diags, Nstrata,
                                                    Nstrata).toarray()
    Amatrix[i:i + Nstrata,
            i - Nstrata:i] = Maturation[i // Nstrata - 1] * np.identity(Nstrata)
    """ --------------- END of Amatrix -------------------------"""

    equi_h = linalg.solve(Amatrix, dhstrata)
    nn = np.arange(0, Nstrata, dtype=float)
    nn_vector = np.array([nn for i, elm in enumerate(AgeMort)]).flatten()
    Normal_equi_h = equi_h.copy()
    AgeFrac = np.zeros(Nage)
    #get normalized numbers
    for i in range(0, Nage * Nstrata, Nstrata):
        Number = equi_h[i:i + Nstrata].sum()
        Normal_equi_h[i:i + Nstrata] = equi_h[i:i + Nstrata] / Number
        AgeFrac[i // Nstrata] = equi_h[i:i + Nstrata].sum() / equi_h.sum()

    return AgeFrac, equi_h
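
The tridiagonal blocks above rely on SciPy's spdiags reading the rows of `data` "row-wise" (unlike MATLAB): for diagonal k, entry A[i, i+k] comes from data[row, i+k]. A small sketch with placeholder values:

import numpy as np
from scipy.sparse import spdiags

Nstrata = 4                                      # placeholder size
L_diag = np.full(Nstrata, 0.1)                   # constant sub-diagonal (like lamb)
Main_diag = -np.arange(1.0, Nstrata + 1.0)       # placeholder main diagonal
U_diag = 0.2 * np.arange(Nstrata)                # j*gamma-style super-diagonal
data = np.vstack((L_diag, Main_diag, U_diag))
block = spdiags(data, np.array([-1, 0, 1]), Nstrata, Nstrata).toarray()
print(block)   # the first entry of U_diag and the last of L_diag are dropped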
Example 40
R = (xs**2. + ys**2. + zs**2.)**(0.5)
bs = np.linspace(1.4, 2.0, 13)
es = []
for b in bs:
    print('Bond length = ' + str(b))
    r1 = ((xs + 0.5 * b)**2. + ys**2. + zs**2.)**(0.5)
    r2 = ((xs - 0.5 * b)**2. + ys**2. + zs**2.)**(0.5)
    Vext = -1. / r1 - 1. / r2
    Enn = 1. / b  # nuclear-nuclear repulsion between the two protons (atomic units)
    e = np.linspace(1, 1, g)
    ncomp = np.exp(-R**2. / 2.)
    ncomp = -2. * ncomp / sum(ncomp) / h**3.
    ncomppot = -2. / R * erf(R / math.sqrt(2.))
    from scipy.sparse import spdiags, eye, kron
    from scipy.sparse.linalg import eigsh, cgs
    L = spdiags([e, -2 * e, e], [-1, 0, 1], g, g) / h**2
    I = eye(g, g)
    L3 = kron(kron(L, I), I) + kron(kron(I, L), I) + kron(kron(I, I), L)
    Vtot = Vext
    tol = 1e-3
    print('Iter'.rjust(6), 'Eigenvalue'.rjust(10), 'KE'.rjust(8),
          'Exch. E'.rjust(8), 'Ext. E'.rjust(8), 'Pot. E.'.rjust(8),
          'E_tot'.rjust(8), 'diff'.rjust(8))
    print('----'.rjust(6), '----------'.rjust(10), '----'.rjust(8),
          '-------'.rjust(8), '------'.rjust(8), '------'.rjust(8),
          '-----'.rjust(8), '-----'.rjust(8))
    count = 1
    diff = 1.
    pi = 3.14159
    Elast = 0.
Example 41
N = 10  # problem size
tol = 1e-4  # tolerance for solution
imax = 1000  # max number of iterations
method = "SOR"  # SIM method
omega = 1.2  # factor for SOR
x = np.zeros(N)  # starting guess for solution
z = np.linspace(0, 1, N)  # generate grid

# Assemble tri-diagonal system matrix for CDS operator
data = np.zeros((3, N))
data[0, 0:N - 1] = 1  # super diagonal
data[1, :] = -2  # diagonal
data[2, 1:N] = 1  # sub diagonal
offsets = np.array([-1, 0, 1])
A = sp.spdiags(data, offsets, N, N, format="csc")

# Assemble source vector
b = np.zeros(N)
b[1:N] = -8 / (N - 1) ** 2

u1 = spsolve(A, b)  # direct solution
u2, _, n_iter, _, G = SIMPy.solve(A, b, "sor", 500, 1e-4, 1, np.ones(N),
                                  False)  # iterative solution

## Plotting
plt.plot(u1, z, '--o', linewidth=2, label="direct solution")
plt.plot(u2, z, '--o', linewidth=2, label="iterative solution")
plt.legend()
plt.show()
plt.ion()
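
SIMPy above is an external helper that is not shown here. As a rough, self-contained reference (assumed naming, not the SIMPy API), one SOR sweep per iteration looks like this:

import numpy as np

def sor_solve(A, b, omega=1.2, x0=None, tol=1e-4, imax=1000):
    # plain successive over-relaxation on a dense copy of A (sketch only)
    A = A.toarray() if hasattr(A, "toarray") else np.asarray(A)
    n = len(b)
    x = np.zeros(n) if x0 is None else x0.astype(float).copy()
    for k in range(imax):
        for i in range(n):
            sigma = A[i, :i] @ x[:i] + A[i, i + 1:] @ x[i + 1:]
            x[i] = (1 - omega) * x[i] + omega * (b[i] - sigma) / A[i, i]
        if np.linalg.norm(b - A @ x) < tol:
            break
    return x, k + 1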
Example 42
def tridiag(n, x=None, y=None, z=None):
    """
    tridiag  tridiagonal matrix (sparse).
         tridiag(x, y, z) is the sparse tridiagonal matrix with
         subdiagonal x, diagonal y, and superdiagonal z.
         x and z must be vectors of dimension one less than y.
         Alternatively tridiag(n, c, d, e), where c, d, and e are all
         scalars, yields the toeplitz tridiagonal matrix of order n
         with subdiagonal elements c, diagonal elements d, and superdiagonal
         elements e.   This matrix has eigenvalues (todd 1977)
                  d + 2*sqrt(c*e)*cos(k*pi/(n+1)), k=1:n.
         tridiag(n) is the same as tridiag(n,-1,2,-1), which is
         a symmetric positive definite m-matrix (the negative of the
         second difference matrix).

         References:
         J. Todd, Basic Numerical Mathematics, Vol. 2: Numerical Algebra,
           Birkhauser, Basel, and Academic Press, New York, 1977, p. 155.
         D.E. Rutherford, Some continuant determinants arising in physics and
           chemistry---II, Proc. Royal Soc. Edin., 63, A (1952), pp. 232-241.
    """
    try:
        # First see if they are arrays
        nx, = n.shape
        ny, = x.shape
        nz, = y.shape
        if (ny - nx - 1) != 0 or  (ny - nz - 1) != 0:
            raise Higham('Dimensions of vector arguments are incorrect.')
        # Now swap to match above
        z = y
        y = x
        x = n

    except AttributeError:
        # They are not arrays
        if n < 2:
            raise Higham("n must be 2 or greater")

        if x is None and y is None and z is None:
            x = -1
            y = 2
            z = -1

        x = x * np.ones(n - 1)
        z = z * np.ones(n - 1)
        y = y * np.ones(n)

    except ValueError:
        raise Higham("x, y, z must be all scalars or 1-D vectors")

    # t = diag(x, -1) + diag(y) + diag(z, 1);  % For non-sparse matrix.
    n  = np.max(np.size(y))
    za = np.zeros(1)

    # Use the (*)stack functions instead of the r_[] notation in
    # an attempt to be more readable. (Doesn't look like it helped much)
    t = sparse.spdiags(np.vstack((np.hstack((x, za)), y,
                                  np.hstack((za, z)))),
                       np.array([-1, 0, 1]), n, n)

    return t
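
A quick check of the Todd (1977) eigenvalue formula quoted in the docstring, using the default tridiag(n) = tridiag(n, -1, 2, -1), where d + 2*sqrt(c*e)*cos(k*pi/(n+1)) reduces to 2 + 2*cos(k*pi/(n+1)):

import numpy as np

n = 6
T = tridiag(n).toarray()
computed = np.sort(np.linalg.eigvalsh(T))
k = np.arange(1, n + 1)
analytic = np.sort(2.0 + 2.0 * np.cos(k * np.pi / (n + 1)))
print(np.allclose(computed, analytic))   # True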
Example 43
def flow_operator_quadr(u, v, du, dv, It, Ix, Iy, S, lmbda):
    '''using quadratic function '''
    sz = np.shape(Ix)
    npixels = Ix.shape[1] * Ix.shape[0]

    FU = sparse.lil_matrix((npixels, npixels), dtype=np.float32)
    FV = sparse.lil_matrix((npixels, npixels), dtype=np.float32)
    quadr_ov_x = np.vectorize(deriv_quadra_over_x)
    for i in range(len(S)):
        M = conv_matrix(S[i], sz)

        u_ = sparse.lil_matrix.dot(M, np.reshape((u + du), (npixels, 1), 'F'))
        v_ = sparse.lil_matrix.dot(M, np.reshape((v + dv), (npixels, 1), 'F'))

        pp_su = quadr_ov_x(u_, 1)
        pp_sv = quadr_ov_x(v_, 1)

        FU = FU + sparse.lil_matrix.dot(
            M.T, sparse.lil_matrix.dot(spdiags(pp_su.T, 0, npixels, npixels),
                                       M))
        FV = FV + sparse.lil_matrix.dot(
            M.T, sparse.lil_matrix.dot(spdiags(pp_sv.T, 0, npixels, npixels),
                                       M))

    MM = sparse.vstack(
        (sparse.hstack((-FU, sparse.lil_matrix((npixels, npixels)))),
         sparse.hstack((sparse.lil_matrix((npixels, npixels)), -FV))))

    Ix2 = Ix * Ix
    Iy2 = Iy * Iy
    Ixy = Ix * Iy
    Itx = It * Ix
    Ity = It * Iy

    It = It + Ix * du + Iy * dv

    pp_d = deriv_quadra_over_x(np.reshape(It, (npixels, 1), 'F'), (1.5 / 0.03))

    tmp = pp_d * np.reshape(Ix2, (npixels, 1), 'F')
    duu = spdiags(tmp.T, 0, npixels, npixels)

    tmp = pp_d * np.reshape(Iy2, (npixels, 1), 'F')

    dvv = spdiags(tmp.T, 0, npixels, npixels)

    tmp = pp_d * np.reshape(Ixy, (npixels, 1), 'F')

    dduv = spdiags(tmp.T, 0, npixels, npixels)

    A = sparse.vstack((sparse.hstack((duu, dduv)), sparse.hstack((dduv, dvv))))
    A = A - lmbda * MM

    b = sparse.lil_matrix.dot(
        lmbda * MM,
        np.vstack(
            (np.reshape(u, (npixels, 1), 'F'), np.reshape(
                v, (npixels, 1), 'F')))) - np.vstack(
                    (pp_d * np.reshape(Itx, (npixels, 1), 'F'),
                     pp_d * np.reshape(Ity, (npixels, 1), 'F')))

    if (((np.max(pp_su) - np.min(pp_su) < 1E-06))
            and ((np.max(pp_sv) - np.min(pp_sv) < 1E-06))
            and ((np.max(pp_d) - np.min(pp_d) < 1E-06))):
        iterative = False
    else:
        iterative = True
    return [A, b, iterative]
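
deriv_quadra_over_x is not defined in this snippet. For the quadratic penalty rho(x) = (x/sigma)^2 the IRLS weight rho'(x)/x is the constant 2/sigma^2, which is one plausible reading (an assumption, not the author's code); a constant weight is also what lets the max/min spread test above set iterative = False.

def deriv_quadra_over_x(x, sigma):
    # assumed definition: derivative of rho(x) = (x / sigma)**2, divided by x
    return 2.0 / sigma ** 2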
Example 44
def arPLS(x_input, y_input, **kwargs):
    """
    arPLS: (automatic) Baseline correction using asymmetrically reweighted penalized least squares smoothing. 
    Baek et al. 2015, Analyst 140: 250-257;
    
    Allows subtracting a baseline under a x y spectrum.
    
    Parameters
    ----------
    x_input : ndarray
        x values.
    
    y_input : ndarray
        y values.
        
    kwargs:  #optional parameters
        lam = kwargs.get('lam',1.0*10**5)
        ratio = kwargs.get('ratio',0.01)

    Returns
    -------
    out1 : ndarray
        Contain the corrected signal.
    out2 : ndarray
        Contain the baseline.
    """

    # signal standardization with sklearn
    # this helps for polynomial fitting
    X_scaler = StandardScaler().fit(x_input.reshape(-1, 1))
    Y_scaler = StandardScaler().fit(y_input.reshape(-1, 1))

    # transformation
    x = X_scaler.transform(x_input.reshape(-1, 1))
    y = Y_scaler.transform(y_input.reshape(-1, 1))

    y = y.reshape(len(y_input))

    # optional parameters
    lam = kwargs.get('lam', 1.0 * 10**5)
    ratio = kwargs.get('ratio', 0.01)

    N = len(y)
    D = sparse.csc_matrix(np.diff(np.eye(N), 2))
    w = np.ones(N)

    while True:
        W = sparse.spdiags(w, 0, N, N)
        Z = W + lam * D.dot(D.transpose())
        z = sparse.linalg.spsolve(Z, w * y)
        d = y - z
        # make d- and get w^t with m and s
        dn = d[d < 0]
        m = np.mean(dn)
        s = np.std(dn)
        wt = 1.0 / (1 + np.exp(2 * (d - (2 * s - m)) / s))
        # check exit condition and backup
        if norm(w - wt) / norm(w) < ratio:
            break
        w = wt

    baseline_fitted = z

    return y_input.reshape(-1, 1) - Y_scaler.inverse_transform(
        baseline_fitted.reshape(-1, 1)), Y_scaler.inverse_transform(
            baseline_fitted.reshape(-1, 1))
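
Example use of arPLS on a synthetic spectrum, with an illustrative Gaussian peak on a slowly varying baseline (all values assumed):

import numpy as np

x = np.linspace(0.0, 100.0, 1000)
baseline = 0.001 * (x - 30.0) ** 2 + 2.0               # slow quadratic drift
peak = 10.0 * np.exp(-0.5 * ((x - 50.0) / 2.0) ** 2)   # Gaussian band
y = baseline + peak + 0.05 * np.random.randn(x.size)

corrected, fitted_baseline = arPLS(x, y, lam=1.0e5, ratio=0.01)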
Example 45
    def _compute_coefs(self, xx, yy, p=None, var=1):
        x, y = np.atleast_1d(xx, yy)
        x = x.ravel()
        dx = np.diff(x)
        must_sort = (dx < 0).any()
        if must_sort:
            ind = x.argsort()
            x = x[ind]
            y = y[..., ind]
            dx = np.diff(x)

        n = len(x)

        # ndy = y.ndim
        szy = y.shape

        nd = prod(szy[:-1])
        ny = szy[-1]

        if n < 2:
            raise ValueError('There must be >=2 data points.')
        elif (dx <= 0).any():
            raise ValueError('Two consecutive values in x can not be equal.')
        elif n != ny:
            raise ValueError('x and y must have the same length.')

        dydx = np.diff(y) / dx

        if (n == 2):  # % straight line
            coefs = np.vstack([dydx.ravel(), y[0, :]])
        else:

            dx1 = 1. / dx
            D = sparse.spdiags(var * ones(n), 0, n, n)  # The variance

            u, p = self._compute_u(p, D, dydx, dx, dx1, n)
            dx1.shape = (n - 1, -1)
            dx.shape = (n - 1, -1)
            zrs = zeros(nd)
            if p < 1:
                # faster than yi-6*(1-p)*Q*u
                Qu = D * diff(vstack(
                    [zrs, diff(vstack([zrs, u, zrs]), axis=0) * dx1, zrs]),
                              axis=0)
                ai = (y - (6 * (1 - p) * Qu).T).T
            else:
                ai = y.reshape(n, -1)

            # The piecewise polynominals are written as
            # fi=ai+bi*(x-xi)+ci*(x-xi)^2+di*(x-xi)^3
            # where the derivatives in the knots according to Carl de Boor are:
            #    ddfi  = 6*p*[0;u] = 2*ci;
            #    dddfi = 2*diff([ci;0])./dx = 6*di;
            #    dfi   = diff(ai)./dx-(ci+di.*dx).*dx = bi;

            ci = np.vstack([zrs, 3 * p * u])
            di = (diff(vstack([ci, zrs]), axis=0) * dx1 / 3)
            bi = (diff(ai, axis=0) * dx1 - (ci + di * dx) * dx)
            ai = ai[:n - 1, ...]
            if nd > 1:
                di = di.T
                ci = ci.T
                ai = ai.T
            if not any(di):
                if not any(ci):
                    coefs = vstack([bi.ravel(), ai.ravel()])
                else:
                    coefs = vstack([ci.ravel(), bi.ravel(), ai.ravel()])
            else:
                coefs = vstack(
                    [di.ravel(),
                     ci.ravel(),
                     bi.ravel(),
                     ai.ravel()])

        return coefs, x
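
The returned coefs stack the polynomial coefficients highest degree first (di, ci, bi, ai), one breakpoint interval per column. A minimal evaluation sketch, assuming the one-dimensional case:

import numpy as np

def eval_pp(coefs, breaks, xnew):
    # coefs: (k, n-1) array, highest-degree row first; breaks: the sorted knots x
    k, n_int = coefs.shape
    idx = np.clip(np.searchsorted(breaks, xnew) - 1, 0, n_int - 1)
    dx = np.asarray(xnew) - breaks[idx]
    out = coefs[0, idx]
    for j in range(1, k):          # Horner's scheme
        out = out * dx + coefs[j, idx]
    return out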
Example 46
def solveLinearEquation(IN, wx, wy, lamb):
    if len(IN.shape) == 2:
        IN = np.expand_dims(IN, axis=2)
    r, c, ch = IN.shape
    k = r * c
    dx = -lamb * np.reshape(wx, (wx.size, 1), order='F')
    dy = -lamb * np.reshape(wy, (wy.size, 1), order='F')
    tempx = np.concatenate((wx[:, -1], wx[:, 0:-1]), axis=1)
    tempy = np.concatenate((np.expand_dims(wy[-1, :], axis=0), wy[0:-1, :]),
                           axis=0)
    dxa = -lamb * np.reshape(tempx, (tempx.size, 1), order='F')
    dya = -lamb * np.reshape(tempy, (tempy.size, 1), order='F')
    tempx = np.concatenate((wx[:, -1], np.zeros((r, c - 1))), axis=1)
    tempy = np.concatenate(
        (np.expand_dims(wy[-1, :], axis=0), np.zeros((r - 1, c))), axis=0)
    dxd1 = -lamb * np.reshape(tempx, (tempx.size, 1), order='F')
    dyd1 = -lamb * np.reshape(tempy, (tempy.size, 1), order='F')
    wx[:, -1] = 0
    wy[-1, :] = 0
    dxd2 = -lamb * np.reshape(wx, (wx.size, 1), order='F')
    dyd2 = -lamb * np.reshape(wy, (wy.size, 1), order='F')

    Ax = spdiags(np.concatenate((dxd1, dxd2), axis=1).T, [-k + r, -r], k, k)
    Ay = spdiags(np.concatenate((dyd1, dyd2), axis=1).T, [-r + 1, -1], k, k)
    # diagonals stored row-wise; in MatLab the diagonals are stored column-wise

    D = 1 - (dx + dy + dxa + dya)  # column vector

    Axy = Ax + Ay
    A = Axy + Axy.T + spdiags(D.T, 0, k, k)

    fast = True
    if fast:
        OUT = IN
        for ii in range(ch):
            tin = IN[:, :, ii]
            tin = np.reshape(tin, (tin.size, 1), order='F')
            # start_amg = time.time()
            # ml = pyamg.ruge_stuben_solver(A)
            # tout = ml.solve(tin)
            # end_amg = time.time()
            # time_amg = end_amg-start_amg
            # print(time_amg)  # run time of 1.04663395882 seconds

            start_cholmod = time.time()
            factor = cholesky(A)
            tout = factor(tin)
            end_cholmod = time.time()
            time_cholmod = end_cholmod - start_cholmod
            print(time_cholmod)  # run time of 0.731502056122 seconds

            OUT[:, :, ii] = np.reshape(
                tout, (r, c),
                order='F')  # matches the A\tin(:), not the ichol from matlab
    else:
        # Solving A*x = tin is extremely slow using np.linalg.lstsq
        OUT = IN
        for ii in range(ch):
            tin = IN[:, :, ii]
            tin = np.reshape(tin, (tin.size, 1), order='F')
            tout = np.linalg.lstsq(A.toarray(), tin, rcond=None)[0]
            OUT[:, :, ii] = np.reshape(tout, (r, c), order='F')

    return OUT
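
`cholesky` above comes from scikit-sparse (sksparse.cholmod). When that dependency is unavailable, SciPy's sparse LU factorization is a reasonable stand-in for the same solve (slower, but pure SciPy); a sketch:

import numpy as np
from scipy.sparse.linalg import splu

def solve_with_splu(A, tin):
    # factor the sparse system once, then back-substitute (call per channel)
    lu = splu(A.tocsc())
    return lu.solve(np.ravel(tin))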
Example 47
Bde = np.zeros((n, n), dtype=bool)

u0 = np.concatenate(
    [np.reshape(vx, n * n),
     np.reshape(vy, n * n),
     np.reshape(de, n * n)])

B = np.concatenate(
    [np.reshape(Bvx, n * n),
     np.reshape(Bvy, n * n),
     np.reshape(Bde, n * n)])

# centered-difference stencil: -1/2, 0, 1/2

l = np.ones(n)
M = sp.spdiags([l * -0.5, l * 0.5], [-1, 1], n, n).todense()
M[0, 0:3] = [-3 / 2, 2, -1 / 2]
M[n - 1, n - 3:n] = [1 / 2, -2, 3 / 2]
M = sp.csr_matrix(M)

(Dx, Dy) = suport_functions.getDxDy(M)
Dx = Dx / (x[1] - x[0])
Dy = Dy / (y[1] - y[0])


def uDerivative(t, u):
    u = np.reshape(u, (3, n * n))
    vx = u[0, ]
    vy = u[1, ]
    de = u[2, ]
Example 48
        print(last + ' Arnorm  =  %12.4e' % (Arnorm,))
        print(last + msg[istop+1])

    if istop == 6:
        info = maxiter
    else:
        info = 0

    return (postprocess(x),info)


if __name__ == '__main__':
    from numpy import ones, arange
    from scipy.linalg import norm
    from scipy.sparse import spdiags

    n = 10

    residuals = []

    def cb(x):
        residuals.append(norm(b - A*x))

    # A = poisson((10,),format='csr')
    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
    A.psolve = M.matvec
    b = 0*ones(A.shape[0])
    x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
Example 49
def calc_orbitals(self):
    """
    calculate molecular orbitals and eigenvalues
    """

    assert len(self.veff) != 0, "Veff is not set"

    Nelem = self.grid.Nelem

    if self.Nmo != 0:

        #Inverse volume element
        W = spdiags(data=self.grid.w, diags=0, m=Nelem, n=Nelem)
        W = csc_matrix(W)

        #Build effective potential operator
        Veff = spdiags(data=W @ self.veff, diags=0, m=Nelem, n=Nelem)

        if self.H0 is None:
            self.hamiltionian()

        #Construct Hamiltonian
        H = self.H0 + Veff

        #Solve eigenvalue problem
        self.eig, self.phi = eigs(spsolve(W, H),
                                  k=self.Nmo,
                                  sigma=self.e0,
                                  v0=self.opt["v0"])
        self.eig = self.eig.real
        self.phi = self.phi.real
        e0 = self.e0

        # retry with a lower shift while the eigensolver returns all NaNs
        while np.isnan(self.phi).all():
            e0 = e0 - 0.1
            self.eig, self.phi = eigs(spsolve(W, H),
                                      k=self.Nmo,
                                      sigma=e0,
                                      v0=self.opt["v0"])
            self.eig = self.eig.real
            self.phi = self.phi.real

        #Check for degenerate and nearly degenerate orbitals
        for i in range(self.Nmo - 1):
            for j in range(i + 1, self.Nmo):
                if np.abs(self.eig[i] - self.eig[j]) < 1e-9:
                    even = self.phi[:, i] + self.grid.mirror(
                        self.phi[:, i]) + self.phi[:, j] + self.grid.mirror(
                            self.phi[:, j])
                    odd = self.phi[:, i] - self.grid.mirror(
                        self.phi[:, i]) + self.phi[:, j] - self.grid.mirror(
                            self.phi[:, j])
                    self.phi[:, i] = even / norm(even)
                    self.phi[:, j] = odd / norm(odd)

        if self.SYM is True:
            for i in range(self.Nmo):
                if self.phi[:, i].T @ self.grid.mirror(self.phi[:, i]) > 0:

                    self.phi[:, i] = self.phi[:, i] + self.grid.mirror(
                        self.phi[:, i])
                    self.phi[:, i] = self.phi[:, i] / norm(self.phi[:, i])

                else:

                    self.phi[:, i] = self.phi[:, i] - self.grid.mirror(
                        self.phi[:, i])

    else:
        "Molecular Orbital equals zero"
        self.eig = -1 / spacing(1)
Example 50
def ode2es(L, rho0):
    """Creates an exponential series that describes the time evolution for the
    initial density matrix (or state vector) `rho0`, given the Liouvillian
    (or Hamiltonian) `L`.

    Parameters
    ----------
    L : qobj
        Liouvillian of the system.

    rho0 : qobj
        Initial state vector or density matrix.

    Returns
    -------
    eseries : :class:`qutip.eseries`
        ``eseries`` represention of the system dynamics.

    """

    if issuper(L):

        # check initial state
        if isket(rho0):
            # Got a wave function as initial state: convert to density matrix.
            rho0 = rho0 * rho0.dag()

        # check if state is below error threshold
        if abs(rho0.full().sum()) < 1e-10 + 1e-24:
            # enforce zero operator
            return eseries(qzero(rho0.dims[0]))

        w, v = L.eigenstates()
        v = np.hstack([ket.full() for ket in v])
        # w[i]   = eigenvalue i
        # v[:,i] = eigenvector i

        rlen = np.prod(rho0.shape)
        r0 = mat2vec(rho0.full())
        v0 = la.solve(v, r0)
        vv = v * sp.spdiags(v0.T, 0, rlen, rlen)

        out = None
        for i in range(rlen):
            qo = Qobj(vec2mat(vv[:, i]), dims=rho0.dims, shape=rho0.shape)
            if out:
                out += eseries(qo, w[i])
            else:
                out = eseries(qo, w[i])

    elif isoper(L):

        if not isket(rho0):
            raise TypeError('Second argument must be a ket if first '
                            'is a Hamiltonian.')

        # check if state is below error threshold
        if abs(rho0.full().sum()) < 1e-5 + 1e-20:
            # enforce zero operator
            dims = rho0.dims
            return eseries(
                Qobj(sp.csr_matrix((dims[0][0], dims[1][0]), dtype=complex)))

        w, v = L.eigenstates()
        v = np.hstack([ket.full() for ket in v])
        # w[i]   = eigenvalue i
        # v[:,i] = eigenvector i

        rlen = np.prod(rho0.shape)
        r0 = rho0.full()
        v0 = la.solve(v, r0)
        vv = v * sp.spdiags(v0.T, 0, rlen, rlen)

        out = None
        for i in range(rlen):
            qo = Qobj(np.matrix(vv[:, i]).T, dims=rho0.dims, shape=rho0.shape)
            if out:
                out += eseries(qo, -1.0j * w[i])
            else:
                out = eseries(qo, -1.0j * w[i])

    else:
        raise TypeError('First argument must be a Hamiltonian or Liouvillian.')

    return estidy(out)
Example 51
def binormalize(A, tol=1e-5, maxiter=10):
    """Binormalize matrix A.  Attempt to create unit l_1 norm rows.

    Parameters
    ----------
    A : csr_matrix
        sparse matrix (n x n)
    tol : float
        tolerance
    maxiter : int
        maximum number of iterations to try

    Returns
    -------
    C : csr_matrix
        diagonally scaled A, C=DAD

    Notes
    -----
        - Goal: Scale A so that l_1 norm of the rows are equal to 1:
        - B = DAD
        - want row sum of B = 1
        - easily done with tol=0 if B=DA, but this is not symmetric
        - algorithm is O(N log (1.0/tol))

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import binormalize
    >>> A = poisson((10,),format='csr')
    >>> C = binormalize(A)

    References
    ----------
    .. [1] Livne, Golub, "Scaling by Binormalization"
       Tech Report SCCM-03-12, SCCM, Stanford, 2003
       http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679

    """
    if not isspmatrix(A):
        raise TypeError('expecting sparse matrix A')

    if A.dtype == complex:
        raise NotImplementedError('complex A not implemented')

    n = A.shape[0]
    it = 0
    x = np.ones((n, 1)).ravel()

    # 1.
    B = A.multiply(A).tocsc()  # power(A,2) inconsistent in numpy, scipy.sparse
    d = B.diagonal().ravel()

    # 2.
    beta = B * x
    betabar = (1.0 / n) * np.dot(x, beta)
    stdev = rowsum_stdev(x, beta)

    # 3
    while stdev > tol and it < maxiter:
        for i in range(0, n):
            # solve equation x_i, keeping x_j's fixed
            # see equation (12)
            c2 = (n - 1) * d[i]
            c1 = (n - 2) * (beta[i] - d[i] * x[i])
            c0 = -d[i] * x[i] * x[i] + 2 * beta[i] * x[i] - n * betabar
            if (-c0 < 1e-14):
                print('warning: A nearly un-binormalizable...')
                return A
            else:
                # see equation (12)
                xnew = (2 * c0) / (-c1 - np.sqrt(c1 * c1 - 4 * c0 * c2))
            dx = xnew - x[i]

            # here we assume input matrix is symmetric since we grab a row of B
            # instead of a column
            ii = B.indptr[i]
            iii = B.indptr[i + 1]
            dot_Bcol = np.dot(x[B.indices[ii:iii]], B.data[ii:iii])

            betabar = betabar + (1.0 / n) * dx * (dot_Bcol + beta[i] +
                                                  d[i] * dx)
            beta[B.indices[ii:iii]] += dx * B.data[ii:iii]

            x[i] = xnew

        stdev = rowsum_stdev(x, beta)
        it += 1

    # rescale for unit 2-norm
    d = np.sqrt(x)
    D = spdiags(d.ravel(), [0], n, n)
    C = D * A * D
    C = C.tocsr()
    beta = C.multiply(C).sum(axis=1)
    scale = np.sqrt((1.0 / n) * np.sum(beta))
    return (1 / scale) * C
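
A quick sanity check of binormalize: after scaling, the row sums of C.multiply(C) (the quantity the algorithm equilibrates) should be nearly equal across rows.

import numpy as np
from pyamg.gallery import poisson

A = poisson((10,), format='csr')
C = binormalize(A)
row_norms = np.asarray(C.multiply(C).sum(axis=1)).ravel()
print(row_norms.max() - row_norms.min())   # small spread once converged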
Example 52
def _tracemin_fiedler(L, X, normalized, tol, method, num_vecs=None):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    """
    n = X.shape[0]
    if num_vecs is None: num_vecs = 1

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if not normalized:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n
    else:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e

    if method is None:
        method = 'pcg'
    if method == 'pcg':
        # See comments below for the semantics of P and D.
        def P(x):
            x -= asarray(x * X * X.T)[0, :]
            if not normalized:
                x -= x.sum() / n
            else:
                x = daxpy(e, x, a=-ddot(x, e))
            return x

        solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
    elif method == 'chol' or method == 'lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to modified. Changing to infinity forces a zero in the
        # corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
    else:
        raise nx.NetworkXError('unknown linear system solver.')

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))
    #The converged vectors
    X_conv = asmatrix(ndarray(X.shape, order='F'))
    sig_conv = zeros(num_vecs)
    nconv = 0
    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X = X * Y
        # Test for convergence exploiting the fact that L * X == W * Y.

        #Test convergence,
        #This is really the number of consecutive vectors that converged
        max_conv = 0
        num_remaining = num_vecs - nconv
        errs = []
        for i in range(num_remaining):
            err = dasum(W * asmatrix(Y)[:, i] - sigma[i] * X[:, i]) / Lnorm
            errs.append(err)
            if err < tol:
                max_conv = i + 1
            else:
                break

        if max_conv == num_remaining:
            X_conv[:, nconv:nconv + max_conv] = X[:, :max_conv]
            sig_conv[nconv:nconv + max_conv] = sigma[:max_conv]
            break

        # Depending on the linear solver to be used, two mathematically
        # equivalent formulations are used.
        if method == 'pcg':
            # Compute X = X - (P * L * P) \ (P * L * X) where
            # P = I - [e X] * [e X]' is a projection onto the orthogonal
            # complement of [e X].
            W *= Y  # L * X == W * Y
            W -= (W.T * X * X.T).T
            project(W)
            # Compute the diagonal of P * L * P as a Jacobi preconditioner.
            D = L.diagonal().astype(float)
            D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
            D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
            D[D < tol * Lnorm] = 1.
            D = 1. / D
            # Since TraceMIN is globally convergent, the relative residual can
            # be loose.
            #Perform deflation if needed
            W -= X_conv[:, :nconv] * (X_conv[:, :nconv].T * W)
            X -= X_conv[:, :nconv] * (X_conv[:, :nconv].T * X)
            if max_conv:
                X_conv[:, nconv:nconv + max_conv] = X[:, :max_conv]
                sig_conv[nconv:nconv + max_conv] = sigma[:max_conv]
                X = X[:, max_conv:]
                W = W[:, max_conv:]
                nconv += max_conv
            X -= solver.solve(W, 0.1)
        else:
            # Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
            # projection on the nullspace of L, which will be eliminated.
            W[:, :] = solver.solve(X)
            project(W)
            X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.

    return sig_conv, asarray(X_conv)
Example 53
def view_patches_bar(Yr, A, C, b, f, d1, d2, YrA=None, img=None):
    """view spatial and temporal components interactively

     Parameters:
     -----------
     Yr:    np.ndarray
            movie in format pixels (d) x frames (T)

     A:     sparse matrix
                matrix of spatial components (d x K)

     C:     np.ndarray
                matrix of temporal components (K x T)

     b:     np.ndarray
                spatial background (vector of length d)

     f:     np.ndarray
                temporal background (vector of length T)

     d1,d2: np.ndarray
                frame dimensions

     YrA:   np.ndarray
                 ROI filtered residual as it is given from update_temporal_components
                 If not given, then it is computed (K x T)

     img:   np.ndarray
                background image for contour plotting. Default is the image of all spatial components (d1 x d2)

    """

    pl.ion()
    nr, T = C.shape
    nb = f.shape[0]
    A2 = A.copy()
    A2.data **= 2
    nA2 = np.sqrt(np.array(A2.sum(axis=0))).squeeze()
    if YrA is None:
        Y_r = np.array(A.T * np.matrix(Yr) - (A.T * np.matrix(b[:, np.newaxis])) * np.matrix(
            f[np.newaxis]) - (A.T.dot(A)) * np.matrix(C) + C)
    else:
        Y_r = YrA + C

    A = A * spdiags(old_div(1, nA2), 0, nr, nr)
    A = A.todense()
    imgs = np.reshape(np.array(A), (d1, d2, nr), order='F')
    if img is None:
        img = np.mean(imgs[:, :, :-1], axis=-1)

    bkgrnd = np.reshape(b, (d1, d2) + (nb,), order='F')
    fig = pl.figure(figsize=(10, 10))

    axcomp = pl.axes([0.05, 0.05, 0.9, 0.03])

    ax1 = pl.axes([0.05, 0.55, 0.4, 0.4])
#    ax1.axis('off')
    ax3 = pl.axes([0.55, 0.55, 0.4, 0.4])
#    ax1.axis('off')
    ax2 = pl.axes([0.05, 0.1, 0.9, 0.4])
#    axcolor = 'lightgoldenrodyellow'
#    axcomp = pl.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)

    s_comp = Slider(axcomp, 'Component', 0, nr + nb - 1, valinit=0)
    vmax = np.percentile(img, 98)

    def update(val):
        i = int(np.round(s_comp.val))
        print(('Component:' + str(i)))

        if i < nr:

            ax1.cla()
            imgtmp = imgs[:, :, i]
            ax1.imshow(imgtmp, interpolation='None', cmap=pl.cm.gray)
            ax1.set_title('Spatial component ' + str(i + 1))
            ax1.axis('off')

            ax2.cla()
            ax2.plot(np.arange(T), np.squeeze(np.array(Y_r[i, :])), 'c', linewidth=3)
            ax2.plot(np.arange(T), np.squeeze(np.array(C[i, :])), 'r', linewidth=2)
            ax2.set_title('Temporal component ' + str(i + 1))
            ax2.legend(labels=['Filtered raw data', 'Inferred trace'])

            ax3.cla()
            ax3.imshow(img, interpolation='None', cmap=pl.cm.gray, vmax=vmax)
            imgtmp2 = imgtmp.copy()
            imgtmp2[imgtmp2 == 0] = np.nan
            ax3.imshow(imgtmp2, interpolation='None', alpha=0.5, cmap=pl.cm.hot)
            ax3.axis('off')
        else:
            ax1.cla()
            ax1.imshow(bkgrnd[:, :, i - nr], interpolation='None')
            ax1.set_title('Spatial background ' + str(i + 1 - nr))
            ax1.axis('off')

            ax2.cla()
            ax2.plot(np.arange(T), np.squeeze(np.array(f[i - nr, :])))
            ax2.set_title('Temporal background ' + str(i + 1 - nr))

    def arrow_key_image_control(event):

        if event.key == 'left':
            new_val = np.round(s_comp.val - 1)
            if new_val < 0:
                new_val = 0
            s_comp.set_val(new_val)

        elif event.key == 'right':
            new_val = np.round(s_comp.val + 1)
            if new_val > nr + nb:
                new_val = nr + nb
            s_comp.set_val(new_val)
        else:
            pass

    s_comp.on_changed(update)
    s_comp.set_val(0)
    fig.canvas.mpl_connect('key_release_event', arrow_key_image_control)
    pl.show()
Example 54
def build_pc_diag(A: spmatrix) -> spmatrix:
    """Diagonal preconditioner."""
    return sp.spdiags(1.0 / A.diagonal(), 0, A.shape[0], A.shape[0])
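
Typical use of the diagonal (Jacobi) preconditioner with a Krylov solver; the matrix here is only an illustration:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import cg

n = 100
A = sp.spdiags([np.arange(1, n + 1, dtype=float)], [0], n, n, format='csr')
M = build_pc_diag(A)                       # Jacobi preconditioner
b = np.ones(n)
x, info = cg(A, b, M=M, tol=1e-10)
print(info, np.linalg.norm(A @ x - b))     # 0 means converged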
Example 55
def nb_view_patches(Yr, A, C, b, f, d1, d2, YrA = None, image_neurons=None, thr=0.99, denoised_color=None,cmap='jet'):
    """
    Interactive plotting utility for ipython notebook

    Parameters:
    -----------
    Yr: np.ndarray
        movie

    A,C,b,f: np.ndarrays
        outputs of matrix factorization algorithm

    d1,d2: floats
        dimensions of movie (x and y)

    YrA:   np.ndarray
        ROI filtered residual as it is given from update_temporal_components
        If not given, then it is computed (K x T)        

    image_neurons: np.ndarray
        image to be overlaid to neurons (for instance the average)

    thr: double
        threshold regulating the extent of the displayed patches

    denoised_color: string or None
        color name (e.g. 'red') or hex color code (e.g. '#F0027F')

    cmap: string
        name of colormap (e.g. 'viridis') used to plot image_neurons
    """
    colormap = cm.get_cmap(cmap)
    grayp = [mpl.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))]
    nr, T = C.shape
    nA2 = np.ravel(A.power(2).sum(0))
    b = np.squeeze(b)
    f = np.squeeze(f)
    if YrA is None:
        Y_r = np.array(spdiags(old_div(1, nA2), 0, nr, nr) *
                   (A.T * np.matrix(Yr) -
                    (A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) -
                    A.T.dot(A) * np.matrix(C)) + C)
    else:
        Y_r = C + YrA
            

    x = np.arange(T)
    z = old_div(np.squeeze(np.array(Y_r[:, :].T)), 100)
    if image_neurons is None:
        image_neurons = A.mean(1).reshape((d1, d2), order='F')

    coors = get_contours(A, (d1, d2), thr)
    cc1 = [cor['coordinates'][:, 0] for cor in coors]
    cc2 = [cor['coordinates'][:, 1] for cor in coors]
    c1 = cc1[0]
    c2 = cc2[0]

    # split sources up, such that Bokeh does not warn
    # "ColumnDataSource's columns must be of the same length"
    source = ColumnDataSource(data=dict(x=x, y=z[:, 0], y2=C[0] / 100))
    source_ = ColumnDataSource(data=dict(z=z.T, z2=C / 100))
    source2 = ColumnDataSource(data=dict(c1=c1, c2=c2))
    source2_ = ColumnDataSource(data=dict(cc1=cc1, cc2=cc2))

    callback = CustomJS(args=dict(source=source, source_=source_, source2=source2, source2_=source2_), code="""
            var data = source.get('data')
            var data_ = source_.get('data')
            var f = cb_obj.get('value')-1
            x = data['x']
            y = data['y']
            y2 = data['y2']

            for (i = 0; i < x.length; i++) {
                y[i] = data_['z'][i+f*x.length]
                y2[i] = data_['z2'][i+f*x.length]
            }

            var data2_ = source2_.get('data');
            var data2 = source2.get('data');
            c1 = data2['c1'];
            c2 = data2['c2'];
            cc1 = data2_['cc1'];
            cc2 = data2_['cc2'];

            for (i = 0; i < c1.length; i++) {
                   c1[i] = cc1[f][i]
                   c2[i] = cc2[f][i]
            }
            source2.trigger('change')
            source.trigger('change')
        """)

    plot = bpl.figure(plot_width=600, plot_height=300)
    plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6)
    if denoised_color is not None:
        plot.line('x', 'y2', source=source, line_width=1, line_alpha=0.6, color=denoised_color)

    slider = bokeh.models.Slider(start=1, end=Y_r.shape[0], value=1, step=1,
                                 title="Neuron Number", callback=callback)
    xr = Range1d(start=0, end=image_neurons.shape[1])
    yr = Range1d(start=image_neurons.shape[0], end=0)
    plot1 = bpl.figure(x_range=xr, y_range=yr, plot_width=300, plot_height=300)

    plot1.image(image=[image_neurons[::-1, :]], x=0,
                y=image_neurons.shape[0], dw=d2, dh=d1, palette=grayp)
    plot1.patch('c1', 'c2', alpha=0.6, color='purple', line_width=2, source=source2)

    bpl.show(bokeh.layouts.layout([[slider], [bokeh.layouts.row(plot1, plot)]]))

    return Y_r
Example 56
# Plots the optimal trade-off curve between ||Dx||_2 and ||x-x_cor||_2 by
# solving the following problem for different values of delta:
#           minimize    ||x - x_cor||^2 + delta*||Dx||^2
# where x_cor is a problem parameter, and ||Dx|| is a measure of smoothness

# Input data
n = 400
t = np.array(range(0, n))

exact = 0.5 * sin(2 * np.pi * t / n) * sin(0.01 * t)
corrupt = exact + 0.05 * np.random.randn(len(exact))
corrupt = cvxopt.matrix(corrupt)

e = np.ones(n).T
ee = np.column_stack((-e, e)).T
D = sparse.spdiags(ee, range(-1, 1), n, n)
D = D.todense()
D = cvxopt.matrix(D)

# Solve in parallel
nopts = 10
lambdas = np.linspace(0, 50, nopts)
# Frame the problem with a parameter
lamb = Parameter(nonneg=True)
x = Variable(n)
p = Problem(Minimize(norm(x - corrupt) + norm(D * x) * lamb))


# For a value of lambda g, we solve the problem
# Returns [ ||Dx||_2 and ||x-x_cor||_2 ]
def get_value(g):
Example 57
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX


###########################################################################
if __name__ == '__main__':
    from scipy.sparse import spdiags, speye, issparse
    import time

    ##     def B( vec ):
    ##         return vec

    n = 100
    vals = [np.arange(n, dtype=np.float64) + 1]
    A = spdiags(vals, 0, n, n)
    B = speye(n, n)
    #    B[0,0] = 0
    B = np.eye(n, n)
    Y = np.eye(n, 3)

    #    X = sp.rand( n, 3 )
    xfile = {100: 'X.txt', 1000: 'X2.txt', 10000: 'X3.txt'}
    X = np.fromfile(xfile[n], dtype=np.float64, sep=' ')
    X.shape = (n, 3)

    ivals = [1. / vals[0]]

    def precond(x):
        invA = spdiags(ivals, 0, n, n)
        y = invA * x
Example 58
def main(args):
    data = scipy.io.loadmat(args.ifm_data)['IFMdata']

    CM_inInd = data['CM_inInd'][0][0]
    CM_neighInd = data['CM_neighInd'][0][0]
    CM_flows = data['CM_flows'][0][0]

    LOC_inInd = data['LOC_inInd'][0][0]
    LOC_flowRows = data['LOC_flowRows'][0][0]
    LOC_flowCols = data['LOC_flowCols'][0][0]
    LOC_flows = data['LOC_flows'][0][0]

    IU_inInd = data['IU_inInd'][0][0]
    IU_neighInd = data['IU_neighInd'][0][0]
    IU_flows = data['IU_flows'][0][0]

    kToU = data['kToU'][0][0]
    kToUconf = data['kToUconf'][0][0]

    known = data['known'][0][0]

    h, w = kToU.shape
    N = h * w

    # Convert indices from matlab to numpy format
    CM_inInd = convert_index(CM_inInd, h, w)
    CM_neighInd = convert_index(CM_neighInd, h, w)
    LOC_inInd = convert_index(LOC_inInd, h, w)
    LOC_flowRows = convert_index(LOC_flowRows, h, w)
    LOC_flowCols = convert_index(LOC_flowCols, h, w)
    IU_inInd = convert_index(IU_inInd, h, w)
    IU_neighInd = convert_index(IU_neighInd, h, w)

    CM_weights = np.ones((N, ))
    LOC_weights = np.ones((N, ))
    IU_weights = np.ones((N, ))
    KU_weights = np.ones((N, ))

    cm_mult = 1
    loc_mult = 1
    iu_mult = 0.01
    ku_mult = 0.05
    lmbda = 100

    print("Assembling linear system")

    A = cm_mult*color_mixture_laplacian(N, CM_inInd, CM_neighInd, CM_flows, CM_weights) + \
        loc_mult*matting_laplacian(N, LOC_inInd, LOC_flowRows, LOC_flowCols, LOC_flows, LOC_weights) + \
        iu_mult*similarity_laplacian(N, IU_inInd, IU_neighInd, IU_flows, IU_weights) + \
        ku_mult*sp.spdiags(np.ravel(KU_weights), 0, N, N).dot(sp.spdiags(np.ravel(kToUconf), 0, N, N)) + \
        lmbda*sp.spdiags(np.ravel(known).astype(np.float64), 0, N, N)

    b = (ku_mult*sp.spdiags(np.ravel(KU_weights), 0, N, N).dot(sp.spdiags(np.ravel(kToUconf), 0, N, N)) + \
        lmbda*sp.spdiags(np.ravel(known).astype(np.float64), 0, N, N)).dot(np.ravel(kToU))

    print("Solving")
    alpha, info = cg(A, b, tol=1e-6, maxiter=2000)
    print(np.amin(alpha), " ", np.amax(alpha))

    alpha = np.clip(alpha, 0, 1)
    alpha = np.reshape(alpha, (h, w))

    print(info)
    skimage.io.imsave("alpha.png", alpha)
Example 59
def make_diag_mtx(vec):
    dof = vec.size
    return spsp.spdiags(vec, [0], dof, dof)