Example #1
def cg(A, b, tol, maxiter, x0=None, I=None, pc="diag", verbose=True, viewiters=False):
    """Conjugate gradient solver wrapped for FEM purposes."""
    print('Starting conjugate gradient with preconditioner "' + pc + '"...')

    def callback(x):
        if viewiters:
            print("- Vector-2 norm: " + str(np.linalg.norm(x)))

    # Restrict the system to the free degrees of freedom I, if given.
    K = A if I is None else A[I].T[I].T

    M = None
    if pc == "diag":
        # diagonal (Jacobi) preconditioner built from the restricted matrix
        M = sp.spdiags(1 / K.diagonal(), 0, K.shape[0], K.shape[0])

    if I is None:
        u = spl.cg(K, b, x0=x0, maxiter=maxiter, M=M, tol=tol, callback=callback)
    else:
        x0I = None if x0 is None else x0[I]
        u = spl.cg(K, b[I], x0=x0I, maxiter=maxiter, M=M, tol=tol, callback=callback)

    if verbose:
        if u[1] == 0:
            print("* Achieved tolerance " + str(tol) + ".")
        elif u[1] > 0:
            print("* WARNING! Maximum number of iterations " + str(maxiter) + " reached.")

    if I is None:
        return u[0]
    else:
        U = np.zeros(A.shape[0])
        U[I] = u[0]
        return U
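# A minimal usage sketch of the wrapper above (hypothetical tridiagonal
# system and index set; assumes the module's np/sp/spl imports):
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spl

n = 10
A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n)).tocsr()
b = np.ones(n)
I = np.arange(1, n - 1)                 # free (interior) degrees of freedom
u = cg(A, b, tol=1e-8, maxiter=100, I=I)
print(np.linalg.norm((A @ u - b)[I]))   # residual on the free DOFs is small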
Example #2
    def poisson_solver(self):
        # Split into the r, g, b channels and iterate over all pixels of the
        # cloning region indexed in idx_map, building the right-hand sides.
        for i in range(len(self.idx_map)):
            neighbors, flag = self.count_neighbor(self.idx_map[i])
            x, y = self.idx_map[i]
            # discrete Laplacian of the source image f (the guidance field)
            self.b_r[i] = 4*self.f[x,y,0] - (self.f[x-1,y,0] + self.f[x+1,y,0] + self.f[x,y-1,0] + self.f[x,y+1,0])
            self.b_g[i] = 4*self.f[x,y,1] - (self.f[x-1,y,1] + self.f[x+1,y,1] + self.f[x,y-1,1] + self.f[x,y+1,1])
            self.b_b[i] = 4*self.f[x,y,2] - (self.f[x-1,y,2] + self.f[x+1,y,2] + self.f[x,y-1,2] + self.f[x,y+1,2])
            if neighbors != 4:
                # neighbor(s) on the clone-region boundary: add background terms
                self.b_r[i] += flag[0]*self.b[x-1,y,0] + flag[1]*self.b[x+1,y,0] + flag[2]*self.b[x,y-1,0] + flag[3]*self.b[x,y+1,0]
                self.b_g[i] += flag[0]*self.b[x-1,y,1] + flag[1]*self.b[x+1,y,1] + flag[2]*self.b[x,y-1,1] + flag[3]*self.b[x,y+1,1]
                self.b_b[i] += flag[0]*self.b[x-1,y,2] + flag[1]*self.b[x+1,y,2] + flag[2]*self.b[x,y-1,2] + flag[3]*self.b[x,y+1,2]


        # use conjugate gradient to solve for u
        u_r = splinalg.cg(self.A, self.b_r)[0]
        u_g = splinalg.cg(self.A, self.b_g)[0]
        u_b = splinalg.cg(self.A, self.b_b)[0]   

        return u_r, u_g, u_b
Example #3
 def time_solve(self, n, solver):
     if solver == 'dense':
         linalg.solve(self.P_dense, self.b)
     elif solver == 'cg':
         cg(self.P_sparse, self.b)
     elif solver == 'minres':
         minres(self.P_sparse, self.b)
     elif solver == 'spsolve':
         spsolve(self.P_sparse, self.b)
     else:
         raise ValueError('Unknown solver: %r' % solver)
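# A hedged standalone sketch of driving such a benchmark (the matrix names
# and sizes are hypothetical stand-ins for the class attributes):
import numpy as np
import scipy.sparse as sp
from scipy.linalg import solve
from scipy.sparse.linalg import cg, minres, spsolve
from timeit import default_timer as timer

n = 500
P_sparse = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
P_dense = P_sparse.toarray()
b = np.ones(n)

for name, run in [('dense', lambda: solve(P_dense, b)),
                  ('cg', lambda: cg(P_sparse, b)),
                  ('minres', lambda: minres(P_sparse, b)),
                  ('spsolve', lambda: spsolve(P_sparse, b))]:
    t0 = timer()
    run()
    print('%-8s %.4f s' % (name, timer() - t0))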
Example #4
def test_laplacian():

    import pylab as pl
    # Setup grid
    nx, ny = 500, 500
    d = 2*np.pi/nx
    Lx, Ly = nx * d, ny * d


    g = 0
    # make grid
    x = np.arange(-g, nx+g)*d
    y = np.arange(-g, ny+g)*d

    dx = x[1]-x[0]
    dy = y[1]-y[0]

    x, y =  np.meshgrid(x, y, indexing='ij')

    # build laplacian
    A = build_laplacian_matrix(nx,ny,d=d)
    # A = poisson((nx, ny), spacing=(dx, dy), format='csr')/d/d

    # right hand side (note: the smooth RHS below is immediately overwritten
    # by a random vector)
    f = np.sin(x)*np.cos(2*y)
    f = np.random.rand(A.shape[0])

    p_ex = np.sin(x)*np.cos(2*y)/(-1 - 4)

    print("Timing information")
    print("==================")
    print("")


    # with elapsed_timer() as elapsed:
    #     p_ap = la.spsolve(A, f.ravel())
    #     print("spsolve {0}".format(elapsed()))

    with elapsed_timer() as elapsed:
        p_cg = la.cg(A, f.ravel())[0]
        print("cg {0}".format(elapsed()))

    # with elapsed_timer() as elapsed:
    #     p_ap = la.gmres(A, f.ravel())
    #     print("gmres {0}".format(elapsed()))


    ml = ruge_stuben_solver(A)
    M = ml.aspreconditioner()
    with elapsed_timer() as elapsed:
        p_amg = la.cg(A, f.ravel(), M=M)[0]
        print("pyamg cg {0}".format(elapsed()))

    pl.show()
Example #5
def test_lincg_L_diag_heavy_precond():
    vals['Ldh'] = copy.copy(vals['L'])
    rdiag = numpy.random.rand(nparams) * 100
    for i in range(len(vals['L'])):
        vals['Ldh'][i,i] += rdiag[i]
    vals['Ldh_inv_g'] = linalg.cho_solve(linalg.cho_factor(vals['Ldh']), vals['g'])
    symb['M'] = T.vector('M')
    vals['M'] = numpy.diag(vals['Ldh'])

    ### without preconditioning ###
    [sol, niter, rerr] = lincg.linear_cg(
            lambda x: [T.dot(symb['L'], x)],
            [symb['g']],
            M = None,
            rtol=1e-20,
            maxiter = 10000,
            floatX = floatX)

    f = theano.function([symb['L'], symb['g']], sol + [niter, rerr])
    t1 = time.time()
    [Linv_g, niter, rerr] = f(vals['Ldh'], vals['g'])
    print('No precond: test_lincg runtime (s):', time.time() - t1)
    print('\t niter = ', niter)
    print('\t residual error = ', rerr)
    numpy.testing.assert_almost_equal(Linv_g, vals['Ldh_inv_g'], decimal=3)

    ### with preconditioning ###
    [sol, niter, rerr] = lincg.linear_cg(
            lambda x: [T.dot(symb['L'], x)],
            [symb['g']],
            M = [symb['M']],
            rtol=1e-20,
            maxiter = 10000,
            floatX = floatX)

    f = theano.function([symb['L'], symb['g'], symb['M']], sol + [niter, rerr])
    t1 = time.time()
    [Linv_g, niter, rerr] = f(vals['Ldh'], vals['g'], vals['M'])
    print('With precond: test_lincg runtime (s):', time.time() - t1)
    print('\t niter = ', niter)
    print('\t residual error = ', rerr)
    numpy.testing.assert_almost_equal(Linv_g, vals['Ldh_inv_g'], decimal=3)

    ### test scipy implementation ###
    t1 = time.time()
    cg(vals['Ldh'], vals['g'], maxiter=10000, tol=1e-10)
    print('scipy.sparse.linalg.cg (no preconditioning): Elapsed ', time.time() - t1)
    t1 = time.time()
    cg(vals['Ldh'], vals['g'], maxiter=10000, tol=1e-10, M=numpy.diag(1./vals['M']))
    print('scipy.sparse.linalg.cg (preconditioning): Elapsed ', time.time() - t1)
Example #6
def score(labels, kernel, labelbias=True):
    """
    Given a (combined) kernel and a label vector, do label propagation,
    optionally with GeneMANIA's label biasing scheme for small positive
    sets.
    """
    tstart = time.perf_counter()
    if labelbias:
        numpos = (labels == 1).sum()
        numneg = (labels == -1).sum()
        labels[labels == 0] = (numpos - numneg) / (numpos + numneg)

    kernel = kernel.tocoo(copy=True)

    colsums = np.asarray(kernel.sum(axis=0)).squeeze()
    diag = 1. / np.sqrt(colsums + np.finfo(np.float64).eps)
    kernel.data *= diag[kernel.row] * diag[kernel.col]
    
    numelem = len(diag)
    diag_indices = np.concatenate((np.arange(numelem).reshape(1, numelem),
                                   np.arange(numelem).reshape(1, numelem)),
                                  axis=0)
    normalizer_elems = 1 + np.asarray(kernel.sum(axis=0)).squeeze()
    normalizer = sparse.coo_matrix((normalizer_elems, diag_indices))
    laplacian = normalizer - kernel
    laplacian = (laplacian + laplacian.T) / 2.

    discriminant, info = splinalg.cg(laplacian, labels)
    return discriminant
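# A minimal usage sketch for score() on a hypothetical 4-node chain graph
# (assumes the module's np/sparse/splinalg/time imports; labels use the
# +1/-1/0 convention from the docstring):
import numpy as np
from scipy import sparse

W = sparse.csr_matrix(np.array([[0., 1., 0., 0.],
                                [1., 0., 1., 0.],
                                [0., 1., 0., 1.],
                                [0., 0., 1., 0.]]))
labels = np.array([1., 0., 0., -1.])
print(score(labels, W))  # one propagated score per node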
Example #7
    def _do_one_inner_iteration(self, A, b, **kwargs):
        r"""
        This method solves AX = b and returns the result to the corresponding
        algorithm.
        """
        logger.info('Solving AX = b for the sparse matrices')

        if A is None:
            A = self.A
        if b is None:
            b = self.b
        if self._iterative_solver is None:
            X = sprslin.spsolve(A, b)
        else:
            if self._iterative_solver not in ['cg', 'gmres']:
                raise Exception('GenericLinearTransport does not support the' +
                                ' requested iterative solver!')
            params = kwargs.copy()
            solver_params = ['x0', 'tol', 'maxiter', 'xtype', 'M', 'callback']
            # drop kwargs that scipy's iterative solvers do not accept
            for item in list(kwargs.keys()):
                if item not in solver_params:
                    params.pop(item, None)
            tol = kwargs.get('tol')
            if tol is None:
                tol = 1e-20
            params['tol'] = tol
            if self._iterative_solver == 'cg':
                result = sprslin.cg(A, b, **params)
            elif self._iterative_solver == 'gmres':
                result = sprslin.gmres(A, b, **params)
            X = result[0]
            self._iterative_solver_info = result[1]
        return X
Example #8
 def trainWithLabels(self):
     regparam = self.regparam
     #regparam = 0.
     if self.qidmap is not None:
         P = sp.lil_matrix((self.size, len(self.qidmap.keys())))
         for qidind in range(len(self.indslist)):
             inds = self.indslist[qidind]
             qsize = len(inds)
             for i in inds:
                 P[i, qidind] = 1. / sqrt(qsize)
         P = P.tocsr()
         PT = P.tocsc().T
     else:
         P = 1./sqrt(self.size)*(np.mat(np.ones((self.size,1), dtype=np.float64)))
         PT = P.T
     X = self.X.tocsc()
     X_csr = X.tocsr()
     def mv(v):
         v = np.mat(v).T
         return X_csr*(X.T*v)-X_csr*(P*(PT*(X.T*v)))+regparam*v
     G = LinearOperator((X.shape[0],X.shape[0]), matvec=mv, dtype=np.float64)
     Y = self.Y
     if self.callbackfun is not None:
         def cb(v):
             self.A = np.mat(v).T
             self.b = np.mat(np.zeros((1,1)))
             self.callback()
     else:
         cb = None
     XLY = X_csr*Y-X_csr*(P*(PT*Y))
     try:
         self.A = np.mat(cg(G, XLY, callback=cb)[0]).T
     except Finished as e:
         pass
Example #9
 def poisson_solver(self):
     # split into r, g, b 3 channels and
     # iterate through all pixels in the cloning region indexed in idx_map
     for i in range(len(self.idx_map)):
         count, flag, neighbor_idx = self.count_neighbor(self.idx_map[i])
         x, y = self.idx_map[i]
         # set neighboring pixel index in A to -1
         for s in range(4):
             if neighbor_idx[s]!=-1:
                 self.A[i ,neighbor_idx[s]] = -1
                     
         # b is degraded form if neighbors are all within clone region
         for channel in range(3):
             self.b[channel][i] = 4*self.F[x,y,channel] - (self.F[x-1,y,channel] +self.F[x+1,y,channel] + self.F[x,y-1,channel] + self.F[x,y+1,channel])
         
         # have neighbor(s) on the clone region boundary, include background terms  
         if count!=4:
             # dummy variable flag distinguishes neighbors inside the cloning region from those on the boundary
             for channel in range(3):
                 self.b[channel][i] += flag[0]*self.B[x-1,y,channel] + flag[1]*self.B[x+1,y,channel] + flag[2]*self.B[x,y-1,channel] + flag[3]*self.B[x,y+1,channel]
     
     # use conjugate gradient to solve for u
     u = np.zeros((3,self.n))
     for channel in range(3):
         u[channel] = splinalg.cg(self.A, self.b[channel])[0]
 
     return u
Example #10
 def __init__(self, X, train_preferences, regparam = 1., **kwargs):
     self.regparam = regparam
     self.callbackfun = None
     self.pairs = train_preferences
     self.X = csc_matrix(X.T)
     regparam = self.regparam
     X = self.X.tocsc()
     X_csr = X.tocsr()
     vals = np.concatenate([np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype=np.float64)])
     row = np.concatenate([np.arange(self.pairs.shape[0]),np.arange(self.pairs.shape[0])])
     col = np.concatenate([self.pairs[:,0], self.pairs[:,1]])
     coo = coo_matrix((vals, (row, col)), shape=(self.pairs.shape[0], X.shape[1]))
     pairs_csr = coo.tocsr()
     pairs_csc = coo.tocsc()
     def mv(v):
         vmat = np.mat(v).T
         ret = np.array(X_csr * (pairs_csc.T * (pairs_csr * (X.T * vmat))))+regparam*vmat
         return ret
     G = LinearOperator((X.shape[0], X.shape[0]), matvec=mv, dtype=np.float64)
     self.As = []
     M = np.mat(np.ones((self.pairs.shape[0], 1)))
     if self.callbackfun is not None:
         def cb(v):
             self.A = np.mat(v).T
             self.b = np.mat(np.zeros((1,1)))
             self.callbackfun.callback()
     else:
         cb = None
     XLY = X_csr * (pairs_csc.T * M)
     self.A = np.mat(cg(G, XLY, callback=cb)[0]).T
     self.b = np.mat(np.zeros((1,self.A.shape[1])))
     self.predictor = predictor.LinearPredictor(self.A, self.b)
Example #11
def linear_solver(Afun=None, ATfun=None, B=None, x0=None, par=None,
                  solver=None, callback=None):
    if callback is not None:
        callback(x0)
    if solver == 'CG':
        x, info = CG(Afun, B, x0=x0, par=par, callback=callback)
    elif solver == 'iterative':
        x, info = richardson(Afun, B, x0, par=par, callback=callback)
    else:
        from scipy.sparse.linalg import LinearOperator, cg, bicg
        if solver == 'scipy_cg':
            Afun.define_operand(B)
            Afunvec = LinearOperator(Afun.shape, matvec=Afun.matvec,
                                     dtype=np.float64)
            xcol, info = cg(Afunvec, B.vec(), x0=x0.vec(),
                            tol=par['tol'],
                            maxiter=par['maxiter'],
                            xtype=None, M=None, callback=callback)
        elif solver == 'scipy_bicg':
            Afun.define_operand(B)
            ATfun.define_operand(B)
            Afunvec = LinearOperator(Afun.shape, matvec=Afun.matvec,
                                     rmatvec=ATfun.matvec, dtype=np.float64)
            xcol, info = bicg(Afunvec, B.vec(), x0=x0.vec(),
                              tol=par['tol'],
                              maxiter=par['maxiter'],
                              xtype=None, M=None, callback=callback)
        res = dict()
        res['info'] = info
        x = VecTri(val=np.reshape(xcol, B.dN()))
    return x, info
Example #12
    def solve(self, x_0=None, tol=1e-05, max_iter=100, exchange_zero=1e-16):
        """
        Solve the equation *A * x = b* using a linear equation solver. After
        solving, the old unknown vectors and volume field values are overwritten.
        """
        if self.solver   == "cg":
            solver_return = lin.cg(self.A, self.b, x_0, tol, max_iter)
        elif self.solver == "bicg":
            solver_return = lin.bicg(self.A, self.b, x_0, tol, max_iter)
        elif self.solver == "lu_solver":
             solver_return = mylin.lu_solver_plu(self.p, self.l, self.u, self.b)
        elif self.solver == "lu_solver":
             solver_return = mylin.gs(self.A, self.b, x_0, tol, max_iter)
        self.x         = solver_return[0]
        self.x_old_old = self.x_old
        
        # find max absolute residual
        diff_ = abs(self.x - self.x_old)
        max_diff_ = max(diff_)
        if max_diff_ == 0.:
            self.residuals.append(exchange_zero)
        else:
            self.residuals.append(max_diff_)
        print('Residual for ' + self.field.name + ': ' + str(max(diff_)))
        
        #update volume field
        self.field.V = self.x_old + self.under_relax*(self.x - self.x_old)
#        self.field.V = self.x
        
        return True
Example #13
	def stepTime(self):
		# solve for intermediate velocity
		self.calculateRN()
		self.qStar, _ = sla.cg(self.A, self.rn)

		# projection step
		self.q = self.qStar
Example #14
 def trainWithPreferences(self):
     regparam = self.regparam
     X = self.X.tocsc()
     X_csr = X.tocsr()
     vals = np.concatenate([np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype=np.float64)])
     row = np.concatenate([np.arange(self.pairs.shape[0]),np.arange(self.pairs.shape[0])])
     col = np.concatenate([self.pairs[:,0], self.pairs[:,1]])
     coo = coo_matrix((vals, (row, col)), shape=(self.pairs.shape[0], X.shape[1]))
     pairs_csr = coo.tocsr()
     pairs_csc = coo.tocsc()
     def mv(v):
         vmat = np.mat(v).T
         ret = np.array(X_csr * (pairs_csc.T * (pairs_csr * (X.T * vmat))))+regparam*vmat
         return ret
     G = LinearOperator((X.shape[0], X.shape[0]), matvec=mv, dtype=np.float64)
     self.As = []
     M = np.mat(np.ones((self.pairs.shape[0], 1)))
     if self.callbackfun is not None:
         def cb(v):
             self.A = np.mat(v).T
             self.b = np.mat(np.zeros((1,1)))
             self.callback()
     else:
         cb = None
     XLY = X_csr * (pairs_csc.T * M)
     self.A = np.mat(cg(G, XLY, callback=cb)[0]).T
     self.b = np.mat(np.zeros((1,self.A.shape[1])))
     self.results['model'] = self.getModel()
Example #15
    def solve_divided(self):
        """ Solve the divided system of equations using
        a Schur compliment method.
        """
        f1 = self.rhs[:self.mesh.get_number_of_faces()]
        f2 = self.rhs[self.mesh.get_number_of_faces():]

        current_p = np.zeros(self.mesh.get_number_of_cells())


        def apply_lhs(x):
            return -self.div.dot(self.cg_inner(self.m.dot,
                                         self.div_t.dot(x),
                                         tol=1.e-16))

        f1_tilde = linalg.cg(self.m, f1)[0]

        cg_rhs = f2-self.div.dot(f1_tilde)

        current_p = self.cg(apply_lhs, cg_rhs)
        print "solver residual=>", np.linalg.norm(apply_lhs(current_p)-cg_rhs)

        current_v = -self.cg(self.m.dot,
                               self.div_t.dot(current_p),
                               tol=1.e-10)+f1_tilde

        self.solution = np.concatenate((current_v, current_p))
Example #16
def _solve(A, b, solver, tol):
    # helper method for ridge_regression, A is symmetric positive

    if solver == 'auto':
        if hasattr(A, 'todense'):
            solver = 'sparse_cg'
        else:
            solver = 'dense_cholesky'

    if solver == 'sparse_cg':
        if b.ndim < 2:
            from scipy.sparse import linalg as sp_linalg
            sol, error = sp_linalg.cg(A, b, tol=tol)
            if error:
                raise ValueError("Failed with error code %d" % error)
            return sol
        else:
            # sparse_cg cannot handle a 2-d b.
            sol = []
            for j in range(b.shape[1]):
                sol.append(_solve(A, b[:, j], solver="sparse_cg", tol=tol))
            return np.array(sol).T

    elif solver == 'dense_cholesky':
        from scipy import linalg
        if hasattr(A, 'todense'):
            A = A.todense()
        return linalg.solve(A, b, sym_pos=True, overwrite_a=True)
    else:
        raise NotImplementedError('Solver %s not implemented' % solver)
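# A small usage sketch exercising only the 'sparse_cg' path, on a
# hypothetical ridge normal-equations system:
import numpy as np
from scipy import sparse

X = np.random.rand(20, 5)
A = sparse.csr_matrix(X.T @ X + 0.1 * np.eye(5))  # symmetric positive definite
b = X.T @ np.random.rand(20)
w = _solve(A, b, solver='sparse_cg', tol=1e-8)
print(np.linalg.norm(A @ w - b))  # residual should be near zero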
Example #17
def get_toneMap(img, P):
    P = np.float64(P) / 255.0
    J = natural_histogram_matching(img)
    J = cv2.blur(J, (10, 10))
    theta = 0.2

    height, width = img.shape

    P = cv2.resize(P, (width, height))  # cv2.resize expects (width, height)
    P = P.reshape((1, height * width))
    logP = np.log(P)
    logP = spdiags(logP, 0, width * height, width * height)


    J = cv2.resize(J, (width, height))
    J = J.reshape(height * width)
    logJ = np.log(J)

    e = np.ones(width * height)

    Dx = spdiags([-e, e], np.array([0, height]), width *height, width * height)
    Dy = spdiags([-e, e], np.array([0, 1]), width * height, width * height)


    A = theta * (Dx.dot(Dx.transpose()) + Dy.dot(Dy.transpose())) + logP.dot(logP.transpose())

    b = logP.transpose().dot(logJ)
    beta, info = cg(A, b, tol=1e-6, maxiter=60)

    beta = beta.reshape((height, width))
    P = P.reshape((height, width))
    T = np.power(P, beta)

    return T
Example #18
def test_ScalarView_mpl_unknown():
    mesh = Mesh()
    mesh.load(domain_mesh)
    mesh.refine_element(0)
    shapeset = H1Shapeset()
    pss = PrecalcShapeset(shapeset)

    # create an H1 space
    space = H1Space(mesh, shapeset)
    space.set_uniform_order(5)
    space.assign_dofs()

    # initialize the discrete problem
    wf = WeakForm(1)
    set_forms(wf)

    solver = DummySolver()
    sys = LinSystem(wf, solver)
    sys.set_spaces(space)
    sys.set_pss(pss)

    # assemble the stiffness matrix and solve the system
    sys.assemble()
    A = sys.get_matrix()
    b = sys.get_rhs()
    from scipy.sparse.linalg import cg

    x, res = cg(A, b)
    sln = Solution()
    sln.set_fe_solution(space, pss, x)

    view = ScalarView("Solution")
Example #19
def lsqr(X, y, tol=1e-3):
    import scipy.sparse.linalg as sp_linalg
    from ..utils.extmath import safe_sparse_dot

    if hasattr(sp_linalg, "lsqr"):
        # scipy 0.8 or greater
        return sp_linalg.lsqr(X, y)
    else:
        n_samples, n_features = X.shape
        if n_samples > n_features:
            coef, _ = sp_linalg.cg(safe_sparse_dot(X.T, X), safe_sparse_dot(X.T, y), tol=tol)
        else:
            coef, _ = sp_linalg.cg(safe_sparse_dot(X, X.T), y, tol=tol)
            coef = safe_sparse_dot(X.T, coef)

        residues = y - safe_sparse_dot(X, coef)
        return coef, None, None, residues
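# A quick call sketch with hypothetical data (assumes the relative
# safe_sparse_dot import resolves within its package):
import numpy as np

X = np.random.rand(50, 3)
y = X @ np.array([1.0, -2.0, 0.5])
coef = lsqr(X, y)[0]
print(coef)  # approximately [1, -2, 0.5]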
Example #20
 def h_sol_approx(x, lambdak, tol):
     # returns an approximate solution of the inner optimization
     K = pairwise_kernels(Xt, gamma=np.exp(lambdak[0]), metric='rbf')
     (out, success) = splinalg.cg(
         K + np.exp(lambdak[1]) * np.eye(x0.size), yt, x0=x, tol=tol)
     if success != 0:  # nonzero info code means CG did not converge
         raise ValueError
     return out
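# A call sketch with hypothetical stand-ins for the enclosing scope's
# Xt, yt and x0 (assumes sklearn's pairwise_kernels and scipy's splinalg):
import numpy as np
from scipy.sparse import linalg as splinalg
from sklearn.metrics.pairwise import pairwise_kernels

Xt = np.random.rand(30, 4)
yt = np.random.rand(30)
x0 = np.zeros(30)
lambdak = np.array([0.0, -1.0])  # (log kernel width, log regularization)
print(h_sol_approx(x0, lambdak, tol=1e-6).shape)  # (30,)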
Example #21
 def solve(self, A, F, Solver):
     # "spsolve" for the direct internal solver
     # "cg" for conjugate gradient
     A = A.tocsc()
     #F = F.toarray()
     if (Solver == "spsolve"):
         self.u = linalg.spsolve(A.astype(float32),F.astype(float32))
     if (Solver == "cg"):    
         self.u = linalg.cg(A.toarray(),F.toarray())[0]
Example #22
def conj_loss(X, y, Xy, M, epsilon, sol0):
    # conjugate of the loss function
    n_features = X.shape[1]
    matvec = lambda z: X.rmatvec((X.matvec(z))) + epsilon * z
    K = splinalg.LinearOperator((n_features, n_features), matvec, dtype=X.dtype)
    sol = splinalg.cg(K, M.ravel(order='F') + Xy, maxiter=20, x0=sol0)[0]
    p = np.dot(sol, M.ravel(order='F')) - .5 * (linalg.norm(y - X.matvec(sol)) ** 2)
    p -= 0.5 * epsilon * (linalg.norm(sol) ** 2)
    return p, sol
Example #23
def _solve_cg(lap_sparse, B, tol):
    lap_sparse = lap_sparse.tocsc()
    X = []
    for i in range(len(B)):
        x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0]
        X.append(x0)
    X = np.array(X)
    X = np.argmax(X, axis=0)
    return X
Example #24
def calculate_potential2(space, x, y):
    b = space.flatten()
    potential_space = np.zeros_like(space)
    v0 = potential_space.flatten()
    from scipy.sparse.linalg import cg
    L = get_Laplacian_LinearOperator(space.shape)
    vsol, err = cg(L, b, x0=v0)
    potential_space = vsol.reshape(space.shape)
    return potential_space
Example #25
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    n_samples, n_features = X.shape
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features))

    if n_features > n_samples:

        def create_mv(curr_alpha):
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x

            return _mv

    else:

        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x

            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]

        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator((n_samples, n_samples), matvec=mv, dtype=X.dtype)
            coef, info = sp_linalg.cg(C, y_column, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator((n_features, n_features), matvec=mv, dtype=X.dtype)
            coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." % info)

    return coefs
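# Consistency sketch: the two branches realize the ridge identity
# X.T @ inv(X X.T + a*I) @ y == inv(X.T X + a*I) @ X.T @ y, so either one
# should match a dense reference solve (hypothetical data):
import numpy as np
from scipy.sparse import linalg as sp_linalg

for X in (np.random.rand(5, 20), np.random.rand(20, 5)):
    y = np.random.rand(X.shape[0], 1)
    alpha = np.array([1.0])
    w = _solve_sparse_cg(X, y, alpha, tol=1e-12)[0]
    w_ref = np.linalg.solve(X.T @ X + np.eye(X.shape[1]), X.T @ y).ravel()
    print(np.allclose(w, w_ref, atol=1e-5))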
Example #26
def sbtv (measurements, side_size, n_angles, angle_step, BP, FP, c_mu=5e-1, c_lambda=1e-1, max_iter_cgs=10, max_iter=10):
    size_img2d = (side_size, side_size)
    size_img1d = side_size * side_size

    # Define a function for the LinearOperator
    def A_func(recon_slice):
        recon_slice = np.reshape (recon_slice, size_img2d)
        DxtDx = Dxt(Dx(recon_slice))
        DytDy = Dyt(Dy(recon_slice))
        result = c_mu * BP(FP(recon_slice, n_angles, angle_step), n_angles, angle_step) + c_lambda * (DxtDx + DytDy)
        return np.real(np.reshape(result,(size_img1d,)))

    # Create LinearOperator
    M = LinearOperator((size_img1d, size_img1d), matvec = A_func, dtype=complex)

    # Split Bregman buffers
    dx = np.zeros(size_img2d)
    dy = np.zeros(size_img2d)
    bx = np.zeros(size_img2d)
    by = np.zeros(size_img2d)

    # Zeros and Eps buffers
    Z   = np.zeros(size_img2d)
    EPS = Z + 1e-12

    # Current solution
    u  = np.zeros((side_size, side_size))

    Atf = BP(measurements, n_angles, angle_step)

    i = 0
    while i < max_iter:
        print "SBTV iter: %d  | mu: %f lambda: %f" % (i, c_mu, c_lambda)
        tmpx = Dxt(dx-bx)
        tmpy = Dyt(dy-by)
        b = c_mu * Atf + c_lambda * (tmpx + tmpy)

        u_vec = np.reshape (u, (size_img1d,))
        b_vec = np.reshape (b, (size_img1d,))
        u_vec, flag = cg (A=M, b=b_vec, x0=u_vec, maxiter=max_iter_cgs)

        u = np.reshape (u_vec, size_img2d)
        tmpx = Dx(u) + bx
        tmpy = Dy(u) + by

        # Shrinkage
        s = np.array(np.sqrt (np.power(tmpx,2) + np.power(tmpy,2)))
        thresh = np.maximum(s - 1 / c_lambda, Z) / np.maximum(EPS, s)
        dx = thresh * tmpx
        dy = thresh * tmpy

        bx = tmpx - dx
        by = tmpy - dy
        i = i + 1

    return u
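# The isotropic soft-thresholding ("shrinkage") step from the loop above,
# shown standalone on hypothetical gradient fields:
import numpy as np

c_lambda = 0.1
tmpx, tmpy = np.random.randn(4, 4), np.random.randn(4, 4)
s = np.sqrt(tmpx**2 + tmpy**2)
thresh = np.maximum(s - 1 / c_lambda, 0.0) / np.maximum(s, 1e-12)
dx, dy = thresh * tmpx, thresh * tmpy  # magnitudes shrunk, directions kept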
Example #27
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, coef_init=None):
    n_samples, n_features = X.shape
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features))

    if n_features > n_samples:
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]

        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            if coef_init is not None:
                x0 = X1.matvec(coef_init)
            else:
                x0 = None
            coef, info = sp_linalg.cg(C, y_column, x0=x0, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            coefs[i], info = sp_linalg.cg(C, y_column, x0=coef_init, maxiter=max_iter,
                                          tol=tol)
        if info != 0:
            raise ValueError("Failed with error code %d" % info)

    return coefs
Example #28
def solveW(H):
	bw=np.ravel(Xlo.rmatvec(Y.dot(H)),order='F')
	H2=H.T.dot(H)
	def mvW(s):
		S=np.reshape(s,(nd,nk),order='F')
		return np.ravel(Xlo.rmatvec(Xlo.matmat(S.dot(H2))),order='F') + alpha*s

	Cw=sp_linalg.LinearOperator((nd*nk, nd*nk), matvec=mvW, dtype=Xlo.dtype)
	w, info = sp_linalg.cg(Cw, bw, maxiter=max_iter,tol=tol)
	return np.reshape(w,(nd,nk),order='F')
Example #29
 def possion_solver(self):
     
     self.create_possion_equation()
     
     #Use Conjugate Gradient iteration to solve A x = b
     x_r = linalg.cg(self.A, self.b[:,0])[0]
     x_g = linalg.cg(self.A, self.b[:,1])[0]
     x_b = linalg.cg(self.A, self.b[:,2])[0]
     
     self.newImage = self.target
         
     for i in range(self.b.shape[0]):
         x,y = self.maskidx2Corrd[i]
         self.newImage[x,y,0] = np.clip(x_r[i], 0, 255)
         self.newImage[x,y,1] = np.clip(x_g[i], 0, 255)
         self.newImage[x,y,2] = np.clip(x_b[i], 0, 255)
     
     self.newImage = Image.fromarray(self.newImage)
     return self.newImage
Example #30
def solveH(W):
	h=np.empty((nl,nk))
	A=sp_linalg.aslinearoperator(Xlo.matvec(W))
	def mvH(s):
		return A.rmatvec(A.matvec(s))+alpha*s
	for i in range(Y.shape[1]):
		Ch=sp_linalg.LinearOperator((nk,nk),matvec=mvH,dtype=Xlo.dtype)
		bh=A.rmatvec(Y[:,i])
		h[i], info=sp_linalg.cg(Ch,bh,maxiter=max_iter,tol=tol)
	return h
Example #31
F = space.source_vector(pde.source, dim=3)
A, F = bc.apply(A, F, uh)

if False:
    uh.T.flat[:] = spsolve(A, F)
elif False:
    N = len(F)
    print(N)
    start = timer()
    ilu = spilu(A.tocsc(), drop_tol=1e-6, fill_factor=40)
    end = timer()
    print('time:', end - start)

    M = LinearOperator((N, N), lambda x: ilu.solve(x))
    start = timer()
    uh.T.flat[:], info = cg(A, F, tol=1e-8, M=M)  # solve with CG
    print(info)
    end = timer()
    print('time:', end - start)
elif True:
    P = space.stiff_matrix(c=2 * pde.mu)
    isBdDof = space.set_dirichlet_bc(uh,
                                     pde.dirichlet,
                                     threshold=pde.is_dirichlet_boundary)
    solver = LinearElasticityLFEMFastSolver(A, P, isBdDof)
    start = timer()
    uh[:] = solver.solve(uh, F)
    end = timer()
    print('time:', end - start, 'dof:', A.shape)
else:
    aspace = CrouzeixRaviartFiniteElementSpace(mesh)
Example #32
K = assembleGlobalStiffness(nodes, E, gN, p)

F = assembleGlobalLoading(fun, nodes, E, gN, p)

dirNodes = np.array([0, numNodes - 1])

dirVals = np.array([u_a, u_b])

[Kg, Fg] = applyDirichlet(K, F, dirNodes, dirVals)

#--------------------------------------------------------
# use the finite element functions to obtain the solution
#--------------------------------------------------------

[uSol, status] = cg(Kg, Fg)

#-----------------------------------------
# define the functions for f
# and the exact solution you have computed
#------------------------------------------

u_exact = ((L**2/np.pi**2)*np.cos(np.pi*k/L) + ((alpha*(1-k**2)*L**2)/(4*np.pi**2*k**2))*np.sin(2*np.pi*k/L) - (L**2/np.pi**2) + 1)*nodes \
  -(L**2/np.pi**2)*np.cos(np.pi*k*nodes/L) - ((alpha*(1-k**2)*L**2)/(4*np.pi**2*k**2))*np.sin(2*np.pi*k*nodes/L) + (L**2/np.pi**2)

#Plotting
plt.figure()
plt.plot(nodes, u_exact, 'b', label='Exact Solution')
plt.plot(nodes, uSol, 'r--', label='FEM Solution')
plt.xlabel('Nodal Coordinates')
plt.ylabel('Solution Values')
Example #33
        proc = proc + 1
        K = Kdata[N * i:N * (i + 1), N * j:N * (j + 1)]
        #Grid
        n = int(N / 2)
        kk = np.asarray(
            list(range(0, n + 1)) +
            list(range(-n + 1, 0)))  #kk = [0:N/2-1 N/2 -N/2+1:-1] in matlab
        xk, yk = np.meshgrid(kk, kk)

        #Solve g1 & g2
        b1 = 1j * xk * np.fft.fftn(K)
        b2 = 1j * yk * np.fft.fftn(K)

        A = LinearOperator((N**2, N**2), matvec=spec2D)

        g1, exitcode1 = cg(A, b1.reshape(-1))
        if exitcode1 != 0:
            print("cg not converged: %d, %d, %s" % (exitcode1, proc, 'g1'))
            g1, exitcode1 = gmres(A, b1.reshape(-1), x0=g1)
    # print('g1 convergence:', exitcode1)

        g2, exitcode2 = cg(A, b2.reshape(-1))
        if exitcode2 != 0:
            print("cg not converged: %d, %d, g2" % (exitcode1, proc))
            g2, exitcode2 = gmres(A, b2.reshape(-1), x0=g2)
        if proc % (2 * d_up) == 0:
            print("Processing: [%d/%d]" % (proc, d_up**2))

        gm1 = g1.reshape(N, N)
        gm2 = g2.reshape(N, N)
Example #34
def CGSolve(c1,dt24,c,p):
    tv = cg(np.eye(p.size)*(1 + c1) + dt24*c, p, x0=p/(1.0 + c1))[0]
    return tv[:,np.newaxis]
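# A minimal call with hypothetical inputs (c as a small SPD matrix):
import numpy as np
from scipy.sparse.linalg import cg

p = np.random.rand(8)
c = np.diag(1.0 + np.random.rand(8))
tv = CGSolve(c1=0.5, dt24=0.01, c=c, p=p)
print(tv.shape)  # (8, 1)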
Example #35
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):

    logging.basicConfig(level=loglev)

    import numpy as np
    import numpy.linalg as npla
    import itertools
    import time

    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    nsucc = 0
    nfail = 0

    ####################################################################################
    # Test Conjugate Gradient method on simple multidim laplace equation
    ####################################################################################

    import scipy.sparse as sp
    import scipy.sparse.linalg as spla

    span = np.array([0., 1.])
    d = 2
    N = 64
    h = 1 / float(N - 1)
    eps_cg = 1e-3
    eps_round = 1e-6

    # sys.stdout.write("Conjugate-Gradient: Laplace  N=%4d   , d=%3d      [START] \n" % (N,d))
    # sys.stdout.flush()

    dofull = True
    try:
        # Construct d-D Laplace (with 2nd order finite diff)
        D = -1. / h**2. * (np.diag(np.ones(
            (N - 1)), -1) + np.diag(np.ones(
                (N - 1)), 1) + np.diag(-2. * np.ones((N)), 0))
        D[0, 0:2] = np.array([1., 0.])
        D[-1, -2:] = np.array([0., 1.])
        D_sp = sp.coo_matrix(D)
        I_sp = sp.identity(N)
        I = np.eye(N)
        FULL_LAP = sp.coo_matrix((N**d, N**d))
        for i in range(d):
            tmp = sp.identity(1)
            for j in range(d):
                if i != j: tmp = sp.kron(tmp, I_sp)
                else: tmp = sp.kron(tmp, D_sp)
            FULL_LAP = FULL_LAP + tmp
    except MemoryError:
        print("FULL CG: Memory Error")
        dofull = False

    # Construction of TT Laplace operator
    CPtmp = []
    D_flat = D.flatten()
    I_flat = I.flatten()
    for i in range(d):
        CPi = np.empty((d, N**2))
        for alpha in range(d):
            if i != alpha:
                CPi[alpha, :] = I_flat
            else:
                CPi[alpha, :] = D_flat
        CPtmp.append(CPi)

    CP_lap = DT.Candecomp(CPtmp)
    TT_LAP = DT.TTmat(CP_lap, nrows=N, ncols=N, is_sparse=[True] * d)
    TT_LAP.build(eps_round)
    TT_LAP.rounding(eps_round)
    CPtmp = None
    CP_lap = None

    # Construct Right hand-side (b=1, Dirichlet BC = 0)
    X = np.linspace(span[0], span[1], N)
    b1D = np.ones(N)
    b1D[0] = 0.
    b1D[-1] = 0.

    if dofull:
        # Construct the d-D right handside
        tmp = np.array([1.])
        for j in range(d):
            tmp = np.kron(tmp, b1D)
        FULL_b = tmp

    # Construct the TT right handside
    CPtmp = []
    for i in range(d):
        CPi = np.empty((1, N))
        CPi[0, :] = b1D
        CPtmp.append(CPi)
    CP_b = DT.Candecomp(CPtmp)
    TT_b = DT.TTvec(CP_b)
    TT_b.build()
    TT_b.rounding(eps_round)

    if dofull:
        # Solve the full system using spla.cg
        (FULL_RES, FULL_CONV) = spla.cg(FULL_LAP, FULL_b, tol=eps_cg)

    if PLOTTING:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (XX, YY) = np.meshgrid(X, X)
        fig = plt.figure(figsize=(18, 7))
        plt.suptitle("CG")
        if d == 2:
            # Plot function
            ax = fig.add_subplot(131, projection='3d')
            ax.plot_surface(XX,
                            YY,
                            FULL_RES.reshape((N, N)),
                            rstride=1,
                            cstride=1,
                            cmap=cm.coolwarm,
                            linewidth=0,
                            antialiased=False)
            plt.show(block=False)

    # Solve TT cg
    x0 = DT.zerosvec(d, N)
    (TT_RES, TT_conv, TT_info) = mla.cg(TT_LAP,
                                        TT_b,
                                        x0=x0,
                                        eps=eps_cg,
                                        ext_info=True,
                                        eps_round=eps_round)
    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(132, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        TT_RES.to_tensor(),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    # Error
    if PLOTTING and d == 2:
        # Plot function
        ax = fig.add_subplot(133, projection='3d')
        ax.plot_surface(XX,
                        YY,
                        np.abs(TT_RES.to_tensor() - FULL_RES.reshape((N, N))),
                        rstride=1,
                        cstride=1,
                        cmap=cm.coolwarm,
                        linewidth=0,
                        antialiased=False)
        plt.show(block=False)

    err2 = npla.norm(TT_RES.to_tensor().flatten() - FULL_RES, 2)
    if err2 < 1e-2:
        print_ok("5.1 CG: Laplace  N=%4d   , d=%3d  , 2-err=%f" % (N, d, err2))
        nsucc += 1
    else:
        print_fail("5.1 CG: Laplace  N=%4d   , d=%3d  , 2-err=%f" %
                   (N, d, err2))
        nfail += 1

    print_summary("TT CG", nsucc, nfail)

    return (nsucc, nfail)
Example #36
###############################################################################
# Scipy conjugate gradient
# ------------------------

from scipy.sparse import diags
from scipy.sparse.linalg import aslinearoperator, cg
from scipy.sparse.linalg.interface import IdentityOperator

print(
    "Solving a Gaussian linear system, with {} points in dimension {}.".format(
        N, D))
start = time.time()
A = aslinearoperator(diags(alpha * np.ones(N))) + aslinearoperator(Kxx)
c_sp = np.zeros((N, Dv))
for i in range(Dv):
    c_sp[:, i] = cg(A, b[:, i])[0]

end = time.time()
print('Timing (KeOps + scipy implementation):', round(end - start, 5), 's')

###############################################################################
# Compare with a straightforward Numpy implementation:
#

start = time.time()
K_xx = alpha * np.eye(N) + np.exp(-g * np.sum(
    (x[:, None, :] - x[None, :, :])**2, axis=2))
c_np = np.linalg.solve(K_xx, b)
end = time.time()
print('Timing (Numpy implementation):', round(end - start, 5), 's')
print("Relative error (KeOps) = ",
Example #37
 def get_offline_result(self,i):
     ids = self.trunc_ids[i]
     trunc_lap = self.lap_alpha[ids][:, ids]
     scores, _ = linalg.cg(trunc_lap, self.trunc_init, tol=1e-6, maxiter=20)
     return scores
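# The same pattern in isolation: restrict an SPD matrix (a hypothetical
# stand-in for lap_alpha) to a candidate set and diffuse from a one-hot seed:
import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg

n = 100
lap_alpha = sp.diags([-1.0, 2.5, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
ids = np.arange(30)                 # truncation to the top candidates
trunc_lap = lap_alpha[ids][:, ids]
trunc_init = np.zeros(len(ids))
trunc_init[0] = 1.0                 # seed at the query item
scores, _ = linalg.cg(trunc_lap, trunc_init, tol=1e-6, maxiter=20)
print(scores[:5])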
Example #38
def runTest(conf, kernel, load, layerDepth, pp=None):
    err_ = None
    data = {
        "h": [],
        "L2 Error": [],
        "Rates": [],
        "Assembly Time": [],
        "nV_Omega": []
    }
    u_exact = load["solution"]

    # Delta is assumed to be of the form deltaK/10 in the mesh, so we obtain deltaK by
    deltaK = int(np.round(kernel["horizon"] * 10))
    if not deltaK:
        raise ValueError(
            "Delta has to be of the form delta = deltaK/10. for deltaK in N.")
    n_start = 10 + 2 * deltaK
    N = [n_start * 2**l for l in list(range(layerDepth))]
    N_fine = N[-1] * 4

    for n in N:
        mesh = RegMesh2D(kernel["horizon"],
                         n,
                         ufunc=u_exact,
                         ansatz=conf["ansatz"],
                         outdim=kernel["outputdim"])
        print("\n h: ", mesh.h)
        data["h"].append(mesh.h)
        data["nV_Omega"].append(mesh.nV_Omega)

        # Assembly ------------------------------------------------------------------------
        start = time()
        A = nlfem.stiffnessMatrix(mesh.__dict__, kernel, conf)
        f_OI = nlfem.loadVector(mesh.__dict__, load, conf)
        data["Assembly Time"].append(time() - start)

        A_O = A[mesh.nodeLabels > 0][:, mesh.nodeLabels > 0]
        A_I = A[mesh.nodeLabels > 0][:, mesh.nodeLabels < 0]
        f = f_OI[mesh.nodeLabels > 0]

        if conf["ansatz"] == "CG":
            g = np.apply_along_axis(u_exact, 1,
                                    mesh.vertices[mesh.vertexLabels < 0])
        else:
            g = np.zeros(((mesh.K - mesh.K_Omega) // mesh.outdim, mesh.outdim))
            for i, E in enumerate(mesh.elements[mesh.elementLabels < 0]):
                for ii, Vdx in enumerate(E):
                    vert = mesh.vertices[Vdx]
                    g[(mesh.dim + 1) * i + ii] = u_exact(vert)
        f -= A_I @ g.ravel()

        # Solve ---------------------------------------------------------------------------
        print("Solve...")
        # mesh.write_ud(np.linalg.solve(A_O, f), conf.u_exact)
        x = cg(A_O, f, f)[0].reshape((-1, mesh.outdim))
        # print("CG Solve:\nIterations: ", solution["its"], "\tError: ", solution["res"])
        mesh.write_ud(x, u_exact)
        if kernel["outputdim"] == 1:
            mesh.plot_ud(pp)
        # Some random quick Check....
        # filter = np.array(assemble.read_arma_mat("data/result.fd").flatten(), dtype=bool)
        # plt.scatter(mesh.vertices[filter][:,0], mesh.vertices[filter][:,1])
        # plt.scatter(mesh.vertices[np.invert(filter)][:,0], mesh.vertices[np.invert(filter)][:,1])
        # plt.show()

        # Refine to N_fine ----------------------------------------------------------------
        mesh = RegMesh2D(kernel["horizon"],
                         N_fine,
                         ufunc=u_exact,
                         coarseMesh=mesh,
                         is_constructAdjaciencyGraph=False,
                         ansatz=conf["ansatz"],
                         outdim=kernel["outputdim"])
        #mesh.plot_ud(pp)
        #mesh.plot_u_exact(pp)
        # Evaluate L2 Error ---------------------------------------------------------------
        u_diff = (mesh.u_exact - mesh.ud)[(mesh.nodeLabels > 0)[::mesh.outdim]]
        Mu_udiff = nlfem.evaluateMass(mesh, u_diff,
                                      conf["quadrature"]["outer"]["points"],
                                      conf["quadrature"]["outer"]["weights"])
        err = np.sqrt(u_diff.ravel() @ Mu_udiff)

        # Print Rates ---------------------------------------------------------------------
        print("L2 Error: ", err)
        data["L2 Error"].append(err)
        if err_ is not None:
            rate = np.log2(err_ / err)
            print("Rate: \t", rate)
            data["Rates"].append(rate)
        else:
            data["Rates"].append(0)
        err_ = err
    #pp.close()
    return data
Example #39
 def solve(self, y, A, Denoiser, cg_param=None):
     '''
     Use the plug-and-play ADMM algorithm for compressive sensing recovery
     For the sub-least-squares step:
         the conjugate gradient method is used when A is given as a linear operator
         a direct inverse is calculated when A is given as a matrix
     Input:
         y: numpy array of shape (M,)
         A: numpy matrix of shape (M, N) or scipy.sparse.linalg.LinearOperator
         Denoiser: Denoiser class (see module import_neural_networks), must have vector output
         cg_param: parameter for scipy.sparse.linalg.cg
     Return:
         x_star: recovered signal of shape (N,)
         info: information stored from callback functions
     '''
     if isinstance(A,linalg.LinearOperator):
         # copy cg_param
         if cg_param is None:
             self.cg_param = {}
         else:
             self.cg_param = deepcopy(cg_param)
             
         if "tol" not in self.cg_param:
             self.cg_param["tol"] = 1e-5
         
         if "maxiter" not in self.cg_param:
             self.cg_param["maxiter"] = None
             
         # Build new linear operators for cg
         P_mv = lambda x: A.rmatvec(A.matvec(x)) + self.algo_param["rho"]*x
         P = linalg.LinearOperator((self.shape[1], self.shape[1]), matvec=P_mv, rmatvec=P_mv)
         q = A.rmatvec(y)
         # Initial iterations
         loss_func = lambda x: np.sum(np.square(y - A.matvec(x)))  # define objective function
         loss = loss_func(self.algo_param["x0"])
         loss_star = loss
         x = self.algo_param["x0"]
         z = x
         u = np.zeros_like(x)
         k = 0
         tol = self.algo_param["tol"]
         loss_record = np.array([])
         z_record = []
         callback_res = []
         # Start iterations
         while k < self.algo_param["maxiter"] and tol > 0:
             # least square step
             x, _ = linalg.cg(P, q + self.algo_param["rho"]*(z - u), x0=z, 
                              tol=self.cg_param["tol"], maxiter=self.cg_param["maxiter"])
             # denoising step
             z = Denoiser.denoise(x + u)
             # dual variable update
             u += x - z
             # monitor the loss
             loss =  loss_func(z)
             if loss < loss_star:
                 loss_star = loss
             else:
                 tol -= 1
                 
             loss_record = np.append(loss_record, loss)
             # record all the denoised signals
             z_record.append(z)
             # callback functions
             if self.algo_param["callback"] is not None:
                 callback_res.append(self.algo_param["callback"](x, z, u, loss))
                 
             k += 1
      
         x_star = z_record[np.argmin(loss_record)]
         
     else:      
         # One time calculation
         P = np.linalg.inv(A.T.dot(A) + self.algo_param["rho"]*np.eye(self.shape[1]))
         q = P.dot(A.T.dot(y))
         # Initial iterations
         loss_func = lambda x: np.sum(np.square(y - A.dot(x)))  # define objective function
         loss = loss_func(self.algo_param["x0"])
         loss_star = loss
         x = self.algo_param["x0"]
         z = x
         u = np.zeros_like(x)
         k = 0
         tol = self.algo_param["tol"]
         loss_record = np.array([])
         z_record = []
         callback_res = []
         # Start iterations
         while k < self.algo_param["maxiter"] and tol > 0:
             # least square step
             x = q +  self.algo_param["rho"]*P.dot(z-u)
             # denoising step
             z = Denoiser.denoise(x + u)
             # dual variable update
             u += x - z
             # monitor the loss
             loss =  loss_func(z)
             if loss < loss_star:
                 loss_star = loss
             else:
                 tol -= 1
                 
             loss_record = np.append(loss_record, loss)
             # record all the denoised signals
             z_record.append(z)
             # callback functions
             if self.algo_param["callback"] is not None:
                 callback_res.append(self.algo_param["callback"](x, z, u, loss))
                 
             k += 1
      
         x_star = z_record[np.argmin(loss_record)]
     
     return x_star, callback_res
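# The Denoiser argument only needs a denoise(v) method with vector input and
# output; a hypothetical stub satisfying that interface:
import numpy as np

class ClipDenoiser:
    """Hypothetical stand-in for a learned denoiser."""
    def __init__(self, lo=0.0, hi=1.0):
        self.lo, self.hi = lo, hi

    def denoise(self, v):
        # placeholder projection instead of a neural network
        return np.clip(v, self.lo, self.hi)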
Example #40
            ## displace atoms in far-field boundary according to u_bc = -EGF.f(II)
            logging.debug('  EGF displacement for {} along {}'.format(
                j, direction[d]))
            u_bc = setBC(j, grid, size_in, size_all, GEn, phi_R_grid, N,
                         t_mag * a0, np.reshape(f, (size_in, 3)))

            ## add the "correction forces" out in the buffer region
            ## f_eff = f(II) - (-D.(-EGF.f(II)) = f(II) + D.u_bc
            f += D.dot(np.reshape(u_bc, 3 * size_all))

            ## solve Dii.u = f_eff for u
            logging.debug('  entering solver for {} along {}'.format(
                j, direction[d]))
            t1 = time.time()
            [uf, conv] = sla.cg(Din, f, tol=args.tol)
            logging.debug('  %d, solve time: %f' % (conv, time.time() - t1))

            ## since I put in initial forces of unit magnitude,
            ## the column vector uf = column of LGF matrix
            if ((j == LGF_jmin) and (d == 0)):
                G = uf[0:3 * size_123].copy()
            else:
                G = np.column_stack((G, uf[0:3 * size_123]))

            logging.info('Atom {} direction {}'.format(j, direction[d]))

    with h5py.File(args.Gfile, 'w') as f:
        f.attrs['size_1'] = size_1
        f.attrs['size_12'] = size_12
        f.attrs['size_123'] = size_123
Example #41
def hoag_lbfgs(h_func_grad,
               h_hessian,
               h_crossed,
               g_func_grad,
               x0,
               bounds=None,
               lambda0=0.,
               disp=None,
               maxcor=10,
               maxiter=100,
               maxiter_inner=10000,
               only_fit=False,
               iprint=-1,
               maxls=20,
               tolerance_decrease='exponential',
               callback=None,
               verbose=0,
               epsilon_tol_init=1e-3,
               exponential_decrease_factor=0.9):
    """
    HOAG algorithm using L-BFGS-B in the inner optimization algorithm.

    Options
    -------
    eps : float
        Step size used for numerical approximation of the Jacobian.
    disp : int
        Set to True to print convergence messages.
    maxfun : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.
    """
    m = maxcor
    lambdak = lambda0
    if verbose > 0:
        print('started hoag')

    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    # unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u)
              for l, u in bounds]

    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0, (1, None): 1, (1, 1): 2, (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    if not maxls > 0:
        raise ValueError('maxls must be positive.')

    x = array(x0, float64)
    wa = zeros(2 * m * n + 5 * n + 11 * m * m + 8 * m, float64)
    iwa = zeros(3 * n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)

    exact_epsilon = 1e-12
    if tolerance_decrease == 'exact':
        epsilon_tol = exact_epsilon
    else:
        epsilon_tol = epsilon_tol_init

    Bxk = None
    L_lambda = None
    g_func_old = np.inf

    if callback is not None:
        callback(x, lambdak)

    # n_eval, F = wrap_function(F, ())
    h_func, h_grad = h_func_grad(x, lambdak)
    norm_init = linalg.norm(h_grad)
    old_grads = []

    for it in range(1, maxiter):
        h_func, h_grad = h_func_grad(x, lambdak)
        n_iterations = 0
        task[:] = 'START'
        old_x = x.copy()
        while 1:
            pgtol_lbfgs = 1e-120
            factr = 1e-120  # / np.finfo(float).eps
            _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, h_func, h_grad,
                           factr, pgtol_lbfgs, wa, iwa, task, iprint, csave,
                           lsave, isave, dsave, maxls)
            task_str = task.tobytes()
            if task_str.startswith(b'FG'):
                # minimization routine wants h_func and h_grad at the current x
                # Overwrite h_func and h_grad:
                h_func, h_grad = h_func_grad(x, lambdak)
                if linalg.norm(h_grad) / np.exp(
                        np.min(lambdak)) < epsilon_tol * norm_init:
                    # this one is finished
                    break

            elif task_str.startswith(b'NEW_X'):
                # new iteration
                if n_iterations > maxiter_inner:
                    task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT'
                    print('ITERATIONS EXCEEDS LIMIT')
                    continue
                    # break
                else:
                    n_iterations += 1
            else:
                if verbose > 1:
                    print('LBFGS decided finish!')
                    print(task_str)
                break
        else:
            pass

        if only_fit:
            break

        if verbose > 0:
            h_func, h_grad = h_func_grad(x, lambdak)
            print(
                'inner level iterations: %s, inner objective %s, grad norm %s'
                % (n_iterations, h_func, linalg.norm(h_grad)))

        fhs = h_hessian(x, lambdak)
        B_op = splinalg.LinearOperator(shape=(x.size, x.size),
                                       matvec=lambda z: fhs(z))

        g_func, g_grad = g_func_grad(x, lambdak)
        if Bxk is None:
            Bxk = x.copy()
        residual_init = linalg.norm(g_grad)
        if verbose > 1:
            print('Inverting matrix with precision %s' %
                  (epsilon_tol * residual_init))
        Bxk, success = splinalg.cg(B_op,
                                   g_grad,
                                   x0=Bxk,
                                   tol=epsilon_tol * residual_init,
                                   maxiter=maxiter_inner)
        if success != 0:
            print('CG did not converge to the desired precision')
        old_epsilon_tol = epsilon_tol
        if tolerance_decrease == 'quadratic':
            epsilon_tol = epsilon_tol_init / (it**2)
        elif tolerance_decrease == 'cubic':
            epsilon_tol = epsilon_tol_init / (it**3)
        elif tolerance_decrease == 'exponential':
            epsilon_tol *= exponential_decrease_factor
        elif tolerance_decrease == 'exact':
            epsilon_tol = 1e-24
        else:
            raise NotImplementedError

        epsilon_tol = max(epsilon_tol, exact_epsilon)
        # .. update hyperparameters ..
        grad_lambda = -h_crossed(x, lambdak).dot(Bxk)
        if linalg.norm(grad_lambda) == 0:
            # increase tolerance
            if verbose > 0:
                print('too low tolerance %s, moving to next iteration' %
                      epsilon_tol)
            continue
        old_grads.append(linalg.norm(grad_lambda))

        if L_lambda is None:
            if old_grads[-1] > 1e-3:
                # make sure we are not selecting a step size that is too small
                L_lambda = old_grads[-1] / np.sqrt(len(lambdak))
            else:
                L_lambda = 1

        step_size = (1. / L_lambda)

        old_lambdak = lambdak.copy()
        lambdak -= step_size * grad_lambda

        # projection
        lambdak[lambdak < -6] = -6
        lambdak[lambdak > 6] = 6
        incr = linalg.norm(lambdak - old_lambdak)

        C = 0.25
        factor_L_lambda = 1.0
        if g_func <= g_func_old + C * epsilon_tol + \
                old_epsilon_tol * (C + factor_L_lambda) * incr - factor_L_lambda * (L_lambda) * incr * incr:
            L_lambda *= 0.95
            if verbose > 1:
                print('increased step size')
        elif g_func >= 1.2 * g_func_old:
            if verbose > 1:
                print('decrease step size')
            # decrease step size
            L_lambda *= 2
            lambdak = old_lambdak
            print('!!step size rejected!!', g_func, g_func_old)
            g_func_old, g_grad_old = g_func_grad(x, old_lambdak)
            # tighten tolerance
            epsilon_tol *= 0.5
        else:
            pass

        # if g_func - g_func_old > 0:
        #     raise ValueError
        norm_grad_lambda = linalg.norm(grad_lambda)
        if verbose > 0:
            print(('it %s, g: %s, incr: %s, sum lambda %s, epsilon: %s, ' +
                   'L: %s, norm grad_lambda: %s') %
                  (it, g_func, g_func - g_func_old, lambdak.sum(), epsilon_tol,
                   L_lambda, norm_grad_lambda))
        g_func_old = g_func

        if callback is not None:
            callback(x, lambdak)

    task_str = task.tobytes().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    else:
        warnflag = 2

    return x, lambdak, warnflag
Exemplo n.º 42
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:17:26.926070Z", "iopub.execute_input": "2020-09-12T14:17:26.927415Z", "iopub.status.idle": "2020-09-12T14:17:26.931809Z", "shell.execute_reply": "2020-09-12T14:17:26.932624Z"}}
x=op.solve(b)
spl.norm(A*x-b)

# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Conjugate Gradient

# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:17:26.941001Z", "iopub.execute_input": "2020-09-12T14:17:26.946217Z", "iopub.status.idle": "2020-09-12T14:17:26.949574Z", "shell.execute_reply": "2020-09-12T14:17:26.950147Z"}}
global k
k=0
def f(xk): # function called at every iteration
     global k
     print("iteration {0:2d} residual = {1:7.3g}".format(k,spl.norm(A*xk-b)))
     k += 1

x,info=spspl.cg(A,b,x0=np.zeros(N),tol=1.0e-12,maxiter=N,M=None,callback=f)

# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Preconditioned conjugate gradient

# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:17:26.954769Z", "iopub.execute_input": "2020-09-12T14:17:26.955871Z", "iopub.status.idle": "2020-09-12T14:17:26.959314Z", "shell.execute_reply": "2020-09-12T14:17:26.959905Z"}}
pc=spspl.spilu(A,drop_tol=0.1)  # pc is an ILU decomposition
xp=pc.solve(b)
spl.norm(A*xp-b)


# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:17:26.966049Z", "iopub.execute_input": "2020-09-12T14:17:26.966944Z", "iopub.status.idle": "2020-09-12T14:17:26.973399Z", "shell.execute_reply": "2020-09-12T14:17:26.973984Z"}}
def mv(v):
    return pc.solve(v)
lo = spspl.LinearOperator((N,N),matvec=mv)
k = 0
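
# + {"slideshow": {"slide_type": "fragment"}}
# The cell above stops before the preconditioned solve; the call below is a
# minimal sketch of the intended usage (an assumption, not part of the original
# notebook), reusing the ILU-based operator `lo` and the callback `f` above.
x, info = spspl.cg(A, b, x0=np.zeros(N), tol=1.0e-12, maxiter=N, M=lo, callback=f)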
Exemplo n.º 43
# Create discrete space, a square from [-1, 1] x [-1, 1] with (11 x 11) points
space = odl.uniform_discr([-1, -1], [1, 1], [11, 11])

# Create odl operator for negative laplacian
laplacian = -odl.Laplacian(space)

# Create right hand side, a gaussian around the point (0, 0)
rhs = space.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) / 0.1**2))

# Convert laplacian to scipy operator
scipy_laplacian = odl.operator.oputils.as_scipy_operator(laplacian)

# Convert to array and flatten
rhs_arr = rhs.asarray().ravel()

# Solve using scipy
result, info = sl.cg(scipy_laplacian, rhs_arr)

# Other options include
# result, info = sl.cgs(scipy_laplacian, rhs_arr)
# result, info = sl.gmres(scipy_op, rhs_arr)
# result, info = sl.lgmres(scipy_op, rhs_arr)
# result, info = sl.bicg(scipy_op, rhs_arr)
# result, info = sl.bicgstab(scipy_op, rhs_arr)

# Convert back to odl and display result
result_odl = space.element(result.reshape(space.shape))  # result is flat
result_odl.show('Result')
(rhs - laplacian(result_odl)).show('Residual', force_show=True)
Exemplo n.º 44
 def _cg_wrapper(A, b, x0=None, tol=1e-5, maxiter=None):
     return cg(A, b, x0=x0, tol=tol, maxiter=maxiter)
Exemplo n.º 45
def geodesicBB1D(nO,
                 nD,
                 Nit,
                 mub0,
                 mub1,
                 cCongestion=0.0,
                 potentialV=None,
                 detailStudy=False,
                 eps=10**(-5)):
    """
	Main function which is called to do the computation
	
	Inputs: 
	nO: number of discretization points of the source space [0,1]
	nD: number of discretization points per dimension of the target space
	Nit: number of ADMM iterations performed
	mub0, mub1: nD**2-arrays with temporal boundary conditions
	cCongestion: scale of the quadratic penalization of the density in the running cost 
	potentialV: nD**2-array thought of as a function on D
	detailStudy: whether to compute the objective functional at every iteration (slower if True)  
	
	Outputs: 
	mu: nO*nD*nD array with the values of the density  
	E0, E1: nO*nD*nD arrays with the values of the momentum  
	objectiveValue: value of the Lagrangian along the iterations of the ADMM  
	primalResidual, dualResidual: values of the L^2 norms of the primal and dual residuals along the iterations of the ADMM
	"""

    startProgram = time.time()

    print("Parameters ----------------------")
    print("nO: " + str(nO))
    print("nD: " + str(nD))
    print("cCongestion: " + str(cCongestion))
    print()

    #****************************************************************************************************
    # Domain Building
    #****************************************************************************************************

    # Domain Omega: centered grid
    xOmegaC = np.linspace(0, 1, nO)

    # Step Omega
    DeltaOmega = xOmegaC[1] - xOmegaC[0]

    # Domain Omega: staggered grid
    xOmegaS = np.linspace(-DeltaOmega / 2, 1 + DeltaOmega / 2, nO + 1)

    # Domain D (grid is "periodic") : centered grid
    xDC = np.linspace(0, 1 - 1 / nD, nD)
    yDC = np.linspace(0, 1 - 1 / nD, nD)
    xGridDC, yGridDC = np.meshgrid(xDC, yDC, indexing='ij')

    # Step D
    DeltaD = xDC[1] - xDC[0]

    # Domain D: staggered grid
    xDS = np.linspace(DeltaD / 2, 1 - DeltaD / 2, nD)
    yDS = np.linspace(DeltaD / 2, 1 - DeltaD / 2, nD)
    xGridDS, yGridDS = np.meshgrid(xDS, yDS, indexing='ij')

    # In D
    # The neighbors of the point i of centered are i-1 and i in staggered
    # The neighbors of the point i of staggered are i and i+1 in centered

    #***************************************************************************************************
    # Function building
    #****************************************************************************************************

    # Lagrange multiplier associated to mu. Centered everywhere
    mu = np.zeros((nO, nD, nD))

    # Momentum E, Lagrange multiplier. Centered everywhere. The two last components indicate to which dr_i phi^alpha it corresponds
    # First number is component in Omega, second is component in D
    E0 = np.zeros((nO, nD, nD, 2, 2))
    E1 = np.zeros((nO, nD, nD, 2, 2))

    # Dual variable phi (phi^alpha staggered in alpha, centered everywhere else)
    phi = np.zeros((nO + 1, nD, nD))

    # Primal Variable A : A^alpha beta which corresponds to dr_beta phi^alpha. Same pattern as muTilde
    A = np.zeros((nO, nD, nD))

    # Primal variable B, same pattern as E
    # First number is component in Omega, second is component in D
    B0 = np.zeros((nO, nD, nD, 2, 2))
    B1 = np.zeros((nO, nD, nD, 2, 2))

    # Lagrange multiplier associated to the congestion
    lambdaC = np.zeros((nO, nD, nD))

    if potentialV is None:
        potentialV = np.zeros((nD, nD))

    #****************************************************************************************************
    # Boundary values
    #****************************************************************************************************

    # Normalization ----------------------------------------------------------------------------

    mub0 /= (np.sum(mub0))
    mub1 /= (np.sum(mub1))

    # Build the boundary term -----------------------------------------------------------------

    BT = np.zeros((nO + 1, nD, nD))

    BT[0, :, :] = -mub0[:, :] / DeltaOmega

    BT[-1, :, :] = mub1[:, :] / DeltaOmega

    #****************************************************************************************************
    # Scalar product
    #****************************************************************************************************
    def scalarProduct(a, b):
        return np.sum(np.multiply(a, b))

    #****************************************************************************************************
    # Differential, averaging and projection operators
    #****************************************************************************************************

    # Derivative along Omega of a staggered function. Returns a centered function --------------------------

    def gradOmega(input):
        output = (input[1:, :, :] - input[:-1, :, :]) / DeltaOmega
        return output

    # MINUS adjoint of the previous operator

    def gradAOmega(input):

        inputSize = input.shape
        output = np.zeros((inputSize[0] + 1, inputSize[1], inputSize[2]))

        output[1:-1, :, :] = (input[1:, :, :] - input[:-1, :, :]) / DeltaOmega
        output[0, :, :] = input[0, :, :] / DeltaOmega
        output[-1, :, :] = -input[-1, :, :] / DeltaOmega

        return output

    # Derivative along D of a staggered function. Returns a centered function ----------------------------

    def gradD0(input):

        inputSize = input.shape
        output = np.zeros(inputSize)

        output[:, 1:, :] = (input[:, 1:, :] - input[:, :-1, :]) / DeltaD
        output[:, 0, :] = (input[:, 0, :] - input[:, -1, :]) / DeltaD

        return output

    def gradD1(input):

        inputSize = input.shape
        output = np.zeros(inputSize)

        output[:, :, 1:] = (input[:, :, 1:] - input[:, :, :-1]) / DeltaD
        output[:, :, 0] = (input[:, :, 0] - input[:, :, -1]) / DeltaD

        return output

    # MINUS adjoint of the two previous operators -- same as the derivative along D of a centered function, returns a staggered one

    def gradAD0(input):

        inputSize = input.shape
        output = np.zeros(inputSize)

        output[:, :-1, :] = (input[:, 1:, :] - input[:, :-1, :]) / DeltaD
        output[:, -1, :] = (input[:, 0, :] - input[:, -1, :]) / DeltaD

        return output

    def gradAD1(input):

        inputSize = input.shape
        output = np.zeros(inputSize)

        output[:, :, :-1] = (input[:, :, 1:] - input[:, :, :-1]) / DeltaD
        output[:, :, -1] = (input[:, :, 0] - input[:, :, -1]) / DeltaD

        return output

    # Splitting operator and its adjoint ------------------------------------

    # The input has the same staggered pattern as grad_D phi

    def splitting(input0, input1):

        output0 = np.zeros((nO, nD, nD, 2, 2))
        output1 = np.zeros((nO, nD, nD, 2, 2))

        # Output 0

        output0[:, 0, :, 0, 0] = input0[:-1, -1, :]
        output0[:, 1:, :, 0, 0] = input0[:-1, :-1, :]

        output0[:, :, :, 0, 1] = input0[:-1, :, :]

        output0[:, 0, :, 1, 0] = input0[1:, -1, :]
        output0[:, 1:, :, 1, 0] = input0[1:, :-1, :]

        output0[:, :, :, 1, 1] = input0[1:, :, :]

        # Output 1

        output1[:, :, 0, 0, 0] = input1[:-1, :, -1]
        output1[:, :, 1:, 0, 0] = input1[:-1, :, :-1]

        output1[:, :, :, 0, 1] = input1[:-1, :, :]

        output1[:, :, 0, 1, 0] = input1[1:, :, -1]
        output1[:, :, 1:, 1, 0] = input1[1:, :, :-1]

        output1[:, :, :, 1, 1] = input1[1:, :, :]

        return output0, output1

    # Adjoint of the splitting operator. Takes something with the same staggered pattern as B, E and returns something like grad_D phi

    def splittingA(input0, input1):

        output0 = np.zeros((nO + 1, nD, nD))
        output1 = np.zeros((nO + 1, nD, nD))

        # Output 0

        output0[:-1, -1, :] += input0[:, 0, :, 0, 0]
        output0[:-1, :-1, :] += input0[:, 1:, :, 0, 0]

        output0[:-1, :, :] += input0[:, :, :, 0, 1]

        output0[1:, -1, :] += input0[:, 0, :, 1, 0]
        output0[1:, :-1, :] += input0[:, 1:, :, 1, 0]

        output0[1:, :, :] += input0[:, :, :, 1, 1]

        # Output 1

        output1[:-1, :, -1] += input1[:, :, 0, 0, 0]
        output1[:-1, :, :-1] += input1[:, :, 1:, 0, 0]

        output1[:-1, :, :] += input1[:, :, :, 0, 1]

        output1[1:, :, -1] += input1[:, :, 0, 1, 0]
        output1[1:, :, :-1] += input1[:, :, 1:, 1, 0]

        output1[1:, :, :] += input1[:, :, :, 1, 1]

        return output0, output1

    # Returning the derivatives of phi -----------------------------------------------------------

    # Derivatives wrt Omega of phi
    def derivativeOphi():

        return gradOmega(phi)

    # Derivatives wrt D of phi. As phi is centered, we use the adjoint of the gradient

    def derivativeDphi():

        return gradAD0(phi), gradAD1(phi)

    # Derivatives wrt D and splitting. Return an object which has the same staggered pattern as B and E

    def derivativeSplittingDphi():

        output0 = np.zeros((nO, nD, nD, 2, 2))
        output1 = np.zeros((nO, nD, nD, 2, 2))

        dD0phi, dD1phi = derivativeDphi()

        output0, output1 = splitting(dD0phi, dD1phi)

        return output0, output1

    #****************************************************************************************************
    # Laplace matrix and preconditioner for its inverse
    #****************************************************************************************************

    # Build the Laplace operator -------------------------------------

    auxCrap0 = np.zeros((nO + 1, nD, nD))
    auxCrap1 = np.zeros((nO + 1, nD, nD))

    def LaplaceFunction(input):

        inputShaped = input.reshape((nO + 1, nD, nD))

        output = np.zeros((nO + 1, nD, nD))

        # Derivatives in Omega
        output += gradAOmega(gradOmega(inputShaped))

        # Derivatives and splitting in D

        aux0, aux1 = splitting(gradAD0(inputShaped), gradAD1(inputShaped))
        dD0, dD1 = splittingA(aux0, aux1)

        # Update output

        output += gradD0(dD0)
        output += gradD1(dD1)

        return output.reshape((nO + 1) * nD * nD) + eps * input

    LaplaceOp = scspl.LinearOperator(((nO + 1) * nD * nD, (nO + 1) * nD * nD),
                                     matvec=LaplaceFunction)

    # Build the preconditioner for the Laplace matrix using FFT: we do not take into account S*S, where S is the splitting operator, which is why this is only a preconditioner and not the true inverse

    # We build the diagonal coefficients in the Fourier basis -----------------------------------------

    # Beware of the fact that there is a factor 4 in front of the derivatives in D because of the multiplicity of E

    LaplaceDiagInv = np.zeros((nO + 1, nD, nD))

    for alpha in range(nO + 1):
        for i in range(nD):
            for j in range(nD):

                toInv = 0.0

                # Derivatives in Omega
                toInv += 2 * (1 - cos(alpha * pi / nO)) / (DeltaOmega**2)

                # Derivatives in D
                toInv += 8 * (1 - cos(2 * pi * i / nD)) / (DeltaD**2)
                toInv += 8 * (1 - cos(2 * pi * j / nD)) / (DeltaD**2)

                if abs(toInv) <= 10**(-10):
                    LaplaceDiagInv[alpha, i, j] = 0.0
                else:
                    LaplaceDiagInv[alpha, i, j] = -1 / (toInv + eps)

    # Compute the multiplicative constant for the operator idct( dct )

    AuxFFT = np.random.rand(nO)
    ImAuxFFT = dct(AuxFFT, type=1)
    InvAuxFFT = idct(ImAuxFFT, type=1)

    ConstantFFT = AuxFFT[0] / InvAuxFFT[0]

    # Then we build the preconditioner as a function

    def precondFunction(input):

        inputShaped = input.reshape((nO + 1, nD, nD))

        # Applying FFT

        input_FFT = dct(fft(fft(inputShaped, axis=2), axis=1), type=1, axis=0)

        # Multiplication by the diagonal matrix

        solution_FFT = np.multiply(LaplaceDiagInv, input_FFT)

        # Inverse transformation

        solution = ConstantFFT * idct(
            ifft(ifft(solution_FFT, axis=2), axis=1), type=1, axis=0)

        # Storage of the results

        output = solution.real

        return output.reshape((nO + 1) * nD * nD)

    # And finally we wrap it as a LinearOperator

    precondOp = scspl.LinearOperator(((nO + 1) * nD * nD, (nO + 1) * nD * nD),
                                     matvec=precondFunction)

    #****************************************************************************************************
    # Objective functional
    #****************************************************************************************************

    def objectiveFunctional():

        output = 0.0

        # Boundary term
        output += scalarProduct(phi, BT) * DeltaOmega

        # Computing the derivatives of phi and split them in D
        dOphi = derivativeOphi()
        dSD0phi, dSD1phi = derivativeSplittingDphi()

        # Lagrange multiplier mu
        output += scalarProduct(A + lambdaC - dOphi + potentialV,
                                mu) * DeltaOmega

        # Lagrange multiplier E.
        output += scalarProduct(B0 - dSD0phi, E0) * DeltaOmega
        output += scalarProduct(B1 - dSD1phi, E1) * DeltaOmega

        # Penalization of congestion
        if abs(cCongestion) >= 10**(-8):
            output -= 1 / (2. * cCongestion) * scalarProduct(
                lambdaC, lambdaC) * DeltaOmega * DeltaD**2

        # Penalty in A, phi
        output -= r / 2 * scalarProduct(
            A + lambdaC + potentialV - dOphi,
            A + lambdaC + potentialV - dOphi) * DeltaOmega * DeltaD**2

        # Penalty in B, phi.
        output -= r / 2 * scalarProduct(B0 - dSD0phi,
                                        B0 - dSD0phi) * DeltaOmega * DeltaD**2
        output -= r / 2 * scalarProduct(B1 - dSD1phi,
                                        B1 - dSD1phi) * DeltaOmega * DeltaD**2

        return output

    #****************************************************************************************************
    # Algorithm iteration
    #****************************************************************************************************

    # Value of the augmentation parameter (updated during the ADMM iterations)
    r = 1.

    # Initialize the array which will contain the values of the objective functional
    if detailStudy:
        objectiveValue = np.zeros(3 * Nit)
    else:
        objectiveValue = np.zeros((Nit // 10))

    # Residuals
    primalResidual = np.zeros(Nit)
    dualResidual = np.zeros(Nit)

    # Main Loop

    for counterMain in range(Nit):

        print(30 * "-" + " Iteration " + str(counterMain + 1) + " " + 30 * "-")

        if detailStudy:
            objectiveValue[3 * counterMain] = objectiveFunctional()
        elif (counterMain % 10) == 0:
            objectiveValue[counterMain // 10] = objectiveFunctional()

        # Laplace problem -----------------------------------------------------------------------------

        startLaplace = time.time()

        # Build the RHS

        RHS = np.zeros((nO + 1, nD, nD))

        RHS -= BT * DeltaOmega

        RHS -= gradAOmega(mu) * DeltaOmega

        RHS += r * gradAOmega(A + lambdaC +
                              potentialV) * DeltaOmega * DeltaD**2

        # Take the splitting adjoint of both E and B
        ES0, ES1 = splittingA(E0, E1)
        BS0, BS1 = splittingA(B0, B1)

        RHS -= gradD0(ES0) * DeltaOmega
        RHS -= gradD1(ES1) * DeltaOmega

        RHS += r * gradD0(BS0) * DeltaOmega * DeltaD**2
        RHS += r * gradD1(BS1) * DeltaOmega * DeltaD**2

        # Solve the system

        solution, res = scspl.cg(LaplaceOp,
                                 RHS.reshape(((nO + 1) * nD * nD)),
                                 M=precondOp,
                                 maxiter=50)

        # print("Resolution of the linear system: " + str(res))

        phi = solution.reshape((nO + 1, nD, nD)) / (r * DeltaOmega * DeltaD**2)

        endLaplace = time.time()
        print("Solving the Laplace system: " +
              str(round(endLaplace - startLaplace, 2)) + "s.")

        if detailStudy:
            objectiveValue[3 * counterMain + 1] = objectiveFunctional()

        # Projection over a convex set ---------------------------------------------------------
        # It projects on the set Tr(A) + 1/2 |B|^2 <= 0. We reduce to a 1D projection, then use a Newton method with a fixed number of iterations.

        startProj = time.time()

        # Computing the derivatives of phi and split them in D
        dOphi = derivativeOphi()
        dSD0phi, dSD1phi = derivativeSplittingDphi()

        # Compute what needs to be projected

        # On A
        aArray = dOphi - potentialV + mu / (r * DeltaD**2)

        # On B
        toProjectB0 = dSD0phi + E0 / (r * DeltaD**2)
        toProjectB1 = dSD1phi + E1 / (r * DeltaD**2)

        bSquaredArray = np.sum(np.square(toProjectB0) + np.square(toProjectB1),
                               axis=(-1, -2)) / 8

        # Compute the array discriminating between the values already on the convex and the others
        # Value of the objective functional. For the points not in the convex, we want it to vanish.
        projObjective = aArray + bSquaredArray
        # projDiscriminating is 1 if the point needs to be projected, 0 if it is already in the convex set
        projDiscriminating = np.greater(projObjective, 10**(-16) * np.ones(
            (nO, nD, nD)))
        projDiscriminating = projDiscriminating.astype(int)
        projDiscriminating = projDiscriminating.astype(float)

        # Newton method iteration

        # Value of the Lagrange multiplier. Initialized at 0, not updated if already in the convex set
        xProj = np.zeros((nO, nD, nD))

        for counterProj in range(20):
            # Objective functional
            projObjective = aArray + 4 * (
                1. + cCongestion * r) * xProj + np.divide(
                    bSquaredArray, np.square(1 - xProj))
            # Derivative of the objective functional
            dProjObjective = 4 * (1. + cCongestion * r) - 2 * np.divide(
                bSquaredArray, np.power(xProj - 1, 3))
            # Update of xProj
            xProj -= np.divide(np.multiply(projDiscriminating, projObjective),
                               dProjObjective)

        # Update of A and B as a result

        A = aArray + 4 * (1. + cCongestion * r) * xProj

        # Update lambdaC
        lambdaC = -4 * cCongestion * r * xProj

        # Rescale xProj so that it has the same dimensions as B and E
        xProj = np.kron(xProj.reshape((nO * nD * nD)), np.ones(4)).reshape(
            (nO, nD, nD, 2, 2))

        B0 = np.divide(toProjectB0, (1 - xProj))
        B1 = np.divide(toProjectB1, (1 - xProj))

        # Print the info

        endProj = time.time()
        print("Pointwise projection: " + str(round(endProj - startProj, 2)) +
              "s.")

        if detailStudy:
            objectiveValue[3 * counterMain + 2] = objectiveFunctional()

        # Gradient descent in (E,muTilde), i.e. in the dual -----------------------------------------------

        # No need to recompute the derivatives of phi

        # Update for muTilde -- no need to update the cross terms, they vanish
        mu -= r * (A + lambdaC + potentialV - dOphi) * DeltaD**2

        # Update for E
        E0 -= r * (B0 - dSD0phi) * DeltaD**2
        E1 -= r * (B1 - dSD1phi) * DeltaD**2

        # Compute the residuals ------------------------------------------------------------------

        # For the primal residual, just sum what was the update in the dual
        primalResidual[counterMain] = sqrt(DeltaOmega) * DeltaD * lin.norm(
            np.array([
                lin.norm(A + lambdaC + potentialV - dOphi),
                lin.norm(B0 - dSD0phi),
                lin.norm(B1 - dSD1phi)
            ]))

        # For the dual residual, take the RHS of the Laplace system and keep only BT and the dual variables mu, E

        dualResidualAux = np.zeros((nO + 1, nD, nD))

        dualResidualAux -= BT

        dualResidualAux -= gradAOmega(mu)

        # Take the splitting adjoint of both E and B
        ES0, ES1 = splittingA(E0, E1)

        dualResidualAux -= gradD0(ES0)
        dualResidualAux -= gradD1(ES1)

        dualResidual[counterMain] = r * sqrt(DeltaOmega) * lin.norm(
            dualResidualAux)

        # Update the parameter r -----------------------------------------------------------------

        # cf. Boyd et al. for an explanation of the rule

        if primalResidual[counterMain] >= 10 * dualResidual[counterMain]:
            r *= 2
        elif 10 * primalResidual[counterMain] <= dualResidual[counterMain]:
            r /= 2

        # Printing some results ------------------------------------------------------------------

        if detailStudy:

            print("Maximizing in phi, should go up: " +
                  str(objectiveValue[3 * counterMain + 1] -
                      objectiveValue[3 * counterMain]))
            print("Maximizing in A,B, should go up: " +
                  str(objectiveValue[3 * counterMain + 2] -
                      objectiveValue[3 * counterMain + 1]))
            if counterMain >= 1:
                print("Dual update: should go down: " +
                      str(objectiveValue[3 * counterMain] -
                          objectiveValue[3 * counterMain - 1]))

        print("Values of phi0:")
        print(np.max(phi))
        print(np.min(phi))

        print("Values of A")
        print(np.max(A))
        print(np.min(A))

        print("Values of mu")
        print(np.max(mu))
        print(np.min(mu))

        print("Values of E0")
        print(np.max(E0))
        print(np.min(E0))

        print("r")
        print(r)

    #****************************************************************************************************
    # End of the program, printing and returning the results
    #****************************************************************************************************
    print()
    print(30 * "-" + " End of the ADMM iterations " + 30 * "-")

    # Integral of the density
    intMu = np.sum(mu, axis=(-1, -2))

    print("Minimal and maximal value of integral of the density")
    print(np.min(intMu))
    print(np.max(intMu))

    print("Maximal and minimal value of the density")
    print(np.min(mu) / DeltaD**2)
    print(np.max(mu) / DeltaD**2)

    print("Discrepancy between lambdaC and mu")
    print(np.max(lambdaC - mu * cCongestion / (DeltaD**2)))

    print("Final value of the augmentation paramter")
    print(r)

    print("Final value of the objective functional")
    print(objectiveValue[-1])

    endProgram = time.time()

    print("Total time taken by the program: " +
          str(round(endProgram - startProgram, 2)) + "s.")

    return mu, E0, E1, objectiveValue, primalResidual, dualResidual
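
A minimal usage sketch (not part of the original listing; the Gaussian boundary
measures and the values of nO, nD and Nit are illustrative assumptions):

import numpy as np

nD = 32
x, y = np.meshgrid(np.linspace(0, 1, nD), np.linspace(0, 1, nD), indexing='ij')
mub0 = np.exp(-((x - 0.25)**2 + (y - 0.25)**2) / 0.01)  # initial boundary measure
mub1 = np.exp(-((x - 0.75)**2 + (y - 0.75)**2) / 0.01)  # final boundary measure
mu, E0, E1, objectiveValue, primalResidual, dualResidual = geodesicBB1D(
    nO=16, nD=nD, Nit=100, mub0=mub0, mub1=mub1)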
Exemplo n.º 46
    def solve_linear_system(self, animate=False):
        x0 = self.initial_guess()

        if animate:
            plt.figure(1)
            plt.clf()
            for c, v, xy in self.dirichlet_bcs:
                plt.annotate(str(v), xy)
            coll = self.grid.plot_cells(values=self.expand(x0))
            coll.set_lw(0)
            coll.set_clim([0, 1])
            plt.axis('equal')
            plt.pause(0.01)

        ctr = itertools.count()

        def plot_progress(xk):
            count = next(ctr)
            log.debug("Count: %d" % count)
            if animate and count % 1000 == 0:
                coll.set_array(self.expand(xk))
                plt.title(str(count))
                plt.pause(0.01)

        # I think that cgs means the matrix doesn't have to be
        # symmetric, which makes boundary conditions easier.
        # With progress shown only every 1000 steps, this takes
        # maybe a minute on a 28k cell grid. But cgs seems to have
        # more convergence problems with pure diffusion.

        if animate:
            coll.set_clim([0, 1])

        maxiter = int(1.5 * self.grid.Ncells())
        code = -1
        if 1:
            C_solved = linalg.spsolve(self.A.tocsr(), self.b)
            code = 0
        elif 1:
            C_solved, code = linalg.cgs(self.A,
                                        self.b,
                                        x0=x0,
                                        callback=plot_progress,
                                        tol=self.solve_tol,
                                        maxiter=maxiter)
        elif 0:
            C_solved, code = linalg.cg(self.A,
                                       self.b,
                                       x0=x0,
                                       callback=plot_progress,
                                       tol=self.tol,
                                       maxiter=maxiter)
        elif 1:
            C_solved, code = linalg.bicgstab(self.A,
                                             self.b,
                                             x0=x0,
                                             callback=plot_progress,
                                             tol=self.solve_tol,
                                             maxiter=maxiter)
        elif 1:
            log.debug("Time integration")
            x = x0
            for i in range(maxiter):
                x = self.A.dot(x)
                plot_progress(x)
        else:

            def print_progress(rk):
                count = next(ctr)
                if count % 1000 == 0:
                    log.debug("count=%d rk=%s" % (count, rk))

            C_solved, code = linalg.gmres(self.A,
                                          self.b,
                                          x0=x0,
                                          tol=self.solve_tol,
                                          callback=print_progress)

        self.C_solved = self.expand(C_solved)
        for c, v, xy in self.dirichlet_bcs:
            self.C_solved[c] = v

        self.code = code

        if animate:
            evenly_spaced = np.zeros(len(C_solved))
            evenly_spaced[np.argsort(C_solved)] = np.arange(len(C_solved))
            coll.set_array(evenly_spaced)
            coll.set_clim([0, self.grid.Ncells()])
            plt.draw()
Exemplo n.º 47
PDE = poisson(geometry=geo,
              bc_dirichlet=bc_dirichlet,
              bc_neumann=bc_neumann,
              AllDirichlet=AllDirichlet,
              metric=Metric)
PDE.assembly()
PDE.solve()

# getting scipy matrix
A = PDE.system.get()

b = np.ones(PDE.size)

print "Using cg."
x = cg(A, b, tol=tol, maxiter=maxiter)

print "Using cgs."
x = cgs(A, b, tol=tol, maxiter=maxiter)

print "Using bicg."
x = bicg(A, b, tol=tol, maxiter=maxiter)

print "Using bicgstab."
x = bicgstab(A, b, tol=tol, maxiter=maxiter)

print "Using gmres."
x = gmres(A, b, tol=tol, maxiter=maxiter)

print "Using splu."
op = splu(A.tocsc())
Exemplo n.º 48
        xCorrect = np.linalg.solve(A, y)

        startT = time.process_time()
        if ALGORITHM == "general":
            x = np.linalg.solve(A, y)
        elif ALGORITHM == "sparse":
            x = la.spsolve(A, y)
        elif ALGORITHM == "biconjugate-gradient-iter":
            x, info = la.bicg(A, y)
            print(x.shape)
            print(A.shape, y.shape)
        elif ALGORITHM == "biconjugate-gradient-stabilized":
            x, info = la.bicgstab(A, y)
        elif ALGORITHM == "conjugate-gradient-iter":
            x, info = la.cg(A, y)
        elif ALGORITHM == "conjugate-gradient-squared":
            x, info = la.cgs(A, y)
        elif ALGORITHM == "generalized-min-res":
            x, info = la.gmres(A, y)
        elif ALGORITHM == "improved-generalized-min-res":
            x, info = la.lgmres(A, y)
        elif ALGORITHM == "min-res":
            x, info = la.minres(A, y)
        elif ALGORITHM == "quasi-min-res":
            x, info = la.qmr(A, y)


        endT = time.process_time()
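
The listing is cut off here; a plausible continuation (an assumption, not the
original code) would report the elapsed time and the error against the dense
reference solution:

        print("{}: {:.4f}s, max error {:.3e}".format(
            ALGORITHM, endT - startT, np.abs(x - xCorrect).max()))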
Exemplo n.º 49
def update_W(m_opts, m_vars):
    # print "Updating W"
    if not m_opts['use_grad']:
        sigma = m_vars['X_batch_T'].dot(
            m_vars['X_batch']) + m_opts['lam_w'] * ssp.eye(
                m_vars['n_features'], format="csr")
        m_vars['sigma_W'] = (
            1 - m_vars['gamma']) * m_vars['sigma_W'] + m_vars['gamma'] * sigma

        x = m_vars['X_batch'].T.dot(m_vars['U_batch'])
        m_vars['x_W'] = (1 -
                         m_vars['gamma']) * m_vars['x_W'] + m_vars['gamma'] * x

    if m_opts[
            'use_cg'] != True:  # For the Ridge regression on W matrix with the closed form solutions
        if ssp.issparse(m_vars['sigma_W']):
            m_vars['sigma_W'] = m_vars['sigma_W'].todense()
        sigma = linalg.inv(
            m_vars['sigma_W'])  # O(N^3) time for N x N matrix inversion
        m_vars['W'] = np.asarray(sigma.dot(m_vars['x_W'])).T
    else:  # For the CG on the ridge loss to calculate W matrix
        if not m_opts['use_grad']:
            # assert m_vars['X_batch'].shape[0] == m_vars['U_batch'].shape[0]
            X = m_vars['sigma_W']
            for i in range(m_opts['n_components']):
                y = m_vars['x_W'][:, i]
                w, info = sp_linalg.cg(X,
                                       y,
                                       x0=m_vars['W'][i, :],
                                       maxiter=m_opts['cg_iters'])
                if info < 0:
                    print "WARNING: sp_linalg.cg info: illegal input or breakdown"
                m_vars['W'][i, :] = w.T
        else:
            ''' Solving X*W' = U '''
            # print "Using grad!"
            my_invert = lambda x: x if x < 1 else 1.0 / x
            l2_norm = lambda x: np.sqrt((x**2).sum())

            def clip_by_norm(x, clip_max):
                x_norm = l2_norm(x)
                if x_norm > clip_max:
                    # print "Clipped!",clip_max
                    x = clip_max * (x / x_norm)
                return x

            lr = m_opts['grad_alpha'] * (
                1.0 + np.arange(m_opts['cg_iters'] * 10))**(-0.9)  #(-0.75)
            try:
                W_old = m_vars['W'].copy()
                tail_norm, curr_norm = 1.0, 1.0
                for iter_idx in range(m_opts['cg_iters'] * 10):
                    grad = m_vars['X_batch_T'].dot(
                        m_vars['X_batch'].dot(m_vars['W'].T) -
                        m_vars['U_batch'])
                    grad = lr[iter_idx] * (grad.T +
                                           m_opts['lam_w'] * m_vars['W'])
                    tail_norm = 0.5 * curr_norm + (1 - 0.5) * tail_norm
                    curr_norm = l2_norm(grad)
                    if curr_norm < 1e-15:
                        return
                    elif iter_idx > 10 and my_invert(
                            np.abs(tail_norm / curr_norm)) > 0.8:
                        # print "Halved!"
                        lr = lr / 2.0

                    m_vars['W'] = m_vars['W'] - clip_by_norm(
                        grad, 1e0)  # Clip by norm

                Delta_W = l2_norm(m_vars['W'] - W_old)
            except FloatingPointError:
                print "FloatingPointError in:"
                print grad
                assert False
Exemplo n.º 50
 def idot(self, x):
     tmp = ssl.cg(self.K + self.Sigmay, x, tol=1e-8, M=self.Mop)
     if tmp[1] > 0:
         print "Warning cg tol not achieved"
     return tmp[0]
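
self.Mop above is assumed to be a preconditioner built elsewhere; a hedged
sketch of one way to construct it from an incomplete LU factorisation (the
helper name build_Mop and the sparsity of K + Sigmay are assumptions):

 def build_Mop(self):
     # Hypothetical: ILU factorisation of the system matrix, wrapped as a
     # LinearOperator so it can be passed as M to scipy's cg.
     ilu = ssl.spilu((self.K + self.Sigmay).tocsc(), drop_tol=1e-4)
     self.Mop = ssl.LinearOperator(self.K.shape, matvec=ilu.solve)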
Exemplo n.º 51
 def solve(self, kper, kstp):
     converged = False
     print('Solving stress period: {0:5d} time step: {1:5d}'.format(kper+1, kstp+1))
     for outer in range(self.outeriterations):
         #--create initial x (x0) from a copy of x
         x0 = np.copy(self.x)
         #--assemble conductance matrix
         self.__assemble()
         #--create sparse matrix for residual calculation and conductance formulation
         self.acsr = csr_matrix((self.a, self.ja, self.ia), shape=(self.neq, self.neq))
         #--save sparse matrix with conductance
         if self.newtonraphson:
             self.ccsr = self.acsr.copy()
         else:
             self.ccsr = self.acsr
         #--calculate initial residual
         #  do not attempt a solution if the initial solution is an order of
         #  magnitude less than rclose
         rmax0 = self.__calculateResidual(x0)
         #if outer == 0 and abs(rmax0) <= 0.1 * self.rclose:
         #    break
         if self.backtracking:
             l2norm0 = np.linalg.norm(self.r)
         if self.newtonraphson:
             self.__assemble(nr=True)
             self.acsr = csr_matrix((self.a, self.ja, self.ia), shape=(self.neq, self.neq))
             b = -self.r.copy()
             if self.headsolution:
                 t = self.acsr.dot(x0)
                 b += t
             else:
                 self.x.fill(0.0)
         else:
             b = self.rhs.copy()
         #--construct the preconditioner
         #M = self.get_preconditioner(fill_factor=3, drop_tol=1e-4)
         M = self.get_preconditioner(fill_factor=3, drop_tol=1e-4)
         #--solve matrix
         info = 0
         if self.newtonraphson:
             self.x[:], info = bicgstab(self.acsr, b, x0=self.x, tol=self.rclose, maxiter=self.inneriterations, M=M)
         else:
             self.x[:], info = cg(self.acsr, b, x0=self.x, tol=self.rclose, maxiter=self.inneriterations, M=M)
         if info < 0:
             raise Exception('illegal input or breakdown in linear solver...')
         #--add upgrade to x0
         #if self.newtonraphson:
         #    if not self.headsolution:
         #        self.x += x0
         #--calculate updated residual
         rmax1 = self.__calculateResidual(self.x)
         #
         if self.bottomflag:
             self.adjusthead(self.x)
         #--back tracking
         if self.backtracking and rmax1 > self.rclose:
             l2norm1 = np.linalg.norm(self.r)
             if l2norm1 > 0.99 * l2norm0:
                 if self.headsolution:
                     dx = self.x - x0
                 else:
                     dx = self.x
                 lv = 0.99
                 for ibk in range(100):
                     self.x = x0 + lv * dx
                     rt = self.__calculateResidual(self.x, reset_ccsr=True)
                     rmax1 = rt
                     l2norm = np.linalg.norm(self.r)
                     if l2norm < 0.90 * l2norm0:
                         break
                     lv *= 0.95
         #--calculate hmax
         hmax = np.abs(self.x - x0).max()
         #--calculate
         if hmax <= self.hclose and abs(rmax1) <= self.rclose:
             print(' Outer Iterations: {0}'.format(outer+1))
             converged = True
             self.__calculateQNodes(self.x)
             #print self.cellQ[4], self.cellQ[-4]
             break
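
get_preconditioner is not shown in this listing; a plausible sketch matching
the fill_factor/drop_tol call signature used above (an assumption, not the
original implementation):

 def get_preconditioner(self, fill_factor=3, drop_tol=1e-4):
     # Hypothetical: incomplete LU of the assembled conductance matrix,
     # exposed as a LinearOperator usable as M in cg/bicgstab.
     ilu = spilu(self.acsr.tocsc(), fill_factor=fill_factor, drop_tol=drop_tol)
     return LinearOperator(self.acsr.shape, matvec=ilu.solve)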
Exemplo n.º 52
def NormalEquationsInversion(Op, Regs, data, dataregs=None, epsI=0,
                             epsRs=None, x0=None,
                             returninfo=False, **kwargs_cg):
    r"""Inversion of normal equations.

    Solve the regularized normal equations for a system of equations given the operator ``Op`` and a
    list of regularization terms ``Regs``.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    Regs : :obj:`list`
        Regularization operators
    data : :obj:`numpy.ndarray`
        Data
    dataregs : :obj:`list`
        Regularization data
    epsI : :obj:`float`
        Tikhonov damping
    epsRs : :obj:`list`
         Regularization dampings
    x0 : :obj:`numpy.ndarray`
        Initial guess
    returninfo : :obj:`bool`
        Return info of CG solver
    **kwargs_cg
        Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.cg` solver

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model.
    istop : :obj:`int`
        Convergence information:

        ``0``: successful exit

        ``>0``: convergence to tolerance not achieved, number of iterations

        ``<0``: illegal input or breakdown

    Notes
    -----
    Solve the following normal equations for a system of regularized equations
    given the operator :math:`\mathbf{Op}`, a list of regularization terms
    :math:`\mathbf{R_i}`, the data :math:`\mathbf{d}` and regularization damping
    factors :math:`\epsilon_I` and :math:`\epsilon_{{R}_i}`:

    .. math::
        ( \mathbf{Op}^T\mathbf{Op} + \sum_i \epsilon_{{R}_i}^2
        \mathbf{R}_i^T \mathbf{R}_i + \epsilon_I^2 \mathbf{I} )  \mathbf{x}
        = \mathbf{Op}^T \mathbf{y} +  \sum_i \epsilon_{{R}_i}^2
        \mathbf{R}_i^T \mathbf{d}_{R_i}

    """
    if dataregs is None:
        dataregs = [np.zeros(Reg.shape[0]) for Reg in Regs]

    if epsRs is None:
        epsRs = [1] * len(Regs)

    # Normal equations
    y_normal = Op.H * data
    if Regs is not None:
        for epsR, Reg, datareg in zip(epsRs, Regs, dataregs):
            y_normal += epsR ** 2 * Reg.H * datareg
    Op_normal = Op.H * Op
    if epsI > 0:
        Op_normal += epsI ** 2 * MatrixMult(np.eye(Op.shape[1]))
    if Regs is not None:
        for epsR, Reg in zip(epsRs, Regs):
            Op_normal += epsR ** 2 * Reg.H * Reg

    # CG solver
    if x0 is not None:
        y_normal = y_normal - Op_normal*x0
    xinv, istop = cg(Op_normal, y_normal, **kwargs_cg)
    if x0 is not None:
        xinv = x0 + xinv

    if returninfo:
        return xinv, istop
    else:
        return xinv
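
A hedged usage sketch (the pylops operators and the damping value are
illustrative assumptions, not part of the original listing):

import numpy as np
from pylops import MatrixMult, SecondDerivative

Op = MatrixMult(np.random.normal(0., 1., (20, 10)))
Regs = [SecondDerivative(10)]
x = np.arange(10, dtype=float)
data = Op * x
# maxiter is forwarded to scipy.sparse.linalg.cg via **kwargs_cg
xinv = NormalEquationsInversion(Op, Regs, data, epsRs=[1e-2], maxiter=200)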
Exemplo n.º 53
def main(parameters):
    r"""Calculates the ground-state of the system. If the system is perturbed, the time evolution of 
    the perturbed system is then calculated.

    Parameters
    ----------
    parameters : object
        Parameters object

    Returns
    -------
    object
        Results object
    """
    # Array initialisations
    pm = parameters
    string = 'NON: constructing arrays'
    pm.sprint(string, 1, newline=True)
    pm.setup_space()

    # Construct the kinetic energy matrix
    K = construct_K(pm)

    # Construct the Hamiltonian matrix
    H = np.copy(K)
    H[0, :] += pm.space.v_ext[:]

    # Solve the Schroedinger equation
    string = 'NON: calculating the ground-state density'
    pm.sprint(string, 1)
    energies, wavefunctions = spla.eig_banded(H, lower=True)

    # Normalise the wavefunctions
    wavefunctions /= np.sqrt(pm.space.delta)

    # Calculate the ground-state density
    density = np.sum(np.absolute(wavefunctions[:, :pm.sys.NE])**2, axis=1)

    # Calculate the ground-state energy
    energy = np.sum(energies[0:pm.sys.NE])
    string = 'NON: ground-state energy = {:.5f}'.format(energy)
    pm.sprint(string, 1)

    # Save the quantities to file
    results = rs.Results()
    results.add(pm.space.v_ext, 'gs_non_vxt')
    results.add(density, 'gs_non_den')
    results.add(energy, 'gs_non_E')
    results.add(wavefunctions.T, 'gs_non_eigf')
    results.add(energies, 'gs_non_eigv')
    if (pm.run.save):
        results.save(pm)

    # Propagate through real time
    if (pm.run.time_dependence):

        # Print to screen
        string = 'NON: constructing arrays'
        pm.sprint(string, 1)

        # Construct the Hamiltonian matrix
        H = np.copy(K)
        H[0, :] += pm.space.v_ext[:]
        H[0, :] += pm.space.v_pert[:]

        # Construct the sparse matrices used in the Crank-Nicolson method
        A = construct_A(pm, H)
        C = 2.0 * sps.identity(pm.space.npt, dtype=complex) - A

        # Construct the time-dependent density array
        density = np.zeros((pm.sys.imax, pm.space.npt), dtype=float)

        # Save the ground-state
        for j in range(pm.sys.NE):
            density[0, :] += np.absolute(wavefunctions[:, j])**2

        # Print to screen
        string = 'NON: real time propagation'
        pm.sprint(string, 1)

        # Loop over each electron
        for n in range(pm.sys.NE):

            # Single-electron wavefunction
            wavefunction = wavefunctions[:, n].astype(complex)

            # Perform real time iterations
            for i in range(1, pm.sys.imax):

                # Construct the vector b
                b = C * wavefunction

                # Solve Ax=b
                wavefunction, info = spsla.cg(A,
                                              b,
                                              x0=wavefunction,
                                              tol=pm.non.rtol_solver)

                # Normalise the wavefunction
                norm = npla.norm(wavefunction) * np.sqrt(pm.space.delta)
                wavefunction /= norm
                norm = npla.norm(wavefunction) * np.sqrt(pm.space.delta)
                string = 'NON: t = {:.5f}, normalisation = {}'.format(
                    i * pm.sys.deltat, norm)
                pm.sprint(string, 1, newline=False)

                # Calculate the density
                density[i, :] += np.absolute(wavefunction[:])**2

        # Calculate the current density
        current_density = calculate_current_density(pm, density)

        # Save the quantities to file
        results.add(density, 'td_non_den')
        results.add(current_density, 'td_non_cur')
        results.add(pm.space.v_ext + pm.space.v_pert, 'td_non_vxt')
        if (pm.run.save):
            results.save(pm)

    return results
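
construct_K and construct_A are defined elsewhere in the module; below is a
hypothetical sketch of construct_A, assuming the Crank-Nicolson scheme implied
by C = 2*I - A above (i.e. A = I + i*dt/2 * H, with H stored in scipy's lower
banded form):

def construct_A(pm, H):
    # Hypothetical: expand the lower-banded Hamiltonian into a sparse matrix,
    # then form the Crank-Nicolson matrix A = I + i*dt/2 * H.
    npt = pm.space.npt
    H_sparse = sps.lil_matrix((npt, npt), dtype=complex)
    for d in range(H.shape[0]):
        diag = H[d, :npt - d]
        H_sparse.setdiag(diag, -d)     # d-th sub-diagonal
        if d > 0:
            H_sparse.setdiag(diag, d)  # symmetric counterpart
    return (sps.identity(npt, dtype=complex)
            + 0.5j * pm.sys.deltat * H_sparse.tocsr()).tocsr()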
Exemplo n.º 54
 def cg(self, A, F, uh):
     counter = IterationCounter()
     uh.T.flat, info = cg(A, F.T.flat, tol=1e-8, callback=counter)
     print("Convergence info:", info)
     print("Number of iteration of pcg:", counter.niter)
     return uh
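
IterationCounter is not defined in this snippet; a minimal sketch of a
compatible callback object (an assumption):

class IterationCounter:
    """Counts how many times the solver invokes its callback."""
    def __init__(self):
        self.niter = 0

    def __call__(self, xk):
        self.niter += 1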
Exemplo n.º 55
E = np.zeros(vec_shape); E[0] = 1. # set macroscopic loading

# PROJECTION IN FOURIER SPACE #############################################
Ghat = np.zeros((ndim,ndim)+ N) # zero initialize
freq = [np.arange(-(N[ii]-1)/2.,+(N[ii]+1)/2.) for ii in range(ndim)]
for i,j in itertools.product(range(ndim),repeat=2):
    for ind in itertools.product(*[range(n) for n in N]):
        q = np.empty(ndim)
        for ii in range(ndim):
            q[ii] = freq[ii][ind[ii]]  # frequency vector
        if not q.dot(q) == 0:          # zero freq. -> mean
            Ghat[i,j][ind] = -(q[i]*q[j])/(q.dot(q))

# OPERATORS ###############################################################
dot21  = lambda A,v: np.einsum('ij...,j...  ->i...',A,v)
fft    = lambda V: np.fft.fftshift(np.fft.fftn (np.fft.ifftshift(V),N))
ifft   = lambda V: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(V),N))
G_fun  = lambda V: np.real(ifft(dot21(Ghat,fft(V)))).reshape(-1)
A_fun  = lambda v: dot21(A,v.reshape(vec_shape))
GA_fun = lambda v: G_fun(A_fun(v))

# CONJUGATE GRADIENT SOLVER ###############################################
b = -GA_fun(E) # right-hand side
e, _=sp.cg(A=sp.LinearOperator(shape=(ndof, ndof), matvec=GA_fun, dtype='float'), b=b)

aux = e+E.reshape(-1)
print('auxiliary field for macroscopic load E = {1}:\n{0}'.format(e.reshape(vec_shape),
                                                                  format((1,)+(ndim-1)*(0,))))
print('homogenised properties A11 = {}'.format(np.inner(A_fun(aux).reshape(-1), aux)/prodN))
print('END')
Exemplo n.º 56
    # first iteration residual: distribute "barF" over grid using "K4"
    b = -G_K_dF((barF - barF_t)[:2, :2])
    F += barF - barF_t

    # parameters for Newton iterations: normalization and iteration counter
    Fn = np.linalg.norm(F)
    iiter = 0

    # iterate as long as the iterative update does not vanish
    while True:

        # solve linear system using the Conjugate Gradient iterative solver
        dFm, i = sp.cg(
            tol=1.e-8,
            A=sp.LinearOperator(shape=(2 * 2 * Nx * Ny, 2 * 2 * Nx * Ny),
                                matvec=G_K_dF,
                                dtype='float'),
            b=b,
            maxiter=1000,
        )
        if i: raise IOError('CG-solver failed')

        # apply iterative update to (3-D) DOFs
        F[:2, :2] += dFm.reshape(2, 2, Nx, Ny)

        # compute residual stress and tangent, convert to residual
        P, P_2, K4_2, be, ep = constitutive(F, F_t, be_t, ep_t)
        b = -G(P_2)

        # check for convergence, print convergence info to screen
        print('{0:10.2e}'.format(np.linalg.norm(dFm) / Fn))
        if np.linalg.norm(dFm) / Fn < 1.e-5 and iiter > 0: break
Exemplo n.º 57
    def Solve(self, A, b, reuse_factorisation=False):
        """Solves the linear system of equations"""

        if not issparse(A):
            raise ValueError("Linear system is not of sparse type")

        if A.shape == (0, 0) and b.shape[0] == 0:
            warn("Empty linear system!!! Nothing to solve!!!")
            return np.copy(b)

        self.reuse_factorisation = reuse_factorisation
        if self.solver_type != "direct" and self.reuse_factorisation is True:
            warn(
                "Re-using factorisation for non-direct solvers is not possible. The pre-conditioner is going to be reused instead"
            )

        # DECIDE IF THE SOLVER TYPE IS APPROPRIATE FOR THE PROBLEM
        if self.switcher_message is False and self.dont_switch_solver is False:
            # PREFER PARDISO OR MUMPS OVER AMG IF AVAILABLE
            if self.has_pardiso:
                self.solver_type = "direct"
                self.solver_subtype = "pardiso"
            elif self.has_mumps:
                self.solver_type = "direct"
                self.solver_subtype = "mumps"
            elif b.shape[0] > 100000 and self.has_amg_solver:
                self.solver_type = "amg"
                self.solver_subtype = "gmres"
                print(
                    'Large system of equations. Switching to algebraic multigrid solver'
                )
                self.switcher_message = True
            # elif mesh.points.shape[0]*MainData.nvar > 50000 and MainData.C < 4:
            # self.solver_type = "direct"
            # self.solver_subtype = "MUMPS"
            # print 'Large system of equations. Switching to MUMPS solver'
            elif b.shape[
                    0] > 70000 and self.geometric_discretisation == "hex" and self.has_amg_solver:
                self.solver_type = "amg"
                self.solver_subtype = "gmres"
                print(
                    'Large system of equations. Switching to algebraic multigrid solver'
                )
                self.switcher_message = True
            else:
                self.solver_type = "direct"
                self.solver_subtype = "umfpack"

        if self.solver_type == 'direct':
            # CALL DIRECT SOLVER
            if self.solver_subtype == 'umfpack' and self.has_umfpack:
                if A.dtype != np.float64:
                    A = A.astype(np.float64)

                t_solve = time()
                if self.solver_context_manager is None:
                    if self.reuse_factorisation is False:
                        sol = spsolve(A,
                                      b,
                                      permc_spec='MMD_AT_PLUS_A',
                                      use_umfpack=True)
                        # from scikits import umfpack
                        # sol = umfpack.spsolve(A, b)

                        # SuperLU
                        # from scipy.sparse.linalg import splu
                        # lu = splu(A.tocsc())
                        # sol = lu.solve(b)
                    else:
                        from scikits import umfpack
                        lu = umfpack.splu(A)
                        sol = lu.solve(b)
                        self.solver_context_manager = lu
                else:
                    sol = self.solver_context_manager.solve(b)

                # print("UMFPack solver time is {}".format(time() - t_solve))

            elif self.solver_subtype == 'mumps' and self.has_mumps:

                from mumps.mumps_context import MUMPSContext
                t_solve = time()
                A = A.tocoo()
                # False means non-symmetric - Do not change it to True. True means symmetric pos def
                # which is not the case for electromechanics
                if self.solver_context_manager is None:
                    context = MUMPSContext(
                        (A.shape[0], A.row, A.col, A.data, False),
                        verbose=False)
                    context.analyze()
                    context.factorize()
                    sol = context.solve(rhs=b)

                    if self.reuse_factorisation:
                        self.solver_context_manager = context
                else:
                    sol = self.solver_context_manager.solve(rhs=b)

                print("MUMPS solver time is {}".format(time() - t_solve))

                return sol

            elif self.solver_subtype == "pardiso" and self.has_pardiso:
                # NOTE THAT THIS PARDISO SOLVER AUTOMATICALLY SAVES THE RIGHT FACTORISATION
                import pypardiso
                from pypardiso.scipy_aliases import pypardiso_solver as ps
                A = A.tocsr()
                t_solve = time()
                sol = pypardiso.spsolve(A, b)
                if self.reuse_factorisation is False:
                    ps.remove_stored_factorization()
                    ps.free_memory()
                print("Pardiso solver time is {}".format(time() - t_solve))

            else:
                # FOR 'super_lu'
                if A.dtype != np.float64:
                    A = A.astype(np.float64)
                A = A.tocsc()

                t_solve = time()
                if self.solver_context_manager is None:
                    if self.reuse_factorisation is False:
                        sol = spsolve(A,
                                      b,
                                      permc_spec='MMD_AT_PLUS_A',
                                      use_umfpack=True)
                    else:
                        lu = splu(A)
                        sol = lu.solve(b)
                        self.solver_context_manager = lu
                else:
                    sol = self.solver_context_manager.solve(b)

                # print("Linear solver time is {}".format(time() - t_solve))

        elif self.solver_type == "iterative":
            t_solve = time()
            # CALL ITERATIVE SOLVER
            if self.solver_subtype == "gmres":
                sol = gmres(A, b, tol=self.iterative_solver_tolerance)[0]
            if self.solver_subtype == "lgmres":
                sol = lgmres(A, b, tol=self.iterative_solver_tolerance)[0]
            elif self.solver_subtype == "bicgstab":
                sol = bicgstab(A, b, tol=self.iterative_solver_tolerance)[0]
            else:
                sol = cg(A, b, tol=self.iterative_solver_tolerance)[0]

            # PRECONDITIONED ITERATIVE SOLVER - CHECK
            # P = spilu(A.tocsc(), drop_tol=1e-5)
            # M_x = lambda x: P.solve(x)
            # m = A.shape[1]
            # n = A.shape[0]
            # M = LinearOperator((n * m, n * m), M_x)
            # sol = cg(A, b, tol=self.iterative_solver_tolerance, M=M)[0]
            print("Iterative solver time is {}".format(time() - t_solve))

        elif self.solver_type == "amg":
            if self.has_amg_solver is False:
                raise ImportError(
                    'Algebraic multigrid solver was not found. Please install it using "pip install pyamg"'
                )
            from pyamg import ruge_stuben_solver, rootnode_solver, smoothed_aggregation_solver

            if A.dtype != b.dtype:
                # DOWN-CAST
                b = b.astype(A.dtype)

            if not isspmatrix_csr(A):
                A = A.tocsr()

            t_solve = time()

            if self.iterative_solver_tolerance > 1e-9:
                self.iterative_solver_tolerance = 1e-10

            # AMG METHOD
            amg_func = None
            if self.preconditioner_type == "smoothed_aggregation":
                # THIS IS TYPICALLY FASTER BUT THE TOLERANCE NEEDS TO BE SMALLER, TYPICALLY 1e-10
                amg_func = smoothed_aggregation_solver
            elif self.preconditioner_type == "ruge_stuben":
                amg_func = ruge_stuben_solver
            elif self.preconditioner_type == "rootnode":
                amg_func = rootnode_solver
            else:
                amg_func = rootnode_solver

            ml = amg_func(A)
            # ml = amg_func(A, smooth=('energy', {'degree':2}), strength='evolution' )
            # ml = amg_func(A, max_levels=3, diagonal_dominance=True)
            # ml = amg_func(A, coarse_solver=spsolve)
            # ml = amg_func(A, coarse_solver='cholesky')

            if self.solver_context_manager is None:
                # M = ml.aspreconditioner(cycle='V')
                M = ml.aspreconditioner()
                if self.reuse_factorisation:
                    self.solver_context_manager = M
            else:
                M = self.solver_context_manager

            # EXPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
            # sol, info = bicgstab(A, b, M=M, tol=self.iterative_solver_tolerance)
            # sol, info = cg(A, b, M=M, tol=self.iterative_solver_tolerance)
            # sol, info = gmres(A, b, M=M, tol=self.iterative_solver_tolerance)

            # IMPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
            residuals = []
            sol = ml.solve(b,
                           tol=self.iterative_solver_tolerance,
                           accel=self.solver_subtype,
                           residuals=residuals)
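            # "residuals" now holds the residual-norm history appended by
            # pyamg; a rough post-solve check (a sketch -- pyamg's tol is
            # relative to the initial residual):
            # if residuals and residuals[-1] / residuals[0] > self.iterative_solver_tolerance:
            #     warn("AMG accelerator did not reach the requested tolerance")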

            print("AMG solver time is {}".format(time() - t_solve))

        elif self.solver_type == "petsc" and self.has_petsc:
            if self.solver_subtype != "gmres" and self.solver_subtype != "minres" and self.solver_subtype != "cg":
                self.solver_subtype == "cg"
            if self.iterative_solver_tolerance < 1e-9:
                self.iterative_solver_tolerance = 1e-7

            from petsc4py import PETSc
            t_solve = time()
            pA = PETSc.Mat().createAIJ(size=A.shape,
                                       csr=(A.indptr, A.indices, A.data))
            pb = PETSc.Vec().createWithArray(b)

            ksp = PETSc.KSP()
            ksp.create(PETSc.COMM_WORLD)
            # ksp.create()
            ksp.setType(self.solver_subtype)
            ksp.setTolerances(atol=self.iterative_solver_tolerance,
                              rtol=self.iterative_solver_tolerance)
            # INCOMPLETE CHOLESKY PRECONDITIONER
            ksp.getPC().setType('icc')

            # SOLUTION VECTOR (PETSC USES IT AS THE INITIAL GUESS ONLY IF A
            # NONZERO INITIAL GUESS IS ENABLED ON THE KSP)
            psol = PETSc.Vec().createWithArray(np.ones(b.shape[0]))
            # SOLVE
            ksp.setOperators(pA)
            ksp.setFromOptions()
            ksp.solve(pb, psol)
            sol = psol.getArray()
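            # optional diagnostics (petsc4py KSP API):
            # reason = ksp.getConvergedReason()   # > 0 converged, < 0 diverged
            # its = ksp.getIterationNumber()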

            # print('Converged in', ksp.getIterationNumber(), 'iterations.')
            print("Petsc linear iterative solver time is {}".format(time() -
                                                                    t_solve))

        else:
            warn(
                "{} solver is not available. Falling back to the default direct solver"
                .format(self.solver_type))
            # FOR 'super_lu'
            if A.dtype != np.float64:
                A = A.astype(np.float64)
            A = A.tocsc()

            if self.solver_context_manager is None:
                if self.reuse_factorisation is False:
                    sol = spsolve(A,
                                  b,
                                  permc_spec='MMD_AT_PLUS_A',
                                  use_umfpack=True)
                else:
                    lu = splu(A)
                    sol = lu.solve(b)
                    self.solver_context_manager = lu
            else:
                sol = self.solver_context_manager.solve(b)

        return sol
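
# --- usage sketch (an addition, not from the original source) ---------------
# A minimal illustration of driving the dispatcher above; the class name
# "FEMSolver" and its constructor are hypothetical stand-ins for whatever
# object carries solver_type / solver_subtype / iterative_solver_tolerance.
#
# import numpy as np
# from scipy.sparse import diags
#
# A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(100, 100)).tocsr()
# b = np.ones(100)
# solver = FEMSolver(solver_type="iterative", solver_subtype="cg",
#                    iterative_solver_tolerance=1e-8)
# sol = solver.solve(A, b)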
Exemplo n.º 58
0
    item.M[i, i + 1] = -item.me * item.xa / item.le
    item.M[i + 1, i] = -item.me * item.xa / item.le
    item.M[i + 1, i + 1] = item.me * item.xa / item.le

for item in element_table:
    K += item.M

# penalty method: take the largest-magnitude entry of K to scale the penalty
C = np.abs(K).max()

for item in constrained_nodes:
    K[item, item] += C * 10**8

F = np.array([item.f for item in node_table])

# solving matrix
D = cg(K, F)[0]

# adding displacement to class variable
for i, item in enumerate(D):
    # zero out numerically negligible displacements
    node_table[i].d = item if abs(item) > 10**-5 else 0

# calculating strain and stress
for item in element_table:
    item.eta = (-item.n1.d + item.n2.d) / (item.n2.x - item.n1.x)
    item.sigma = item.me * item.eta

print("NodeIndex        Displacement")
fw.write("NodeIndex        Displacement\n")

for item in node_table:
    print("{:9} {:19.4f}".format(item.num + 1, item.d))
Exemplo n.º 59
0
 def BlindSolver(self,y,reg_inf=10**-6,reg_sup=10**-3,\
            verbose=True,warning=False,export=False):
     """
       Solve the inverse problem for image y that may or may not be noisy,
       and the regularisation paramter alpha is blindly found.
       
          Parameters
          ----------
                y   (numpy.array): image of x or noisy image of x, size nx
                reg_inf   (float): lower boundary for regularization parameter
                reg_sup   (float): higher boundary for regularization parameter
                verbose    (bool): if True, print statistic and plot reconstructed function
                warning    (bool): if True, print warning if the regularization parameter saturates
                export     (bool): export datas
         Retruns
         ----------
              (numpy.array): solution of the regularized inverse problem, size nx
     """
     # step 1 : estimating delta
     delta = self.nx * np.linalg.norm(y[:20]) / 20
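      # (heuristic: the leading 20 samples of y are apparently treated as a
      #  noise-only window, so their norm scaled by nx estimates the noise
      #  level -- a reading of the code, not stated in the source)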
     reg = 0
     error_compare = delta
      # step 2 : loop over alpha to find the best regularization candidate
      for alpha in np.linspace(reg_inf, reg_sup, 10000):
          # step 3 : inversion
          if self.resol == 'cg':
              xd, _ = cg(self.tTT + alpha * self.tDD,
                         np.transpose(self.T).dot(y))
          elif self.resol == 'mycg':
              xd, _ = Conjugate_grad(self.tTT + alpha * self.tDD,
                                     np.transpose(self.T).dot(y))
          else:
              xd = np.linalg.inv(self.tTT + alpha * self.tDD).dot(
                  np.transpose(self.T).dot(y))
         # step 4 : error computation
         Txd = self.T.dot(xd)
         error = np.linalg.norm(Txd - y)
         if error < error_compare:
             error_compare = error
             reg = alpha
             xadp = xd.copy()
     # step 5 : alert and statistics
     # warning
     if warning:
         if (reg == reg_inf):
             print("noise={:.3e}".format(delta),\
                   "inf=",reg_inf,", reg=",reg)
             print("Wrong regularization parameter, too high")
             print("==========================================")
         elif (reg == reg_sup):
             print("noise={:.3e}".format(delta),\
                   "sup=",reg_sup,", reg=",reg)
             print("Wrong regularization parameter, too low")
             print("==========================================")
     # verbose and plots
     if verbose:
         print("delta={:.3e}, inf={:.3e}, sup={:.3e}, reg={:.3e}".format(\
                  delta,reg_inf,reg_sup,reg))
         t = np.linspace(0, 1, self.nx)
         plt.figure(figsize=(7, 4))
         plt.subplot(121)
         plt.plot(t, y)
         plt.subplot(122)
         plt.plot(t, xadp)
         plt.show()
     # export
     if export:
         if self.kern == True:
             kern = 'kernel'
         else:
             kern = ''
         t = np.linspace(0, 1, self.nx)  # recomputed so export works even when verbose is False
         Export(t, xadp, self.folder,
                "pred{}{}".format(self.a, self.p) + kern)
     # step 6 : return
     return xadp, delta
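
# --- Tikhonov step in isolation (a sketch, not from the source) -------------
# Step 3 above solves the regularised normal equations
#     (T^T T + alpha * D^T D) x = T^T y
# for each candidate alpha; tTT / tDD below play the roles of the precomputed
# self.tTT / self.tDD, with random stand-in data.
import numpy as np
from scipy.sparse.linalg import cg

rng = np.random.default_rng(0)
T = rng.standard_normal((30, 30))
y = rng.standard_normal(30)
tTT, tDD = T.T @ T, np.eye(30)          # identity regulariser for the sketch
alpha = 1e-3
xd, _ = cg(tTT + alpha * tDD, T.T @ y)  # SPD system, so CG applies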
Exemplo n.º 60
0
 def _solve(self, A, b):
     if self.solver == "cg":
         x, info = cg(A, b, tol=self.tol)
     elif self.solver == "dense":
         # note: sym_pos is deprecated in recent SciPy; assume_a='pos' is the modern spelling
         x = solve(A, b, sym_pos=True)
     else:
         raise ValueError("Unknown solver: %r" % self.solver)
     return x
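
# --- usage sketch for _solve (the owning class is not shown; "Estimator"
# below is a hypothetical stand-in exposing .solver and .tol) ----------------
# import numpy as np
# est = Estimator(solver="dense", tol=1e-8)
# A = np.array([[4.0, 1.0], [1.0, 3.0]])   # symmetric positive definite
# b = np.array([1.0, 2.0])
# x = est._solve(A, b)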