Example #1
 def time_solve(self, n, solver):
     if solver == 'dense':
         linalg.solve(self.P_dense, self.b)
     elif solver == 'cg':
         cg(self.P_sparse, self.b)
     elif solver == 'minres':
         minres(self.P_sparse, self.b)
     elif solver == 'spsolve':
         spsolve(self.P_sparse, self.b)
     else:
         raise ValueError('Unknown solver: %r' % solver)
Example #2
 def time_solve(self, n, solver):
     if solver == 'dense':
         linalg.solve(self.P_dense, self.b)
     elif solver == 'cg':
         cg(self.P_sparse, self.b)
     elif solver == 'minres':
         minres(self.P_sparse, self.b)
     elif solver == 'spsolve':
         spsolve(self.P_sparse, self.b)
     else:
         raise ValueError('Unknown solver: %r' % solver)
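Examples #1 and #2 are identical benchmark methods and assume a fixture that defines self.P_dense, self.P_sparse, and self.b elsewhere. A minimal, self-contained sketch of the same minres call, with a made-up symmetric positive definite test matrix (the setup here is illustrative, not part of the original benchmark):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import minres

n = 1000
# 1-D Laplacian: symmetric positive definite, so MINRES (and CG) apply
P_sparse = sparse.diags([2.0, -1.0, -1.0], [0, -1, 1], shape=(n, n), format='csr')
P_dense = P_sparse.toarray()
b = np.ones(n)

x, info = minres(P_sparse, b)  # info == 0 signals convergence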
Example #3
    def solve(self, b, maxiter=1000, tol=1.e-10):

        if self.method == 'Dense':
            from scipy.linalg import solve
            return solve(self.mat, b)
        else:
            from scipy.sparse.linalg import gmres, aslinearoperator, minres
            P = self.P
            Aop = aslinearoperator(self)

            residual = Residual()

            if P is not None:
                x, info = gmres(Aop,
                                b,
                                tol=tol,
                                restart=30,
                                maxiter=maxiter,
                                callback=residual,
                                M=P)
            else:
                x, info = minres(Aop,
                                 b,
                                 tol=tol,
                                 maxiter=maxiter,
                                 callback=residual)
            self.solvmatvecs += residual.itercount()

            if self.verbose:
                print("Number of iterations is %g and status is %g" %
                      (residual.itercount(), info))

        return x
Example #4
    def _solve_sym_nonposdef(self, Q, q):
        # since Q is indefinite, i.e., the function is linear along the eigenvectors
        # corresponding to the null eigenvalues, the system may have no solution, so we
        # choose the one that minimizes the residual, i.e., the least-squares solution;
        # see https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html#solving-linear-problems

        # numerically suboptimal: does not exploit the symmetry of Q; waiting for `symmlq` in scipy
        if self.sym_nonposdef_solver == 'lsqr':

            x = lsqr(Q, -q)[0]

        else:

            # `min ||Ax - b||` is formally equivalent to solving the linear system:
            #                           A^T A x = A^T b

            Q, q = np.inner(Q, Q), Q.T.dot(q)

            if self.sym_nonposdef_solver == 'minres':

                x = minres(Q, -q)[0]

            else:

                raise TypeError(
                    f'{self.sym_nonposdef_solver} is not an allowed solver, '
                    f'choose one of `minres` or `lsqr`')

        return x
Example #5
    def gradient_descent(self, x, y, w, l, hv):

        # fix number of steps
        for i in range(100):
            max_l = np.fmax(0, l)

            # compute objective function value
            obj = np.sum(max_l**2) / 2 + self.lmbda * w.dot(w) / 2

            # compute gradient
            grad = self.lmbda * w - np.append([np.dot(max_l * y, x)],
                                              [np.sum(max_l * y)])

            # perform line search for optimal w (normal to hyperplane)
            sv = np.where(l > 0)[0]

            # vector where optimal solution is located
            vec, info = linalg.minres(hv, -grad)
            x_d = x.dot(vec[0:-1]) + vec[-1]
            w_d = self.lmbda * w[0:-1].dot(vec[0:-1])
            d_d = self.lmbda * vec[0:-1].dot(vec[0:-1])

            t = 0
            l_0 = l
            for i in range(1000):
                l_0 = l - t * (y * x_d)
                sv = np.where(l_0 > 0)[0]

                g = w_d + t * d_d - (l_0[sv] * y[sv]).dot(x_d[sv])
                h = d_d + x_d[sv].dot(x_d[sv])
                t -= g / h

            w += t * vec
            if -vec.dot(grad) < 0.0000001 * obj:
                break
Example #6
    def _solve_lin(self):
        'Auxiliary function: solve the linear system exactly'
        # Linear equation coefficients
        Lhs, rhs, x0 = self._get_lineq()
        # Set x[I], y, czl, czu by solving linear equation
        # xy = cholmod.splinsolve(Lhs,rhs)

        self.CG_r0 = Lhs * x0 - rhs

        # Convert to numpy/scipy matrix
        npLhs = cvxopt_to_numpy_matrix(Lhs)
        nprhs = cvxopt_to_numpy_matrix(matrix(rhs))
        npx0 = cvxopt_to_numpy_matrix(x0)

        # Solve the linear system
        collector = Iter_collect(tol=self.option['ResTol'])
        #        collector = Iter_collect()
        cg = minres(npLhs, nprhs, npx0, callback=collector)
        xy = numpy_to_cvxopt_matrix(cg[0])
        self.cgiter = collector.iter
        self.CG_r = Lhs * numpy_to_cvxopt_matrix(collector.x) - rhs

        nI = len(self.I)
        ny = self.QP.numeq
        ncL = len(self.cAL)
        ncU = len(self.cAU)
        self.x[self.I] = xy[0:nI]
        if xy[nI:nI + ny].size[1] == 1:
            self.y = xy[nI:nI + ny]

        if self.czl.size[0] > 0:
            self.czl[self.cAL] = xy[nI + ny:nI + ny + ncL]
            self.czu[self.cAU] = xy[nI + ny + ncL:]
Example #7
    def rvs(self, b, omega, tau, random_state):
        """Generate a random draw from this distribution."""
        eps = random_state.standard_normal(self._n_plus_k)
        rnorm1 = np.sqrt(omega) * eps[:self._n]
        rnorm2 = self._eigen @ (sqrt(tau) * eps[self._n:])
        out = b + rnorm1 + rnorm2

        block_prec = self._block_Q.copy()
        block_prec.data = tau * block_prec.data
        diag_vec = np.tile(omega, 2)
        block_prec.setdiag(block_prec.diagonal() + diag_vec)

        self._rhs[:self._n] = out
        # Iterative solvers are efficient at solving sparse large systems.
        xz, fail = minres(block_prec, self._rhs, x0=self._guess)
        # update starting guess for next call to the function
        self._guess = xz

        if fail:
            raise RuntimeError('MINRES solver did not converge!')

        x = xz[:self._n]
        z = xz[self._n:]

        ensure_sums_to_zero(x, z, out)

        return out
Example #8
def estimate_rhoB(adjacency_matrix):
    print("Tuning rhoB estimation")
    degrees = np.asarray(adjacency_matrix.sum(axis=1), dtype=np.float64).flatten()
    guessForFirstEigen = (degrees ** 2).mean() / degrees.mean() - 1
    errtol = 1e-2
    maxIter = 10

    err = 1
    iteration = 0
    rhoB = guessForFirstEigen
    print("Initial guess of rhoB is %f" % rhoB)
    while err > errtol and iteration < maxIter:
        iteration += 1
        print("Building matrices")
        BH = build_weighted_bethe_hessian(adjacency_matrix, rhoB)
        BHprime = build_weighted_bethe_hessian_derivative(adjacency_matrix, rhoB)

        sigma = 0
        op_inverse = lambda v: minres(BH, v, tol=1e-5)[0]
        OPinv = LinearOperator(matvec=op_inverse, shape=adjacency_matrix.shape, dtype=np.float64)

        print("Solving the eigenproblem")
        mu, x = eigsh(A=BH, M=BHprime, k=1, which='LM', sigma=sigma, OPinv=OPinv)
        mu = mu[0]
        print("mu is %f" % mu)
        err = abs(mu) / rhoB
        rhoB -= mu
        print("Iteration %d, updated value of rhoB %f, relative error %f" % (iteration, rhoB, err))

    return rhoB
Example #9
def izracun_najbrzeg_silaska(A, b, x0):
    x = ln.minres(A, b, x0, 0.0, 1e-8, None, None, neka_vrijednost, 1)
    x = x[0]
    print("\nThe result is\n")
    print(x, '\n')

    return x
Example #10
def eigPsi_0(H, Dim, L, psi0_flag, sigma=None):
    """Compute the initial state as the groundstate of the Hamiltonian or a middle eigenstate
    Args:
        H(2d sparse matrix)                 = Hamiltonian matrix in sparse form
        Dim(int)                            = Hilbert space dimension
        L(int)                              = Size of the chain
        psi0_flag(int)                      = Flag for initial state (0 ---> GS, 1 ---> Middle state)
        sigma(float)                        = Target energy for shift-invert method
    """
    if psi0_flag == 1:
        if sigma is None:
            print("No target energy given. Check psi0_flag and/or sigma")
        else:
            if L < 14:
                e, v = np.linalg.eig(H.todense())
                idx = e.argsort()
                e_0 = e[idx]
                v_0 = v[:, idx][:, Dim // 2]  # middle eigenvector is a column, not a row
            elif L == 14 or L == 16:
                e_0, v_0 = eigsh(H, 1, sigma=sigma, maxiter=1E4)
                v_0 = np.asarray(v_0).ravel()

            else:
                OP = H - sigma * eye(Dim)
                OPinv = LinearOperator(
                    matvec=lambda v: minres(OP, v, tol=1e-5)[0],
                    shape=H.shape,
                    dtype=H.dtype)
                e_0, v_0 = eigsh(H, sigma=sigma, k=1, tol=1e-9, OPinv=OPinv)
                v_0 = np.asarray(v_0).ravel()

    if psi0_flag == 0:
        e_0, v_0 = eigsh(H, k=1)

    return e_0, v_0
Example #11
    def _solve_lin(self):
        'Auxiliary function: solve the linear system exactly'
        # Linear equation coefficients
        Lhs, rhs, x0 = self._get_lineq()
        # Set x[I], y, czl, czu by solving linear equation
        # xy = cholmod.splinsolve(Lhs,rhs)

        self.CG_r0 = Lhs*x0 - rhs

        # Convert to numpy/scipy matrix
        npLhs = cvxopt_to_numpy_matrix(Lhs)
        nprhs = cvxopt_to_numpy_matrix(matrix(rhs))
        npx0 = cvxopt_to_numpy_matrix(x0)

        # Solve the linear system
        collector = Iter_collect(tol=self.option['ResTol'])
#        collector = Iter_collect()
        cg = minres(npLhs,nprhs,npx0,callback=collector)
        xy = numpy_to_cvxopt_matrix(cg[0])
        self.cgiter = collector.iter
        self.CG_r = Lhs*numpy_to_cvxopt_matrix(collector.x) - rhs

        nI = len(self.I)
        ny = self.QP.numeq
        ncL = len(self.cAL)
        ncU = len(self.cAU)
        self.x[self.I] = xy[0:nI]
        if xy[nI:nI + ny].size[1] == 1:
            self.y = xy[nI:nI + ny]

        if self.czl.size[0] > 0:
            self.czl[self.cAL] = xy[nI+ny:nI+ny+ncL]
            self.czu[self.cAU] = xy[nI+ny+ncL:]
Example #12
 def _update_g(self, z, lamb, D2g_ext, **kwargs):
     """
     Evaluate the g-update proximal map for ADMM.
     input:
         z : current z-value (2*N_pad)-shaped
         lamb : lagrange multiplier enforcing D.dot(g) = z
                 (same shape as z)
         D2g_ext : D.T.dot(D.dot(g_ext)) the fourth-derivative of the
                   loop field
     kwargs are passed to spl.minres. tol and maxiter control how hard it
     tries
     output:
         updated g : (N_pad)-shaped
     """
     self._oldg = self._oldg if self._oldg is not None else np.zeros(
         self.N_pad)
     self._oldAg = self.A.dot(self._oldg)
     self._c = (self._Mtphi - self.D.T.dot(lamb - self.rho * z) -
                self.rho * D2g_ext) - self._oldAg
     maxiter = kwargs.get('maxiter', 200)
     tol = kwargs.get('tol', 1E-6)
     self._gminsol = spl.minres(self.A, self._c, maxiter=maxiter, tol=tol)
     self._newg = self._gminsol[0] + self._oldg
     self._oldg = self._newg.copy()
     return self._newg
Example #13
def dfs_trunk(sim, A, alpha=0.99, QUERYKNN=10, maxiter=8, K=100, tol=1e-3):
    qsim = sim_kernel(sim).T
    sortidxs = np.argsort(-qsim, axis=1)
    for i in range(len(qsim)):
        qsim[i, sortidxs[i, QUERYKNN:]] = 0
    qsims = sim_kernel(qsim)
    W = sim_kernel(A)
    W = csr_matrix(topK_W(W, K))
    out_ranks = []
    t = time.time()
    for i in range(qsims.shape[0]):
        qs = qsims[i, :]
        tt = time.time()
        w_idxs, W_trunk = find_trunc_graph(qs, W, 2)
        Wn = normalize_connection_graph(W_trunk)
        Wnn = eye(Wn.shape[0]) - alpha * Wn
        f, inf = s_linalg.minres(Wnn, qs[w_idxs], tol=tol, maxiter=maxiter)
        ranks = w_idxs[np.argsort(-f.reshape(-1))]
        missing = np.setdiff1d(np.arange(A.shape[1]), ranks)
        out_ranks.append(
            np.concatenate([ranks.reshape(-1, 1),
                            missing.reshape(-1, 1)],
                           axis=0))
    print(time.time() - t, 'qtime')
    out_ranks = np.concatenate(out_ranks, axis=1)
    return out_ranks
Example #14
def solve(A, b, method, tol=1e-3):
    """ General sparse solver interface.

    method can be one of
    - spsolve_umfpack_mmd_ata
    - spsolve_umfpack_colamd
    - spsolve_superlu_mmd_ata
    - spsolve_superlu_colamd
    - bicg
    - bicgstab
    - cg
    - cgs
    - gmres
    - lgmres
    - minres
    - qmr
    - lsqr
    - lsmr
    """

    if method == 'spsolve_umfpack_mmd_ata':
        return spla.spsolve(A, b, use_umfpack=True, permc_spec='MMD_ATA')
    elif method == 'spsolve_umfpack_colamd':
        return spla.spsolve(A, b, use_umfpack=True, permc_spec='COLAMD')
    elif method == 'spsolve_superlu_mmd_ata':
        return spla.spsolve(A, b, use_umfpack=False, permc_spec='MMD_ATA')
    elif method == 'spsolve_superlu_colamd':
        return spla.spsolve(A, b, use_umfpack=False, permc_spec='COLAMD')
    elif method == 'bicg':
        res = spla.bicg(A, b, tol=tol)
        return res[0]
    elif method == 'bicgstab':
        res = spla.bicgstab(A, b, tol=tol)
        return res[0]
    elif method == 'cg':
        res = spla.cg(A, b, tol=tol)
        return res[0]
    elif method == 'cgs':
        res = spla.cgs(A, b, tol=tol)
        return res[0]
    elif method == 'gmres':
        res = spla.gmres(A, b, tol=tol)
        return res[0]
    elif method == 'lgmres':
        res = spla.lgmres(A, b, tol=tol)
        return res[0]
    elif method == 'minres':
        res = spla.minres(A, b, tol=tol)
        return res[0]
    elif method == 'qmr':
        res = spla.qmr(A, b, tol=tol)
        return res[0]
    elif method == 'lsqr':
        res = spla.lsqr(A, b, atol=tol, btol=tol)
        return res[0]
    elif method == 'lsmr':
        res = spla.lsmr(A, b, atol=tol, btol=tol)
        return res[0]
    else:
        raise Exception('UnknownSolverType')
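A usage sketch for the solve dispatcher above. It assumes the surrounding module imports scipy.sparse.linalg as spla and a SciPy version that still accepts the tol keyword (newer releases rename it to rtol for the iterative solvers); the test system below is made up for illustration:

import numpy as np
import scipy.sparse as sp

# small symmetric positive definite system
A = sp.diags([4.0, -1.0, -1.0], [0, -1, 1], shape=(50, 50), format='csr')
b = np.ones(50)

x = solve(A, b, 'minres', tol=1e-8)
print(np.linalg.norm(A @ x - b))  # residual norm of the MINRES solution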
Example #15
	def disc_proj(u1, u2, n, h, V, P,type):
		proj = Function(V)
		u = TrialFunction(V)
		v = TestFunction(V)
		if type=='Uh':
			dof_coords = V.tabulate_dof_coordinates().reshape(-1, 2)
				#print(len(dof_coords))
				#u_sol = Function(V)
			for j in range(len(dof_coords)):
				proj.vector()[j] = u1(dof_coords[j])+u2(dof_coords[j])
		elif type=='L2':
			m = u*v*dx
			Me = assemble(m)
			M_mat = as_backend_type(Me).mat()
			M = sp.csc_matrix(sp.csr_matrix(M_mat.getValuesCSR()[::-1], shape = M_mat.size))
			rhs = assemble(u1*v*dx) + assemble(u2*v*dx)
			val,auuu = spl.minres(M,rhs,tol=1e-10)
			proj.vector()[:] = val
		elif type=='H1':
			m = u*v*dx + inner(grad(u),grad(v))*dx
			Me = assemble(m)
			M_mat = as_backend_type(Me).mat()
			M = sp.csc_matrix(sp.csr_matrix(M_mat.getValuesCSR()[::-1], shape = M_mat.size))
			rhs = assemble(u1*v*dx) + assemble(u2*v*dx) + assemble(inner(grad(u1),grad(v))*dx) + assemble(inner(grad(u2),grad(v))*dx)
			val,auuu = spl.cg(M,rhs,tol=1e-7)
			proj.vector()[:] = val
		elif type=='Vh':
			Me = gh(u,v,Constant(1.))
			M_mat = as_backend_type(Me).mat()
			M = sp.csc_matrix(sp.csr_matrix(M_mat.getValuesCSR()[::-1], shape = M_mat.size))
			rhs = gh(u1,v,Constant(1.)) + gh(u2,v,Constant(1.))
			val,auuu = spl.cg(M,rhs,M=P,tol=1e-7)
			proj.vector()[:] = val
		return proj
Example #16
def JDLoop(A, m, perp, V, tol, verbose = False):
	assert(type(A) == type(np.zeros(1)))
	assert(type(perp) == type(np.zeros(1)))
	n = A.shape[0]
	assert(perp.shape[0]== n and perp.shape[1]== 1)
	assert(V.shape[0] == n and V.shape[1] == m-1)
	
	t = copy.copy(perp)
	k = 0
	while k < .001:
		perp = copy.copy(t)
		#print "\tT0:",t
		for i in xrange(m-1):
			t[:,0] -= (np.dot(V[:,i].T,t[:,0]) * V[:,i])
		#print "\tTF:", t
		#print "Norm v0, iteration",m,":",la.norm(v0)
		#print "Norm t:",la.norm(t)
		k = la.norm(t) / la.norm(perp)
		if verbose:
			print "\t\tOrthogonalization rescale kappa: ", k
	perp= copy.copy(t)
	perp /= la.norm(t)
	vm = perp
	for i in xrange(m-1):
		assert(np.dot(t[:,0].T,V[:,i]) < 1e-10)

	V = np.append(V,perp,1)

	M = np.dot(V.T, np.dot(A, V))

	th, s = la.eigh(M, eigvals=(m-1,m-1))

	u = np.dot(V,s)
	r = np.dot(A, np.dot(V, s)) - th * u
	if la.norm(r) < tol:
		print "\t\t\t residual", la.norm(r)
		print "\t\t\t SUCCESS!"
		return la.norm(r), th, u, t, vm

	diag = A.diagonal()
	diag.shape = (n,1)
	I = np.eye(n)
	P = I - np.dot(u, u.T)
	A2 = A - (th * I)
	MAT = np.dot(P, np.dot(A2,P))

	#print MAT.shape, r.shape
	x, info = sla.minres(MAT,-r)
	
	t = x
	t /= la.norm(t)
	t.shape = (n,1)

	if verbose:
		print "\t\t\t residual", la.norm(r)
		print "\t\t\t t dot u: ", np.dot(t.T,u)
	#assert(np.dot(t.T,u) <= 1e-5)

	return la.norm(r), th, u, t, vm
Example #17
def cg_diffusion(qsims, Wn, alpha=0.99, maxiter=10, tol=1e-3):
    Wnn = eye(Wn.shape[0]) - alpha * Wn
    out_sims = []
    for i in range(qsims.shape[0]):
        #f,inf = s_linalg.cg(Wnn, qsims[i,:], tol=tol, maxiter=maxiter)
        f, inf = s_linalg.minres(Wnn, qsims[i, :], tol=tol, maxiter=maxiter)
        out_sims.append(f.reshape(-1, 1))
    out_sims = np.concatenate(out_sims, axis=1)
    ranks = np.argsort(-out_sims, axis=0)
    return ranks
Example #18
    def calculate(self,alldata_par, alldata_obs, alldata_wei):
        
        no_snapshots, no_parameters = alldata_par.shape
        TEMP = np.zeros([no_snapshots,no_snapshots])
        for i in range(no_parameters):
            v = alldata_par[:,i]
            Q = npm.repmat(v,no_snapshots,1)
            T = np.transpose(Q)
            TEMP = TEMP + np.power(Q-T,2)
        np.sqrt(TEMP,out=TEMP) # distances between all points
        if self.no_keep>0:
            MAX = 2*np.max(TEMP)
            M = TEMP+MAX*np.eye(no_snapshots)
            to_keep = np.ones(no_snapshots,dtype=bool)
            for i in range(no_snapshots - self.no_keep):
                argmin = np.argmin(M)
                xx = argmin // no_snapshots
                if self.expensive>0:
                    S=sum(M)
                    yy = argmin % no_snapshots
                    M[xx,yy]=MAX
                    M[yy,xx]=MAX
                    if S[yy]<S[xx]:
                        yy=xx
                M[xx,:]=MAX
                M[:,xx]=MAX
                to_keep[xx]=False
            TEMP = TEMP[to_keep,:]
            TEMP = TEMP[:,to_keep]
            alldata_par = alldata_par[to_keep,:]
            alldata_obs = alldata_obs[to_keep,:]
            try:
                to_keep=np.append(to_keep,np.ones(no_parameters+1,dtype=bool))
                self.initial_iteration = self.initial_iteration[to_keep]
            except:
                print("Exception!")    
        kernel.kernel(TEMP,self.type_kernel)
        P = np.ones([no_snapshots,1]) # only constant polynomials
    #    print(P.shape, no_evaluations, alldata_par.shape, TEMP.shape)
        P = np.append(alldata_par,P,axis=1) # plus linear polynomials
        no_polynomials = P.shape[1]
        TEMP = np.append(TEMP,P,axis=1)
        TEMP2 = np.append(np.transpose(P),np.zeros([no_polynomials,no_polynomials]),axis=1)
        TEMP2 = np.vstack([TEMP,TEMP2])
        RHS = np.vstack([ alldata_obs, np.zeros([no_polynomials,1]) ])
    #    print("Condition_number:",np.linalg.cond(TEMP2))
#        print("DEBUG2:", TEMP2.shape, np.linalg.matrix_rank(TEMP2), RHS.shape)
        if self.type_solver == 0:
            SOL=np.linalg.solve(TEMP2,RHS)
        else:
            SOL_=splin.minres(TEMP2,RHS,x0=self.initial_iteration,tol=pow(10,-self.type_solver),show=False)
            SOL=SOL_[0]
    #    RES=RHS-np.reshape(np.matmul(TEMP2,SOL),(no_evaluations+no_polynomials,1))
    #    print("Residual norm:",np.linalg.norm(RES), "RHSnorm:", np.linalg.norm(RHS))
        return SOL, no_snapshots, alldata_par, alldata_obs, alldata_wei, TEMP2, RHS
Example #19
def solve_factorized_aug(z, Fval, LU, G, A):
    M, N = G.shape
    P, N = A.shape
    """Total number of inequality constraints"""
    m = M
    """Primal variable"""
    x = z[0:N]
    """Multiplier for equality constraints"""
    nu = z[N:N + P]
    """Multiplier for inequality constraints"""
    l = z[N + P:N + P + M]
    """Slacks"""
    s = z[N + P + M:]
    """Dual infeasibility"""
    rd = Fval[0:N]
    """Primal infeasibility"""
    rp1 = Fval[N:N + P]
    rp2 = Fval[N + P:N + P + M]
    """Centrality"""
    rc = Fval[N + P + M:]
    """Sigma matrix"""
    SIG = diags(l / s, 0)
    """LU is actually the augmented system W"""
    W = LU

    b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc / s)
    b2 = -rp1
    b = np.hstack((b1, b2))
    """Prepare iterative solve via MINRES"""
    sign = np.zeros(N + P)
    sign[0:N // 2] = 1.0
    sign[N // 2:] = -1.0
    T = diags(sign, 0)
    """Change rhs"""
    b_new = mydot(T, b)

    dW = np.abs(W.diagonal())
    dPc = np.ones(W.shape[0])
    ind = (dW > 0.0)
    dPc[ind] = 1.0 / dW[ind]
    Pc = diags(dPc, 0)
    dxnu, info = minres(W, b_new, tol=1e-10, M=Pc)

    # dxnu = solve(J, b)
    dx = dxnu[0:N]
    dnu = dxnu[N:]
    """Obtain search directions for l and s"""
    ds = -rp2 - mydot(G, dx)
    # ds = s*ds
    # SIG = np.diag(l/s)
    dl = -mydot(SIG, ds) - rc / s

    dz = np.hstack((dx, dnu, dl, ds))
    return dz
Example #20
def test_minres_scipy():
    H = sprandsym(10)
    v = sp_rand(10,3,0.8)
    A = sparse([[H],[v]])
    vrow = sparse([[v.T],[spmatrix([],[],[],(3,3))]])
    A = sparse([A,vrow])
    b = sp_rand(13,1,0.8)
    As = cvxopt_to_numpy_matrix(A)
    bs = cvxopt_to_numpy_matrix(matrix(b))
    result = minres(As,bs,)
    x = numpy_to_cvxopt_matrix(result[0])
    print(nrm2(A*x-b))
Example #21
    def solve(self, b, maxiter=1000, tol=1.e-10):
        """
        Compute Q^{-1}b

        Parameters:
        -----------
        b:  	(n,) ndarray
             given right hand side
        maxiter: int, optional. default = 1000
            Maximum number of iterations for the linear solver

        tol:	float, optional. default = 1.e-10
            Residual stopping tolerance for the iterative solver

        Notes:
        ------
        If 'Dense', inverts using an LU factorization; otherwise uses an iterative solver:
            - MINRES without a preconditioner, or GMRES with a preconditioner.
            The preconditioner is not guaranteed to be positive definite.

        """

        if self.method == 'Dense':
            from scipy.linalg import solve
            x = solve(self.mat, b)

        else:
            from scipy.sparse.linalg import gmres, aslinearoperator, minres

            P = self.P
            Aop = aslinearoperator(self)

            residual = _Residual()
            if P is not None:
                x, info = gmres(Aop,
                                b,
                                tol=tol,
                                restart=30,
                                maxiter=1000,
                                callback=residual,
                                M=P)
            else:
                x, info = minres(Aop,
                                 b,
                                 tol=tol,
                                 maxiter=maxiter,
                                 callback=residual)
            self.solvmatvecs += residual.itercount()
            if self.verbose:
                print("Number of iterations is %g and status is %g" %
                      (residual.itercount(), info))
        return x
Example #22
 def x_star(self):
     if not hasattr(self, 'x_opt'):
         try:
             # use the Cholesky factorization to solve the linear system if Q is
             # symmetric and positive definite, i.e., the function is strictly convex
             self.x_opt = cho_solve(cho_factor(self.Q), -self.q)
         except np.linalg.LinAlgError:
             # since Q is not strictly positive definite, i.e., the function is linear along the
             # eigenvectors corresponding to the null eigenvalues, the system has infinitely many
             # solutions, so we choose the one that minimizes the residual
             self.x_opt = minres(self.Q, -self.q)[0]
     return self.x_opt
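The try/except fallback above (Cholesky when Q is symmetric positive definite, MINRES otherwise) can be written as a standalone sketch; the 2x2 singular test problem is made up for illustration:

import numpy as np
from scipy.linalg import cho_factor, cho_solve
from scipy.sparse.linalg import minres

def quadratic_minimizer(Q, q):
    # minimize 0.5 * x^T Q x + q^T x, i.e. solve Q x = -q
    try:
        # fast path: Q symmetric positive definite
        return cho_solve(cho_factor(Q), -q)
    except np.linalg.LinAlgError:
        # Q only positive semidefinite: fall back to MINRES,
        # which returns a residual-minimizing solution
        return minres(Q, -q)[0]

Q = np.array([[2.0, 0.0], [0.0, 0.0]])  # singular, so the Cholesky factorization fails
q = np.array([-2.0, 0.0])
print(quadratic_minimizer(Q, q))        # approximately [1., 0.]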
Example #23
def test(mesh, normalize=False, scipy_solver='lu'):
  V = FunctionSpace(mesh, 'Lagrange', 1)
  bc= DirichletBC(V, Constant(0.0), DomainBoundary())

  u  =TrialFunction(V)
  v = TestFunction(V)

  f = Constant(1.0)
  a = inner(grad(u), grad(v))*dx
  L = f*v*dx

  A, b = assemble_system(a, L, bc)

  if normalize:
    max = A.array().max()
    A /= max
    b /= max

  u = Function(V)
  solve(A, u.vector(), b)
  plot(u, interactive=True, title='dolfin')

  x = A.array()
  print("Values of A are in [%g, %g]" % (x.min(), x.max()))
  print("Num of entries in A larger than 100", np.where(x > 1E2)[0].size)
  print("Number of mesh cells", mesh.num_cells())

  # see about scipy
  rows, cols, values = A.data()
  B = csr_matrix((values, cols, rows))
  d = b.array().T

  if scipy_solver == 'cg':
    v_, info = cg(B, d)              
  elif scipy_solver == 'bicg':
    v_, info = bicg(B, d)              
  elif scipy_solver == 'bicgstab':
    v_, info = bicgstab(B, d)              
  elif scipy_solver == 'gmres':
    v_, info = gmres(B, d)              
  elif scipy_solver == 'minres':
    v_, info = minres(B, d)              
  else:
    v_ = spsolve(B, d)              

  v = Function(V)
  v.vector()[:] = v_
  plot(v, interactive=True, title='scipy')
  try:
    print("info", info, 'v_max', v_.max())
  except:
    pass
Example #24
def test(mesh, normalize=False, scipy_solver='lu'):
    V = FunctionSpace(mesh, 'Lagrange', 1)
    bc = DirichletBC(V, Constant(0.0), DomainBoundary())

    u = TrialFunction(V)
    v = TestFunction(V)

    f = Constant(1.0)
    a = inner(grad(u), grad(v)) * dx
    L = f * v * dx

    A, b = assemble_system(a, L, bc)

    if normalize:
        max = A.array().max()
        A /= max
        b /= max

    u = Function(V)
    solve(A, u.vector(), b)
    plot(u, interactive=True, title='dolfin')

    x = A.array()
    print("Values of A are in [%g, %g]" % (x.min(), x.max()))
    print("Num of entries in A larger than 100", np.where(x > 1E2)[0].size)
    print("Number of mesh cells", mesh.num_cells())

    # see about scipy
    rows, cols, values = A.data()
    B = csr_matrix((values, cols, rows))
    d = b.array().T

    if scipy_solver == 'cg':
        v_, info = cg(B, d)
    elif scipy_solver == 'bicg':
        v_, info = bicg(B, d)
    elif scipy_solver == 'bicgstab':
        v_, info = bicgstab(B, d)
    elif scipy_solver == 'gmres':
        v_, info = gmres(B, d)
    elif scipy_solver == 'minres':
        v_, info = minres(B, d)
    else:
        v_ = spsolve(B, d)

    v = Function(V)
    v.vector()[:] = v_
    plot(v, interactive=True, title='scipy')
    try:
        print("info", info, 'v_max', v_.max())
    except:
        pass
Example #25
 def iterative(self, subtype):
     assert subtype in ['cg', 'gmres', 'minres']
     A = sparse.csr_matrix(self.matrix_a)
     b = self.vector_b
     counter = method_counter()
     if subtype == 'gmres':
         (x, info) = linalg.gmres(A, b, callback=counter)
     elif subtype == 'minres':
         (x, info) = linalg.minres(A, b, callback=counter)
     elif subtype == 'cg':
         (x, info) = linalg.cg(A, b, callback=counter)
     print(counter.niter)
     return x
Example #26
def minres_solve(A, u, b, tol=1e-08):
    """ Solves the linear system A*u = b
        Input
            A: numpy array of NxN components (LHS)
            b: numpy array of Nx1 components (RHS)
            u: numpy array of Nx1 components (Solution)
    """
    print("Solving system using MINRES solver")

    # Convert the LHS to a sparse CSR representation
    A = sp.csr_matrix(A)

    # Solve system
    u[:] = spla.minres(A, b, tol=tol)[0]
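A small, hypothetical driver for the helper above; it assumes the snippet's scipy.sparse as sp / scipy.sparse.linalg as spla imports and a SciPy version that still accepts tol. The 3x3 symmetric system is made up:

import numpy as np

A = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])
u = np.zeros_like(b)

minres_solve(A, u, b)  # fills u in place
print(u)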
Example #27
    def solveSchurDiag(self, res ):
        '''Solves diagonal of Schur matrix
        Useful for Block Jacobi'''
        res = res.reshape([self.n, self.m])
        out = zeros_like(res)
        for i in range(self.n):
            diag_op = splinalg.LinearOperator( (self.m, self.m) ,matvec = lambda x: self.schurDiag(i,x), dtype = float )

            x0 = out[i-1] if i>0 else None

            ans, err = splinalg.minres(diag_op, res[i], x0=x0)
            out[i] = ans

        return out.ravel()
Example #28
    def _solve_CG(self, X, Y):
        """
        Solve the primal SVM problem with Newton's method without computing the Hessian matrix
        explicitly; good for big sparse matrices
        :param X: 
        :param Y: 
        """
        [n, d] = X.shape

        # we add one last component, which is b (bias)
        self.w = np.zeros(d + 1)

        # helper variable for storing 1-Y*(np.dot(X,w))
        self.out = np.ones(n)

        l = self.l2reg
        # the number of alg. iteration
        iter = 0

        sv = np.where(self.out > 0)[0]

        # create linear operator, acts as matrix vector multiplication, without storing full matrix(hessian)
        #hess_vec = linalg.LinearOperator((d + 1, d + 1), matvec=self._matvec_mull)

        # This is a hack in order to pass additional parameters to linear matvec function
        mv2 = lambda v: self._matvec_mull(v, sv)
        # create linear operator, acts as matrix vector multiplication, without storing full matrix(hessian)
        hess_vec = linalg.LinearOperator((d + 1, d + 1), matvec=mv2)

        while True:
            iter = iter + 1
            if iter > self.newton_iter:
                print("Maximum {0} of Newton steps reached, change newton_iter parameter or try larger lambda".format(
                    iter))
                break

            obj, grad = self._obj_func(self.w, X, Y, self.out)

            # np.where returns a tuple, we take the first dim
            sv = np.where(self.out > 0)[0]

            step, info = linalg.minres(hess_vec, -grad)

            t, self.out = self._line_search(self.w, step, self.out)

            self.w += t * step

            if -step.dot(grad) < self._prec * obj:
                break
Example #29
def task_dfs_loop(i, qs, alpha, W, tol, maxiter, top_k):
    w_idxs, W_trunk = find_trunc_graph(qs, W, 2)
    Wn = normalize_connection_graph(W_trunk)
    Wnn = eye(Wn.shape[0]) - alpha * Wn
    f, inf = s_linalg.minres(Wnn,
                             qs[w_idxs].toarray(),
                             tol=tol,
                             maxiter=maxiter)
    ranks = w_idxs[np.argsort(-f.reshape(-1))]
    missing = np.setdiff1d(np.arange(W.shape[1]), ranks)
    cur_rank = np.concatenate(
        [ranks.reshape(-1, 1), missing.reshape(-1, 1)], axis=0)
    if top_k is not None:
        cur_rank = cur_rank[:top_k]
    return i, cur_rank
Example #30
def test_minres_scipy():
    H = sprandsym(10)
    v = sp_rand(10, 3, 0.8)
    A = sparse([[H], [v]])
    vrow = sparse([[v.T], [spmatrix([], [], [], (3, 3))]])
    A = sparse([A, vrow])
    b = sp_rand(13, 1, 0.8)
    As = cvxopt_to_numpy_matrix(A)
    bs = cvxopt_to_numpy_matrix(matrix(b))
    result = minres(
        As,
        bs,
    )
    x = numpy_to_cvxopt_matrix(result[0])
    print(nrm2(A * x - b))
Example #31
def _arrlist_lsq_solve(fA, b, iter=10):
    '''
    Min residual method on lists of numpy arrays. 
    fA is a function that takes an array list and outputs an array list.
    b is an array list. x = fA^-1 b
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html#scipy.sparse.linalg.minres
    '''
    x = [np.zeros_like(bb) for bb in b]
    #shape_arr = [bb.shape for bb in b]
    #for shapea in shape_arr: assert shapea==shape_arr[0]
    fAOp = LinearOperator((-1, -1), matvec=fA)
    x, info = minres(
        fAOp, b, x0=x, shift=1e-8, tol=1e-05
    )  #, maxiter=iter, xtype=None, M=None, callback=None, show=False, check=False)
    #r=(0.0,1e3)[info==0]
    return x, np.abs(info)
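The helper above passes a list of arrays straight to minres, which expects a single 1-D right-hand side, and gives the LinearOperator a placeholder (-1, -1) shape. A hedged sketch of the flatten/unflatten plumbing such a wrapper typically needs (arrlist_minres and its internals are hypothetical names, and fA is assumed to be a symmetric linear map from array lists to array lists):

import numpy as np
from scipy.sparse.linalg import LinearOperator, minres

def arrlist_minres(fA, b_list):
    shapes = [b.shape for b in b_list]
    sizes = [b.size for b in b_list]
    splits = np.cumsum(sizes)[:-1]

    def unflatten(v):
        return [piece.reshape(shape)
                for piece, shape in zip(np.split(v, splits), shapes)]

    def matvec(v):
        # apply fA in list-of-arrays form, then flatten back to one vector
        return np.concatenate([out.ravel() for out in fA(unflatten(v))])

    n = int(sum(sizes))
    op = LinearOperator((n, n), matvec=matvec)
    b_flat = np.concatenate([b.ravel() for b in b_list])
    x_flat, info = minres(op, b_flat)
    return unflatten(x_flat), np.abs(info)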
Example #32
def cg_wrapper(operator, rhs, x0 = None):
    """
    do reshapes, and call cg; is that all there is to it?
    if rhs and x0 are in boundified subspace, we should not need to reapply this constraint
    """
    if x0 is not None: x0 = np.ravel(x0)
    shape = rhs.shape
    from scipy.sparse.linalg import cg, LinearOperator, minres, gmres

    def flat_operator(x):
        return np.ravel( operator(x.reshape(shape)))
    N = np.prod(shape)
    wrapped_operator = LinearOperator((N, N), flat_operator, dtype=float)  # np.float alias no longer exists in NumPy

    y = minres(wrapped_operator, np.ravel(rhs), x0, maxiter = 10)[0]
    return y.reshape(shape)
Example #33
def rqi(A=None, x=None, k=None):
    from numpy.linalg import norm, solve
    from numpy import dot, eye
    from tracemin_fiedler import cg
    from scipy.sparse.linalg import minres

    for j in range(k):
        u = x / norm(x)  # normalize
        lam = dot(u, A * u)  # Rayleigh quotient
        #B = A - lam * eye(A.shape[0], A.shape[1])
        #x = solve(B,u)  			 # inverse power iteration
        #D = scipy.sparse.dia_matrix((1.0/(A.diagonal()-lam), 0), shape=A.shape)

        x, flag = minres(A, u, tol=1e-5, maxiter=30, shift=lam)  # solves (A - shift*I) x = u, i.e. a shifted inverse-iteration step
    x = x / norm(x)
    lam = dot(x, A * x)  # Rayleigh quotient
    return [lam, x]
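The shift argument used above is what makes this a shifted inverse-iteration step: SciPy's minres solves (A - shift*I) x = b without forming the shifted matrix. A short illustration with a made-up matrix:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import minres

A = sp.diags([4.0, -1.0, -1.0], [0, -1, 1], shape=(200, 200), format='csr')
u = np.ones(200)
lam = 1.0

x, info = minres(A, u, shift=lam)
print(np.linalg.norm(A @ x - lam * x - u))  # near zero: x solves (A - lam*I) x = u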
Example #34
def rqi(A=None, x=None, k=None):
    from numpy.linalg import norm, solve
    from numpy import dot, eye
    from tracemin_fiedler import cg
    from scipy.sparse.linalg import minres

    for j in range(k):
        u = x/norm(x)                            # normalize
        lam = dot(u,A*u)                         # Rayleigh quotient
        #B = A - lam * eye(A.shape[0], A.shape[1])
        #x = solve(B,u)                          # inverse power iteration
        #D = scipy.sparse.dia_matrix((1.0/(A.diagonal()-lam), 0), shape=A.shape)

        x,flag = minres(A,u,tol=1e-5,maxiter=30,shift=lam)
    x = x/norm(x)
    lam = dot(x,A*x)                             # Rayleigh quotient
    return [lam,x]
Example #35
 def in_sample_kfoldcv(self, folds, maxiter = None):
     """
     Computes the in-sample k-fold cross-validation predictions. By in-sample we denote the
     setting, where we leave a set of arbitrary entries of Y out at a time.
     
     Returns
     -------
     F : array, shape = [n_samples1*n_samples2]
         Training set labels. Label for (X1[i], X2[j]) maps to
         F[i + j*n_samples1] (column order).
         
     """
     if not self.kernelmode:
         X1, X2 = self.X1, self.X2
         P = X1 @ self.W @ X2.T
         R1 = la.inv(X1.T @ X1 + self.regparam1 * np.eye(X1.shape[1])) @ X1.T
         R2 = la.inv(X2.T @ X2 + self.regparam2 * np.eye(X2.shape[1])) @ X2.T
     else:
         P = self.K1 @ self.A @ self.K2.T
         H1 = self.K1 @ la.inv(self.K1 + self.regparam1 * np.eye(self.K1.shape[0]))
         H2 = self.K2 @ la.inv(self.K2 + self.regparam2 * np.eye(self.K2.shape[0]))
     
     allhopreds = np.zeros(self.Y.shape)
     for fold in folds:
         row_inds_K1, row_inds_K2 = fold
         if not self.kernelmode:
             u_inds_1, i_inds_1 = np.unique(row_inds_K1, return_inverse = True)
             r_inds_1 = np.arange(len(u_inds_1))[i_inds_1]
             H1_ho = X1[u_inds_1] @ R1[:, u_inds_1]
             u_inds_2, i_inds_2 = np.unique(row_inds_K2, return_inverse = True)
             r_inds_2 = np.arange(len(u_inds_2))[i_inds_2]
             H2_ho = X2[u_inds_2] @ R2[:, u_inds_2]
             pko = PairwiseKernelOperator(H1_ho, H2_ho, r_inds_1, r_inds_2, r_inds_1, r_inds_2)
         else:
             pko = PairwiseKernelOperator(H1, H2, row_inds_K1, row_inds_K2, row_inds_K1, row_inds_K2)
         temp = P[row_inds_K1, row_inds_K2]
         temp -= np.array(pko.matvec(np.array(self.Y)[row_inds_K1, row_inds_K2].squeeze())).squeeze()
         def mv(v):
             return v - pko.matvec(v)
         G = LinearOperator((len(row_inds_K1), len(row_inds_K1)), matvec = mv, dtype = np.float64)
         hopred = minres(G, temp.T, tol=1e-20, maxiter = maxiter)[0]
         allhopreds[row_inds_K1, row_inds_K2] = hopred
     return allhopreds.ravel(order = 'F')
Example #36
	def solve(self, b, maxiter = 1000, tol = 1.e-10):
		"""
		Compute Q^{-1}b
		
		Parameters:
		-----------
		b:  	(n,) ndarray
		 	given right hand side
		maxiter: int, optional. default = 1000
			Maximum number of iterations for the linear solver

		tol:	float, optional. default = 1.e-10
			Residual stoppingtolerance for the iterative solver
				
		Notes:
		------
		If 'Dense' then inverts using LU factorization otherwise uses iterative solver 
			- MINRES if used without preconditioner or GMRES with preconditioner. 
			Preconditioner is not guaranteed to be positive definite.

		"""
		
		
				
		if self.method == 'Dense':
			from scipy.linalg import solve
			x = solve(self.mat, b)

		else:
			from scipy.sparse.linalg import gmres, aslinearoperator, minres

			P = self.P
			Aop = aslinearoperator(self)
		
			residual = _Residual()
			if P != None:
				x, info = gmres(Aop, b, tol = tol, restart = 30, maxiter = 1000, callback = residual, M = P)
			else:
				x, info = minres(Aop, b, tol = tol, maxiter = maxiter, callback = residual )
			self.solvmatvecs += residual.itercount()
			if self.verbose:	
				print "Number of iterations is %g and status is %g"% (residual.itercount(), info)
		return x
Example #37
 def _update_g(self, h, lamb, **kwargs):
     """
     Evaluate the g-update proximal map for ADMM.
     input:
         h : g-like array 
         lamb : lagrange multiplier enforcing D.dot(g) = z
                 (same shape as z)
     kwargs are passed to spl.minres. tol and maxiter control how hard it
     tries
     output:
         updated g : (N_pad)-shaped
     """
     self._oldg = self._oldg if self._oldg is not None else np.zeros(self.N_pad)
     oldAg = self.A.dot(self._oldg)
     self._c = self._Mtphi/self.sigma**2 + lamb + self.rho * h - oldAg
     maxiter = kwargs.get('maxiter', 200)
     tol = kwargs.get('tol', 1E-12)
     self._gminsol = spl.minres(self.A, self._c, maxiter = maxiter, tol = tol)
     self._newg = self._gminsol[0] + self._oldg
     self._oldg = self._newg.copy()
     return self._newg
Example #38
def solve_factorized_aug(z, Fval, LU, G, A):
    M, N=G.shape
    P, N=A.shape

    """Total number of inequality constraints"""
    m=M    

    """Primal variable"""
    x=z[0:N]

    """Multiplier for equality constraints"""
    nu=z[N:N+P]

    """Multiplier for inequality constraints"""
    l=z[N+P:N+P+M]

    """Slacks"""
    s=z[N+P+M:]

    """Dual infeasibility"""
    rd = Fval[0:N]
    
    """Primal infeasibility"""
    rp1 = Fval[N:N+P]
    rp2 = Fval[N+P:N+P+M]

    """Centrality"""
    rc = Fval[N+P+M:]

    """Sigma matrix"""
    SIG = diags(l/s, 0)

    """LU is actually the augmented system W"""
    W = LU

    b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc/s)
    b2 = -rp1
    b = np.hstack((b1, b2))

    """Prepare iterative solve via MINRES"""
    sign = np.zeros(N+P)
    sign[0:N//2] = 1.0
    sign[N//2:] = -1.0
    T = diags(sign, 0)        
   
    """Change rhs"""
    b_new = mydot(T, b)

    dW = np.abs(W.diagonal())
    dPc = np.ones(W.shape[0])
    ind = (dW > 0.0)
    dPc[ind] = 1.0/dW[ind]
    Pc = diags(dPc, 0)    
    dxnu, info = minres(W, b_new, tol=1e-10, M=Pc)
    
    # dxnu = solve(J, b)
    dx = dxnu[0:N]
    dnu = dxnu[N:]

    """Obtain search directions for l and s"""
    ds = -rp2 - mydot(G, dx)
    # ds = s*ds
    # SIG = np.diag(l/s)
    dl = -mydot(SIG, ds) - rc/s

    dz = np.hstack((dx, dnu, dl, ds))
    return dz 
Example #39
 def __init__(self, **kwargs):
     self.Y = kwargs["Y"]
     #self.Y = array_tools.as_2d_array(Y)
     self.trained = False
     if "regparam" in kwargs:
         self.regparam = kwargs["regparam"]
     else:
         self.regparam = 0.
     regparam = self.regparam
     if CALLBACK_FUNCTION in kwargs:
         self.callbackfun = kwargs[CALLBACK_FUNCTION]
     else:
         self.callbackfun = None
     if "compute_risk" in kwargs:
         self.compute_risk = kwargs["compute_risk"]
     else:
         self.compute_risk = False
     
     if 'K1' in kwargs or 'pko' in kwargs:
         if 'pko' in kwargs:
             pko = kwargs['pko']
         else:
             self.input1_inds = np.array(kwargs["label_row_inds"], dtype = np.int32)
             self.input2_inds = np.array(kwargs["label_col_inds"], dtype = np.int32)
             K1 = kwargs['K1']
             K2 = kwargs['K2']
             if 'weights' in kwargs: weights = kwargs['weights']
             else: weights = None
             pko = pairwise_kernel_operator.PairwiseKernelOperator(K1, K2, self.input1_inds, self.input2_inds, self.input1_inds, self.input2_inds, weights)
         self.pko = pko
         if 'maxiter' in kwargs: maxiter = int(kwargs['maxiter'])
         else: maxiter = None
         
         Y = np.array(self.Y).ravel(order = 'F')
         self.bestloss = float("inf")
         def mv(v):
             return pko.matvec(v) + regparam * v
         
         def mvr(v):
             raise Exception('This function should not be called!')
         
         def cgcb(v):
             if self.compute_risk:
                 P =  sampled_kronecker_products.sampled_vec_trick(v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds)
                 z = (Y - P)
                 Ka = sampled_kronecker_products.sampled_vec_trick(v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds)
                 loss = (np.dot(z, z) + regparam * np.dot(v, Ka))
                 print("loss", 0.5 * loss)
                 if loss < self.bestloss:
                     self.A = v.copy()
                     self.bestloss = loss
             else:
                 self.A = v
             if not self.callbackfun is None:
                 #self.predictor = KernelPairwisePredictor(self.A, self.input1_inds, self.input2_inds, self.pko.weights)
                 self.callbackfun.callback(self)
         
         G = LinearOperator((self.Y.shape[0], self.Y.shape[0]), matvec = mv, rmatvec = mvr, dtype = np.float64)
         self.A = minres(G, self.Y, maxiter = maxiter, callback = cgcb, tol=1e-20)[0]
         self.predictor = KernelPairwisePredictor(self.A, self.pko.original_col_inds_K1, self.pko.original_col_inds_K2, self.pko.weights)
         if not self.callbackfun is None:
                 self.callbackfun.finished(self)
     else:
         self.input1_inds = np.array(kwargs["label_row_inds"], dtype = np.int32)
         self.input2_inds = np.array(kwargs["label_col_inds"], dtype = np.int32)
         X1 = kwargs['X1']
         X2 = kwargs['X2']
         self.X1, self.X2 = X1, X2
         
         if 'maxiter' in kwargs: maxiter = int(kwargs['maxiter'])
         else: maxiter = None
         
         if 'weights' in kwargs: weights = kwargs['weights']
         else: weights = None
         
         Y = np.array(self.Y).ravel(order = 'F')
         self.bestloss = float("inf")
         def mv(v):
             v_after = pko.matvec(v)
             v_after = pko.rmatvec(v_after) + regparam * v
             return v_after
         
         def cgcb(v):
             if self.compute_risk:
                 P = sampled_kronecker_products.sampled_vec_trick(v, X2, X1, self.input2_inds, self.input1_inds)
                 z = (Y - P)
                 loss = (np.dot(z,z)+regparam*np.dot(v,v))
                 if loss < self.bestloss:
                     self.W = v.copy().reshape(pko.shape, order = 'F')
                     self.bestloss = loss
             else:
                 self.W = v
             if not self.callbackfun is None:
                 self.predictor = LinearPairwisePredictor(self.W)
                 self.callbackfun.callback(self)
         
         v_init = np.array(self.Y).reshape(self.Y.shape[0])
         pko = pairwise_kernel_operator.PairwiseKernelOperator(X1, X2, self.input1_inds, self.input2_inds, None, None, weights)
         G = LinearOperator((pko.shape[1], pko.shape[1]), matvec = mv, dtype = np.float64)
         v_init = pko.rmatvec(v_init)
         '''if 'warm_start' in kwargs:
             x0 = np.array(kwargs['warm_start']).reshape(kronfcount, order = 'F')
         else:
             x0 = None'''
         minres(G, v_init, maxiter = maxiter, callback = cgcb, tol=1e-20)#[0].reshape((pko_T.shape[0], pko.shape[1]), order='F')
         self.predictor = LinearPairwisePredictor(self.W, self.input1_inds, self.input2_inds, weights)
         if not self.callbackfun is None:
                 self.callbackfun.finished(self)
Example #40
        self.pde = pde
        self.hist = []

    def __call__(self, x):
        self.n += 1
        if self.n == 1 or self.n % 10 == 0:
            resnorm = norm(self.pde.matvec(x, 1))
            gradient = kuramoto.c_gradAdj()
            print('iter ', self.n, resnorm, gradient)
            self.hist.append([self.n, resnorm, gradient])
        sys.stdout.flush()

# --- solve with minres (if cg converges this should converge -#
callback = Callback(pde)
callback(rhs * 0)
vw, info = splinalg.minres(oper, rhs, maxiter=100, tol=1E-6,
                           callback=callback)

pde.matvec(vw, 1)

u, v, w, v0, uEnd = [], [], [], [], []
for i in range(kuramoto.cvar.N_CHUNK):
    for j in range(kuramoto.cvar.N_STEP):
        u.append(kuramoto.c_u(i, j))
        v.append(kuramoto.c_v(i, j))
        w.append(kuramoto.c_w(i, j))
        v0.append(v[-1].copy())
        kuramoto.c_project_ddt(i, j, v0[-1])
    uEnd.append(kuramoto.c_u(i, kuramoto.cvar.N_STEP))

u, v, w, v0, uEnd = array(u), array(v), array(w),  array(v0), array(uEnd)
Example #41
    def __init__(self, **kwargs):
        self.resource_pool = kwargs
        Y = kwargs["Y"]
        self.input1_inds = np.array(kwargs["label_row_inds"], dtype=np.int32)
        self.input2_inds = np.array(kwargs["label_col_inds"], dtype=np.int32)
        Y = array_tools.as_2d_array(Y)
        self.Y = np.mat(Y)
        self.trained = False
        if "regparam" in kwargs:
            self.regparam = kwargs["regparam"]
        else:
            self.regparam = 0.0
        if CALLBACK_FUNCTION in kwargs:
            self.callbackfun = kwargs[CALLBACK_FUNCTION]
        else:
            self.callbackfun = None
        if "compute_risk" in kwargs:
            self.compute_risk = kwargs["compute_risk"]
        else:
            self.compute_risk = False

        regparam = self.regparam
        if "K1" in self.resource_pool:

            K1 = self.resource_pool["K1"]
            K2 = self.resource_pool["K2"]

            if "maxiter" in self.resource_pool:
                maxiter = int(self.resource_pool["maxiter"])
            else:
                maxiter = None

            Y = np.array(self.Y).ravel(order="F")
            self.bestloss = float("inf")

            def mv(v):
                return (
                    sampled_kronecker_products.sampled_vec_trick(
                        v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds
                    )
                    + regparam * v
                )

            def mvr(v):
                raise Exception("You should not be here!")

            def cgcb(v):
                if self.compute_risk:
                    P = sampled_kronecker_products.sampled_vec_trick(
                        v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds
                    )
                    z = Y - P
                    Ka = sampled_kronecker_products.sampled_vec_trick(
                        v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds
                    )
                    loss = np.dot(z, z) + regparam * np.dot(v, Ka)
                    print("loss", 0.5 * loss)
                    if loss < self.bestloss:
                        self.A = v.copy()
                        self.bestloss = loss
                else:
                    self.A = v
                if not self.callbackfun is None:
                    self.predictor = KernelPairwisePredictor(self.A, self.input1_inds, self.input2_inds)
                    self.callbackfun.callback(self)

            G = LinearOperator((len(self.input1_inds), len(self.input1_inds)), matvec=mv, rmatvec=mvr, dtype=np.float64)
            self.A = minres(G, self.Y, maxiter=maxiter, callback=cgcb, tol=1e-20)[0]
            self.predictor = KernelPairwisePredictor(self.A, self.input1_inds, self.input2_inds)
        else:
            X1 = self.resource_pool["X1"]
            X2 = self.resource_pool["X2"]
            self.X1, self.X2 = X1, X2

            if "maxiter" in self.resource_pool:
                maxiter = int(self.resource_pool["maxiter"])
            else:
                maxiter = None

            x1tsize, x1fsize = X1.shape  # m, d
            x2tsize, x2fsize = X2.shape  # q, r

            kronfcount = x1fsize * x2fsize

            Y = np.array(self.Y).ravel(order="F")
            self.bestloss = float("inf")

            def mv(v):
                v_after = sampled_kronecker_products.sampled_vec_trick(v, X2, X1, self.input2_inds, self.input1_inds)
                v_after = (
                    sampled_kronecker_products.sampled_vec_trick(
                        v_after, X2.T, X1.T, None, None, self.input2_inds, self.input1_inds
                    )
                    + regparam * v
                )
                return v_after

            def mvr(v):
                raise Exception("You should not be here!")
                return None

            def cgcb(v):
                if self.compute_risk:
                    P = sampled_kronecker_products.sampled_vec_trick(v, X2, X1, self.input2_inds, self.input1_inds)
                    z = Y - P
                    loss = np.dot(z, z) + regparam * np.dot(v, v)
                    if loss < self.bestloss:
                        self.W = v.copy().reshape((x1fsize, x2fsize), order="F")
                        self.bestloss = loss
                else:
                    self.W = v.reshape((x1fsize, x2fsize), order="F")
                if not self.callbackfun is None:
                    self.predictor = LinearPairwisePredictor(self.W)
                    self.callbackfun.callback(self)

            G = LinearOperator((kronfcount, kronfcount), matvec=mv, rmatvec=mvr, dtype=np.float64)

            v_init = np.array(self.Y).reshape(self.Y.shape[0])
            v_init = sampled_kronecker_products.sampled_vec_trick(
                v_init, X2.T, X1.T, None, None, self.input2_inds, self.input1_inds
            )
            v_init = np.array(v_init).reshape(kronfcount)
            if "warm_start" in self.resource_pool:
                x0 = np.array(self.resource_pool["warm_start"]).reshape(kronfcount, order="F")
            else:
                x0 = None
            minres(G, v_init, x0=x0, maxiter=maxiter, callback=cgcb, tol=1e-20)[0].reshape(
                (x1fsize, x2fsize), order="F"
            )
            self.predictor = LinearPairwisePredictor(self.W)
            if not self.callbackfun is None:
                self.callbackfun.finished(self)
Example #42
from pdas.prob import randQP
from pdas.convert import numpy_to_cvxopt_matrix, cvxopt_to_numpy_matrix
import ctypes
import inspect

class LS(object):
    'Class holder for linear equation object.'
    def __init__(self):
        qp = randQP(10)
        self.pdas = PDAS(qp)
        self.Lhs, self.rhs, self.x0 = self.pdas._get_lineq()
        self.it = 0

    def __call__(self,x):
        # Access value of itn in minres
        frame = inspect.currentframe().f_back
        self.it = frame.f_locals['itn']
        # Change value of
        # ctype.pythonapi.Pycell_Set(id(inner.func_closure[0]), id(x))
        print('solution updated to: ', self.it)
        

if __name__=='__main__':
    a = LS()
    Lhs = cvxopt_to_numpy_matrix(a.Lhs)
    rhs = cvxopt_to_numpy_matrix(matrix(a.rhs))
    x0  = cvxopt_to_numpy_matrix(matrix(a.x0))
    minres(Lhs,rhs,x0,callback = a)


Example #43
def solve_factorized(KKTval, z, LU, A0, Ak, Ak0, G):
    r"""Compute the Newton increment for a DKKT system with prefactored
    LHS.        
    
    """
    
    """Dimensions of subproblems"""
    n = A0.shape[1] # Primal point
    p_E0 = A0.shape[0] # Equality constraints alpha=0
    p_E = Ak.shape[0] # Equality constraints alpha>0
    m = G.shape[0] # Inequality constraints

    """Total number of subproblems, alpha=0...K-1"""
    K = (z.shape[0] - (n+p_E0+m+m))//(n+p_E+m+m) + 1

    """Total dimensions of subvectors"""
    N = K*n # Primal
    P = p_E0 + (K-1)*p_E # Multipliers equality
    M = K*m # Multipliers/slacks inequality

    """Multiplier for inequality constraints"""
    l = z[N+P:N+P+M]

    """Slacks for inequality constraints"""
    s = z[N+P+M:]

    """Dual residuum"""    
    rd = KKTval[0:N]
    
    """Primal residuum (equalities)"""
    rp1 = KKTval[N:N+P]

    """Primal residuum (inequalities)"""
    rp2 = KKTval[N+P:N+P+M]

    """Complementarity"""
    rc = KKTval[N+P+M:]

    """Unpack decomposition"""
    S, LU_W = LU

    """Set up extended block containing coupling to augmented system
    at alpha=0"""
    B = bmat([[None, csr_matrix((n, p_E0))], [Ak0, None]], format='csr')
    
    """Diagonal of Sigma matrix"""
    sig = l/s

    """Compute RHS of augmented system for all alpha and assemble RHS
    of condensed system, store intermediate results W_k^{-1}b_k, k>0, for
    later use"""
    Winvb = []

    for k in range(K):
        """Extract subvectors"""
        sk = s[k*m:(k+1)*m]
        sigk = sig[k*m:(k+1)*m]
        rdk = rd[k*n:(k+1)*n]
        rp2k = rp2[k*m:(k+1)*m]
        rck = rc[k*m:(k+1)*m]
        if k==0:
            rp1k = rp1[0:p_E0]
        else:
            rp1k = rp1[p_E0+(k-1)*p_E:p_E0+k*p_E]
        """RHS of augmented system"""
        b1 = rdk + mydot(G.T, sigk*rp2k) - mydot(G.T, rck/sk)
        b2 = rp1k
        b = np.hstack((b1, b2))
        if k==0:
            """RHS of condensed system"""
            rhs0 = b
        else:
            """Compute W_k^{-1}b_k"""
            Winvbk = mysolve(LU_W[k-1], b)
            """Update RHS of condensed system"""
            rhs0 -= mydot(B.T, Winvbk)
            """Store W_k^{-1}b_k"""
            Winvb.append(Winvbk)

    """Increment subvectors for return"""
    dx = np.zeros(N)
    dnu = np.zeros(P)
    dl = np.zeros(M)
    ds = np.zeros(M)

    """Set up the symmetrization matrix"""
    T0 = diags(np.hstack((np.ones(n//2), -1.0*np.ones(n//2+p_E0))), 0)
    rhs0sym = T0.dot(rhs0)

    """Set up symmetric Schur complement as LinearOperator"""
    def matvec(x):
        return T0.dot(S.dot(x))    
    Ssym = LinearOperator(S.shape, matvec=matvec)

    """Set up Jacobi preconditioning for Ssym"""
    dSsym = np.abs(probe(Ssym, k=1))
    dPc = np.ones(Ssym.shape[0])
    ind = (dSsym > 0.0)
    dPc[ind] = 1.0/dSsym[ind]
    Pc = diags(dPc, 0)    
    mycounter = MyCounter()

    """Compute increment subvectors and assign"""
    for k in range(K):
        if k==0:
            dy0, info = minres(Ssym, -rhs0sym, tol=1e-13, M=Pc, callback=mycounter.count)
            # print mycounter.N
            dy = dy0
            dnu[0:p_E0] = dy0[n:]
        else:
            dy = -Winvb[k-1] - mysolve(LU_W[k-1], mydot(B, dy0))
            dnu[p_E0+(k-1)*p_E:p_E0+k*p_E] = dy[n:]
        dx[k*n:(k+1)*n] = dy[0:n]
        ds[k*m:(k+1)*m] = -rp2[k*m:(k+1)*m] - mydot(G, dx[k*n:(k+1)*n])
        dl[k*m:(k+1)*m] = -sig[k*m:(k+1)*m]*ds[k*m:(k+1)*m] - rc[k*m:(k+1)*m]/s[k*m:(k+1)*m]

    return np.hstack((dx, dnu, dl, ds))
Example #44
def solve_full(z, Fval, DPhival, G, A):    
    M, N=G.shape
    P, N=A.shape

    """Total number of inequality constraints"""
    m=M    

    """Primal variable"""
    x=z[0:N]

    """Multiplier for equality constraints"""
    nu=z[N:N+P]

    """Multiplier for inequality constraints"""
    l=z[N+P:N+P+M]

    """Slacks"""
    s=z[N+P+M:]

    """Dual infeasibility"""
    rd = Fval[0:N]
    
    """Primal infeasibility"""
    rp1 = Fval[N:N+P]
    rp2 = Fval[N+P:N+P+M]

    """Centrality"""
    rc = Fval[N+P+M:]

    """Sigma matrix"""
    SIG = np.diag(l/s)

    """Condensed system"""
    if issparse(DPhival):
        if not issparse(A):
            A = csr_matrix(A)        
        H = DPhival + mydot(G.T, mydot(SIG, G))
        J = bmat([[H, A.T], [A, None]])
    else:
        if issparse(A):
            A = A.toarray()
        J = np.zeros((N+P, N+P))
        J[0:N, 0:N] = DPhival + mydot(G.T, mydot(SIG, G))            
        J[0:N, N:] = A.T
        J[N:, 0:N] = A

    b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc/s)
    b2 = -rp1
    b = np.hstack((b1, b2))

    """Prepare iterative solve via MINRES"""
    sign = np.zeros(N+P)
    sign[0:N//2] = 1.0
    sign[N//2:] = -1.0
    S = diags(sign, 0)
    J_new = mydot(S, csr_matrix(J))
    b_new = mydot(S, b)

    dJ_new = np.abs(J_new.diagonal())
    dPc = np.ones(J_new.shape[0])
    ind = (dJ_new > 0.0)
    dPc[ind] = 1.0/dJ_new[ind]
    Pc = diags(dPc, 0)    
    dxnu, info = minres(J_new, b_new, tol=1e-8, M=Pc)
    
    # dxnu = solve(J, b)
    dx = dxnu[0:N]
    dnu = dxnu[N:]

    """Obtain search directions for l and s"""
    ds = -rp2 - mydot(G, dx)
    dl = -mydot(SIG, ds) - rc/s

    dz = np.hstack((dx, dnu, dl, ds))
    return dz 
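
# Added sketch (assumes H symmetric and A of full row rank; names are illustrative
# only): a condensed KKT system of the same [[H, A^T], [A, 0]] shape can also be
# solved matrix-free by exposing only the matrix-vector product through a
# LinearOperator, so the block matrix J never has to be assembled.
import numpy as np
from scipy.sparse.linalg import LinearOperator, minres

def kkt_operator(H, A):
    N, P = H.shape[0], A.shape[0]
    def matvec(v):
        v1, v2 = v[:N], v[N:]
        return np.hstack((H.dot(v1) + A.T.dot(v2), A.dot(v1)))
    return LinearOperator((N + P, N + P), matvec=matvec)

rng = np.random.default_rng(1)
N, P = 40, 8
H = np.eye(N)
A = rng.standard_normal((P, N))
b = rng.standard_normal(N + P)
dxnu, info = minres(kkt_operator(H, A), b)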
Beispiel #45
0
mua = np.ones ((1,nlen)) * 0.025
mus = np.ones ((1,nlen)) * 2.0
ref = np.ones ((1,nlen)) * 1.4
freq = 100

# Set up the linear system
smat = mesh.Sysmat (mua, mus, ref, freq)
qvec = mesh.Qvec ()
mvec = mesh.Mvec ()

# Solve the linear system
nq = qvec.shape[1]
phi = np.empty(qvec.shape,dtype='complex128')
for q in range(nq):
    qq = qvec[:,q].todense()
    res = linalg.minres(smat,qq,tol=1e-12)
    phi[:,q] = res[0]

# Project to boundary
y = mvec.transpose() * phi
logy = np.log(y)

# Display as sinogram
plt.figure(1)
im = plt.imshow(logy.real,interpolation='none')
plt.title('log amplitude')
plt.xlabel('detector index')
plt.ylabel('source index')
plt.colorbar()
plt.draw()
#plt.show()
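
# Added sketch (a generic stand-in, not the Toast mesh objects above): the same
# source-by-source solve pattern with MINRES on a plain SciPy sparse symmetric
# system and several right-hand-side columns.
import numpy as np
from scipy.sparse import identity, random as sprandom
from scipy.sparse.linalg import minres

rng = np.random.default_rng(0)
n, nq = 200, 4
A = sprandom(n, n, density=0.02, random_state=0, format='csr')
A = A + A.T + 10.0 * identity(n, format='csr')    # symmetric, diagonally dominant
Q = rng.standard_normal((n, nq))                  # one RHS column per "source"

phi = np.empty((n, nq))
for q in range(nq):
    phi[:, q], info = minres(A, Q[:, q])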
Beispiel #46
0
 def solveDiagBlock(self, i, rhs, tol=1e-4, x0=None):
     Mi = self.diagBlockOperator(i)
     # minres returns (solution, info); info == 0 indicates convergence to tol
     ans, info = splinalg.minres(Mi, rhs, tol=tol, x0=x0)
     return ans
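
# Added sketch (standalone, not part of the class above): when the same diagonal
# block is solved repeatedly, the previous solution can seed MINRES through x0,
# which is what the x0 argument of solveDiagBlock enables.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import minres

n = 500
Mi = diags([2.0*np.ones(n), -np.ones(n-1), -np.ones(n-1)], [0, -1, 1], format='csr')
rhs = np.ones(n)

x, info = minres(Mi, rhs)                      # cold start
rhs_shifted = rhs + 1e-3*np.random.default_rng(0).standard_normal(n)
x2, info2 = minres(Mi, rhs_shifted, x0=x)      # warm start from the previous solution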
Beispiel #47
0
 def _action(self, act=1):
     if QSMODE == MODE_MPMATH:
         args = {}
         if act==0:
             self.typeStr = "mpmath_qr_solve_dps"+getArgDesc(mpmath.qr_solve, args)+" DPS"+str(DPS)
             self.matrixType = 1
             self.indexType = 1
         else:
             self.coeffVec = mpmath.qr_solve(self.sysMat, self.resVec, **args)
             self.printCalStr()
     else:
         if PYTYPE_COEFF_SOLVE_METHOD == "numpy_solve":
             args = {}
             if act==0:
                 self.typeStr = "numpy_solve"+getArgDesc(np.linalg.solve, args)
                 self.matrixType = 0
                 self.indexType = 0
             else:
                 self.coeffVec = np.linalg.solve(self.sysMat, self.resVec, **args)
                 self.printCalStr()
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_lstsq":
             args = {}
             if act==0:
                 self.typeStr = "numpy_lstsq"+getArgDesc(np.linalg.lstsq, args)
                 self.matrixType = 0
                 self.indexType = 0
             else:
                 self.coeffVec = np.linalg.lstsq(self.sysMat, self.resVec, **args)[0]
                 self.printCalStr()
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_sparse_bicg": 
             args = {}
             if act==0:
                 self.typeStr = "numpy_sparse_bicg"+getArgDesc(sp_sparse_linalg.bicg, args)
                 self.matrixType = 0
                 self.indexType = 1
             else:
                 self.coeffVec = self._sparseRet(sp_sparse_linalg.bicg(self.sysMat, self.resVec, **args))#, tol=1e-05, maxiter=10*len(self.resVec)
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_sparse_bicgstab":
             args = {}
             if act==0:
                 self.typeStr = "numpy_sparse_bicgstab"+getArgDesc(sp_sparse_linalg.bicgstab, args)
                 self.matrixType = 0
                 self.indexType = 1
             else:
                 self.coeffVec = self._sparseRet(sp_sparse_linalg.bicgstab(self.sysMat, self.resVec, **args))#, tol=1e-05, maxiter=10*len(self.resVec)
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_sparse_lgmres":
             args = {}
             if act==0:
                 self.typeStr = "numpy_sparse_lgmres"+getArgDesc(sp_sparse_linalg.lgmres, args)
                 self.matrixType = 0
                 self.indexType = 1
             else:
                 self.coeffVec = self._sparseRet(sp_sparse_linalg.lgmres(self.sysMat, self.resVec, **args))#, tol=1e-05, maxiter=1000
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_sparse_minres":
             args = {}
             if act==0:
                 self.typeStr = "numpy_sparse_minres"+getArgDesc(sp_sparse_linalg.minres, args)
                 self.matrixType = 0
                 self.indexType = 1
             else:
                 self.coeffVec = self._sparseRet(sp_sparse_linalg.minres(self.sysMat, self.resVec, **args))#, tol=1e-05, maxiter=5*self.sysMat.shape[0]
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_sparse_qmr":
             args = {}
             if act==0:
                 self.typeStr = "numpy_sparse_qmr"+getArgDesc(sp_sparse_linalg.qmr, args)
                 self.matrixType = 0
                 self.indexType = 1
             else:
                 self.coeffVec = self._sparseRet(sp_sparse_linalg.qmr(self.sysMat, self.resVec, **args))#, tol=1e-05, maxiter=10*len(self.resVec)
         elif PYTYPE_COEFF_SOLVE_METHOD == "numpy_qr":
             args_qr = {}
             args_s = {}
             if act==0:
                 self.typeStr = "numpy_qr"+getArgDesc(np.linalg.qr, args_qr)+",numpy_solve"+getArgDesc(np.linalg.solve, args_s)
                 self.matrixType = 0
                 self.indexType = 0
             else:
                 Q,R = np.linalg.qr(self.sysMat, **args_qr)
                 y = np.dot(Q.T,self.resVec)
                 self.coeffVec = np.linalg.solve(R,y, **args_s) 
                 self.printCalStr()
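
# Added sketch (sp_sparse_linalg is assumed to be an alias for scipy.sparse.linalg,
# as used in the dispatch above): running several of the listed iterative solvers
# on one small symmetric system and comparing their residuals.
import numpy as np
from scipy.sparse import diags
import scipy.sparse.linalg as sp_sparse_linalg

n = 400
sysMat = diags([2.0*np.ones(n), -np.ones(n-1), -np.ones(n-1)], [0, -1, 1], format='csr')
resVec = np.ones(n)

for solver in (sp_sparse_linalg.bicg, sp_sparse_linalg.bicgstab,
               sp_sparse_linalg.lgmres, sp_sparse_linalg.minres,
               sp_sparse_linalg.qmr):
    coeffVec, info = solver(sysMat, resVec)
    print(solver.__name__, info, np.linalg.norm(sysMat.dot(coeffVec) - resVec))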
Beispiel #48
0
def main(write_output=True):
    from hedge.data import GivenFunction, ConstantGivenFunction

    from hedge.backends import guess_run_context
    rcon = guess_run_context()

    dim = 2

    def boundary_tagger(fvi, el, fn, points):
        from math import atan2, pi
        normal = el.face_normals[fn]
        if -90/180*pi < atan2(normal[1], normal[0]) < 90/180*pi:
            return ["neumann"]
        else:
            return ["dirichlet"]

    def dirichlet_boundary_tagger(fvi, el, fn, points):
            return ["dirichlet"]

    if dim == 2:
        if rcon.is_head_rank:
            from hedge.mesh.generator import make_disk_mesh
            mesh = make_disk_mesh(r=0.5, 
                    boundary_tagger=dirichlet_boundary_tagger,
                    max_area=1e-3)
    elif dim == 3:
        if rcon.is_head_rank:
            from hedge.mesh.generator import make_ball_mesh
            mesh = make_ball_mesh(max_volume=0.0001,
                    boundary_tagger=lambda fvi, el, fn, points:
                    ["dirichlet"])
    else:
        raise RuntimeError, "bad number of dimensions"

    if rcon.is_head_rank:
        print "%d elements" % len(mesh.elements)
        mesh_data = rcon.distribute_mesh(mesh)
    else:
        mesh_data = rcon.receive_mesh()

    discr = rcon.make_discretization(mesh_data, order=5, 
            debug=[])

    def dirichlet_bc(x, el):
        from math import sin
        return sin(10*x[0])

    def rhs_c(x, el):
        if la.norm(x) < 0.1:
            return 1000
        else:
            return 0

    def my_diff_tensor():
        result = numpy.eye(dim)
        result[0,0] = 0.1
        return result

    try:
        from hedge.models.poisson import (
                PoissonOperator,
                HelmholtzOperator)
        from hedge.second_order import \
                IPDGSecondDerivative, LDGSecondDerivative, \
                StabilizedCentralSecondDerivative

        k = 1

        from hedge.mesh import TAG_NONE, TAG_ALL
        op = HelmholtzOperator(k, discr.dimensions, 
                #diffusion_tensor=my_diff_tensor(),

                #dirichlet_tag="dirichlet",
                #neumann_tag="neumann", 

                dirichlet_tag=TAG_ALL,
                neumann_tag=TAG_NONE, 

                #dirichlet_tag=TAG_ALL,
                #neumann_tag=TAG_NONE, 

                #dirichlet_bc=GivenFunction(dirichlet_bc),
                dirichlet_bc=ConstantGivenFunction(0),
                neumann_bc=ConstantGivenFunction(-10),

                scheme=StabilizedCentralSecondDerivative(),
                #scheme=LDGSecondDerivative(),
                #scheme=IPDGSecondDerivative(),
                )
        bound_op = op.bind(discr)

        if False:
            from hedge.iterative import parallel_cg
            u = -parallel_cg(rcon, -bound_op, 
                    bound_op.prepare_rhs(discr.interpolate_volume_function(rhs_c)), 
                    debug=20, tol=5e-4,
                    dot=discr.nodewise_dot_product,
                    x=discr.volume_zeros())
        else:
            rhs = bound_op.prepare_rhs(discr.interpolate_volume_function(rhs_c))
            def compute_resid(x):
                return bound_op(x)-rhs

            from scipy.sparse.linalg import minres, LinearOperator
            u, info = minres(
                    LinearOperator(
                        (len(discr), len(discr)),
                        matvec=bound_op, dtype=bound_op.dtype),
                    rhs,
                    callback=ResidualPrinter(compute_resid),
                    tol=1e-5)
            print
            if info != 0:
                raise RuntimeError("gmres reported error %d" % info)
            print "finished gmres"

            print la.norm(bound_op(u)-rhs)/la.norm(rhs)

        if write_output:
            from hedge.visualization import SiloVisualizer, VtkVisualizer
            vis = VtkVisualizer(discr, rcon)
            visf = vis.make_file("fld")
            vis.add_data(visf, [ ("sol", discr.convert_volume(u, kind="numpy")), ])
            visf.close()
    finally:
        discr.close()
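
# Added sketch (ResidualPrinter here is a hypothetical stand-in, not hedge's class):
# tracking MINRES convergence through a callback while the operator is available
# only as a matvec callable wrapped in a LinearOperator, as in the example above.
import numpy as np
from scipy.sparse.linalg import LinearOperator, minres

n = 300
d = np.linspace(1.0, 10.0, n)

def apply_op(x):
    return d * x                               # symmetric positive definite operator

rhs = np.ones(n)

def compute_resid(x):
    return apply_op(x) - rhs

class ResidualPrinter:
    def __init__(self, compute_resid):
        self.compute_resid = compute_resid
        self.it = 0
    def __call__(self, xk):
        self.it += 1
        if self.it % 20 == 0:
            print(self.it, np.linalg.norm(self.compute_resid(xk)))

u, info = minres(LinearOperator((n, n), matvec=apply_op, dtype=np.float64),
                 rhs, callback=ResidualPrinter(compute_resid))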