Code Example #1
File: __init__.py Project: hkaiser/amgcl
    def __init__(self,
            A,
            coarsening=pyamgcl_ext.coarsening.smoothed_aggregation,
            relaxation=pyamgcl_ext.relaxation.spai0,
            prm={}
            ):
        """
        Class constructor.

        Creates algebraic multigrid hierarchy.

        Parameters
        ----------
        A : the system matrix in scipy.sparse format
        coarsening : {ruge_stuben, aggregation, *smoothed_aggregation*, smoothed_aggr_emin}
            The coarsening type to use for construction of the multigrid
            hierarchy.
        relaxation : {damped_jacobi, gauss_seidel, chebyshev, *spai0*, ilu0}
            The relaxation scheme to use for multigrid cycles.
        prm : dictionary with amgcl parameters
        """
        Acsr = A.tocsr()

        self.P = pyamgcl_ext.make_preconditioner(
                coarsening, relaxation, prm,
                Acsr.indptr.astype(numpy.int32),
                Acsr.indices.astype(numpy.int32),
                Acsr.data.astype(numpy.float64)
                )

        LinearOperator.__init__(self, A.shape, self.P)
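A usage sketch for the constructor above: the instance initializes itself as a LinearOperator wrapping the AMG hierarchy, so it can be passed directly as a preconditioner to scipy's Krylov solvers. `AMGPreconditioner` is a hypothetical stand-in for the class (the snippet omits the class statement); the numpy/scipy calls are standard.

    import numpy
    import scipy.sparse
    from scipy.sparse.linalg import cg

    n = 1000
    # 1D Poisson matrix as a simple SPD test system
    A = scipy.sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
    b = numpy.ones(n)

    M = AMGPreconditioner(A)   # hypothetical name for the class defined above
    x, info = cg(A, b, M=M)    # use the hierarchy as a CG preconditioner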
Code Example #2
File: linop.py Project: jeremander/Gplus
 def __init__(self, F, Delta, u):
     n = F.shape[0]
     assert (F.shape[1] == n) and (u.shape == (n,))
     self.F, self.Delta, self.u = F, Delta, u
     self.u_prime = self.Delta - self.u
     self.ones = np.ones(n, dtype=float)
     LinearOperator.__init__(self, dtype=float, shape=self.F.shape)
Code Example #3
File: linop.py Project: jeremander/Gplus
 def __init__(self, block_grid):
     """Input is a 2D list of SparseLinearOperators. The resulting operator is the corresponding operator comprised of these operator blocks. The dimensions must match correctly. This assumes the number of blocks in each row and column is the same."""
     self.block_grid_shape = (len(block_grid), len(block_grid[0]))
     # validate block dimensions
     assert all(
         [len(row) == self.block_grid_shape[1] for row in block_grid]
     ), "Must be same number of blocks in each row."
     assert all(
         [
             len(set([block_grid[i][j].shape[0] for j in range(self.block_grid_shape[1])])) == 1
             for i in range(self.block_grid_shape[0])
         ]
     ), "dimension mismatch"
     assert all(
         [
             len(set([block_grid[i][j].shape[1] for i in range(self.block_grid_shape[0])])) == 1
             for j in range(self.block_grid_shape[1])
         ]
     ), "dimension mismatch"
     shape = (
         sum([block_grid[i][0].shape[0] for i in range(len(block_grid))]),
         sum([block_grid[0][j].shape[1] for j in range(len(block_grid[0]))]),
     )
     # compute transition indices between blocks
     self.row_indices = [0] + list(np.cumsum([row[0].shape[0] for row in block_grid]))
     self.column_indices = [0] + list(np.cumsum([block.shape[1] for block in block_grid[0]]))
     self.block_grid = block_grid
     LinearOperator.__init__(self, dtype=float, shape=shape)
Code Example #4
File: lobpcg.py Project: gcasey/scipy
def makeOperator( operatorInput, expectedShape ):
    """Internal. Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    Examples
    --------
    >>> A = makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )

    """
    if operatorInput is None:
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    if sys.version_info[0] >= 3:
        # special methods are looked up on the class -- so make a new one
        operator.__class__ = CallableLinearOperator
    else:
        operator.__call__ = operator.matmat

    return operator
Code Example #5
File: lobpcg.py Project: decarlin/stuartlab-scripts
def makeOperator( operatorInput, expectedShape ):
    """Internal. Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    Example
    -------

    >>> A = makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )

    """
    if operatorInput is None:
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    operator.__call__ = operator.matmat

    return operator
Code Example #6
File: linop.py Project: jeremander/AttrVN
 def __init__(self, D):
     """D is a 1D array containing the diagonal entries."""
     self.D = D
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = (len(D), len(D)))
     else:
         LinearOperator.__init__(self, dtype = float, shape = (len(D), len(D)), matvec = lambda x : type(self)._matvec(self, x))
Code Example #7
File: linop.py Project: jeremander/AttrVN
 def __init__(self, n, c):
     """n is dimension, c is a constant to be multiplied by the identity matrix."""
     self.c = c
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = (n, n))
     else:
         LinearOperator.__init__(self, dtype = float, shape = (n, n), matvec = lambda x : type(self)._matvec(self, x))
Code Example #8
File: linop.py Project: jeremander/AttrVN
 def __init__(self, u, v):
     assert (len(u) == len(v))
     self.u, self.v = u, v
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = (len(u), len(v)))
     else:
         LinearOperator.__init__(self, dtype = float, shape = (len(u), len(v)), matvec = lambda x : type(self)._matvec(self, x))
Code Example #9
File: linop.py Project: jeremander/AttrVN
 def __init__(self, A):
     assert isinstance(A, SymmetricSparseLinearOperator)
     self.A = A
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = A.shape)
     else:
         LinearOperator.__init__(self, dtype = float, shape = A.shape, matvec = lambda x : type(self)._matvec(self, x))
     self.D_ratio = self.A._matvec(np.ones(self.A.shape[1], dtype = float)) / self.shape[0]
Code Example #10
    def __init__(self, shape, matvec):

        self._shape = shape
        self._action = matvec

        LinearOperator.__init__(self,
                                shape=self._shape,
                                matvec=self.parallelDot,
                                dtype=np.complex128)
Code Example #11
 def __init__(self, K, quad, **kwargs):
     assert len(K.shape) == 1
     shape = (K.shape[0]-4, K.shape[0]-4)
     N = shape[0]
     LinearOperator.__init__(self, shape, None, **kwargs)
     ck = ones(K.shape)
     if quad == "GC": ck[N-1] = 2
     self.dd = -4*pi*(K[:N]+1)/(K[:N]+3)*(K[:N]+2)**2
     self.ud = [2*pi*(K[:N-2]+1)*(K[:N-2]+2)]        
     self.ld = [2*pi*(K[2:N]-1)*(K[2:N]+2)]
Code Example #12
File: linop.py Project: jeremander/AttrVN
 def __init__(self, F, Delta, u):
     n = F.shape[0]
     assert ((F.shape[1] == n) and (u.shape == (n,)))
     self.F, self.Delta, self.u = F, Delta, u
     self.u_prime = self.Delta - self.u
     self.ones = np.ones(n, dtype = float)
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = self.F.shape)
     else:
         LinearOperator.__init__(self, dtype = float, shape = self.F.shape, matvec = lambda x : type(self)._matvec(self, x))
Code Example #13
def Operator(f, size=None):
	"""Create a stacked version of the function f, casted as a LinearOperator.
	
	size is the dimension of the operator f."""
	
	if size is None: size = f.Size
	
	operator = LinearOperator((size,size), matvec = StackFunction(f), dtype=float64)
	operator.__call__ = lambda x: operator * x
	
	return operator
Code Example #14
    def __init__(self, K, **kwargs):
        assert len(K.shape) == 1
        shape = (K.shape[0]-4, K.shape[0]-4)
        N = shape[0]
        LinearOperator.__init__(self, shape, None, **kwargs)
        self.dd = 8*pi*(K[:N]+1)**2*(K[:N]+2)*(K[:N]+4)   
        self.ud = []
        for i in range(2, N, 2):
            self.ud.append(8*pi*(K[:-(i+4)]+1)*(K[:-(i+4)]+2)*(K[:-(i+4)]*(K[:-(i+4)]+4)+3*(arange(i, N)+2)**2)/((arange(i, N)+3)))

        self.ld = None
Code Example #15
 def __init__(self, K, quad, **kwargs):
     assert len(K.shape) == 1
     shape = (K.shape[0]-4, K.shape[0]-4)
     N = shape[0]
     LinearOperator.__init__(self, shape, None, **kwargs)
     ck = ones(K.shape)
     if quad == "GC": ck[N-1] = 2
     self.dd = pi/2*(ck[:-4] + 4*(K[:N]+2)**2/(K[:N]+3)**2 + (K[:N]+1)**2/(K[:N]+3)**2)           
     self.ud = [-pi*((K[:N-2]+2)/(K[:N-2]+3) + (K[:N-2]+4)/(K[:N-2]+5)*(K[:N-2]+1)/(K[:N-2]+3)),
                pi/2*(K[:N-4]+1)/(K[:N-4]+3)]
     
     self.ld = [pi/2*(K[:N-4]+1)/(K[:N-4]+3),
                -pi*((K[:N-2]+2)/(K[:N-2]+3) + (K[:N-2]+4)/(K[:N-2]+5)*(K[:N-2]+1)/(K[:N-2]+3))]
Code Example #16
File: hmat.py Project: Nehoroshiy/hmat
    def __init__(self, mat, r=10, leaf_side=16):
        LinearOperator.__init__(self, dtype=mat.dtype, shape=mat.shape,
                                matvec=self._matvec,
                                rmatvec=self._rmatvec)
        self.mat = BlackBox(mat)
        self.r = r
        self.leaf_side = leaf_side
        self.leaf_size = leaf_side**2

        N = int(np.sqrt(self.mat.shape[0]))

        self.pattern = distance_matrix(N, format='coo')
        perm = hilbert_traverse(N)
        conjugate_sparse(self.pattern, perm)
        self.pattern = self.pattern.tocsr()
        self.mat.permutate(perm)
        self.root = hmat_node(self, tuple(zip((0, 0), self.mat.shape)))
        return
Code Example #17
def _diagonal_operator(diag):
    """Creates an operator representing a 
    multiplication with a diagonal matrix"""
    diag = diag.ravel()[:, np.newaxis]

    def diag_matvec(vec):
        if vec.ndim > 1:
            return diag * vec
        else:
            return diag.ravel() * vec

    linop = LinearOperator(shape=(len(diag), len(diag)),
                           matvec=diag_matvec,
                           rmatvec=diag_matvec,
                           dtype=np.float64)
    linop.matvec = diag_matvec
    linop.rmatvec = diag_matvec

    return linop
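A quick check of `_diagonal_operator`, runnable alongside the definition above (with `numpy as np` and `LinearOperator` imported):

    import numpy as np

    d = np.array([1.0, 2.0, 3.0])
    D = _diagonal_operator(d)

    v = np.array([10.0, 10.0, 10.0])
    print(D.matvec(v))    # [10. 20. 30.], same as np.diag(d) @ v
    print(D.rmatvec(v))   # identical, since a real diagonal matrix is symmetric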
Code Example #18
def get_grad_linop(X, Y, invcovB, invcovN, alpha):
    """
    Linear operator implementing the gradient of the functional
    \frac{1}{2} \|Y - XB\|^2_{\Sigma_n} + \frac{1}{2} \|B\|^2_{\Sigma_s}

    which reads
    grad_B = X^T(XB - Y)\Sigma_n^{-1} + \lambda B\Sigma_s^{-1}
    """

    N, P = X.shape
    T = invcovB.shape[0]

    if P <= N:
        XTX = aslinearoperator(X.T.dot(X))
        XTYinvcovN = invcovN.rmatvec(Y.T.dot(X)).T

        def matvec(vecB):
            XTXB = XTX.matvec(vecB.reshape(T, P).T)
            XTXB_invcovN = invcovN.rmatvec(XTXB.T).T
            B_incovB = invcovB.rmatvec(vecB.reshape(T, P)).T
            result = XTXB_invcovN - XTYinvcovN + alpha * B_incovB
            return result.T.ravel()
    else:
        # raise(Exception)
        def matvec(vecB):
            XB_minus_Y_invcovN = invcovN.rmatvec(
                (X.dot(vecB.reshape(T, P).T) - Y).T).T
            XT_XB_minus_Y_invcovN = X.T.dot(XB_minus_Y_invcovN)
            B_incovB = invcovB.rmatvec(vecB.reshape(T, P)).T
            result = XT_XB_minus_Y_invcovN + alpha * B_incovB
            return result.T.ravel()

    linop = LinearOperator(shape=tuple([X.shape[1] * Y.shape[1]] * 2),
                           matvec=matvec,
                           rmatvec=matvec,
                           dtype=np.dtype('float64'))
    linop.matvec = matvec
    linop.rmatvec = matvec
    linop.dtype = np.dtype('float64')

    return linop
Code Example #19
def _woodbury_inverse(Ainv, Cinv, U, V):
    """Uses Woodbury Matrix Identity to invert the Matrix
    (A + UCV) ^ (-1)
    See http://en.wikipedia.org/wiki/Woodbury_matrix_identity"""

    def matvec(x):
        # this is probably wildly suboptimal, but it works
        Ainv_x = Ainv.matvec(x)
        Cinv_mat = Cinv.matvec(np.eye(Cinv.shape[0]))
        VAinvU = V.dot(Ainv.matvec(U))
        inv_Cinv_plus_VAinvU = np.linalg.inv(Cinv_mat + VAinvU)
        VAinv_x = V.dot(Ainv_x)
        inv_blabla_VAinv_x = inv_Cinv_plus_VAinvU.dot(VAinv_x)
        whole_big_block = Ainv.matvec(
            U.dot(inv_blabla_VAinv_x))
        return Ainv_x - whole_big_block

    shape = Ainv.shape
    linop = LinearOperator(shape=shape, matvec=matvec)
    linop.matvec = matvec
    linop.rmatvec = matvec
    return linop
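A small numerical check of `_woodbury_inverse` against a dense solve. Note the helper calls `matvec` on matrices (`np.eye`, `U`), which plain `aslinearoperator` wrappers only accept as column vectors, so a rank-1 update is used here; all names below are local to this sketch.

    import numpy as np
    from scipy.sparse.linalg import aslinearoperator

    rng = np.random.default_rng(0)
    n = 6
    A = np.diag(rng.uniform(1.0, 2.0, n))   # easy-to-invert A
    C = np.eye(1)                           # rank-1 update keeps matvec calls vector-shaped
    U = rng.standard_normal((n, 1))
    V = rng.standard_normal((1, n))

    inv_op = _woodbury_inverse(aslinearoperator(np.linalg.inv(A)),
                               aslinearoperator(np.linalg.inv(C)), U, V)
    x = rng.standard_normal(n)
    assert np.allclose(inv_op.matvec(x), np.linalg.solve(A + U @ C @ V, x))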
Code Example #20
File: __init__.py Project: intbots/amgcl
    def __init__(self, A, pmask, prm={}):
        """
        Class constructor.

        Parameters
        ----------
        A : the system matrix in scipy.sparse format
        pmask : array-like mask marking the pressure unknowns
        prm : dictionary with amgcl parameters
        """
        Acsr = A.tocsr()

        self.P = pyamgcl_ext.make_simple(
                prm,
                Acsr.indptr.astype(numpy.int32),
                Acsr.indices.astype(numpy.int32),
                Acsr.data.astype(numpy.float64),
                pmask.astype(numpy.int32)
                )

        if [int(v) for v in scipy.__version__.split('.')] < [0, 16, 0]:
            LinearOperator.__init__(self, A.shape, self.P)
        else:
            LinearOperator.__init__(self, dtype=numpy.float64, shape=A.shape)
Code Example #21
File: helpers_test.py Project: whshangl/finmag
def test_as_petsc_matrix():
    N = 20

    # Create a tridiagonal matrix with random entries
    A = np.zeros((N, N))
    a = np.random.random_sample(N - 1)
    b = np.random.random_sample(N)
    c = np.random.random_sample(N - 1)
    A += np.diag(a, k=-1)
    A += np.diag(b, k=0)
    A += np.diag(c, k=+1)
    print "[DDD] A:"
    print A

    # Convert to PETSC matrix
    A_petsc = as_petsc_matrix(A)

    # Check that the sparsity pattern is as expected
    indptr, _, data = A_petsc.getValuesCSR()
    indptr_expected = [0] + list(range(2, 3 * (N - 1), 3)) + [3 * N - 2]
    assert (all(indptr == indptr_expected))

    # Convert back to numpy array
    B = as_dense_array(A_petsc)
    assert (np.allclose(A, B))

    # A numpy array of dtype complex can only be converted to a PETSc
    # matrix if the imaginary part is zero.
    C = (1.0 + 0.j) * np.random.random_sample((N, N))
    assert (C.dtype == complex)  # check that we created a complex array
    C2 = as_dense_array(as_petsc_matrix(C))
    assert (np.allclose(C, C2))

    D = 1j * np.random.random_sample((N, N))
    assert (D.dtype == complex)  # check that we created a complex array
    with pytest.raises(TypeError):
        as_petsc_matrix(D)

    # Check that we can also convert a LinearOperator to a PETScMatrix
    A_LinearOperator = LinearOperator(shape=(N, N),
                                      matvec=lambda v: np.dot(A, v))
    A_petsc2 = as_petsc_matrix(A_LinearOperator)
    A_roundtrip = as_dense_array(A_petsc2)
    assert (np.allclose(A, A_roundtrip))
Code Example #22
def _compute_eigenvalues(operator_type, eigenvalue_type, num_eigenvalues, v0,
                         psi, modeleval, mu, g):
    if operator_type == "k":
        A = modeleval._get_keo(mu)
    elif operator_type == "p":
        A = modeleval.get_preconditioner(psi, mu, g)
    elif operator_type == "j":
        jac = modeleval.get_jacobian(psi, mu, g)
        # Consider bordering.
        # A = _complex_with_bordering2real( jac )
        A = _complex2real(jac)
    elif operator_type == "pj":
        # build preconditioned operator
        prec_inv = modeleval.get_preconditioner_inverse(psi, mu, g)
        jacobian = modeleval.get_jacobian(psi, mu, g)

        def _apply_prec_jacobian(phi):
            return prec_inv * (jacobian * phi)

        num_unknowns = len(modeleval.mesh.node_coords)
        A = LinearOperator((num_unknowns, num_unknowns),
                           _apply_prec_jacobian,
                           dtype=complex)
    else:
        raise ValueError("Unknown operator '%s'." % operator_type)

    print("Compute the %d %s eigenvalues of %s..." %
          (num_eigenvalues, eigenvalue_type, operator_type))
    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    eigenvals, X = eigs(
        A,
        k=num_eigenvalues,
        sigma=None,
        which=eigenvalue_type,
        v0=v0,
        return_eigenvectors=True,
    )
    end_time = time.perf_counter()
    print("done. (", end_time - start_time, "s).")

    # make sure they are real (as they are supposed to be)
    assert all(abs(eigenvals.imag) < 1.0e-10), eigenvals

    return eigenvals.real, X
Code Example #23
def create_flattened_Linear_Operators(slices, LO_type, k=2):
  n = slices[0].shape[0]
  m = slices[0].shape[1]
  T = len(slices)

  # apply the linear operators to each slice
  LO_slices = []
  for slice in slices:
    if LO_type == "mean_center":
      LO_slices.append(mean_center(slice))
    elif LO_type == "power":
      LO_slices.append(matrix_power(slice,k))
    elif LO_type == "already_applied":
      LO_slices = slices
    else:
      raise ValueError("invalid Linear Operator type")

  def mat_vec(v):
    if v.shape == (m*T,):
      output_vec = np.empty(n)
    elif v.shape == (m*T,1):
      output_vec = np.empty((n,1))
    else:
      raise ValueError("non-vector passed into mat_vec, object of shape {"
                       "}".format(v.shape))
    output_vec = LO_slices[0] * v[0:m]
    for t in range(1,T):
      output_vec = output_vec + LO_slices[t] * v[t*m:(t+1)*m]

    return output_vec

  def rmat_vec(v):
    if v.shape == (n,):
      output_vec = np.empty(m*T)
    elif v.shape == (n, 1):
      output_vec = np.empty((m*T, 1))
    else:
      raise ValueError("non-vector passed into mat_vec, object of shape {"
                       "}".format(v.shape))
    for t in range(T):
      output_vec[t*m:(t+1)*m] = LO_slices[t].rmatvec(v)

    return output_vec
  return LinearOperator((n,m*T), mat_vec, rmatvec= rmat_vec)
Code Example #24
def _rescale(hamiltonian, eps, v0, bounds):
    """Rescale a Hamiltonian and return a LinearOperator

    Parameters
    ----------
    hamiltonian : 2D array
        Hamiltonian of the system.
    eps : scalar
        Ensures that the bounds are strict.
    v0 : random vector, or None
        Used as the initial residual vector for the algorithm that
        finds the lowest and highest eigenvalues.
    bounds : tuple, or None
        Boundaries of the spectrum. If not provided the maximum and
        minimum eigenvalues are calculated.
    """
    # Relative tolerance to which to calculate eigenvalues.  Because after
    # rescaling we will add eps / 2 to the spectral bounds, we don't need
    # to know the bounds more accurately than eps / 2.
    tol = eps / 2

    if bounds:
        lmin, lmax = bounds
    else:
        lmax = float(eigsh(hamiltonian, k=1, which='LA',
                           return_eigenvectors=False, tol=tol, v0=v0))
        lmin = float(eigsh(hamiltonian, k=1, which='SA',
                           return_eigenvectors=False, tol=tol, v0=v0))

    a = np.abs(lmax-lmin) / (2. - eps)
    b = (lmax+lmin) / 2.

    if lmax - lmin <= abs(lmax + lmin) * tol / 2:
        raise ValueError(
            'The Hamiltonian has a single eigenvalue, it is not possible to '
            'obtain a spectral density.')

    def rescaled(v):
        return (hamiltonian.dot(v) - b * v) / a

    rescaled_ham = LinearOperator(shape=hamiltonian.shape, matvec=rescaled)

    return rescaled_ham, (a, b)
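A runnable sketch of `_rescale` on a small tridiagonal Hamiltonian (imports match the function's assumptions: `eigsh` and `LinearOperator` from scipy.sparse.linalg, `numpy as np`). After rescaling, the extremal eigenvalues must lie inside (-1, 1):

    import numpy as np
    from scipy.sparse import diags
    from scipy.sparse.linalg import eigsh

    H = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(50, 50)).tocsc()
    ham_op, (a, b) = _rescale(H, eps=0.05, v0=None, bounds=None)

    # one eigenvalue from each end of the rescaled spectrum
    w = eigsh(ham_op, k=2, which='BE', return_eigenvectors=False)
    assert np.all(np.abs(w) < 1.0)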
Code Example #25
 def in_sample_kfoldcv(self, folds, maxiter = None):
     """
     Computes the in-sample k-fold cross-validation predictions. By in-sample we denote the
     setting, where we leave a set of arbitrary entries of Y out at a time.
     
     Returns
     -------
     F : array, shape = [n_samples1*n_samples2]
         Training set labels. Label for (X1[i], X2[j]) maps to
         F[i + j*n_samples1] (column order).
         
     """
     if not self.kernelmode:
         X1, X2 = self.X1, self.X2
         P = X1 @ self.W @ X2.T
         R1 = la.inv(X1.T @ X1 + self.regparam1 * np.eye(X1.shape[1])) @ X1.T
         R2 = la.inv(X2.T @ X2 + self.regparam2 * np.eye(X2.shape[1])) @ X2.T
     else:
         P = self.K1 @ self.A @ self.K2.T
         H1 = self.K1 @ la.inv(self.K1 + self.regparam1 * np.eye(self.K1.shape[0]))
         H2 = self.K2 @ la.inv(self.K2 + self.regparam2 * np.eye(self.K2.shape[0]))
     
     allhopreds = np.zeros(self.Y.shape)
     for fold in folds:
         row_inds_K1, row_inds_K2 = fold
         if not self.kernelmode:
             u_inds_1, i_inds_1 = np.unique(row_inds_K1, return_inverse = True)
             r_inds_1 = np.arange(len(u_inds_1))[i_inds_1]
             H1_ho = X1[u_inds_1] @ R1[:, u_inds_1]
             u_inds_2, i_inds_2 = np.unique(row_inds_K2, return_inverse = True)
             r_inds_2 = np.arange(len(u_inds_2))[i_inds_2]
             H2_ho = X2[u_inds_2] @ R2[:, u_inds_2]
             pko = PairwiseKernelOperator(H1_ho, H2_ho, r_inds_1, r_inds_2, r_inds_1, r_inds_2)
         else:
             pko = PairwiseKernelOperator(H1, H2, row_inds_K1, row_inds_K2, row_inds_K1, row_inds_K2)
         temp = P[row_inds_K1, row_inds_K2]
         temp -= np.array(pko.matvec(np.array(self.Y)[row_inds_K1, row_inds_K2].squeeze())).squeeze()
         def mv(v):
             return v - pko.matvec(v)
         G = LinearOperator((len(row_inds_K1), len(row_inds_K1)), matvec = mv, dtype = np.float64)
         hopred = minres(G, temp.T, tol=1e-20, maxiter = maxiter)[0]
         allhopreds[row_inds_K1, row_inds_K2] = hopred
     return allhopreds.ravel(order = 'F')
Code Example #26
def lanczos_iteration_scipy(Fvp_fn, params, k=20):
    """
    Fvp_fn must have parameters closure
    That is
    """
    theta = parameters_to_vector(params)
    n_params = len(theta)

    def mv(v):
        v = torch.from_numpy(v).float()
        hvp_p = Fvp_fn(v)
        return hvp_p.data.numpy()

    H = LinearOperator((n_params, n_params), matvec=mv)
    try:
        w = eigsh(H, k=k, which='LM', return_eigenvectors=False)
    except ArpackNoConvergence as arpack:
        w = arpack.eigenvalues
    return w
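A toy check of `lanczos_iteration_scipy`, assuming `torch`, `parameters_to_vector` (from torch.nn.utils) and scipy's `eigsh`/`LinearOperator`/`ArpackNoConvergence` are imported as the snippet implies. With a diagonal stand-in for the Fisher-vector product, the Ritz values are the largest diagonal entries:

    import torch

    params = [torch.zeros(30), torch.zeros(34)]   # 64 parameters in total
    diag = torch.arange(1.0, 65.0)                # operator with eigenvalues 1..64
    w = lanczos_iteration_scipy(lambda v: diag * v, params, k=5)
    print(sorted(w))                              # approx [60., 61., 62., 63., 64.]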
Code Example #27
def EfficientDecomposableGaussianORFF(X,
                                      A,
                                      gamma=1.,
                                      D=100,
                                      eps=1e-5,
                                      random_state=0):
    r"""Return the Efficient ORFF map associated with the data X.

    Parameters
    ----------
    X : {array-like}, shape = [n_samples, n_features]
        Samples.
    A : {array-like}, shape = [n_targets, n_targets]
        Operator of the Decomposable kernel (positive semi-definite)
    gamma : {float},
        Gamma parameter of the RBF kernel.
    D : {integer},
        Number of random features.
    eps : {float},
        Cutoff threshold for the singular values of A.
    random_state : {integer},
        Seed of the generator.

    Returns
    -------
    \tilde{\Phi}(X) : Linear Operator, callable
    """
    # Decompose A=BB^T
    u, s, v = svd(A, full_matrices=False, compute_uv=True)
    B = dot(diag(sqrt(s[s > eps])), v[s > eps, :])

    # Sample a RFF from the scalar Gaussian kernel
    phi_s = RBFSampler(gamma=gamma, n_components=D, random_state=random_state)
    phiX = phi_s.fit_transform(X)

    # Create the ORFF linear operator
    cshape = (D, B.shape[0])
    rshape = (X.shape[0], B.shape[1])
    return LinearOperator(
        (phiX.shape[0] * B.shape[1], D * B.shape[0]),
        matvec=lambda b: dot(phiX, dot(b.reshape(cshape), B)),
        rmatvec=lambda r: dot(phiX.T, dot(r.reshape(rshape), B.T)),
        dtype=float)
Code Example #28
    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            matvec = lambda v: self._a * (self._A.dot(v) - self._mu * v)
            rmatvec = lambda v: _np.conj(self._a) * (self._A.H.dot(v) -
                                                     _np.conj(self._mu) * v)
            LO = LinearOperator(self._A.shape,
                                dtype=self._dtype,
                                matvec=matvec,
                                rmatvec=rmatvec)

            est = onenormest(LO**p)

            # est = onenormest((self._a * aslinearoperator(self._A))**p)
            self._d[p] = est**(1.0 / p)

        return self._d[p]
Code Example #29
def convection(u, u_bar, gradp, source, f_log, extend_u):
    '''
    Residual is Ax - b
    when x=0, residual = -b, so b = -residual(0, u, ...)
    when x is not zero, Ax = residual + b
    '''

    u_hat = zeros(u.shape)
    b = -ravel(residual(u_hat, u, u_bar, gradp, source, extend_u))
    def linear_op(u_hat):
        u_hat = u_hat.reshape(u.shape)
        res = residual(u_hat, u, u_bar, gradp, source, extend_u)
        return ravel(res) + b
    A = LinearOperator((u.size, u.size), linear_op, dtype='float64')
    u_hat, info = gmres(A, b, x0=ravel(u.copy()), tol=settings.tol, maxiter=200)
    res = residual(u_hat.reshape(u.shape), u, u_bar, gradp, source, extend_u)
    f_log.write("convection GMRES returns {0}, residual={1}\n".format(
                info, linalg.norm(ravel(res))))
    return u_hat.reshape(u.shape)
Code Example #30
def LH_linear_operator(A_L, lR):
    """
    Return, as a LinearOperator, the LHS of the equation found by 
    summing the geometric series for
    the left environment Hamiltonian.
    """
    chi = A_L.shape[1]
    I = np.eye(chi, dtype=A_L.dtype)

    def matvec(v):
        v = v.reshape((chi, chi))
        Th_v = ct.XopL(A_L, X=v)
        vR = proj(v, lR) * I
        v = v - Th_v + vR
        v = v.flatten()
        return v

    op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_L.dtype)
    return op
Code Example #31
    def forward(self, M, k):
        r"""
        :param M: square symmetric matrix :math:`N \times N`
        :param k: desired rank (must be smaller than :math:`N`)
        :type M: torch.tensor
        :type k: int
        :return: eigenvalues D, leading k eigenvectors U
        :rtype: torch.tensor, torch.tensor

        **Note:** `depends on scipy`

        Return leading k-eigenpairs of a matrix M, where M is symmetric 
        :math:`M=M^T`, by computing the symmetric decomposition :math:`M= UDU^T` 
        up to rank k. Partial eigendecomposition is done through Arnoldi method.
        """

        # input validation (M is square and symmetric) is provided by
        # the scipy.sparse.linalg.eigsh

        # get M as numpy ndarray and wrap back to torch
        # allow for mat-vec ops to be carried out on GPU
        def mv(v):
            V = torch.as_tensor(v, dtype=M.dtype, device=M.device)
            V = torch.mv(M, V)
            return V.detach().cpu().numpy()

        M_nograd = LinearOperator(M.size(), matvec=mv)

        D, U = scipy.sparse.linalg.eigsh(M_nograd, k=k)
        D = torch.as_tensor(D)
        U = torch.as_tensor(U)

        # reorder the eigenpairs by the largest magnitude of eigenvalues
        absD, p = torch.sort(torch.abs(D), descending=True)
        D = D[p]
        U = U[:, p]

        if M.is_cuda:
            U = U.to(M.device)
            D = D.to(M.device)

        self.save_for_backward(D, U)
        return D, U
Code Example #32
def AXfunc(A, At, d1, d2, p1, p2, p3):
    """
    Returns a linear operator which computes A * x for PCG.

        y = hessphi * [x1; x2],

        where hessphi = [ A'*A*2+D1, D2;
                          D2,        D1]
    """
    def matvec(vec):
        n = vec.shape[0] // 2
        x1 = vec[:n]
        x2 = vec[n:]

        return np.hstack(
            [At.dot(A.dot(x1) * 2) + d1 * x1 + d2 * x2, d2 * x1 + d1 * x2])

    N = 2 * d1.shape[0]
    return LinearOperator((N, N), matvec=matvec)
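A short consistency check of `AXfunc` against the dense block matrix from its docstring (`p1`, `p2`, `p3` are accepted but unused in this snippet, so None is passed):

    import numpy as np

    rng = np.random.default_rng(0)
    m, n = 8, 5
    A = rng.standard_normal((m, n))
    d1 = rng.uniform(1.0, 2.0, n)
    d2 = rng.uniform(0.0, 1.0, n)

    op = AXfunc(A, A.T, d1, d2, None, None, None)

    H = np.block([[2 * A.T @ A + np.diag(d1), np.diag(d2)],
                  [np.diag(d2),               np.diag(d1)]])
    v = rng.standard_normal(2 * n)
    assert np.allclose(op.matvec(v), H @ v)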
Code Example #33
    def _lanczos(self, X):
        P = self._bha._P
        W = self._bha._W
        n = X.shape[0]
        PcolsT = P.T.dot(np.ones((n, 1)))/n
        Ones = np.ones((1, n))

        # 1. Define the linear operator for JPWP^TJ
        def lin_op(v):
            x = W.dot(P.T.dot(v) - PcolsT.dot(Ones.dot(v)))
            y = -0.5*(P.dot(x) - Ones.T.dot(PcolsT.T.dot(x)))
            return y
        A = LinearOperator((n, n), matvec=lin_op)

        # 2. Truncated Eigenvalues of Y = V D V^T
        D, V = sparsela.eigsh(A, k=self._k, which='LA')
        Z = V[:, :self._k].dot(np.diag(np.sqrt(D[:self._k])))

        return Z
Code Example #34
def MGVP(nx, ny, num_levels):
    '''
  Multigrid Preconditioner. Returns a (scipy.sparse) LinearOperator that can
  be passed to Krylov solvers as a preconditioner. The matrix is not 
  explicitly needed.  All that is needed is a matrix vector product 
  In any stationary iterative method, the preconditioner-vector product
  can be obtained by setting the RHS to the vector and initial guess to 
  zero and performing one iteration. (Richardson Method)  
  '''
    def pc_fn(v):
        u = np.zeros([nx + 2, ny + 2])
        f = np.zeros([nx + 2, ny + 2])
        f[1:nx + 1, 1:ny + 1] = v.reshape([nx, ny])
        #perform one V cycle
        u, res = V_cycle(nx, ny, num_levels, u, f)
        return u[1:nx + 1, 1:ny + 1].reshape(v.shape)

    M = LinearOperator((nx * ny, nx * ny), matvec=pc_fn)
    return M
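Typical use of `MGVP`: pass the returned operator as `M` to a Krylov solver for the matching Poisson system. This is a sketch only; `V_cycle` (called above) and `poisson_matrix` are assumed to exist in the surrounding module and are not defined here.

    import numpy as np
    from scipy.sparse.linalg import cg

    nx = ny = 64
    A = poisson_matrix(nx, ny)       # hypothetical assembly of the 2D Poisson matrix
    b = np.ones(nx * ny)
    M = MGVP(nx, ny, num_levels=4)   # one V-cycle per preconditioner application
    x, info = cg(A, b, M=M)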
Code Example #35
def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to exclude active variables from computations
    of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        # use the masked copy so active variables are actually excluded
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
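With the masking fix above, a quick check that active variables are excluded from the forward product (J is wrapped with scipy's `aslinearoperator`; all other names are local to this sketch):

    import numpy as np
    from scipy.sparse.linalg import aslinearoperator

    rng = np.random.default_rng(0)
    J = aslinearoperator(rng.standard_normal((6, 4)))
    d = rng.uniform(0.5, 1.5, 4)
    active = np.array([False, True, False, False])

    op = lsmr_operator(J, d, active)
    x = np.ones(4)
    masked = x * d
    masked[active] = 0
    assert np.allclose(op.matvec(x), J.matvec(masked))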
Code Example #36
File: gw_iter.py Project: zzy2014/pyscf
 def si_c2(self,ww):
   """
   This computes the correlation part of the screened interaction using LinearOpt and lgmres
   lgmres method is much slower than np.linalg.solve !!
   """
   import numpy as np
   from scipy.sparse.linalg import lgmres
   from scipy.sparse.linalg import LinearOperator
   rf0 = si0 = self.rf0(ww)
   for iw,w in enumerate(ww):
     k_c = np.dot(self.kernel_sq, rf0[iw,:,:])
     b = np.dot(k_c, self.kernel_sq)
     self.comega_current = w
     k_c_opt = LinearOperator((self.nprod,self.nprod), matvec=self.gw_vext2veffmatvec, dtype=self.dtypeComplex)
     for m in range(self.nprod):
        si0[iw,m,:],exitCode = lgmres(k_c_opt, b[m,:], atol=self.gw_iter_tol, maxiter=self.maxiter)
     if exitCode != 0: print("LGMRES has not achieved convergence: exitCode = {}".format(exitCode))
     #np.allclose(np.dot(k_c, si0), b, atol=1e-05) == True  #Test
   return si0
Code Example #37
File: mda_solver.py Project: cephdon/meta-core
    def execute_Newton(self):
        """ Solver execution loop: Newton-Krylov. """

        # Find dimension of our problem.
        nEdge = self.workflow.initialize_residual()

        A = LinearOperator((nEdge, nEdge),
                           matvec=self.workflow.matvecFWD,
                           dtype=float)

        # Initial Run
        self.run_iteration()

        # Initial residuals
        norm = numpy.linalg.norm(self.workflow.calculate_residuals())
        print "Residual vector norm:\n", norm

        # Loop until convergence of residuals
        iter_num = 0
        while (norm > self.tolerance) and (iter_num < self.max_iteration):

            # Each comp calculates its own derivatives at the current
            # point. (i.e., linearizes)
            self.workflow.calc_derivatives(first=True)

            # Call GMRES to solve the linear system
            dv, info = gmres(A,
                             -self.workflow.res,
                             tol=self.tolerance,
                             maxiter=100)

            # Increment the model input edges by dv
            self.workflow.set_new_state(dv)

            # Run all components
            self.run_iteration()

            # New residuals
            norm = numpy.linalg.norm(self.workflow.calculate_residuals())
            print "Residual vector norm:\n", norm

            iter_num += 1
            self.record_case()
Code Example #38
def RH_linear_operator(A_R, rL):
    """
    Return, as a LinearOperator, the LHS of the equation found by
    summing the geometric series for
    the right environment Hamiltonian.
    """
    chi = A_R.shape[1]
    I = np.eye(chi, dtype=A_R.dtype)

    def matvec(v):
        v = v.reshape((chi, chi))
        Th_v = ct.XopR(A_R, X=v)
        Lv = proj(rL, v) * I
        v = v - Th_v + Lv
        v = v.flatten()
        return v

    op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_R.dtype)
    return op
Code Example #39
def inv_resolvent_norm(A, z, method='svd'):
    r'''Compute the reciprocal norm of the resolvent

    :param A: the input matrix as a ``numpy.array``, sparse matrix or
      ``LinearOperator`` with ``A.shape==(m,n)``, where :math:`m\geq n`.
    :param z: a complex number
    :param method: (optional) one of

      * ``svd`` (default): computes the minimal singular value of :math:`A-zI`.
        This one should be used for dense matrices.
      * ``lanczos``: computes the minimal singular value with the Lanczos
        iteration on the matrix
        :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
    '''
    if method == 'svd':
        return numpy.min(svdvals(A - z*numpy.eye(*A.shape)))
    elif method == 'lanczos':
        m, n = A.shape
        if m > n:
            raise ValueError('m > n is not allowed')
        AH = A.T.conj()

        def matvec(x):
            r'''matrix-vector multiplication

            matrix-vector multiplication with matrix
            :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
            '''
            x1 = x[:m]
            x2 = x[m:]
            ret1 = AH.dot(x2) - numpy.conj(z)*x2
            ret2 = numpy.array(A.dot(x1), dtype=complex)
            ret2[:n] -= z*x1
            return numpy.c_[ret1, ret2]
        AH_A = LinearOperator(matvec=matvec, dtype=complex,
                              shape=(m+n, m+n))

        evals = eigsh(AH_A, k=2, tol=1e-6, which='SM', maxiter=m+n+1,
                      ncv=2*(m+n),
                      return_eigenvectors=False)

        return numpy.min(numpy.abs(evals))
Code Example #40
def fp_TT(T, U=None, C2_0=None, verbosity=0):
    if U is None: U = T

    # applies
    #
    # /--T--    /--
    # B  |   => B
    # \--U--    \--
    def mv(v):
        B = torch.as_tensor(v, dtype=T.dtype, device=T.device)
        B = B.view(T.size()[0], T.size()[0])

        # B--1 0--T--1
        # 0       2
        B = torch.tensordot(B, T, ([1], [0]))

        # B-------T--1
        # |       2
        # |       2
        # \--0 0--U--1->0
        B = torch.tensordot(U, B, ([0, 2], [0, 2]))
        B = B.view(-1)
        return B.detach().cpu().numpy()

    M_op = LinearOperator((T.size()[0]**2, T.size()[0]**2), matvec=mv)

    D, V = truncated_eig_arnoldi(M_op,
                                 2,
                                 v0=C2_0,
                                 dtype=T.dtype,
                                 device=T.device)

    if verbosity > 0:
        log.info(
            f"fp_TT spec {[tuple(D[i,:].tolist()) for i in range(D.size()[0])]}"
        )
    assert (torch.abs(torch.max(V[:, 0, 1])) < 1.0e-14)

    nC2 = V[:, 0, 0]
    nC2 = nC2.view(T.size()[0], T.size()[0])

    return nC2
Code Example #41
    def fit(self, samples):
        diagnostic = dict()

        fit_data = np.reshape(samples["data"], (-1, self.output_size))
        fit_cost = samples["cost"]
        avg_cost = np.mean(fit_cost)
        min_cost = np.min(fit_cost)

        normalize_fit_cost = (fit_cost - np.mean(fit_cost)) / (1e-5 + np.std(fit_cost))
        loss_before = self.loss(normalize_fit_cost, fit_data)

        fischer_sampled_data = self.distribution.sample(self.fischer_sample_size)
        fischer_vector_product_on_data = lambda vec:  self.damped_hessian_vector_product(fischer_sampled_data, vec)
        fischer_vector_product_linear_operator = LinearOperator(shape=(self.distribution_params_dim, self.distribution_params_dim),
                                                                matvec=fischer_vector_product_on_data)
        flat_surrogate_loss_gradients = self.flat_surrogate_loss_gradients(normalize_fit_cost, fit_data)
        delta_params, _ = cg(fischer_vector_product_linear_operator, -flat_surrogate_loss_gradients, maxiter=50)
        delta_params *= self.lr
        tensor_delta_params = reshape_vector_to_tensor_list(delta_params, self.distribution_params_shape_list)
        hessian_delta_product = self.hessian_vector_product(fischer_sampled_data, delta_params)
        self.update_params(*tensor_delta_params)

        loss_after = self.loss(normalize_fit_cost, fit_data)

        # update the damping parameter
        # q_theta =  loss_before + np.sum(delta_params * flat_surrogate_loss_gradients) + 0.5 * np.sum(
        #     delta_params * hessian_delta_product)
        # reduction_ratio = (loss_after-loss_before)/(q_theta-loss_before)

        # if reduction_ratio < 0.25:
        #     self.fischer_damping *= 1.5
        # if reduction_ratio > 0.75:
        #     self.fischer_damping *= 2. / 3.

        diagnostic["loss_before"] = float(loss_before)
        diagnostic["loss_after"] = float(loss_after)
        diagnostic["mean_cost"] = float(avg_cost)
        diagnostic["min_cost"] = float(min_cost)
        diagnostic["fischer_damping"] = self.fischer_damping
        diagnostic.update(self.distribution.diagnostic())

        return diagnostic
Code Example #42
File: cg_rankrls.py Project: peknau/RLScore
    def trainWithLabels(self):
        regparam = float(
            self.resource_pool[data_sources.TIKHONOV_REGULARIZATION_PARAMETER])
        #regparam = 0.
        if data_sources.TRAIN_QIDS in self.resource_pool:
            P = sp.lil_matrix((self.size, len(self.qidmap.keys())))
            for qidind in range(len(self.indslist)):
                inds = self.indslist[qidind]
                qsize = len(inds)
                for i in inds:
                    P[i, qidind] = 1. / sqrt(qsize)
            P = P.tocsr()
            PT = P.tocsc().T
        else:
            P = 1. / sqrt(self.size) * (np.mat(
                np.ones((self.size, 1), dtype=np.float64)))
            PT = P.T
        X = self.X.tocsc()
        X_csr = X.tocsr()

        def mv(v):
            v = np.mat(v).T
            return X_csr * (X.T * v) - X_csr * (P * (PT *
                                                     (X.T * v))) + regparam * v

        G = LinearOperator((X.shape[0], X.shape[0]),
                           matvec=mv,
                           dtype=np.float64)
        Y = self.Y
        if self.callbackfun is not None:

            def cb(v):
                self.A = np.mat(v).T
                self.b = np.mat(np.zeros((1, 1)))
                self.callback()
        else:
            cb = None
        XLY = X_csr * Y - X_csr * (P * (PT * Y))
        try:
            self.A = np.mat(cg(G, XLY, callback=cb)[0]).T
        except Finished:
            pass
Code Example #43
File: cg_rankrls.py Project: peknau/RLScore
    def trainWithPreferences(self):
        regparam = float(
            self.resource_pool[data_sources.TIKHONOV_REGULARIZATION_PARAMETER])
        X = self.X.tocsc()
        X_csr = X.tocsr()
        vals = np.concatenate([
            np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones(
                (self.pairs.shape[0]), dtype=np.float64)
        ])
        row = np.concatenate(
            [np.arange(self.pairs.shape[0]),
             np.arange(self.pairs.shape[0])])
        col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
        coo = coo_matrix((vals, (row, col)),
                         shape=(self.pairs.shape[0], X.shape[1]))
        pairs_csr = coo.tocsr()
        pairs_csc = coo.tocsc()

        def mv(v):
            vmat = np.mat(v).T
            ret = np.array(X_csr * (pairs_csc.T *
                                    (pairs_csr *
                                     (X.T * vmat)))) + regparam * vmat
            return ret

        G = LinearOperator((X.shape[0], X.shape[0]),
                           matvec=mv,
                           dtype=np.float64)
        self.As = []
        M = np.mat(np.ones((self.pairs.shape[0], 1)))
        if self.callbackfun is not None:

            def cb(v):
                self.A = np.mat(v).T
                self.b = np.mat(np.zeros((1, 1)))
                self.callback()
        else:
            cb = None
        XLY = X_csr * (pairs_csc.T * M)
        self.A = np.mat(cg(G, XLY, callback=cb)[0]).T
        self.b = np.mat(np.zeros((1, self.A.shape[1])))
        self.results[data_sources.MODEL] = self.getModel()
Code Example #44
File: _numdiff.py Project: AlexeyDzyubaP/LinearReg
def _linear_operator_difference(fun, x0, f0, h, method):
    m = f0.size
    n = x0.size

    if method == '2-point':

        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx * p
            df = fun(x) - f0
            return df / dx

    elif method == '3-point':

        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = 2 * h / norm(p)
            x1 = x0 - (dx / 2) * p
            x2 = x0 + (dx / 2) * p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx

    elif method == 'cs':

        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx * p * 1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx

    else:
        raise RuntimeError("Never be here.")

    return LinearOperator((m, n), matvec)
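A runnable check of the '2-point' branch on a quadratic map with a known Jacobian (the module is assumed to provide `numpy as np`, `norm` from numpy.linalg, and `LinearOperator`):

    import numpy as np

    fun = lambda x: np.array([x[0]**2, x[0] * x[1]])
    x0 = np.array([1.0, 2.0])
    f0 = fun(x0)

    Jop = _linear_operator_difference(fun, x0, f0, 1e-8, '2-point')
    p = np.array([1.0, 1.0])
    # exact Jacobian at x0 is [[2, 0], [2, 1]], so J @ p = [2, 3]
    print(Jop.matvec(p))   # approx [2., 3.]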
Code Example #45
File: _numpy_backend.py Project: oguzziya/PhiFlow
    def conjugate_gradient(self,
                           A,
                           y,
                           x0,
                           solve_params=LinearSolve(),
                           gradient: str = 'implicit',
                           callback=None):
        bs_y = self.staticshape(y)[0]
        bs_x0 = self.staticshape(x0)[0]
        batch_size = combined_dim(bs_y, bs_x0)

        if callable(A):
            A = LinearOperator(dtype=y.dtype,
                               shape=(self.staticshape(y)[-1],
                                      self.staticshape(x0)[-1]),
                               matvec=A)
        elif isinstance(A, (tuple, list)) or self.ndims(A) == 3:
            batch_size = combined_dim(batch_size, self.staticshape(A)[0])

        iterations = [0] * batch_size
        converged = []
        results = []

        def count_callback(*args):
            iterations[batch] += 1
            if callback is not None:
                callback(*args)

        for batch in range(batch_size):
            y_ = y[min(batch, bs_y - 1)]
            x0_ = x0[min(batch, bs_x0 - 1)]
            x, ret_val = cg(A,
                            y_,
                            x0_,
                            tol=solve_params.relative_tolerance,
                            atol=solve_params.absolute_tolerance,
                            maxiter=solve_params.max_iterations,
                            callback=count_callback)
            converged.append(ret_val == 0)
            results.append(x)
        solve_params.result = SolveResult(all(converged), max(iterations))
        return self.stack(results)
Code Example #46
File: misc_mps.py Project: zuozw/TensorNetwork
def LGMRES_solver(mps,
                  direction,
                  left_dominant,
                  right_dominant,
                  inhom,
                  x0,
                  precision=1e-10,
                  nmax=2000,
                  **kwargs):
  """
    see Appendix of arXiv:1801.02219 for details of this
    This routine uses scipy's sparse.lgmres module. tf.Tensors are mapped to numpy 
    and back to tf.Tensor for each application of the sparse matrix vector product.
    This is not optimal and will be improved in a future version
    Args:
        mps (InfiniteMPSCentralGauge):   an infinite mps
        direction (int or str):          if (1,'l','left'): do left multiplication
                                         if (-1,'r','right'): do right multiplication
        left_dominant (tf.Tensor):       tensor of shape (mps.D[0],mps.D[0])
                                         left dominant eigenvector of the unit-cell transfer operator of mps
        right_dominant (tf.Tensor):      tensor of shape (mps.D[-1],mps.D[-1])
                                         right dominant eigenvector of the unit-cell transfer operator of mps
        inhom (tf.Tensor):               vector of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])
    Returns:
        tf.Tensor
    """
  #mps.D[0] has to be mps.D[-1], so no distinction between direction='l' and direction='r' has to be made here
  if not tf.equal(mps.D[0], mps.D[-1]):
    raise ValueError(
        'in LGMRES_solver: mps.D[0]!=mps.D[-1], can only handle infinite MPS!')
  inhom_numpy = tf.reshape(inhom, [mps.D[0] * mps.D[0]]).numpy()
  x0_numpy = tf.reshape(x0, [mps.D[0] * mps.D[0]]).numpy()
  mv = fct.partial(one_minus_pseudo_unitcell_transfer_op,
                   *[direction, mps, left_dominant, right_dominant])

  LOP = LinearOperator((int(mps.D[0])**2, int(mps.D[-1])**2),
                       matvec=mv,
                       dtype=mps.dtype.as_numpy_dtype)
  out, info = lgmres(
      A=LOP, b=inhom_numpy, x0=x0_numpy, tol=precision, maxiter=nmax, **kwargs)

  return tf.reshape(tf.convert_to_tensor(out), [mps.D[0], mps.D[0]]), info
Code Example #47
    def aspreconditioner(self, cycle='V'):
        """Create a preconditioner using this multigrid cycle.

        Parameters
        ----------
        cycle : {'V','W','F','AMLI'}
            Type of multigrid cycle to perform in each iteration.

        Returns
        -------
        precond : LinearOperator
            Preconditioner suitable for the iterative solvers defined in
            the scipy.sparse.linalg module (e.g. cg, gmres) and any other
            solver that uses the LinearOperator interface.  Refer to the
            LinearOperator documentation in scipy.sparse.linalg

        See Also
        --------
        multilevel_solver.solve, scipy.sparse.linalg.LinearOperator

        Examples
        --------
        >>> from pyamg.aggregation import smoothed_aggregation_solver
        >>> from pyamg.gallery import poisson
        >>> from scipy.sparse.linalg import cg
        >>> import scipy as sp
        >>> A = poisson((100, 100), format='csr')          # matrix
        >>> b = np.random.rand(A.shape[0])                 # random RHS
        >>> ml = smoothed_aggregation_solver(A)            # AMG solver
        >>> M = ml.aspreconditioner(cycle='V')             # preconditioner
        >>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)  # solve with CG

        """
        from scipy.sparse.linalg import LinearOperator

        shape = self.levels[0].A.shape
        dtype = self.levels[0].A.dtype

        def matvec(b):
            return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12)

        return LinearOperator(shape, matvec, dtype=dtype)
Code Example #48
File: __init__.py Project: hbrunie/ASPIRE-Python
    def expand(self, x):
        """
        Obtain coefficients in the basis from those in standard coordinate basis

        This is similar to evaluate_t, but more accurate: it solves the
        linear equation Ax=b iteratively with cg.

        :param x: An array whose first two or three dimensions are to be expanded
            the desired basis. These dimensions must equal `self.sz`.
        :return: The coefficients of `x` expanded in the desired basis.
            The first dimension of the return value has size `count`; the
            second and higher dimensions correspond to those higher
            dimensions of `x`.

        """
        # ensure the first dimensions with size of self.sz
        x, sz_roll = unroll_dim(x, self.ndim + 1)
        ensure(x.shape[:self.ndim] == self.sz,
               f'First {self.ndim} dimensions of x must match {self.sz}.')

        operator = LinearOperator(
            shape=(self.count, self.count),
            matvec=lambda v: self.evaluate_t(self.evaluate(v)))

        # TODO: (from MATLAB implementation) - Check that this tolerance make sense for multiple columns in v
        tol = 10 * np.finfo(x.dtype).eps
        logger.info('Expanding array in basis')

        # number of image samples
        n_data = np.size(x, self.ndim)
        v = np.zeros((self.count, n_data), dtype=x.dtype)

        for isample in range(0, n_data):
            b = self.evaluate_t(x[..., isample])
            # TODO: need check the initial condition x0 can improve the results or not.
            v[..., isample], info = cg(operator, b, tol=tol)
            if info != 0:
                raise RuntimeError('Unable to converge!')

        # return v coefficients with the first dimension of self.count
        v = roll_dim(v, sz_roll)
        return v
Code Example #49
def regularized_lsq_operator(J, diag):
    """Return a matrix arising in regularized least squares as LinearOperator.
    
    The matrix is
        [ J ]
        [ D ]
    where D is diagonal matrix with elements from `diag`.
    """
    J = aslinearoperator(J)
    m, n = J.shape

    def matvec(x):
        return np.hstack((J.matvec(x), diag * x))

    def rmatvec(x):
        x1 = x[:m]
        x2 = x[m:]
        return J.rmatvec(x1) + diag * x2

    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
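Typical use with `lsmr`: the stacked operator solves min ||Jx - f||^2 + ||diag * x||^2 when the right-hand side is padded with n zeros. A self-contained check against the normal equations:

    import numpy as np
    from scipy.sparse.linalg import lsmr

    rng = np.random.default_rng(0)
    m, n = 20, 5
    J = rng.standard_normal((m, n))
    f = rng.standard_normal(m)
    diag = np.full(n, 0.1)

    op = regularized_lsq_operator(J, diag)
    b = np.hstack((f, np.zeros(n)))
    x = lsmr(op, b, atol=1e-10, btol=1e-10)[0]

    # same solution via the normal equations (J^T J + D^2) x = J^T f
    x_ref = np.linalg.solve(J.T @ J + np.diag(diag**2), J.T @ f)
    assert np.allclose(x, x_ref)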
Code Example #50
    def lagrangian_hessian(self, z, v):
        """Returns scaled Lagrangian Hessian"""
        # Compute Hessian in relation to x and s
        Hx = self.lagrangian_hessian_x(z, v)
        if self.n_ineq > 0:
            S_Hs_S = self.lagrangian_hessian_s(z, v)

        # The scaled Lagrangian Hessian is:
        #     [ Hx    0    ]
        #     [ 0   S Hs S ]
        def matvec(vec):
            vec_x = self.get_variables(vec)
            vec_s = self.get_slack(vec)
            if self.n_ineq > 0:
                return np.hstack((Hx.dot(vec_x), S_Hs_S * vec_s))
            else:
                return Hx.dot(vec_x)

        return LinearOperator(
            (self.n_vars + self.n_ineq, self.n_vars + self.n_ineq), matvec)
Code Example #51
File: kle.py Project: mjasher/agu
	def make_kle(self, KLE_sigma, KLE_L,
				KLE_num_eig,
				nrow, ncol, nlay, Lx, Ly):

		print "making KLE, this will take a long time for large dimensions"

		print KLE_sigma, KLE_L, KLE_num_eig, nrow, ncol, nlay, Lx, Ly


		# initialise swig wrapped cpp class

		C_matrix = correlation.C_matrix(KLE_sigma, KLE_L)
		C_matrix.set_dims(nrow, ncol, Lx, Ly)

		out_vec = np.zeros((nrow*ncol), 'd')
		def KLE_Av(v):
			C_matrix.av_no_C(nrow*ncol, v, out_vec)
			return out_vec

		KLE_A = LinearOperator((nrow*ncol,nrow*ncol), matvec=KLE_Av, dtype='d')

		t1 = time.time()
		eig_vals, eig_vecs = eigsh( KLE_A, k=KLE_num_eig)
		t2 = time.time()


		# sometimes one gets -v rather than v?
		for i in range(KLE_num_eig):
			print "NORM", np.linalg.norm(eig_vecs[:,i])
			assert np.allclose(KLE_A.matvec(eig_vecs[:,i]), eig_vals[i]*eig_vecs[:,i])

		print "=================================="
		print "SVD took ", t2-t1, "seconds and "
		print "they seem to indeed be eigen vectors"
		print "=================================="


# plot eigenvectors
		from mpl_toolkits.mplot3d import axes3d
		import matplotlib.pyplot as plt
		fig = plt.figure(figsize=plt.figaspect(0.2))
		for i in range(eig_vecs.shape[1]):
			ax = fig.add_subplot(1, eig_vecs.shape[1], i + 1, projection='3d')  # subplot indices start at 1
			x = np.arange(0, nrow*Lx, Lx)
			X = np.empty((nrow, ncol))
			for col in range(ncol):
				X[:,col] = x
			y = np.arange(0, ncol*Ly, Ly)
			Y = np.empty((nrow, ncol))
			for row in range(nrow):
				Y[row,:] = y
			Z = eig_vecs[:,i].reshape((nrow, ncol))
			ax.plot_wireframe(X, Y, Z)
		plt.show()

# plot eigenvals

		# import matplotlib.pyplot as plt
		plt.plot(eig_vals, 'o-')
		plt.show()

		print "eig_vals", eig_vals

		np.savez(working_directory+'kle.npz', eig_vals=eig_vals, eig_vecs=eig_vecs, KLE_L=KLE_L, KLE_sigma=KLE_sigma, KLE_num_eig=KLE_num_eig, nrow=nrow, ncol=ncol, Lx=Lx, Ly=Ly)

		return eig_vals, eig_vecs
Code Example #52
File: Simon_Taylor_method.py Project: akr89/Thesis
def calculate_delta(gamma,     #3-dim array (Nz,Nx,Ny) of complex shear
                    P_kd,      #line-of-sight kappa-delta transform
                    P_gk,      #gamma-kappa transform
                    S_dd,      #expected signal covariance of delta
                               # (can be arbitrary form)
                    N,         #shear noise
                    alpha,     #weiner filter strength
                    M_factor = 1 #optional scaling of M before cg-method
                    ):    
    """
    Implement equation A3 from Simon & Taylor 2009
    Note: their notation Q -> our notation P_kd

    Also, here we factor N_d^-1 out of M: M should be Hermitian for
    the cg method. The Simon 09 expression is Hermitian only if N_d is
    proportional to the identity, which will not be the case for
    deweighted border pixels.
    """
    P_kd = as_Lens3D_matrix(P_kd)
    P_gk = as_Lens3D_matrix(P_gk)
    S_dd = as_Lens3D_matrix(S_dd)
    N = as_Lens3D_matrix(N)
    
    P_gk_cross = P_gk.conj_transpose()
    P_kd_T = P_kd.transpose()

    print "calculating delta:"

    print "  shape of P_gk:",P_gk.shape
    print "  shape of P_kd:",P_kd.shape

    print "constructing linear operator M"
    #define an operator which performs matrix-vector
    # multiplication representing M
    def matvec(v):
        v0 = P_gk_cross.view_as_Lens3D_vec(v)
        
        v1 = N.matvec(v0)
        v1 *= alpha

        v2 = P_gk_cross.matvec( v0 )
        v2 = P_kd_T.matvec( v2 )
        v2 = S_dd.matvec( v2 )
        v2 = P_kd.matvec( v2 )
        v2 = P_gk.matvec( v2 )

        ret = numpy.zeros(v.shape,dtype=complex)
        ret += v1.vec
        ret += v2.vec

        return P_gk_cross.view_as_same_type( ret * M_factor , v )

    M = LinearOperator(P_gk.shape,
                       matvec=matvec,
                       dtype=complex)

    v = numpy.random.random(M.shape[1])
    t0 = time()
    v2 = M.matvec(v)
    t = time()-t0
    print "  M multiplication: %.3g sec" % t

    #print M.matvec(numpy.ones(M.shape[1]))[:10]
    #exit()

    print "constructing preconditioner for M"

    #define an operator which can quickly approximate the inverse of
    # M using fourier-space inversions.  This inverse will be exact for
    # a noiseless reconstruction on an infinite field
    P_gk_I = P_gk.inverse(False)
    #P_kd_I = P_kd.inverse()
    S_dd_I = S_dd.inverse(False)
    #P_kd_I_T = P_kd_I.transpose()
    P_gk_I_cross = P_gk_I.conj_transpose()

    def matvec_pc(v):
        v0 = P_gk_I.view_as_Lens3D_vec(v)
        v0 = P_gk_I.matvec(v0)
        #v0 = P_kd_I.matvec(v0)
        v0 = S_dd_I.matvec(v0)
        #v0 = P_kd_I_T.matvec(v0)
        v0 = P_gk_I_cross.matvec(v0)
        return P_gk_I.view_as_same_type(v0,v)
    
    M_pc = LinearOperator( (M.shape[1],M.shape[0]), 
                           matvec = matvec_pc,
                           dtype = M.dtype )
    
    v = numpy.random.random(M_pc.shape[1])
    t0 = time()
    v3 = M_pc.matvec(v)
    t_pc = time()-t0
    print "  preconditioner multiplication: %.3g sec" % t_pc

    step1_vec = gamma.vec

    use_cg = True

    #---define callback function---
    def callback(self):
        callback.N += 1
        if callback.N % 100 == 0: print(callback.N, 'iterations')
    callback.N = 0
    #------------------------------
        
    t0 = time()
    print "calculating cg:"
    ret,errcode = cg( M, step1_vec,
                      x0 = numpy.zeros(M.shape[1],dtype=step1_vec.dtype),
                      callback=callback,
                      M = M_pc)
    if errcode != 0:
        raise ValueError("calculate_delta: cg iterations did not converge: "
                         "err = %s" % str(errcode))
    tf = time()

    print "   cg:   total time           = %.2g sec" % (tf-t0)
    print "         number of iterations = %i" % callback.N
    print "         time per iteration   = %.2g sec" % ( (tf-t0)/callback.N )
    
    ret *= M_factor
    
    ret = P_gk_cross * ret
    ret = P_kd_T * ret
    delta = S_dd * ret

    return P_kd.view_as_Lens3D_vec(delta)
Code Example #53
File: Simon_Taylor_method.py Project: akr89/Thesis
def estimate_condition_number(P_kd,
                              P_gk,
                              S_dd,
                              N,
                              alpha,
                              compute_exact = False):
    P_kd = as_Lens3D_matrix(P_kd)
    P_gk = as_Lens3D_matrix(P_gk)
    S_dd = as_Lens3D_matrix(S_dd)
    N = as_Lens3D_matrix(N)
    
    P_gk_cross = P_gk.conj_transpose()
    P_kd_T = P_kd.transpose()
    
    def matvec(v):
        v0 = P_gk_cross.view_as_Lens3D_vec(v)

        v2 = P_gk_cross.matvec( v0 )
        v2 = P_kd_T.matvec( v2 )
        v2 = S_dd.matvec( v2 )
        v2 = P_kd.matvec( v2 )
        v2 = P_gk.matvec( v2 )
        
        v1 = N.matvec(v0)
        v1 *= alpha

        ret = numpy.zeros(v.shape,dtype=complex)
        ret += v1.vec
        ret += v2.vec

        return P_gk_cross.view_as_same_type( ret , v )

    M = LinearOperator(P_gk.shape,
                       matvec=matvec,
                       dtype=complex)

    #compute the exact condition number
    if compute_exact:
        v = numpy.random.random(M.shape[1])
        t0 = time()
        v2 = M.matvec(v)
        t = time()-t0
        print " - constructing matrix representation (est. %s)" \
            % printtime(t*M.shape[0])
        t0 = time()
        M_rep = get_mat_rep(M)
        print "    time to get mat rep: %.2g sec" % (time()-t0)
        print " - computing SVD"
        t0 = time()
        sig = numpy.linalg.svd(M_rep,compute_uv=False)
        print "    time for SVD: %.2g sec" % (time()-t0)
        print 'true condition number:      %.2e / %.2e = %.2e' \
            % (sig[0],sig[-1],
               sig[0]/sig[-1])

    #estimate condition number, assuming the noiseless matrix
    # is rank-deficient.  This will be true if there are more
    # source lens-planes than mass lens-planes
    eval_max,evec_max = arpack.eigen(M,1)
    print('estimated condition number: %.2e / %.2e = %.2e'
          % (abs(eval_max[0]), numpy.min(N.data),
             abs(eval_max[0]) / numpy.min(N.data)))
Code Example #54
File: linop.py Project: jeremander/Gplus
 def __init__(self, u, v):
     assert len(u) == len(v)
     self.u, self.v = u, v
     LinearOperator.__init__(self, dtype=float, shape=(len(u), len(v)))
Code Example #55
File: linop.py Project: jeremander/Gplus
 def __init__(self, u):
     self.u = u
     LinearOperator.__init__(self, dtype=float, shape=(len(u), len(u)))
Code Example #56
File: linop.py Project: jeremander/Gplus
 def __init__(self, A):
     assert isinstance(A, SymmetricSparseLinearOperator)
     self.A = A
     LinearOperator.__init__(self, dtype=float, shape=A.shape)
     self.D_ratio = self.A._matvec(np.ones(self.A.shape[1], dtype=float)) / self.shape[0]
Code Example #57
File: test_ST_method.py Project: akr89/Thesis
                     interpolation='nearest',cmap=pylab.cm.gray,
                     origin = 'lower')
        pylab.title(r'$|\gamma_{\rm{test}} - \gamma_{\rm{true}}|$')
        pylab.colorbar()

    pylab.show()
    exit()

#test Sigma->gamma
if False:
    print "testing Sigma->gamma"
    def matvec(v):
        v1 = P_kd.matvec(v)
        return P_gk.matvec(v1)

    M = LinearOperator(P_kd.shape,matvec=matvec,dtype=complex)
    
    kappa_test = P_kd.matvec(Sigma)
    gamma_test = M.matvec(Sigma.vec)
    gamma_test = P_kd.view_as_Lens3D_vec(gamma_test)
    
    for i in range(gamma.Nz):
        pylab.figure( figsize=(6,8) )
        pylab.subplot(211)
        kappa.imshow_lens_plane(i)
        gamma.fieldplot_lens_plane(i)
        pylab.title(r"$\kappa,\gamma\ \rm{true}\ (z=%.2f)$" % z_gamma[i])
        pylab.subplot(212)
        kappa_test.imshow_lens_plane(i)
        gamma_test.fieldplot_lens_plane(i)
        pylab.title(r"$\kappa,\gamma\ \rm{test}\ (z=%.2f)$" % z_gamma[i])
Code Example #58
File: linop.py Project: jeremander/Gplus
 def __init__(self, D):
     """D is a 1D array containing the diagonal entries."""
     self.D = D
     LinearOperator.__init__(self, dtype=float, shape=(len(D), len(D)))
Code Example #59
File: linop.py Project: jeremander/Gplus
 def __init__(self, n, c):
     """n is dimension, c is a constant to be multiplied by the identity matrix."""
     self.c = c
     LinearOperator.__init__(self, dtype=float, shape=(n, n))
Code Example #60
File: linop.py Project: jeremander/AttrVN
 def __init__(self, u):
     self.u = u
     if LINOP_SUBCLASSING:
         LinearOperator.__init__(self, dtype = float, shape = (len(u), len(u)))
     else:
         LinearOperator.__init__(self, dtype = float, shape = (len(u), len(u)), matvec = lambda x : type(self)._matvec(self, x))