def Torque_from_Omega(self,Om,F):
    '''
    Given an angular velocity Om, compute a torque based on the forces (F) such that the angular
    velocity of the particles is approximately constrained to equal Om*y_hat.
    '''
    r_vecs_np = [b.location for b in self.bodies]
    r_vecs = self.put_r_vecs_in_periodic_box(r_vecs_np,self.periodic_length)
    
    def Mrr(torque):
        return self.mobility_rot_times_torque_wall(r_vecs, torque, self.eta, self.a, periodic_length=self.periodic_length)
    def Mtr(torque):
        return self.mobility_trans_times_torque_wall(r_vecs, torque, self.eta, self.a, periodic_length=self.periodic_length)
    def Mtt(force):
        return self.mobility_trans_times_force_wall(r_vecs, force, self.eta, self.a, periodic_length=self.periodic_length)
    
    
    def V_T_Mat_Mult(VT):
        VT = np.reshape(VT,(len(self.bodies),6))
        
        V = np.copy(VT)
        V[:,3::] *= 0
        V = -1.0*V.flatten()
        T = VT[:,3::].flatten()
        
        out = np.reshape(self.IpMDR_Mult(V), (len(self.bodies),6))
        out[:,0:3] += np.reshape(Mtr(T), (len(self.bodies),3))
        out[:,3::] += np.reshape(Mrr(T), (len(self.bodies),3))
        return out
        
    
    Om0 = np.zeros(6*len(self.bodies))
    for i in range(len(self.bodies)):
      Om0[6*i+4] = Om
    F0 = np.concatenate((F, np.zeros(F.shape)), axis=1)
    F0 = F0.flatten()
    RHS = self.IpMDR_Mult(Om0) - self.Wall_Mobility_Mult(F0,r_vecs_np=r_vecs)

    A = spla.LinearOperator((6*len(self.bodies), 6*len(self.bodies)), matvec = V_T_Mat_Mult, dtype='float64')

    ############### PC 1 ############
    # Get the tt and rt blocks of DR
    ttInd = np.zeros((len(self.bodies),6))
    ttInd[:,0:3] = 1 
    ttInd = np.nonzero(ttInd.flatten())[0]
    
    rrInd = np.zeros((len(self.bodies),6))
    rrInd[:,3::] = 1 
    rrInd = np.nonzero(rrInd.flatten())[0]
    
    DRtt = self.Delta_R[:, ttInd][ttInd, :]
    DRrt = self.Delta_R[:, ttInd][rrInd, :]
    
    c1 = 6.0 * np.pi * self.eta * self.a
    c2 = 8.0 * np.pi * self.eta * self.a**3
    
    Vmat = sp.diags(c1*np.ones(3*len(self.bodies)),0,format='csc') + DRtt
    Vfact = cholesky(Vmat)
    
    def PC_mult(ab):
        AB = np.reshape(ab,(len(self.bodies),6))
        a = -c1*AB[:,0:3].flatten()
        v = Vfact(a)
        t = c2*AB[:,3::].flatten() + DRrt.dot(v)
        V = np.reshape(v,(len(self.bodies),3))
        T = np.reshape(t,(len(self.bodies),3))
        return np.concatenate((V, T), axis=1).flatten()

    PC = spla.LinearOperator((6*len(self.bodies), 6*len(self.bodies)), matvec = PC_mult, dtype='float64')
    ############### PC 1 ############
    
    
    # Scale RHS to norm 1
    RHS_norm = np.linalg.norm(RHS)
    if RHS_norm > 0:
      RHS = RHS / RHS_norm

    # use 8*pi*eta*a^3*Omega as initial guess for torque
    Om_g = np.zeros((len(self.bodies),3))
    Om_g[:,1] += Om
    T_g = c2*Om_g
    V_g = 0*T_g
    X0_vt = np.concatenate((V_g, T_g), axis=1).flatten()
    X0_vt *= (1.0/RHS_norm)
    
    # Solve linear system 
    res_list = []
    (VT_gmres, info_precond) = pyamg.krylov.gmres(A, RHS, M=PC, x0 = X0_vt, tol=self.tolerance, maxiter=100, restrt = min(100,A.shape[0]),residuals=res_list)

    print(res_list)
    # Scale solution with RHS norm
    if RHS_norm > 0:
      VT_gmres = VT_gmres * RHS_norm
      
    VT = np.reshape(VT_gmres, (len(self.bodies),6))
    Torque = VT[:,3::]
    
    VO_guess = np.concatenate((VT[:,0:3],Om_g), axis=1)
    VO_guess = VO_guess.flatten()
    
    return Torque, VO_guess
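A minimal, self-contained sketch of the solve pattern used above (matrix-free operator, preconditioner, RHS normalization, GMRES), with scipy.sparse.linalg.gmres standing in for pyamg.krylov.gmres and a random SPD matrix standing in for the wall-mobility/lubrication operators; none of the names below come from the class above.

import numpy as np
import scipy.sparse.linalg as spla

n = 6 * 10                                   # e.g. 10 bodies, 6 DOFs each
rng = np.random.RandomState(0)
B = rng.randn(n, n)
A_dense = B @ B.T + n * np.eye(n)            # SPD stand-in for the operator applied by V_T_Mat_Mult

A = spla.LinearOperator((n, n), matvec=lambda x: A_dense @ x, dtype='float64')

Pinv = 1.0 / np.diag(A_dense)                # Jacobi preconditioner wrapped as a LinearOperator
PC = spla.LinearOperator((n, n), matvec=lambda x: Pinv * x, dtype='float64')

RHS = rng.randn(n)
RHS_norm = np.linalg.norm(RHS)               # scale RHS to norm 1, as above
x, info = spla.gmres(A, RHS / RHS_norm, M=PC)
x *= RHS_norm                                # undo the scaling on the solution
print(info, np.linalg.norm(A_dense @ x - RHS))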
Example 2
def F_wrapper(n):
    return splin.LinearOperator((n, n), lambda u: F_u(u, h))
Example 3
    def compute_stochastic_velocity(self, dt):
        '''
    Compute stochastic torque and velocity. First,
    solve for the torque
    
    M_rr * T = -kT*div_t(M_rt) - sqrt(2*kT) * (N^{1/2}*W)_r,

    then set linear velocity
    
    v_stoch = M_tr * T + sqrt(2*kT) * (N^{1/2}*W)_t + kT*div_t(M_tt).

    Here N = (M_tt M_tr; M_rt M_rr) is the grand mobility matrix.
    We use random finite difference to compute the divergence
    terms. Note that in principle we should include the term
    div_r(M_rr) in the torque equation and div_r(M_tr) in the
    velocity equation but they are zero for a roller.

    This function returns the stochastic velocity v_stoch.
    '''
        # Create auxiliary variables
        Nblobs = len(self.bodies)
        blob_mass = 1.0

        # Get blobs coordinates
        r_vectors_blobs = np.empty((Nblobs, 3))
        for k, b in enumerate(self.bodies):
            r_vectors_blobs[k] = b.location

        # Generate random vector
        z = np.random.randn(6 * Nblobs)

        # Define grand mobility matrix
        def grand_mobility_matrix(force_torque,
                                  r_vectors=None,
                                  eta=None,
                                  a=None,
                                  periodic_length=None):
            half_size = force_torque.size // 2
            # velocity = self.mobility_trans_times_force_torque(r_vectors, force_torque[0:half_size], force_torque[half_size:], eta, a, periodic_length = periodic_length)
            velocity = self.mobility_trans_times_force(
                r_vectors,
                force_torque[0:half_size],
                eta,
                a,
                periodic_length=periodic_length)
            velocity += self.mobility_trans_times_torque(
                r_vectors,
                force_torque[half_size:],
                eta,
                a,
                periodic_length=periodic_length)
            angular_velocity = self.mobility_rot_times_force(
                r_vectors,
                force_torque[0:half_size],
                eta,
                a,
                periodic_length=periodic_length)
            angular_velocity += self.mobility_rot_times_torque(
                r_vectors,
                force_torque[half_size:],
                eta,
                a,
                periodic_length=periodic_length)
            return np.concatenate([velocity, angular_velocity])

        partial_grand_mobility_matrix = partial(
            grand_mobility_matrix,
            r_vectors=r_vectors_blobs,
            eta=self.eta,
            a=self.a,
            periodic_length=self.periodic_length)

        # Generate noise term sqrt(2*kT) * N^{1/2} * z
        velocities_noise, it_lanczos = stochastic.stochastic_forcing_lanczos(
            factor=np.sqrt(2 * self.kT / dt),
            tolerance=self.tolerance,
            dim=self.Nblobs * 6,
            mobility_mult=partial_grand_mobility_matrix,
            z=z,
            print_residual=self.print_residual)
        self.stoch_iterations_count += it_lanczos

        # Compute divergence terms div_t(M_rt) and div_t(M_tt)
        if self.kT > 0.0 and self.domain != 'no_wall':
            # 1. Generate random displacement
            dx_stoch = np.reshape(np.random.randn(Nblobs * 3), (Nblobs, 3))
            # 2. Displace blobs
            r_vectors_blobs += dx_stoch * (self.rf_delta * self.a * 0.5)
            # 3. Compute M_rt(r+0.5*dx) * dx_stoch
            div_M_rt = self.mobility_rot_times_force(
                r_vectors_blobs,
                np.reshape(dx_stoch, dx_stoch.size),
                self.eta,
                self.a,
                periodic_length=self.periodic_length)
            div_M_tt = self.mobility_trans_times_force(
                r_vectors_blobs,
                np.reshape(dx_stoch, dx_stoch.size),
                self.eta,
                self.a,
                periodic_length=self.periodic_length)
            # 4. Displace blobs in the other direction
            r_vectors_blobs -= dx_stoch * self.rf_delta * self.a

            # 5. Compute -M_rt(r-0.5*dx) * dx_stoch
            div_M_rt -= self.mobility_rot_times_force(
                r_vectors_blobs,
                np.reshape(dx_stoch, dx_stoch.size),
                self.eta,
                self.a,
                periodic_length=self.periodic_length)
            div_M_tt -= self.mobility_trans_times_force(
                r_vectors_blobs,
                np.reshape(dx_stoch, dx_stoch.size),
                self.eta,
                self.a,
                periodic_length=self.periodic_length)

            # Reset blobs location
            r_vectors_blobs += dx_stoch * (self.rf_delta * self.a * 0.5)
        else:
            div_M_rt = np.zeros(Nblobs * 3)
            div_M_tt = np.zeros(Nblobs * 3)

        # Use constraint motion or free kinematics
        if self.free_kinematics == 'False':
            # Set RHS = -kT*div_t(M_rt) - sqrt(2*kT) * (N^{1/2}*W)_r,
            RHS = -velocities_noise[velocities_noise.size //
                                    2:] - div_M_rt * (self.kT /
                                                      (self.rf_delta * self.a))

            # Set linear operator
            system_size = 3 * len(self.bodies)

            def mobility_rot_torque(torque,
                                    r_vectors=None,
                                    eta=None,
                                    a=None,
                                    periodic_length=None):
                return self.mobility_rot_times_torque(
                    r_vectors, torque, eta, a, periodic_length=periodic_length)

            linear_operator_partial = partial(
                mobility_rot_torque,
                r_vectors=r_vectors_blobs,
                eta=self.eta,
                a=self.a,
                periodic_length=self.periodic_length)
            A = spla.LinearOperator((system_size, system_size),
                                    matvec=linear_operator_partial,
                                    dtype='float64')

            # Scale RHS to norm 1
            RHS_norm = np.linalg.norm(RHS)
            if RHS_norm > 0:
                RHS = RHS / RHS_norm

            # Solve linear system
            counter = gmres_counter(print_residual=self.print_residual)
            (sol_precond, info_precond) = utils.gmres(A,
                                                      RHS,
                                                      tol=self.tolerance,
                                                      maxiter=1000,
                                                      callback=counter)
            self.det_iterations_count += counter.niter

            # Scale solution with RHS norm
            if RHS_norm > 0:
                sol_precond = sol_precond * RHS_norm
        else:
            # This is free kinematics, stochastic torque set to zero
            # because we only care for the translational degrees of freedom
            sol_precond = np.zeros(3 * Nblobs)

        # Compute stochastic velocity v_stoch = M_tr * T + sqrt(2*kT) * (N^{1/2}*W)_t + kT*div_t(M_tt).
        v_stoch = self.mobility_trans_times_torque(
            r_vectors_blobs,
            sol_precond,
            self.eta,
            self.a,
            periodic_length=self.periodic_length)
        v_stoch += velocities_noise[0:velocities_noise.size //
                                    2] + (self.kT /
                                          (self.rf_delta * self.a)) * div_M_tt
        return v_stoch
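A toy sketch of the random finite difference (RFD) estimate used above for the divergence terms: div(M)(r) is approximated by averaging (M(r + delta*W/2) - M(r - delta*W/2)) @ W / delta over Gaussian vectors W. The 3x3 position-dependent "mobility" below is an arbitrary smooth function, not the wall mobility of this class.

import numpy as np

def mobility(r):
    # toy mobility: diagonal, decaying with the height r[2]
    return np.diag(1.0 / (1.0 + np.array([1.0, 1.0, 2.0]) * r[2]**2))

r = np.array([0.0, 0.0, 1.0])
delta = 1e-3
rng = np.random.RandomState(0)

div_est = np.zeros(3)
samples = 10000
for _ in range(samples):
    w = rng.randn(3)
    div_est += (mobility(r + 0.5 * delta * w) - mobility(r - 0.5 * delta * w)) @ w / delta
div_est /= samples
print(div_est)   # ~ [0, 0, dM_22/dz] = [0, 0, -4/9] at r = (0, 0, 1)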
Example 4
def eyelo(size):
    def mv(v):
        return v
    return SSLA.LinearOperator(shape=(size,size), matvec=mv, rmatvec=mv, dtype=float)
Example 5
def adjoint_average(A):
    size = A.shape[0]
    assert(size==A.shape[1])
    def mv(v):
        return 0.5*(A.matvec(v)+A.rmatvec(v))
    return SSLA.LinearOperator(shape=(size,size), matvec=mv, rmatvec=mv, dtype=float)
Example 6
def s2lo_core(M):
    def mv(v):
        return M.dot(v)
    return SSLA.LinearOperator(shape=M.shape,matvec=mv,dtype=float)
Example 7
def zlo(size):
    def mv(v):
        return np.zeros(size[0])
    def rmv(v):
        return np.zeros(size[1])
    return SSLA.LinearOperator(shape=size,matvec=mv,rmatvec=rmv,dtype=float)
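A short usage sketch for the LinearOperator helpers defined in the examples above; it assumes those helpers are in scope and that SSLA is scipy.sparse.linalg, as their code suggests.

import numpy as np

I3 = eyelo(3)                                   # 3x3 identity operator
Z = zlo((3, 2))                                 # zero operator of shape (3, 2)
S = s2lo_core(np.arange(9.0).reshape(3, 3))     # wraps a dense/sparse matrix

print(I3.matvec(np.array([1.0, 2.0, 3.0])))     # [1. 2. 3.]
print(Z.matvec(np.ones(2)))                     # [0. 0. 0.]
print(adjoint_average(I3).matvec(np.ones(3)))   # [1. 1. 1.] (I3 is already self-adjoint)
print(S.matvec(np.ones(3)))                     # row sums of the wrapped matrix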
Example 8
    def __init__(me, *args, **kwargs):
        dl.FunctionSpace.__init__(me, *args, **kwargs)

        me._coords = None
        me._mass_matrix_petsc = None
        me._mass_matrix_scipy = None
        me._mass_matrix_LUSolver = None
        me._kd_tree = None
        me._mass_lumps_rowsum_petsc = None
        me._mass_lumps_rowsum_numpy = None
        me._mass_lumps_diagonal_petsc = None
        me._mass_lumps_diagonal_numpy = None

        # me.num_applies = 0

        me.apply_mass_matrix_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.apply_mass_matrix,
            rmatvec=me.apply_mass_matrix)

        me.solve_mass_matrix_LU_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.solve_mass_matrix_LU,
            rmatvec=me.solve_mass_matrix_LU)

        me.apply_lumped_mass_matrix_diagonal_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.apply_lumped_mass_matrix_diagonal,
            rmatvec=me.apply_lumped_mass_matrix_diagonal)

        me.apply_lumped_mass_matrix_rowsum_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.apply_lumped_mass_matrix_rowsum,
            rmatvec=me.apply_lumped_mass_matrix_rowsum)

        me.solve_lumped_mass_matrix_diagonal_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.solve_lumped_mass_matrix_diagonal,
            rmatvec=me.solve_lumped_mass_matrix_diagonal)

        me.solve_lumped_mass_matrix_rowsum_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.solve_lumped_mass_matrix_rowsum,
            rmatvec=me.solve_lumped_mass_matrix_rowsum)

        me.solve_mass_matrix_PCG_linop = spla.LinearOperator(
            (me.dim(), me.dim()),
            matvec=me.solve_mass_matrix_PCG,
            rmatvec=me.solve_mass_matrix_PCG)

        me.apply_M = me.apply_mass_matrix
        me.apply_MLd = me.apply_lumped_mass_matrix_diagonal
        me.apply_MLr = me.apply_lumped_mass_matrix_rowsum
        me.solve_M_LU = me.solve_mass_matrix_LU
        me.solve_MLd = me.solve_lumped_mass_matrix_diagonal
        me.solve_MLr = me.solve_lumped_mass_matrix_rowsum

        me.apply_M_linop = me.apply_mass_matrix_linop
        me.apply_MLd_linop = me.apply_lumped_mass_matrix_diagonal_linop
        me.apply_MLr_linop = me.apply_lumped_mass_matrix_rowsum_linop
        me.solve_M_LU_linop = me.solve_mass_matrix_LU_linop
        me.solve_MLd_linop = me.solve_lumped_mass_matrix_diagonal_linop
        me.solve_MLr_linop = me.solve_lumped_mass_matrix_rowsum_linop
Example 9
def schmidt_decomposition(
        vec: np.ndarray,
        dim: Union[int, List[int], np.ndarray] = None,
        k_param: int = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    r"""
    Compute the Schmidt decomposition of a bipartite vector [WikSD]_.

    Examples
    ==========

    Consider the :math:`3`-dimensional maximally entangled state

    .. math::
        u = \frac{1}{\sqrt{3}} \left( |000 \rangle + |111 \rangle + |222 \rangle \right)

    We can generate this state using the :code:`toqito` module as follows.

    >>> from toqito.states import max_entangled
    >>> max_entangled(3)
    [[0.57735027],
     [0.        ],
     [0.        ],
     [0.        ],
     [0.57735027],
     [0.        ],
     [0.        ],
     [0.        ],
     [0.57735027]]

    Computing the Schmidt decomposition of :math:`u`, we can obtain the corresponding singular
    values of :math:`u` as

    .. math::
        \frac{1}{\sqrt{3}} \left[1, 1, 1 \right]^{\text{T}}.

    >>> from toqito.states import max_entangled
    >>> from toqito.state_ops import schmidt_decomposition
    >>> singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3))
    >>> singular_vals
    [[0.57735027]
     [0.57735027]
     [0.57735027]]
    >>> u_mat
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]
    >>> vt_mat
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]

    References
    ==========
    .. [WikSD] Wikipedia: Schmidt decomposition
        https://en.wikipedia.org/wiki/Schmidt_decomposition

    :param vec: A bipartite quantum state to compute the Schmidt decomposition of.
    :param dim: An array consisting of the dimensions of the subsystems (default gives subsystems
                equal dimensions).
    :param k_param: How many terms of the Schmidt decomposition should be computed (default is 0).
    :return: The Schmidt decomposition of the :code:`vec` input.
    """
    eps = np.finfo(float).eps

    if dim is None:
        dim = np.round(np.sqrt(len(vec)))
    if isinstance(dim, list):
        dim = np.array(dim)

    # Allow the user to enter a single number for `dim`.
    if isinstance(dim, float):
        dim = np.array([dim, len(vec) / dim])
        if np.abs(dim[1] - np.round(dim[1])) >= 2 * len(vec) * eps:
            raise ValueError(
                "InvalidDim: The value of `dim` must evenly divide"
                " `len(vec)`; please provide a `dim` array "
                "containing the dimensions of the subsystems.")
        dim[1] = np.round(dim[1])

    # Try to guess whether SVD or SVDS will be faster, and then perform the
    # appropriate singular value decomposition.
    adj = 20 + 1000 * (not issparse(vec))

    # Just a few Schmidt coefficients.
    if 0 < k_param <= np.ceil(np.min(dim) / adj):
        u_mat, singular_vals, vt_mat = linalg.svds(
            np.reshape(vec, dim[::-1].astype(int)), k=k_param)
    # Otherwise, use lots of Schmidt coefficients.
    else:
        u_mat, singular_vals, vt_mat = np.linalg.svd(
            np.reshape(vec, dim[::-1].astype(int)))

    if k_param > 0:
        u_mat = u_mat[:, :k_param]
        singular_vals = singular_vals[:k_param]
        vt_mat = vt_mat[:, :k_param]

    # singular_vals = np.diag(singular_vals)
    singular_vals = singular_vals.reshape(-1, 1)
    if k_param == 0:
        # Schmidt rank.
        r_param = np.sum(
            singular_vals > np.max(dim) * np.spacing(singular_vals[0]))
        # Schmidt coefficients.
        singular_vals = singular_vals[:r_param]
        u_mat = u_mat[:, :r_param]
        vt_mat = vt_mat[:, :r_param]

    u_mat = u_mat.conj().T
    return singular_vals, u_mat, vt_mat
Example 10
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto',
                     max_iter=None, tol=1e-3):
    """Solve the ridge equation by the method of normal equations.

    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data

    y : array-like, shape = [n_samples] or [n_samples, n_responses]
        Target values

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample

    solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'dense_cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'dense_cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        All three solvers support both dense and sparse data.

    tol: float
        Precision of the solution.

    Returns
    -------
    coef: array, shape = [n_features] or [n_responses, n_features]
        Weight vector(s).

    Notes
    -----
    This function won't compute the intercept.
    """

    n_samples, n_features = X.shape
    has_sw = isinstance(sample_weight, np.ndarray) or sample_weight != 1.0

    if solver == 'auto':
        # cholesky if it's a dense array and cg in
        # any other case
        if hasattr(X, '__array__'):
            solver = 'dense_cholesky'
        else:
            solver = 'sparse_cg'

    elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
        warnings.warn("""lsqr not available on this machine, falling back
                      to sparse_cg.""")
        solver = 'sparse_cg'

    if has_sw:
        solver = 'dense_cholesky'

    if solver == 'sparse_cg':
        # gradient descent
        X1 = sp_linalg.aslinearoperator(X)
        if y.ndim == 1:
            y1 = np.reshape(y, (-1, 1))
        else:
            y1 = y
        coefs = np.empty((y1.shape[1], n_features))

        if n_features > n_samples:
            def mv(x):
                return X1.matvec(X1.rmatvec(x)) + alpha * x
        else:
            def mv(x):
                return X1.rmatvec(X1.matvec(x)) + alpha * x

        for i in range(y1.shape[1]):
            y_column = y1[:, i]
            if n_features > n_samples:
                # kernel ridge
                # w = X.T * inv(X X^t + alpha*Id) y
                C = sp_linalg.LinearOperator(
                    (n_samples, n_samples), matvec=mv, dtype=X.dtype)
                coef, info = sp_linalg.cg(C, y_column, tol=tol)
                coefs[i] = X1.rmatvec(coef)
            else:
                # ridge
                # w = inv(X^t X + alpha*Id) * X.T y
                y_column = X1.rmatvec(y_column)
                C = sp_linalg.LinearOperator(
                    (n_features, n_features), matvec=mv, dtype=X.dtype)
                coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                              tol=tol)
            if info != 0:
                raise ValueError("Failed with error code %d" % info)

        if y.ndim == 1:
            coefs = np.ravel(coefs)

        return coefs
    elif solver == "lsqr":
        if y.ndim == 1:
            y1 = np.reshape(y, (-1, 1))
        else:
            y1 = y
        coefs = np.empty((y1.shape[1], n_features))

        # According to the lsqr documentation, alpha = damp^2.
        sqrt_alpha = np.sqrt(alpha)

        for i in range(y1.shape[1]):
            y_column = y1[:, i]
            coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha,
                                      atol=tol, btol=tol, iter_lim=max_iter)[0]

        if y.ndim == 1:
            coefs = np.ravel(coefs)

        return coefs
    else:
        # normal equations (cholesky) method
        if n_features > n_samples or has_sw:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            A = safe_sparse_dot(X, X.T, dense_output=True)
            A.flat[::n_samples + 1] += alpha * sample_weight
            Axy = linalg.solve(A, y, sym_pos=True, overwrite_a=True)
            coef = safe_sparse_dot(X.T, Axy, dense_output=True)
        else:
            # ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            A = safe_sparse_dot(X.T, X, dense_output=True)
            A.flat[::n_features + 1] += alpha
            Xy = safe_sparse_dot(X.T, y, dense_output=True)
            coef = linalg.solve(A, Xy, sym_pos=True, overwrite_a=True)

        return coef.T
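A hypothetical usage of ridge_regression above on synthetic data, checked against the closed-form ridge solution w = (X^T X + alpha*I)^{-1} X^T y. It assumes the module-level imports this function relies on (np, sp_linalg, linalg, safe_sparse_dot) are present and a scipy version contemporary with this code.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
w_true = rng.randn(10)
y = X.dot(w_true) + 0.01 * rng.randn(50)

w_cg = ridge_regression(X, y, alpha=1.0, solver='sparse_cg', tol=1e-10)
w_ref = np.linalg.solve(X.T @ X + 1.0 * np.eye(10), X.T @ y)   # closed-form ridge solution
print(np.allclose(w_cg, w_ref, atol=1e-6))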
Example 11
    u_1 = analytical(dt)

    u_BDF1 = u_1.toarray()
    u_BDF2 = u_1.toarray()
    u_BDF2_old = u_0.toarray()
    u_Theta = u_1.toarray()

    N = int(np.round(T/dt+1))
    print('dt = ' + str(dt) + ', ' + str(N) + ' time steps to solve')

    print('solve BDF1')
    M_BDF1 = assemble_blockwise_matrix_BDF1(dt)
    #lu_BDF1 = sp_la.splu(M_BDF1.tocsc())
    spilu = sp_la.spilu(M_BDF1.tocsc(), fill_factor=300, drop_tol=1e-6)
    M_x = lambda x: spilu.solve(x)
    precond = sp_la.LinearOperator((2*ndofs_u+ndofs_p+1, 2*ndofs_u+ndofs_p+1), M_x)
    ### start time loop for dt
    for k in range(2,N):
        # t0_BDF1 = time.time()
        rhs_BDF1 = assemble_blockwise_force_BDF1(k*dt, dt, u_BDF1)
        #sol = lu_BDF1.solve(rhs_BDF1)
        sol = sp_la.bicgstab(M_BDF1, rhs_BDF1, M=precond, tol=1e-8)[0]
        u_BDF1 = np.reshape(sol, (2*ndofs_u + ndofs_p + 1, 1))
        # print 'error of BDF1 solution for t = ' + str(k*dt) + ': ' + str(np.linalg.norm(u_BDF1[0:2*ndofs_u]-analytical_u(k*dt)))
        # t1_BDF1 = time.time()
    gc.collect()

    print('solve BDF2')
    M_BDF2 = assemble_blockwise_matrix_BDF2(dt)
    #lu_BDF2 = sp_la.splu(M_BDF2.tocsc())
    spilu = sp_la.spilu(M_BDF2.tocsc(), fill_factor=300, drop_tol=1e-6)
Example 12
    def computeEigenmodes(self, solver_mode=openmoc.FORWARD, num_modes=5,
                          inner_method='lgmres', outer_tol=1e-5,
                          inner_tol=1e-6, interval=10):
        """Compute all eigenmodes in the problem using the scipy.linalg package.

        Parameters
        ----------
        solver_mode : {openmoc.FORWARD, openmoc.ADJOINT}
            The type of eigenmodes to compute (default is openmoc.FORWARD)
        num_modes : Integral
            The number of eigenmodes to compute (default is 5)
        inner_method : {'gmres', 'lgmres', 'bicgstab', 'cgs'}
            Krylov subspace method used for the Ax=b solve (default is 'lgmres')
        outer_tol : Real
            The tolerance on the outer eigenvalue solve (default is 1E-5)
        inner_tol : Real
            The tolerance on the inner Ax=b solve (default is 1E-6)
        interval : Integral
            The inner iteration interval for logging messages (default is 10)
        """

        # Ensure that vacuum boundary conditions are used
        geometry = self._moc_solver.getGeometry()
        if (geometry.getMinXBoundaryType() != openmoc.VACUUM or
            geometry.getMaxXBoundaryType() != openmoc.VACUUM or
            geometry.getMinYBoundaryType() != openmoc.VACUUM or
            geometry.getMaxYBoundaryType() != openmoc.VACUUM or
            (self._moc_solver.is3D() and 
             (geometry.getMinZBoundaryType() != openmoc.VACUUM or
              geometry.getMaxZBoundaryType() != openmoc.VACUUM))):
            py_printf('ERROR', 'All boundary conditions must be ' + \
                      'VACUUM for the IRAMSolver')

        import scipy.sparse.linalg as linalg

        # Set solution-dependent class attributes based on parameters
        # These are accessed and used by the LinearOperators
        self._num_modes = num_modes
        self._inner_method = inner_method
        self._outer_tol = outer_tol
        self._inner_tol = inner_tol
        self._interval = interval

        # Initialize inner/outer iteration counters to zero
        self._m_count = 0
        self._a_count = 0

        # Initialize MOC solver
        self._moc_solver.initializeSolver(solver_mode)

        # Initialize SciPy operators
        op_shape = (self._op_size, self._op_size)
        self._A_op = linalg.LinearOperator(op_shape, self._A,
                                           dtype=self._precision)
        self._M_op = linalg.LinearOperator(op_shape, self._M,
                                           dtype=self._precision)
        self._F_op = linalg.LinearOperator(op_shape, self._F,
                                           dtype=self._precision)

        # Solve the eigenvalue problem
        timer = openmoc.Timer()
        timer.startTimer()
        vals, vecs = linalg.eigs(self._F_op, k=self._num_modes,
                                 tol=self._outer_tol)
        timer.stopTimer()

        # Print a timer report
        tot_time = timer.getTime()
        time_per_mode = tot_time / self._num_modes
        tot_time = '{0:.4e} sec'.format(tot_time)
        time_per_mode = '{0:.4e} sec'.format(time_per_mode)
        py_printf('RESULT', 'Total time to solution'.ljust(53, '.') + tot_time)
        py_printf('RESULT', 'Solution time per mode'.ljust(53, '.') + time_per_mode)

        # Store the eigenvalues and eigenvectors
        self._eigenvalues = vals
        self._eigenvectors = vecs

        # Restore the material data
        self._moc_solver.resetMaterials(solver_mode)
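A minimal sketch (independent of OpenMOC) of the final step above: scipy.sparse.linalg.eigs applied to a matrix-free LinearOperator, with k and tol playing the roles of num_modes and outer_tol.

import numpy as np
import scipy.sparse.linalg as linalg

n = 500
diag = np.linspace(1.0, 2.0, n)                  # stand-in spectrum for the F operator
F_op = linalg.LinearOperator((n, n), matvec=lambda x: diag * x, dtype=np.float64)

vals, vecs = linalg.eigs(F_op, k=5, tol=1e-5)    # 5 largest-magnitude eigenvalues
print(np.sort(vals.real)[::-1])                  # the five largest entries of diag, near 2.0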
Example 13

#=====================Generate matrix=========================================

Lap,Q = sparse_laplacian(nx,ny)

I = sp.eye(nx*ny,format='csc')

# A0 = Q @ (I - CFL * Lap)
A0 = Q @ ( I - CFL*Lap)


# preconditioner
M2 = spla.splu(A0)
M_x = lambda x: M2.solve(x)
M = spla.LinearOperator((nv,nv), M_x)
#=====================================================================


reach_bottom = False # flag when melting reach the max radius
all_reach_top = False # flag when all sampling trajectories reach top 

# qs_x spatial
qs_x = heat_source_x( x )




theta = np.linspace(1,90,90) * pi/180
Ntraj = theta.size
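A self-contained sketch of the splu-as-preconditioner pattern above, applied to a small 1-D Laplacian instead of the heat-equation operator A0; with an exact LU factorization as preconditioner, GMRES converges almost immediately.

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

nv = 200
A0 = sp.diags([-np.ones(nv - 1), 2.0 * np.ones(nv), -np.ones(nv - 1)],
              [-1, 0, 1], format='csc')

M2 = spla.splu(A0)                               # exact LU used as preconditioner
M_x = lambda x: M2.solve(x)
M = spla.LinearOperator((nv, nv), M_x)

b = np.ones(nv)
x, info = spla.gmres(A0, b, M=M)
print(info, np.linalg.norm(A0 @ x - b))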
Example 14
def linear_solver(Afun,
                  B,
                  ATfun=None,
                  x0=None,
                  par=None,
                  solver=None,
                  callback=None):
    """
    Wrapper for various linear solvers suited for FFT-based homogenization.
    """
    tim = Timer('Solving linsys by %s' % solver)
    if x0 is None:
        x0 = B.zeros_like()

    if callback is not None:
        callback(x0)

    if solver.lower() in ['cg']:  # conjugate gradients
        x, info = CG(Afun, B, x0=x0, par=par, callback=callback)
    elif solver.lower() in ['bicg']:  # biconjugate gradients
        x, info = BiCG(Afun, ATfun, B, x0=x0, par=par, callback=callback)
    elif solver.lower() in ['iterative', 'richardson']:  # iterative solver
        x, info = richardson(Afun, B, x0, par=par, callback=callback)
    elif solver.lower() in ['chebyshev', 'cheby']:  # iterative solver
        x, info = cheby2TERM(A=Afun, B=B, x0=x0, par=par, callback=callback)
    elif solver.split('_')[0].lower() in ['scipy']:  # solvers in scipy
        if isinstance(x0, np.ndarray):
            x0vec = x0.ravel()
        else:
            x0vec = x0.vec()

        if solver in ['scipy.sparse.linalg.cg', 'scipy_cg']:
            Afun.define_operand(B)
            Afunvec = spslin.LinearOperator(Afun.matshape,
                                            matvec=Afun.matvec,
                                            dtype=np.float64)
            xcol, info = spslin.cg(Afunvec,
                                   B.vec(),
                                   x0=x0vec,
                                   tol=par['tol'],
                                   maxiter=par['maxiter'],
                                   M=None,
                                   callback=callback)
            info = {'info': info}
        elif solver in ['scipy.sparse.linalg.bicg', 'scipy_bicg']:
            Afun.define_operand(B)
            ATfun.define_operand(B)
            Afunvec = spslin.LinearOperator(Afun.shape,
                                            matvec=Afun.matvec,
                                            rmatvec=ATfun.matvec,
                                            dtype=np.float64)
            xcol, info = spslin.bicg(Afunvec,
                                     B.vec(),
                                     x0=x0.vec(),
                                     tol=par['tol'],
                                     maxiter=par['maxiter'],
                                     M=None,
                                     callback=callback)
        res = dict()
        res['info'] = info
        x = B.empty_like(name='x')
        x.val = np.reshape(xcol, B.val.shape)
    else:
        msg = "This kind (%s) of linear solver is not implemented" % solver
        raise NotImplementedError(msg)

    tim.measure(print_time=False)
    info.update({'time': tim.vals})
    return x, info
Example 15
def array2rlo_core(a):
    def mv(v):
        return np.dot(a,v)
    return SSLA.LinearOperator(shape=(1,a.shape[0]),matvec=mv,dtype=float)
Example 16
        row = np.loadtxt('../output/matrices/Mass_'+geo+'_'+str(ni)+"_"+str(type)+'.txt',usecols=(0))
        col = np.loadtxt('../output/matrices/Mass_'+geo+'_'+str(ni)+"_"+str(type)+'.txt',usecols=(1))
        val = np.loadtxt('../output/matrices/Mass_'+geo+'_'+str(ni)+"_"+str(type)+'.txt',usecols=(2,3)).view(complex)
        val = val[:,0]
        Mass = coo_matrix((val,(row,col)), shape=(nc,nc)).tocsr()

        ###
        temp = Mass.dot(uex)
        norm=np.vdot(temp,uex)

        ###########################
        #   Launching solver      #
        ###########################
        print("Launching solver...")
        R_x = lambda x: spla.spsolve(Prec, x)
        R = spla.LinearOperator((nc, nc), R_x)
        counter = gmres_counter()
        counter.reset()
        MTF = -0.5*(A-alpha*M-(1-alpha)*P)
        x,info = gmres(MTF, b, M=R, restart=200, callback=counter, maxiter=1e4)
        dico[str(alpha)][ni-1]=cp.deepcopy(counter.niter)
        err = Mass.dot(x-uex)
        print(np.vdot(err,x-uex)/norm,counter.niter)
        # counter.reset()
        # x,info = gmres(MTF, b, restart=200, callback=counter, maxiter=1e4)
        # iterations_no_precond[ni-1]=cp.deepcopy(counter.niter)
        # err = Mass.dot(x-uex)
        # print(np.vdot(err,x-uex)/norm)


df = pandas.DataFrame(dico)
Example 17
def array2clo_core(a):
    def mv(v):
        # assert(v.shape==(1,1))
        return v* a
    return SSLA.LinearOperator(shape=(a.shape[0],1),matvec=mv,dtype=float)
Example 18
    def computePrecon(self, g, o):
        M_x = lambda x: spl.spsolve(self.sysA[g, o] * sps.eye(self.nNodes), x)
        self.sysP[g, o] = spl.LinearOperator((self.nNodes, self.nNodes), M_x)
Example 19
def diag2lo(w):
    n = w.shape[0]
    def mv(v):
        return v*w
    return SSLA.LinearOperator(shape=(n,n),matvec=mv,rmatvec=mv,matmat=naive_matmat(mv),dtype=float)
Example 20
def TVRegDiff(data,
              itern,
              alph,
              u0=None,
              scale='small',
              ep=1e-6,
              dx=None,
              plotflag=_has_matplotlib,
              diagflag=1):

    # code starts here
    # Make sure we have a column vector
    data = np.array(data)
    if (len(data.shape) != 1):
        logging.error("Error - data is not a column vector")
        return
    # Get the data size.
    n = len(data)

    # Default checking. (u0 is done separately within each method.)
    if dx is None:
        dx = 1.0 / n

    # Different methods for small- and large-scale problems.
    if (scale.lower() == 'small'):

        # Construct differentiation matrix.
        c = np.ones(n + 1) / dx
        D = sparse.spdiags([-c, c], [0, 1], n, n + 1)

        DT = D.transpose()

        # Construct antidifferentiation operator and its adjoint.
        def A(x):
            return chop(np.cumsum(x) - 0.5 * (x + x[0])) * dx

        def AT(w):
            return (sum(w) * np.ones(n + 1) - np.transpose(
                np.concatenate(([sum(w) / 2.0], np.cumsum(w) - w / 2.0)))) * dx

        # Default initialization is naive derivative

        if u0 is None:
            u0 = np.concatenate(([0], np.diff(data), [0]))

        u = u0
        # Since Au( 0 ) = 0, we need to adjust.
        ofst = data[0]
        # Precompute.
        ATb = AT(ofst - data)  # input: size n

        # Main loop.
        for ii in range(1, itern + 1):
            # Diagonal matrix of weights, for linearizing E-L equation.
            Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
            # Linearized diffusion matrix, also approximation of Hessian.
            L = dx * DT * Q * D

            # Gradient of functional.
            g = AT(A(u)) + ATb + alph * L * u

            # Prepare to solve linear equation.
            tol = 1e-4
            maxit = 100
            # Simple preconditioner.
            P = alph * sparse.spdiags(L.diagonal() + 1, 0, n + 1, n + 1)

            def linop(v):
                return (alph * L * v + AT(A(v)))

            linop = splin.LinearOperator((n + 1, n + 1), linop)

            if diagflag:
                [s, info_i] = sparse.linalg.cg(linop,
                                               g,
                                               x0=None,
                                               tol=tol,
                                               maxiter=maxit,
                                               callback=None,
                                               M=P)
                logging.info('iteration {0:4d}: relative change = {1:.3e}, '
                             'gradient norm = {2:.3e}\n'.format(
                                 ii,
                                 np.linalg.norm(s[0]) / np.linalg.norm(u),
                                 np.linalg.norm(g)))
                if (info_i > 0):
                    logging.warning(
                        "WARNING - convergence to tolerance not achieved!")
                elif (info_i < 0):
                    logging.warning("WARNING - illegal input or breakdown")
            else:
                [s, info_i] = sparse.linalg.cg(linop,
                                               g,
                                               x0=None,
                                               tol=tol,
                                               maxiter=maxit,
                                               callback=None,
                                               M=P)
            # Update solution.
            u = u - s
            # Display plot.
            if plotflag:
                plt.plot(u)
                plt.show()

    elif (scale.lower() == 'large'):

        # Construct antidifferentiation operator and its adjoint.
        def A(v):
            return np.cumsum(v)

        def AT(w):
            return (sum(w) * np.ones(len(w)) -
                    np.transpose(np.concatenate(([0.0], np.cumsum(w[:-1])))))

        # Construct differentiation matrix.
        c = np.ones(n)
        D = sparse.spdiags([-c, c], [0, 1], n, n) / dx
        mask = np.ones((n, n))
        mask[-1, -1] = 0.0
        D = sparse.dia_matrix(D.multiply(mask))
        DT = D.transpose()
        # Since Au( 0 ) = 0, we need to adjust.
        data = data - data[0]
        # Default initialization is naive derivative.
        if u0 is None:
            u0 = np.concatenate(([0], np.diff(data)))
        u = u0
        # Precompute.
        ATd = AT(data)

        # Main loop.
        for ii in range(1, itern + 1):
            # Diagonal matrix of weights, for linearizing E-L equation.
            Q = sparse.spdiags(1. / np.sqrt((D * u)**2.0 + ep), 0, n, n)
            # Linearized diffusion matrix, also approximation of Hessian.
            L = DT * Q * D
            # Gradient of functional.
            g = AT(A(u)) - ATd
            g = g + alph * L * u
            # Build preconditioner.
            c = np.cumsum(range(n, 0, -1))
            B = alph * L + sparse.spdiags(c[::-1], 0, n, n)
            # droptol = 1.0e-2
            R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
            # Prepare to solve linear equation.
            tol = 1.0e-4
            maxit = 100

            def linop(v):
                return (alph * L * v + AT(A(v)))

            linop = splin.LinearOperator((n, n), linop)

            if diagflag:
                [s, info_i] = sparse.linalg.cg(linop,
                                               -g,
                                               x0=None,
                                               tol=tol,
                                               maxiter=maxit,
                                               callback=None,
                                               M=np.dot(R.transpose(), R))
                logging.info('iteration {0:4d}: relative change = {1:.3e}, '
                             'gradient norm = {2:.3e}\n'.format(
                                 ii,
                                 np.linalg.norm(s[0]) / np.linalg.norm(u),
                                 np.linalg.norm(g)))
                if (info_i > 0):
                    logging.warning(
                        "WARNING - convergence to tolerance not achieved!")
                elif (info_i < 0):
                    logging.warning("WARNING - illegal input or breakdown")

            else:
                [s, info_i] = sparse.linalg.cg(linop,
                                               -g,
                                               x0=None,
                                               tol=tol,
                                               maxiter=maxit,
                                               callback=None,
                                               M=np.dot(R.transpose(), R))
            # Update current solution
            u = u + s
            # Display plot.
            if plotflag:
                plt.plot(u / dx)
                plt.show()

        u = u / dx

    return u
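A hypothetical call of TVRegDiff on synthetic noisy data (plotflag=False keeps it headless, diagflag=0 suppresses the logging branch); it assumes the module-level helpers TVRegDiff relies on for scale='small' (e.g. chop) are available.

import numpy as np

t = np.linspace(0.0, 1.0, 200)
noisy = np.abs(t - 0.5) + 0.02 * np.random.randn(t.size)   # true derivative is -1 then +1
u = TVRegDiff(noisy, itern=20, alph=0.1, dx=t[1] - t[0],
              scale='small', ep=1e-6, plotflag=False, diagflag=0)
# u has length len(noisy) + 1 and approximates the TV-regularized derivative of the data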
Example 21
def bem2lo(M):
    return SSLA.LinearOperator(shape=M.shape, matvec=M.matvec, rmatvec=M.rmatvec, matmat=M.matmat, dtype=float)
Example 22
    def test_DoGIP_vs_FEniCS(self):
        print(
            '\n== testing DoGIP vs. FEniCS for problem of weighted projection ===='
        )
        for dim, pol_order in itertools.product([2, 3], [1, 2]):
            print('dim={}; pol_order={}'.format(dim, pol_order))
            N = 2  # no. of elements

            # creating MESH, defining MATERIAL and SOURCE
            if dim == 2:
                mesh = UnitSquareMesh(N, N)
                m = Expression("1+10*16*x[0]*(1-x[0])*x[1]*(1-x[1])", degree=4)
                f = Expression("80*x[0]*(0.5-x[0])*(1.-x[0])*x[1]*(1.-x[1])",
                               degree=5)
            elif dim == 3:
                mesh = UnitCubeMesh(N, N, N)
                m = Expression("1+10*16*x[0]*(1-x[0])*(1-x[1])*x[2]", degree=4)
                f = Expression("80*x[0]*(0.5-x[0])*(1.-x[0])*x[1]*(1.-x[1])",
                               degree=5)

            mesh.coordinates()[:] += 0.1 * np.random.random(
                mesh.coordinates().shape)  # mesh perturbation

            ## standard approach with FEniCS #############################################
            V = FunctionSpace(mesh, "CG", pol_order)  # original FEM space
            bc = DirichletBC(V, Constant(0.0),
                             lambda x, on_boundary: on_boundary)
            u, v = TrialFunction(V), TestFunction(V)
            u_fenics = Function(V)  # the vector for storing the solution
            solve(m * inner(grad(u), grad(v)) * dx == f * v * dx, u_fenics,
                  bc)  # solution by FEniCS

            ## DoGIP - double-grid integration with interpolation-projection #############
            W = FunctionSpace(mesh, "DG",
                              2 * (pol_order - 1))  # double-grid space
            Wvector = VectorFunctionSpace(
                mesh, "DG",
                2 * (pol_order - 1))  # vector variant of double-grid space
            w = TestFunction(W)
            A_dogip = assemble(
                m * w *
                dx).get_local()  # diagonal matrix of material coefficients
            A_dogip_full = np.einsum(
                'i,jk->ijk', A_dogip,
                np.eye(dim))  # block-diagonal mat. for non-isotropic mat.
            bv = assemble(f * v * dx)
            bc.apply(bv)
            b = bv.get_local()  # vector of right-hand side

            # assembling global interpolation-projection matrix B
            B = get_B(V, Wvector, problem=1)

            # solution to DoGIP problem
            def Afun(x):
                Axd = np.einsum('...jk,...j', A_dogip_full,
                                B.dot(x).reshape((-1, dim)))
                Afunx = B.T.dot(Axd.ravel())
                Afunx[list(bc.get_boundary_values()
                           )] = 0  # application of Dirichlet BC
                return Afunx

            Alinoper = linalg.LinearOperator((b.size, b.size),
                                             matvec=Afun,
                                             dtype=float)  # system matrix
            x, info = linalg.cg(Alinoper,
                                b,
                                x0=np.zeros_like(b),
                                tol=1e-8,
                                maxiter=100)  # conjugate gradients

            # testing the difference between DoGIP and FEniCS
            self.assertAlmostEqual(
                0, np.linalg.norm(u_fenics.vector().get_local() - x))
            print('...ok')
Example 23
def adjoint(A):
    return SSLA.LinearOperator(shape=(A.shape[1],A.shape[0]),matvec=A.rmatvec,rmatvec=A.matvec, dtype=float)
Example 24
    def init_solver(self, L):
        global linalg
        from scipy.sparse import linalg
        ilu = linalg.spilu(self.L1.tocsc())
        n = self.n - 1
        self.M = linalg.LinearOperator(shape=(n, n), matvec=ilu.solve)
Example 25
    def fit(self, lambdas, maxiter=np.inf, tol=1e-6, start_rank=100):
        """Complete matrix X, return series of low-rank approximations

        Args:
        lambdas: regularization parameters for fitting
        maxiter: maximum number of iterations to perform
        tol: tolerance; the algorithm is treated as converged when the Frobenius norm of the
             difference between two successive approximations is less than tol
        start_rank: rank of the initial approximation

        Returns:
        lambdas: descendingly sorted lambdas from Args
        U_approx, Vt_approx: U_approx[i].dot(Vt_approx[i]) is an i-th approximation for X

        """
        self.rank = start_rank
        self.tol = tol
        self.lambdas = np.sort(lambdas)[::-1]
        self.Z_proj = sparse.csr_matrix(self.X.shape)
        self.S = np.zeros((self.rank,))
        self.U = np.zeros((self.X.shape[0], self.rank))
        self.Vt = np.zeros((self.rank, self.X.shape[1]))
        
        self.U_approx = []
        self.Vt_approx = []
        
        self.bias = np.array([])
        
        num_iter = 0

        for l in self.lambdas:
            while (num_iter < maxiter):
                self.update_Z_proj()
                self.bias = np.append(self.bias, spla.norm(self.Z_proj - self.X, ord='fro'))
                
                lin_op = spla.LinearOperator(self.X.shape,
                                             matvec=self.matvec,
                                             rmatvec=self.rmatvec)
                U, S, Vt = spla.svds(lin_op, self.rank)
                
                S -= l
                S[S < 0] = 0
                
                if (num_iter > 0):
                    rel_err = np.linalg.norm(S - self.S) ** 2 / np.linalg.norm(self.S) ** 2
                else:
                    rel_err = 2 * self.tol
                
                S = S[S > 0]
                self.rank = S.shape[0]
                
                U = U[:, :self.rank]
                Vt = Vt[:self.rank, :]
                
                self.U = U
                self.Vt = Vt
                self.S = S
                
                self.Vt = np.diag(self.S).dot(self.Vt)
                
                if (rel_err < self.tol):
                    break
                
                num_iter += 1
                
            self.U_approx.append(self.U)
            self.Vt_approx.append(self.Vt)
        
        return self.lambdas, self.U_approx, self.Vt_approx
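A minimal sketch of the partial-SVD step used above: svds applied to a LinearOperator whose matvec/rmatvec combine a sparse term and a low-rank term. The names below are illustrative stand-ins for the class attributes (Z_proj, U, Vt), not the attributes themselves.

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as spla

m, n, r = 60, 40, 5
rng = np.random.RandomState(0)
Z = sparse.random(m, n, density=0.05, format='csr', random_state=0)   # sparse part
U = rng.randn(m, r)
Vt = rng.randn(r, n)                                                  # low-rank part U @ Vt

matvec = lambda x: Z @ x + U @ (Vt @ x)
rmatvec = lambda y: Z.T @ y + Vt.T @ (U.T @ y)
lin_op = spla.LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec)

u, s, vt = spla.svds(lin_op, k=r)   # r leading singular triplets of Z + U @ Vt
print(s)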
Example 26
    # first iteration residual: distribute "barF" over grid using "K4"
    b = -G_K_dF((barF - barF_t)[:2, :2])
    F += barF - barF_t

    # parameters for Newton iterations: normalization and iteration counter
    Fn = np.linalg.norm(F)
    iiter = 0

    # iterate as long as the iterative update does not vanish
    while True:

        # solve linear system using the Conjugate Gradient iterative solver
        dFm, i = sp.cg(
            tol=1.e-8,
            A=sp.LinearOperator(shape=(2 * 2 * Nx * Ny, 2 * 2 * Nx * Ny),
                                matvec=G_K_dF,
                                dtype='float'),
            b=b,
            maxiter=1000,
        )
        if i: raise IOError('CG-solver failed')

        # apply iterative update to (3-D) DOFs
        F[:2, :2] += dFm.reshape(2, 2, Nx, Ny)

        # compute residual stress and tangent, convert to residual
        P, P_2, K4_2, be, ep = constitutive(F, F_t, be_t, ep_t)
        b = -G(P_2)

        # check for convergence, print convergence info to screen
        print('{0:10.2e}'.format(np.linalg.norm(dFm) / Fn))
Example 27
    def compute_deterministic_velocity_and_torque(self):
        '''
    Compute the torque on bodies rotating with a prescribed
    angular velocity and subject to forces, i.e., solve the
    linear system
    
    M_rr * T = omega - M_rt * forces
    
    Then compute the translational velocity

    v = M_tr * T + M_tt * forces
    
    It returns the velocities and torques (v,T).
    '''
        # Create auxiliary variables
        Nblobs = len(self.bodies)
        blob_mass = 1.0

        # Get blobs coordinates
        r_vectors_blobs = np.empty((Nblobs, 3))
        for k, b in enumerate(self.bodies):
            r_vectors_blobs[k] = b.location

        # Compute one-blob forces (same function for all blobs)
        # force = np.zeros(r_vectors_blobs.size)
        force = self.calc_one_blob_forces(r_vectors_blobs,
                                          blob_radius=self.a,
                                          blob_mass=blob_mass)

        # Compute blob-blob forces (same function for all pair of blobs)
        force += self.calc_blob_blob_forces(r_vectors_blobs,
                                            blob_radius=self.a)
        force = np.reshape(force, force.size)

        # Use constraint motion or free kinematics
        if self.free_kinematics == 'False':
            # Set rollers angular velocity
            omega = np.empty(3 * len(self.bodies))
            for i in range(len(self.bodies)):
                omega[3 * i:3 * (i + 1)] = self.get_omega_one_roller()

            # Set RHS = omega - M_rt * force
            RHS = omega - self.mobility_rot_times_force(
                r_vectors_blobs,
                force,
                self.eta,
                self.a,
                periodic_length=self.periodic_length)

            # Set linear operator
            system_size = 3 * len(self.bodies)

            def mobility_rot_torque(torque,
                                    r_vectors=None,
                                    eta=None,
                                    a=None,
                                    periodic_length=None):
                return self.mobility_rot_times_torque(
                    r_vectors, torque, eta, a, periodic_length=periodic_length)

            linear_operator_partial = partial(
                mobility_rot_torque,
                r_vectors=r_vectors_blobs,
                eta=self.eta,
                a=self.a,
                periodic_length=self.periodic_length)
            A = spla.LinearOperator((system_size, system_size),
                                    matvec=linear_operator_partial,
                                    dtype='float64')

            # Scale RHS to norm 1
            RHS_norm = np.linalg.norm(RHS)
            if RHS_norm > 0:
                RHS = RHS / RHS_norm

            # Solve linear system
            counter = gmres_counter(print_residual=self.print_residual)
            (sol_precond, info_precond) = utils.gmres(
                A,
                RHS,
                x0=self.deterministic_torque_previous_step,
                tol=self.tolerance,
                maxiter=1000,
                callback=counter)
            self.det_iterations_count += counter.niter
            self.deterministic_torque_previous_step = sol_precond

            # Scale solution with RHS norm
            if RHS_norm > 0:
                sol_precond = sol_precond * RHS_norm
        else:
            # This is free kinematics, compute torque
            sol_precond = self.get_torque()

        # Compute linear velocity
        velocity = self.mobility_trans_times_force(
            r_vectors_blobs,
            force,
            self.eta,
            self.a,
            periodic_length=self.periodic_length)
        if np.any(sol_precond):
            velocity += self.mobility_trans_times_torque(
                r_vectors_blobs,
                sol_precond,
                self.eta,
                self.a,
                periodic_length=self.periodic_length)

        # Return linear velocity and torque
        return velocity, sol_precond
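A toy dense analogue of the constrained solve above: given mobility blocks M_tt, M_tr, M_rt, M_rr, a prescribed angular velocity omega and forces f, solve M_rr * T = omega - M_rt * f with GMRES, then set v = M_tt * f + M_tr * T. The blocks are a random SPD stand-in, not a physical mobility.

import numpy as np
import scipy.sparse.linalg as spla

n = 9                                    # 3 rollers, 3 translational + 3 rotational DOFs each
rng = np.random.RandomState(1)
G = rng.randn(2 * n, 2 * n)
M = G @ G.T + 2 * n * np.eye(2 * n)      # SPD grand mobility stand-in
M_tt, M_tr = M[:n, :n], M[:n, n:]
M_rt, M_rr = M[n:, :n], M[n:, n:]

f = rng.randn(n)
omega = np.tile([0.0, 1.0, 0.0], n // 3)           # rollers spinning about y

A = spla.LinearOperator((n, n), matvec=lambda t: M_rr @ t, dtype='float64')
T, info = spla.gmres(A, omega - M_rt @ f)
v = M_tt @ f + M_tr @ T
print(info, np.linalg.norm(M_rr @ T - (omega - M_rt @ f)))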
Example 28
def transpose(A):
    return SSLA.LinearOperator(shape=(A.shape[1],A.shape[0]),matvec=A.rmatvec,rmatvec=A.matvec, dtype=float)
Example 29
f = mm.open(f'{problem}_rhs1.mtx.gz')
b = np.array(io.mmread(f)).ravel()
f.close()

bnorm = np.linalg.norm(b)
count = [0]


def matvec(v):
    count[0] += 1
    sys.stderr.write(f"{count[0]}\r")
    return Am@v


A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)

M = 100

print("MatrixMarket problem %s" % problem)
print(f"Invert {Am.shape[0]} x {Am.shape[1]} matrix; nnz = {Am.nnz}")

count[0] = 0
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
count_0 = count[0]
err0 = np.linalg.norm(Am@x0 - b) / bnorm
print(f"GMRES({M}): {count_0} matvecs, relative residual: {err0}")
if info != 0:
    print("Didn't converge")

count[0] = 0
Example 30
  def Lubrucation_solve(self, X, Xm, X0=None, print_residual=False, its_out = 1000, PC_flag=True):
    '''
    Solve the lubrication problem for bodies using GMRES. Computes the solution U = [I + M_RPY*DR]^{-1} * (X + M*Xm).
    The PC ignores 'isolated particles', i.e. those that are far from the wall and from other particles (making the PC less effective).
    '''
    if(self.Delta_R is None):
      self.Set_R_Mats()
    
    num_particles = len(self.bodies)
    res_list = []
    
    RHS = np.zeros((6*num_particles,1))
    if Xm is not None:
      MXm = self.Wall_Mobility_Mult(Xm)
      MXm = MXm[:,np.newaxis]
      RHS += MXm
    if X is not None:
      RHS += X
      
    RHS_norm = np.linalg.norm(RHS)
    if RHS_norm > 0:
      RHS = RHS / RHS_norm

    if PC_flag:
      #############
      r_vecs_np = [b.location for b in self.bodies]
      r_vecs = list(self.put_r_vecs_in_periodic_box(r_vecs_np,self.periodic_length))
      r_tree = spatial.cKDTree(np.array(r_vecs),boxsize=self.periodic_length)

      isolated = []
      for j in range(num_particles):
        s1 = r_vecs[j]
        if s1[2] < self.cutoff*self.a:
          continue
        idx = r_tree.query_ball_point(s1,r=self.cutoff*self.a)
        idx_trim = [i for i in idx if i > j]
        if not idx_trim:
          isolated.append(j)
      ##############
      
      start = time.time()
      small = 6.0*np.pi*self.eta*self.a*self.tolerance
      Eig_Shift_R_Sup = self.R_Sup + sp.diags(small*np.ones(6*num_particles),0,format='csc')
      factor = cholesky(Eig_Shift_R_Sup)
      end = time.time()
      print('factor time : ' + str(end - start))
      PC_operator_partial = partial(self.IpMDR_PC, R_fact=factor,isolated=isolated)
      
      ################## SWAN #####################
      #start = time.time()
      #gamma = 6.0*np.pi*self.eta*self.a
      #gamma_r = 8.0*np.pi*self.eta*(self.a**3)
      #onev = np.ones(6*num_particles)
      #diag_m = onev
      #for k in range(num_particles):
	#diag_m[6*k:6*k+3] *= (1.0/gamma)
	#diag_m[6*k+3:6*k+6] *= (1.0/gamma_r)
      #Shift_R_Sup = sp.diags(diag_m,0,format='csc')*self.Delta_R + sp.diags(onev,0,format='csc')
      #factor = cholesky(Shift_R_Sup)
      #end = time.time()
      #PC_operator_partial = partial(self.IpMDR_Swan_PC, R_fact=factor)
      ################## SWAN #####################
      
      PC = spla.LinearOperator((6*num_particles, 6*num_particles), matvec = PC_operator_partial, dtype='float64')
    else:
      PC = None
      
    A = spla.LinearOperator((6*num_particles, 6*num_particles), matvec = self.IpMDR_Mult, dtype='float64')
    
    res_list = []
    if X0 is not None:
      X0 = X0/RHS_norm
    
    (U_gmres, info_precond) = pyamg.krylov.gmres(A,RHS, x0=X0, tol=self.tolerance, M=PC, maxiter=min(its_out,A.shape[0]), restrt = min(100,A.shape[0]), residuals=res_list) 
    #(U_gmres, info_precond) = pyamg.krylov.bicgstab(A,RHS, x0=X0, tol=self.tolerance, M=PC, maxiter=min(its_out,A.shape[0]), residuals=res_list) 
    #(U_gmres, info_precond) = utils.gmres(A, RHS, x0=X0, tol=self.tolerance, M=PC, maxiter=min(its_out,A.shape[0]), restart = min(100,A.shape[0])) #, callback=counter) 
    if RHS_norm > 0:
      U_gmres = U_gmres * RHS_norm
    #print(res_list)
    return U_gmres, res_list