Example #1
    def __measure_stress(self, cell1, cell2, weight):
        """ Measures stress from the polar decomposition of the transformation :math:`UPA = C`.
        
        Args:
            cell1 (ndarray): Lower basis.
            cell2 (ndarray): Upper basis.
            weight (float): Weight factor for common unit cell.

        Returns:
            tuple: :math:`\bar{\epsilon}_A + \bar{\epsilon}_B` together with the strain tensors :math:`P_A - I` and :math:`P_B - I`.
        """
        from scipy.linalg import polar

        A = cell1.copy()[:2, :2]
        B = cell2.copy()[:2, :2]
        C = A + weight * (B - A)
        T1 = C @ np.linalg.inv(A)
        T2 = C @ np.linalg.inv(B)

        def measure(P):
            eps = P - np.identity(2)
            meps = np.sqrt((eps[0, 0]**2 + eps[1, 1]**2 +
                            eps[0, 0] * eps[1, 1] + eps[1, 0]**2) / 4)
            return meps

        U1, P1 = polar(T1)  # this one goes counterclockwise
        U2, P2 = polar(T2)  # this one goes clockwise
        # u is rotation, p is strain
        meps1 = measure(P1)
        meps2 = measure(P2)
        stress = meps1 + meps2
        return (stress, P1 - np.identity(2), P2 - np.identity(2))
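A minimal, self-contained sketch of the same measure, using two hypothetical 2x2 lattice bases, showing how the polar factor of C @ inv(A) separates the rotation from the strain that enters the scalar above:

import numpy as np
from scipy.linalg import polar

A = np.array([[3.0, 0.0], [0.0, 3.0]])         # hypothetical lower basis
B = np.array([[3.1, 0.1], [0.0, 2.9]])         # hypothetical upper basis
C = A + 0.5 * (B - A)                          # common cell at weight 0.5

U, P = polar(C @ np.linalg.inv(A))             # U: rotation, P: stretch/strain factor
eps = P - np.identity(2)
meps = np.sqrt((eps[0, 0]**2 + eps[1, 1]**2 +
                eps[0, 0] * eps[1, 1] + eps[1, 0]**2) / 4)
print(np.allclose(U @ P @ A, C))               # True: U @ P @ A reconstructs C
print(meps)                                    # scalar strain measure for cell A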
Example #2
    def random_init(self, D):
        """Random initialization of the MPS and initializiation of the
        environments.
        """
        Lcanon = [
            polar(rand(D * self.pdim, D) +
                  rand(D * self.pdim, D) * 1j)[0].reshape(D, self.pdim, D)
            for i in range(self.cell_size)
        ]
        Rcanon = [
            polar(rand(D, self.pdim * D) +
                  rand(D, self.pdim * D) * 1j)[0].reshape(D, self.pdim, D)
            for i in range(self.cell_size)
        ]

        self.sites = Lcanon + Rcanon
        self.c = rand(D, D) + rand(D, D) * 1j
        self.c = self.c / norm(self.c)
        self.center_bond = self.cell_size

        # Left boundary of the environment
        LE = np.zeros((D, self.MPOdim, D))
        LE[:, 0, :] = 1.
        LE = LE / norm(LE)
        self.LEnvironment[0] = LE

        # Right boundary of the environment
        RE = np.zeros((D, self.MPOdim, D))
        RE[:, -1, :] = 1.
        RE = RE / norm(RE)
        self.REnvironment[self.end_bond] = RE
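The reshaped polar factors above are canonical MPS tensors because the unitary factor of a tall random matrix is an isometry. A small standalone check of the left-canonical gauge condition, with hypothetical D and pdim (the right-canonical case for Rcanon is analogous):

import numpy as np
from numpy.random import rand
from scipy.linalg import polar

D, pdim = 4, 2                                  # hypothetical bond and physical dimensions
M = rand(D * pdim, D) + 1j * rand(D * pdim, D)
A = polar(M)[0].reshape(D, pdim, D)             # isometric polar factor reshaped into an MPS tensor

# Left-canonical gauge condition: sum_s A[s]^dagger A[s] = identity
gauge = sum(A[:, s, :].conj().T @ A[:, s, :] for s in range(pdim))
print(np.allclose(gauge, np.eye(D)))            # True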
Example #3
    def measure_stress(self) -> float:
        """Measures the stress on both unit cells."""
        A = self.bottom.cell.copy()[:2, :2]
        B = self.top.cell.copy()[:2, :2]
        C = A + self._weight * (B - A)
        T1 = C @ np.linalg.inv(A)
        T2 = C @ np.linalg.inv(B)

        def measure(P):
            eps = P - np.identity(2)
            meps = np.sqrt(
                (
                    eps[0, 0] ** 2
                    + eps[1, 1] ** 2
                    + eps[0, 0] * eps[1, 1]
                    + eps[1, 0] ** 2
                )
                / 4
            )
            return meps

        U1, P1 = polar(T1)  # this one goes counterclockwise
        U2, P2 = polar(T2)  # this one goes clockwise
        # u is rotation, p is strain
        meps1 = measure(P1)
        meps2 = measure(P2)
        stress = meps1 + meps2
        # return (stress, P1 - np.identity(2), P2 - np.identity(2))
        return stress
Example #4
    def _find_matches(self) -> None:
        """
        Finds and stores the ZSL matches
        """
        self.zsl_matches = []

        film_sg = SlabGenerator(
            self.film_structure,
            self.film_miller,
            min_slab_size=1,
            min_vacuum_size=3,
            in_unit_planes=True,
            center_slab=True,
            primitive=True,
            reorient_lattice=False,  # This is necessary to not screw up the lattice
        )

        sub_sg = SlabGenerator(
            self.substrate_structure,
            self.substrate_miller,
            min_slab_size=1,
            min_vacuum_size=3,
            in_unit_planes=True,
            center_slab=True,
            primitive=True,
            reorient_lattice=False,  # This is necessary to not screw up the lattice
        )

        film_slab = film_sg.get_slab(shift=0)
        sub_slab = sub_sg.get_slab(shift=0)

        film_vectors = film_slab.lattice.matrix
        substrate_vectors = sub_slab.lattice.matrix

        # Generate all possible interface matches
        self.zsl_matches = list(
            self.zslgen(film_vectors[:2], substrate_vectors[:2], lowest=False))

        for match in self.zsl_matches:
            xform = get_2d_transform(film_vectors, match.film_vectors)
            strain, rot = polar(xform)
            assert np.allclose(
                strain, np.round(strain)
            ), "Film lattice vectors changed during ZSL match, check your ZSL Generator parameters"

            xform = get_2d_transform(substrate_vectors,
                                     match.substrate_vectors)
            strain, rot = polar(xform)
            assert np.allclose(
                strain, strain.astype(int)
            ), "Substrate lattice vectors changed during ZSL match, check your ZSL Generator parameters"
Example #5
    def __optimize_cell_rotation(self, cell1, cell2, weight):
        r""" Optimizes indivual cell rotation before matching the common unit cell.
       
        Args:
            cell1 (ndarray): Lower basis.
            cell2 (ndarray): Upper basis.
            weight (float): Weight factor for common unit cell.

        Returns:
            tuple: (:math:`\phi_A`, :math:`\phi_B`)
        """
        from scipy.linalg import polar
        from scipy.optimize import minimize

        def measure(P):
            eps = P - np.identity(2)
            meps = np.sqrt((eps[0, 0]**2 + eps[1, 1]**2 +
                            eps[0, 0] * eps[1, 1] + eps[1, 0]**2) / 4)
            return meps

        def find(angle, start, target):
            c = np.cos(angle)
            s = np.sin(angle)
            R = np.array([[c, -s], [s, c]])
            newP = np.linalg.inv(R) @ target @ np.linalg.inv(start)
            return measure(newP)

        def f(params, args):
            angle1, angle2 = params
            A, B, C = args
            m1 = find(angle1, A, C)
            m2 = find(angle2, B, C)
            return m1 + m2

        A = cell1.copy()[:2, :2]
        B = cell2.copy()[:2, :2]
        C = A + weight * (B - A)
        T1 = C @ np.linalg.inv(A)
        T2 = C @ np.linalg.inv(B)
        U1, P1 = polar(T1)  # this one goes counterclockwise
        U2, P2 = polar(T2)  # this one goes clockwise
        angle1 = np.arccos(U1[0, 0]) if U1[0, 0] < 0 else -np.arccos(U1[0, 0])
        angle2 = np.arccos(U2[0, 0]) if U2[0, 0] < 0 else -np.arccos(U2[0, 0])
        try:
            res = minimize(f, x0=[angle1, angle2], args=[A, B, C])
            newangle1, newangle2 = res.x
            return (newangle1, newangle2)
        except Exception as e:
            print(e)
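A small sketch, independent of the optimizer above, of reading the signed rotation angle directly off the unitary polar factor with arctan2, using a hypothetical rotation-plus-stretch transform:

import numpy as np
from scipy.linalg import polar

phi = np.deg2rad(-7.0)
R = np.array([[np.cos(phi), -np.sin(phi)],
              [np.sin(phi),  np.cos(phi)]])
P = np.array([[1.03, 0.0], [0.0, 0.97]])        # some symmetric stretch
T = R @ P

U, _ = polar(T)
angle = np.arctan2(U[1, 0], U[0, 0])            # signed rotation angle in radians
print(np.isclose(angle, phi))                   # True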
Example #6
 def polarDecompose(self, H):
     from scipy.linalg import polar
     T, L = self.separateTranslation(H)  # T is translation matrix,
     R, K = polar(L)  # R is rotation matrix, K is stretch matrix
     # The determinant of a rotation matrix must be positive
     if np.linalg.det(R) < 0:
         R[:3, :3] = -R[:3, :3]
         K[:3, :3] = -K[:3, :3]
     # Check answer still OK
     assert np.allclose(L, R @ K), 'R*K should equal L, but it does not!'
     assert np.allclose(H,
                        T @ R @ K), 'T*R*K should equal H, but it does not!'
     # Decompose stretch matrix K into scale matrices
     f, X = np.linalg.eig(
         K)  # eigenvalues and eigenvectors of stretch matrix
     S = []
     for factor, axis in zip(f[:3], X.T[:3]):
         #if not np.isclose(factor, 1):
         scale = np.eye(4) + np.outer(axis, axis) * (factor - 1)
         S.append(scale)
     # Check answers still OK
     scale_prod = np.eye(4)
     for scale in S:
         scale_prod = scale_prod @ scale
     if not np.allclose(K, scale_prod):
         print(
             'Product of scale matrices should equal stretch matrix K, but it does not!'
         )
     if not np.allclose(H, T @ R @ scale_prod):
         print(
             'T*R*(product of scale matrices) should equal H, but it does not!'
         )
     # Return all interesting outputs
     return T, R, K, S, f, X
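A self-contained sketch of the same idea for a 4x4 homogeneous transform; since separateTranslation is not shown above, the translation is split off inline here before the polar decomposition into rotation and stretch:

import numpy as np
from scipy.linalg import polar

# A made-up homogeneous transform H = T @ L: translation times (axis-aligned stretch composed with a z-rotation)
Rz = np.array([[0.0, -1.0, 0.0],
               [1.0,  0.0, 0.0],
               [0.0,  0.0, 1.0]])
L = np.eye(4)
L[:3, :3] = np.diag([1.2, 0.8, 1.0]) @ Rz
T = np.eye(4)
T[:3, 3] = [5.0, -2.0, 1.0]
H = T @ L

# Split off the translation, then polar-decompose the remaining linear part
T_est = np.eye(4)
T_est[:3, 3] = H[:3, 3]
L_est = H.copy()
L_est[:3, 3] = 0.0
R, K = polar(L_est)                             # R: rotation, K: symmetric stretch
print(np.allclose(H, T_est @ R @ K))            # True
print(np.isclose(np.linalg.det(R), 1.0))        # True: proper rotation for this example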
Example #7
  def testQdwhWithUpperTriangularInputAllOnes(self, m, n, log_cond):
    """Tests qdwh with upper triangular input of all ones."""
    a = jnp.triu(jnp.ones((m, n))).astype(_QDWH_TEST_DTYPE)
    u, s, v = jnp.linalg.svd(a, full_matrices=False)
    cond = 10**log_cond
    s = jnp.expand_dims(jnp.linspace(cond, 1, min(m, n)), range(u.ndim - 1))
    a = (u * s) @ v
    is_hermitian = _check_symmetry(a)
    max_iterations = 10

    actual_u, actual_h, _, _ = qdwh.qdwh(a, is_hermitian=is_hermitian,
                                         max_iterations=max_iterations)
    expected_u, expected_h = osp_linalg.polar(a)

    # Sets the test tolerance.
    rtol = 1E6 * _QDWH_TEST_EPS

    with self.subTest('Test u.'):
      relative_diff_u = _compute_relative_diff(actual_u, expected_u)
      np.testing.assert_almost_equal(relative_diff_u, 1E-6, decimal=5)

    with self.subTest('Test h.'):
      relative_diff_h = _compute_relative_diff(actual_h, expected_h)
      np.testing.assert_almost_equal(relative_diff_h, 1E-6, decimal=5)

    with self.subTest('Test u.dot(h).'):
      a_round_trip = _dot(actual_u, actual_h)
      relative_diff_a = _compute_relative_diff(a_round_trip, a)
      np.testing.assert_almost_equal(relative_diff_a, 1E-6, decimal=5)

    with self.subTest('Test orthogonality.'):
      actual_results = _dot(actual_u.T, actual_u)
      expected_results = np.eye(n)
      self.assertAllClose(
          actual_results, expected_results, rtol=rtol, atol=1E-5)
Example #8
def nearestPSD(A):

    B = (A + A.T) / 2

    H = sl.polar(B)[1]

    return (B + H) / 2
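A quick check, with a hypothetical non-PSD input, that this Higham-style construction returns a symmetric matrix with non-negative eigenvalues:

import numpy as np
import scipy.linalg as sl

A = np.array([[1.0, 2.0], [0.0, -1.0]])         # hypothetical indefinite, non-symmetric input
B = (A + A.T) / 2
X = (B + sl.polar(B)[1]) / 2                    # same construction as nearestPSD above

print(np.allclose(X, X.T))                      # True: symmetric
print(np.all(np.linalg.eigvalsh(X) >= -1e-12))  # True: no negative eigenvalues (up to rounding)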
Example #9
    def test_polarfunction(self):
        '''Test routines to compute the matrix polar decomposition.'''
        from scipy.linalg import polar
        # Starting Matrix
        matrix1 = self.create_matrix()
        self.write_matrix(matrix1, self.input_file)

        # Check Matrix
        dense_check_u, dense_check_h = polar(matrix1.todense())
        self.CheckMat = csr_matrix(dense_check_h)

        # Result Matrix
        input_matrix = nt.Matrix_ps(self.input_file, False)
        u_matrix = nt.Matrix_ps(self.mat_dim)
        h_matrix = nt.Matrix_ps(self.mat_dim)
        permutation = nt.Permutation(input_matrix.GetLogicalDimension())
        permutation.SetRandomPermutation()
        self.isp.SetLoadBalance(permutation)
        nt.SignSolvers.ComputePolarDecomposition(input_matrix, u_matrix,
                                                 h_matrix, self.isp)
        h_matrix.WriteToMatrixMarket(result_file)
        comm.barrier()

        self.check_result()

        comm.barrier()
        self.CheckMat = csr_matrix(dense_check_u)
        u_matrix.WriteToMatrixMarket(result_file)

        self.check_result()
Example #10
def randomized_expander(
        z: np.ndarray,
        q: np.ndarray,
        n_col: int = 10,
        n_iter: int = 2) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    # step A
    m = z.shape[1]
    p = min(n_col, m - q.shape[1])

    w = npr.randn(m, p)
    y = z @ w
    del w

    qy = partial_orthogonalization(y, q, overwrite_y=True)
    q = np.append(q, qy, axis=1)

    for _ in range(n_iter):
        q = z @ (z.T @ q)
        q = qr(q, mode='economic')[0]

    # step B
    h, c = qr(z.T @ q, mode='economic')
    w, p = polar(c)
    v, d = sym_eig(p)

    return q @ v, d, (h @ w @ v).T
Example #11
  def testQdwhWithOnRankDeficientInput(self, m, n, log_cond):
    """Tests qdwh with rank-deficient input."""
    a = jnp.triu(jnp.ones((m, n))).astype(_QDWH_TEST_DTYPE)

    # Generates a rank-deficient input.
    u, s, v = jnp.linalg.svd(a, full_matrices=False)
    cond = 10**log_cond
    s = jnp.linspace(cond, 1, min(m, n))
    s = jnp.expand_dims(s.at[-1].set(0), range(u.ndim - 1))
    a = (u * s) @ v

    is_hermitian = _check_symmetry(a)
    max_iterations = 15
    actual_u, actual_h, _, _ = qdwh.qdwh(a, is_hermitian=is_hermitian,
                                         max_iterations=max_iterations)
    _, expected_h = osp_linalg.polar(a)

    # Sets the test tolerance.
    rtol = 1E4 * _QDWH_TEST_EPS

    # For rank-deficient matrix, `u` is not unique.
    with self.subTest('Test h.'):
      relative_diff_h = _compute_relative_diff(actual_h, expected_h)
      np.testing.assert_almost_equal(relative_diff_h, 1E-6, decimal=5)

    with self.subTest('Test u.dot(h).'):
      a_round_trip = _dot(actual_u, actual_h)
      relative_diff_a = _compute_relative_diff(a_round_trip, a)
      np.testing.assert_almost_equal(relative_diff_a, 1E-6, decimal=5)

    with self.subTest('Test orthogonality.'):
      actual_results = _dot(actual_u.T.conj(), actual_u)
      expected_results = np.eye(n)
      self.assertAllClose(
          actual_results, expected_results, rtol=rtol, atol=1E-6)
Example #12
def project_norm_pos_def(A):
    """
    Calculates the nearest (in Frobenius norm) symmetric positive definite matrix to A.
    https://www.sciencedirect.com/science/article/pii/0024379588902236
    :param A: a square matrix
    :return A_pd: the projection of A onto the space of positive definite matrices
    """
    assert A.ndim == 2 and A.shape[0] == A.shape[1], "A must be a square matrix"

    # symmetrize A into B
    B = (A + A.T) / 2

    # Compute the symmetric polar factor H of B
    _, H = polar(B)

    A_pd = (B + H) / 2

    # ensure symmetry
    A_pd = (A_pd + A_pd.T) / 2

    # test that A_pd is indeed PD. If not, then tweak it just a little bit
    pd = False
    k = 0
    while not pd:
        eig = np.linalg.eigvals(A_pd)
        pd = np.all(eig > 0)
        k += 1
        if not pd:
            mineig = min(eig)
            A_pd = A_pd + (-mineig * k**2 + 10**-8) * np.eye(A.shape[0])

    return A_pd
Example #13
def fronebius_nearest_psd(A, return_distance=False):
    """Find the positive semi-definite matrix closest to `A`.

    The closeness to `A` is measured by the Frobenius norm. The matrix closest to `A`
    by that measure is uniquely defined in [3]_.

    Parameters
    ----------
    A : numpy.ndarray
        Symmetric matrix
    return_distance : bool, optional
        Return distance of the input matrix to the approximation as given in
        theorem 2.1 in [3]_.
        This can be compared to the actual Frobenius norm between the
        input and output to verify the calculation.

    Returns
    -------
    X : numpy.ndarray
        Positive semi-definite matrix approximating `A`.

    Notes
    -----
    This function is a modification of [1]_, which is a Python adaption of [2]_, which
    credits [3]_.

    References
    ----------
    ..  [1] https://gist.github.com/fasiha/fdb5cec2054e6f1c6ae35476045a0bbd
    ..  [2] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    ..  [3] N.J. Higham, "Computing a nearest symmetric positive semidefinite
        matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
    """
    # pylint: disable=invalid-name
    assert A.ndim == 2, "input is not a 2D matrix"
    B = (A + A.T)/2.
    _, H = lin.polar(B)
    X = (B + H)/2.
    # small numerical errors can make matrices that are not exactly
    # symmetric, fix that
    X = (X + X.T)/2.
    # due to numerics, it's possible that the matrix is _still_ not psd.
    # We can fix that iteratively by adding small increments of the identity matrix.
    # This part comes from [1].
    if not is_psd(X):
        spacing = np.spacing(lin.norm(X))
        I = np.eye(X.shape[0])
        k = 1
        while not is_psd(X):
            mineig = np.min(np.real(lin.eigvals(X)))
            X += I * (-mineig * k**2 + spacing)
            k += 1
    if return_distance:
        C = (A - A.T)/2.
        lam = lin.eigvalsh(B)
        # pylint doesn't know that numpy.sum takes the "where" argument
        # pylint: disable=unexpected-keyword-arg
        dist = np.sqrt(np.sum(lam**2, where=lam < 0.) + lin.norm(C, ord='fro')**2)
        return X, dist
    return X
Example #14
def polar_correct(list_a):
    # We replace the matrices with the closest unitary matrices
    # according to the Frobenius norm using polar decomposition
    list_u = []
    for a in list_a:
        _u, _ = linalg.polar(a)
        list_u.append(_u)
    return list_u
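The unitary polar factor is the closest unitary matrix in the Frobenius norm, which is what the replacement above relies on. A small check with a hypothetical perturbed unitary:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
U_exact = linalg.polar(rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))[0]
noisy = U_exact + 0.05 * (rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))

U_proj, _ = linalg.polar(noisy)                 # nearest unitary to the noisy matrix
print(np.allclose(U_proj @ U_proj.conj().T, np.eye(4)))             # True: unitary again
print(linalg.norm(noisy - U_proj) <= linalg.norm(noisy - U_exact))  # True: at least as close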
Example #15
 def recoverM(A):
     # Find the closest positive semi-definite matrix
     # (via Higham)
     B = 0.5 * (A + A.T)
     H = polar(B)[1]
     M = 0.5 * (B+H)
     M = M / np.trace(M)
     return M
Example #16
def calc_deformation(src, dst):
    
    A = affine_transform(src,dst)[:-1,:-1]
    U,P = polar(A,side='left')
    rotation = np.arctan2(U[1,0], U[0,0])
    rotation = (rotation + np.pi) % (2 * np.pi )
    
    return rotation, P
Example #17
def reorient_bvecs(gtab, affines, atol=1e-2):
    """Reorient the directions in a GradientTable.

    When correcting for motion, rotation of the diffusion-weighted volumes
    might cause systematic bias in rotationally invariant measures, such as FA
    and MD, and also cause characteristic biases in tractography, unless the
    gradient directions are appropriately reoriented to compensate for this
    effect [Leemans2009]_.

    Parameters
    ----------
    gtab : GradientTable
        The nominal gradient table with which the data were acquired.
    affines : list or ndarray of shape (n, 4, 4) or (n, 3, 3)
        Each entry in this list or array contains either an affine
        transformation (4,4) or a rotation matrix (3, 3).
        In both cases, the transformations encode the rotation that was applied
        to the image corresponding to one of the non-zero gradient directions
        (ordered according to their order in `gtab.bvecs[~gtab.b0s_mask]`)
    atol: see gradient_table()

    Returns
    -------
    gtab : a GradientTable class instance with the reoriented directions

    References
    ----------
    .. [Leemans2009] The B-Matrix Must Be Rotated When Correcting for
       Subject Motion in DTI Data. Leemans, A. and Jones, D.K. (2009).
       MRM, 61: 1336-1349
    """
    new_bvecs = gtab.bvecs[~gtab.b0s_mask]

    if new_bvecs.shape[0] != len(affines):
        e_s = "Number of affine transformations must match number of "
        e_s += "non-zero gradients"
        raise ValueError(e_s)

    for i, aff in enumerate(affines):
        if aff.shape == (4, 4):
            # This must be an affine!
            # Remove the translation component:
            aff = aff[:3, :3]
        # Decompose into rotation and scaling components:
        R, S = polar(aff)
        Rinv = inv(R)
        # Apply the inverse of the rotation to the corresponding gradient
        # direction:
        new_bvecs[i] = np.dot(Rinv, new_bvecs[i])

    return_bvecs = np.zeros(gtab.bvecs.shape)
    return_bvecs[~gtab.b0s_mask] = new_bvecs
    return gradient_table(gtab.bvals,
                          return_bvecs,
                          big_delta=gtab.big_delta,
                          small_delta=gtab.small_delta,
                          b0_threshold=gtab.b0_threshold,
                          atol=atol)
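The core of the reorientation, independent of the dipy GradientTable machinery: polar splits the affine's linear part into a rotation and a symmetric scaling, and only the inverse rotation is applied to the gradient direction. A minimal sketch with a made-up affine:

import numpy as np
from numpy.linalg import inv
from scipy.linalg import polar

theta = np.deg2rad(15.0)
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta),  np.cos(theta), 0.0],
                [0.0,            0.0,           1.0]])
scale = np.diag([1.05, 0.95, 1.0])
aff = rot @ scale                               # made-up motion-correction linear part

R, S = polar(aff)                               # R: rotation, S: symmetric scaling
bvec = np.array([1.0, 0.0, 0.0])
new_bvec = inv(R) @ bvec                        # reorient the gradient direction

print(np.allclose(R, rot))                      # True: polar recovers the rotation
print(np.isclose(np.linalg.norm(new_bvec), 1.0))  # True: pure rotations preserve length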
Example #18
def compute_orientations(domain):

    for key_block in domain.blocks:

        block = domain.blocks[key_block]

        for key_element in block.elements:

            element = block.elements[key_element]

            element.variables['R'] = dict()
            element.variables['U'] = dict()
            element.variables['orientation'] = dict()

            for step in domain.times:

                element.variables['R'][step], element.variables['U'][step] = \
                    polar(element.variables['F'][step])

                element.variables['orientation'][step] = \
                    np.dot(
                        element.variables['R'][step],
                        np.dot(block.material.orientation, element.variables['R'][step].T))

            for key_point in element.points:

                point = element.points[key_point]

                point.variables['R'] = dict()
                point.variables['U'] = dict()
                point.variables['orientation'] = dict()

                for step in domain.times:

                    point.variables['R'][step], point.variables['U'][step] = \
                        polar(point.variables['F'][step])

                    point.variables['orientation'][step] = \
                        np.tensordot(
                            point.variables['R'][step],
                            np.tensordot(
                                block.material.orientation,
                                point.variables['R'][step].T,
                                axes = 1),
                            axes = 1)
Example #19
def compute_orientations(domain):

    for block in domain.blocks.values():

        # block = domain.blocks[key_block]

        for element in block.elements.values():

            # element = block.elements[key_element]

            element.variables['R'] = dict()
            element.variables['U'] = dict()
            element.variables['orientation'] = dict()

            for step in domain.times:

                element.variables['R'][step], element.variables['U'][step] = \
                    polar(element.variables['F'][step])

                element.variables['orientation'][step] = \
                    np.dot(
                        element.variables['R'][step],
                        np.dot(block.material.orientation, element.variables['R'][step].T))

            for point in element.points.values():

                # point = element.points[key_point]

                point.variables['R'] = dict()
                point.variables['U'] = dict()
                point.variables['orientation'] = dict()

                for step in domain.times:

                    point.variables['R'][step], point.variables['U'][step] = \
                        polar(point.variables['F'][step])

                    point.variables['orientation'][step] = \
                        np.tensordot(
                            point.variables['R'][step],
                            np.tensordot(
                                block.material.orientation,
                                point.variables['R'][step].T,
                                axes = 1),
                            axes = 1)
Example #20
    def apply_gradient_component(self, wavepacket, component):
        r"""Compute the effect of the gradient operator :math:`-i \varepsilon^2 \nabla_x`
        on the basis functions :math:`\psi(x)` of a component :math:`\Phi_i` of the
        new-kind Hagedorn wavepacket :math:`\Psi`.

        :param wavepacket: The wavepacket :math:`\Psi` containing :math:`\Phi_i`.
        :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
        :param component: The index :math:`i` of the component :math:`\Phi_i`.
        :type component: Integer.
        :return: Extended basis shape :math:`\mathfrak{\dot{K}}` and new coefficients :math:`c^\prime`
                 for component :math:`\Phi_i`. The coefficients are stored column-wise with
                 one column per dimension :math:`d`. The :math:`c^\prime` array is of shape
                 :math:`|\mathfrak{\dot{K}}| \times D`.
        """
        D = wavepacket.get_dimension()
        eps = wavepacket.get_eps()
        q, p, Q, P, _ = wavepacket.get_parameters(component=component)

        _, PA = polar(Q, side='left')
        EW, EV = eigh(real(PA))

        E = real(dot(P, inv(Q)))
        F1 = dot(E, dot(inv(EV.T), diag(EW)))
        F2 = 1.0j * dot(inv(EV.T), diag(1.0 / EW))
        Gb = F1 - F2
        Gf = F1 + F2

        coeffs = wavepacket.get_coefficients(component=component)

        # Prepare storage for new coefficients
        K = wavepacket.get_basis_shapes(component=component)
        Ke = K.extend()
        size = Ke.get_basis_size()
        cnew = zeros((size, D), dtype=complexfloating)

        # We implement the more efficient scatter type stencil here
        for k in K.get_node_iterator():
            # Central phi_i coefficient
            cnew[Ke[k], :] += squeeze(coeffs[K[k]] * p)

            # Backward neighbours phi_{i - e_d}
            nbw = Ke.get_neighbours(k, selection="backward")

            for d, nb in nbw:
                cnew[Ke[nb], :] += (sqrt(eps**2 / 2.0) *
                                    sqrt(k[d]) * coeffs[K[k]] *
                                    Gb[:, d])

            # Forward neighbours phi_{i + e_d}
            nfw = Ke.get_neighbours(k, selection="forward")

            for d, nb in nfw:
                cnew[Ke[nb], :] += (sqrt(eps**2 / 2.0) *
                                    sqrt(k[d] + 1.0) * coeffs[K[k]] *
                                    Gf[:, d])

        return (Ke, cnew)
Example #21
    def dP(self, dF):
        dR = polar(self.F + dF)[0] - self.R
        # JFTinv = self.J*np.linalg.inv(self.F).transpose()

        JFTinv = np.array([[self.F[1, 1], -self.F[1, 0]],
                           [-self.F[0, 1], self.F[0, 0]]])
        dJFTinv = np.array([[dF[1, 1], -dF[1, 0]], [-dF[0, 1], dF[0, 0]]])
        return 2 * self.mu * (dF - dR) + self.lambd * JFTinv * np.tensordot(
            JFTinv, dF) + self.lambd * (self.J - 1) * dJFTinv
Example #22
def checkdstable(A):
    n = len(A)
    P = solve_discrete_lyapunov(A.T, np.identity(n))
    S = sqrtm(P)
    invS = np.linalg.inv(S)
    UB = S.dot(A).dot(invS)
    [U, B] = polar(UB)
    B = projectPSD(B, 0, 1)
    return P, S, U, B
Example #23
def get_list_noisy(list_goal_u, coeff, n):
    list_u = []
    for l in range(n):
        error = coeff * (np.random.randn(n, n) + 1j * np.random.randn(n, n))
        new_matrix = list_goal_u[l] + error
        _u, _ = linalg.polar(new_matrix)
        new_u = _u
        list_u.append(new_u)
    return list_u
Example #24
    def R(self):
        """
        Return rotation part of ``DefGrad`` from polar decomposition.
        """

        from scipy.linalg import polar

        R, _ = polar(self)

        return DefGrad(R)
Example #25
    def R(self):
        """
        Return rotation part of ``DefGrad`` from polar decomposition.
        """

        from scipy.linalg import polar

        R, _ = polar(self)

        return DefGrad(R)
Example #26
    def U(self):
        """
        Return stretching part of ``DefGrad`` from right polar decomposition.
        """

        from scipy.linalg import polar

        _, U = polar(self, "right")

        return DefGrad(U)
Example #27
    def V(self):
        """
        Return stretching part of ``DefGrad`` from left polar decomposition.
        """

        from scipy.linalg import polar

        _, V = polar(self, "left")

        return DefGrad(V)
Example #28
    def U(self):
        """
        Return stretching part of ``DefGrad`` from right polar decomposition.
        """

        from scipy.linalg import polar

        _, U = polar(self, "right")

        return DefGrad(U)
Example #29
    def V(self):
        """
        Return stretching part of ``DefGrad`` from left polar decomposition.
        """

        from scipy.linalg import polar

        _, V = polar(self, "left")

        return DefGrad(V)
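The R, U and V properties above are related by the two polar decompositions F = R U (right) and F = V R (left), which share the same rotation R. A standalone numpy check with a hypothetical deformation gradient:

import numpy as np
from scipy.linalg import polar

F = np.array([[1.1, 0.2, 0.0],
              [0.0, 0.9, 0.1],
              [0.0, 0.0, 1.0]])                 # hypothetical deformation gradient

R1, U = polar(F, side="right")                  # F = R1 @ U, U: right stretch
R2, V = polar(F, side="left")                   # F = V @ R2, V: left stretch

print(np.allclose(R1, R2))                      # True: same rotation either way
print(np.allclose(R1 @ U, F))                   # True
print(np.allclose(V @ R2, F))                   # True
print(np.allclose(V, R1 @ U @ R1.T))            # True: V = R U R^T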
Example #30
def reorient_bvecs(gtab, affines):
    """Reorient the directions in a GradientTable.

    When correcting for motion, rotation of the diffusion-weighted volumes
    might cause systematic bias in rotationally invariant measures, such as FA
    and MD, and also cause characteristic biases in tractography, unless the
    gradient directions are appropriately reoriented to compensate for this
    effect [Leemans2009]_.

    Parameters
    ----------
    gtab : GradientTable
        The nominal gradient table with which the data were acquired.
    affines : list or ndarray of shape (n, 4, 4) or (n, 3, 3)
        Each entry in this list or array contains either an affine
        transformation (4,4) or a rotation matrix (3, 3).
        In both cases, the transformations encode the rotation that was applied
        to the image corresponding to one of the non-zero gradient directions
        (ordered according to their order in `gtab.bvecs[~gtab.b0s_mask]`)

    Returns
    -------
    gtab : a GradientTable class instance with the reoriented directions

    References
    ----------
    .. [Leemans2009] The B-Matrix Must Be Rotated When Correcting for
       Subject Motion in DTI Data. Leemans, A. and Jones, D.K. (2009).
       MRM, 61: 1336-1349
    """
    new_bvecs = gtab.bvecs[~gtab.b0s_mask]

    if new_bvecs.shape[0] != len(affines):
        e_s = "Number of affine transformations must match number of "
        e_s += "non-zero gradients"
        raise ValueError(e_s)

    for i, aff in enumerate(affines):
        if aff.shape == (4, 4):
            # This must be an affine!
            # Remove the translation component:
            aff_no_trans = aff[:3, :3]
            # Decompose into rotation and scaling components:
            R, S = polar(aff_no_trans)
        elif aff.shape == (3, 3):
            # We assume this is a rotation matrix:
            R = aff
        Rinv = inv(R)
        # Apply the inverse of the rotation to the corresponding gradient
        # direction:
        new_bvecs[i] = np.dot(Rinv, new_bvecs[i])

    return_bvecs = np.zeros(gtab.bvecs.shape)
    return_bvecs[~gtab.b0s_mask] = new_bvecs
    return gradient_table(gtab.bvals, return_bvecs)
Example #31
def _nifti_bvecs_to_worldspace_new(bvecs, meta):
    from scipy.linalg import polar
    from scipy.linalg import inv
    R, S = polar(meta.frame)
    for i in range(len(bvecs)):
        bvecs[i] = np.dot(inv(R), bvecs[i])
    #norm = la.norm(bvecs, axis=1)
    #norm[norm == 0] = 1.0
    #bvecs = bvecs / norm[:, None]
    #bvecs[norm == 0] = np.array((0, 0, 1))
    return bvecs
Example #32
def get_noisy(list_u, coeff):
    list_u_noisy = []
    n = len(list_u[0])
    m = len(list_u)
    for l in range(m):
        error = coeff * (np.random.randn(n, n) + 1j * np.random.randn(n, n))
        new_matrix = list_u[l] + error
        _u, _ = linalg.polar(new_matrix)
        new_u = _u
        list_u_noisy.append(new_u)
    return list_u_noisy
Example #33
def orthogonalize_cell(atoms: Atoms,
                       limit_denominator: int = 10,
                       return_strain=False):
    """
    Make the cell of an ASE atoms object orthogonal. This is accomplished by repeating the cell until the x-components
    of the two lattice vectors in the xy-plane closely match. If the ratio between the x-components is irrational, this
    may not be possible without introducing some strain. However, the amount of strain can be made arbitrarily small
    by using many repetitions.

    Parameters
    ----------
    atoms : ASE atoms object
        The non-orthogonal atoms object.
    limit_denominator : int
        The maximum denominator in the rational approximation. Increase this to allow more repetitions and hence less
        strain.
    return_strain : bool
        If true, return the strain tensor that was applied to make the cell orthogonal.

    Returns
    -------
    atoms : ASE atoms object
        The orthogonal atoms.
    strain_tensor : 2x2 array
        The applied strain tensor. Only provided if return_strain is true.
    """
    if is_cell_orthogonal(atoms):
        return atoms

    atoms = atoms.copy()
    atoms = standardize_cell(atoms)

    fraction = atoms.cell[0, 0] / atoms.cell[1, 0]
    fraction = Fraction(fraction).limit_denominator(limit_denominator)

    atoms *= (fraction.denominator, fraction.numerator, 1)

    new_cell = atoms.cell.copy()
    new_cell[1, 0] = new_cell[0, 0]

    a = np.linalg.solve(atoms.cell[:2, :2], new_cell[:2, :2])
    _, strain_tensor = polar(a, side='left')
    strain_tensor[0, 0] -= 1
    strain_tensor[1, 1] -= 1

    atoms.set_cell(new_cell, scale_atoms=True)
    atoms.set_cell(np.diag(atoms.cell))
    atoms.wrap()

    if return_strain:
        return atoms, strain_tensor
    else:
        return atoms
Example #34
    def execute(self, context):
        global path, FilePath
        Rigname = context.scene.RigNameLBS
        if path[len(path) - len(Rigname) - 1:len(path) - 1] != Rigname:
            path = FilePath + Rigname + '/'
        Vrt = np.loadtxt(path + Rigname + '_vertz.txt', delimiter=',')
        Fful = ReadTxt(path + Rigname + '_facz.txt')
        NF = len(Fful)
        NPs, NV = np.shape(Vrt)
        NPs = NPs // 3
        RM = np.zeros((NF, 9 * (NPs - 1)))
        strttime = time.time()
        t = 0
        print('Computing Rotations....')
        for f in Fful:
            for ps in range(NPs):
                if ps == 0:
                    RefFrm = Vrt[3 * ps:3 * ps + 3, f].T - np.mean(
                        Vrt[3 * ps:3 * ps + 3, f].T, axis=0)
                    nrm = np.cross(RefFrm[0], RefFrm[1]) / np.linalg.norm(
                        np.cross(RefFrm[0], RefFrm[1]))
                    RefFrm = np.concatenate((RefFrm, np.reshape(nrm, (1, 3))),
                                            axis=0)
                else:
                    DefFrm = Vrt[3 * ps:3 * ps + 3, f].T - np.mean(
                        Vrt[3 * ps:3 * ps + 3, f].T, axis=0)
                    nrm = np.cross(DefFrm[0], DefFrm[1]) / np.linalg.norm(
                        np.cross(DefFrm[0], DefFrm[1]))
                    DefFrm = np.concatenate((DefFrm, np.reshape(nrm, (1, 3))),
                                            axis=0)

                    Q = np.dot(DefFrm.T, np.linalg.pinv(RefFrm.T))

                    R, S = scla.polar(Q)
                    RM[t, 9 * (ps - 1):9 * (ps - 1) + 9] = np.ravel(R)
            t += 1

        Ncl = context.scene.NumOfCls
        print('Classifying....', Ncl, '...classes')
        clustering = KMeans(n_clusters=Ncl).fit(RM)
        Y = list(clustering.labels_)

        Cls = [[]] * Ncl
        t = 0
        for i in list(Y):
            Cls[i] = Cls[i] + [t]
            t += 1

        print("Segmentation time ...", time.time() - strttime)
        WriteAsTxt(path + Rigname + "_ClusterKmeans.txt", Cls)
        return {'FINISHED'}
Example #35
def Us_from_A(A):
    """
    N.B. I don't know how useful this will prove
    
    Process to get the brick U1 and U2 from translationally invariant
    single site A mps.
    
    Multiply the two As together:
        
    1)  --A-A--  -> --B--
          | |        ||
      
     Use QR decomp to turn B into:
         
    2)    -- C --D --
                ||
          
    3) Reshape D into 4 x 4
    
    Use Polar Decomp to turn D into a unitary:
        
                      | |
                       H
     4) --D--   =     | |
         ||           U_d    
                      | |    
                     
    5) Then multiply H into C and embed in a unitary
    """

    # 1)
    B = np.transpose(np.tensordot(A, A, axes=(2, 1)),
                     [1, 0, 3, 2]).reshape(2, 8)

    #2)
    C, D = qr(B, overwrite_a=True)  # overwrite_a can give better performance

    #3)
    D = np.transpose(D.reshape(2, 2, 2, 2), [1, 2, 0, 3]).reshape(4, 4)

    #4)
    U_d, H = polar(D)

    #5)
    H = H.reshape(2, 2, 2, 2)
    C_ = np.tensordot(H, C, axes=((2, 3), (1, 0))).reshape(4, 1)
    C_ = C_ / np.linalg.norm(C_)
    U_c = np.concatenate((C_, null_space(C_.conj().T)), axis=1)

    return U_c, U_d
Example #36
def verify_polar(a):
    # Compute the polar decomposition, and then verify that
    # the result has all the expected properties.
    product_atol = np.sqrt(np.finfo(float).eps)

    aa = np.asarray(a)
    m, n = aa.shape

    u, p = polar(a, side='right')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (n, n))
    # a = up
    assert_allclose(u.dot(p), a, atol=product_atol)
    if m >= n:
        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
    else:
        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
    # p is Hermitian positive semidefinite.
    assert_allclose(p.conj().T, p)
    evals = eigh(p, eigvals_only=True)
    nonzero_evals = evals[abs(evals) > 1e-14]
    assert_((nonzero_evals >= 0).all())

    u, p = polar(a, side='left')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (m, m))
    # a = pu
    assert_allclose(p.dot(u), a, atol=product_atol)
    if m >= n:
        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
    else:
        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
    # p is Hermitian positive semidefinite.
    assert_allclose(p.conj().T, p)
    evals = eigh(p, eigvals_only=True)
    nonzero_evals = evals[abs(evals) > 1e-14]
    assert_((nonzero_evals >= 0).all())
Example #37
def verify_polar(a):
    # Compute the polar decomposition, and then verify that
    # the result has all the expected properties.
    product_atol = np.sqrt(np.finfo(float).eps)

    aa = np.asarray(a)
    m, n = aa.shape

    u, p = polar(a, side='right')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (n, n))
    # a = up
    assert_allclose(u.dot(p), a, atol=product_atol)
    if m >= n:
        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
    else:
        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
    # p is Hermitian positive semidefinite.
    assert_allclose(p.conj().T, p)
    evals = eigh(p, eigvals_only=True)
    nonzero_evals = evals[abs(evals) > 1e-14]
    assert_((nonzero_evals >= 0).all())

    u, p = polar(a, side='left')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (m, m))
    # a = pu
    assert_allclose(p.dot(u), a, atol=product_atol)
    if m >= n:
        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
    else:
        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
    # p is Hermitian positive semidefinite.
    assert_allclose(p.conj().T, p)
    evals = eigh(p, eigvals_only=True)
    nonzero_evals = evals[abs(evals) > 1e-14]
    assert_((nonzero_evals >= 0).all())
Example #38
def project_to_unitary(parameters, check_unitary=True):
    """
    Use polar decomposition to find the closest unitary matrix.
    """
    # parameters must be a square number (TODO: FIRE)
    n = int(np.sqrt(len(parameters)))

    A = parameters.reshape(n, n)
    U, p = polar(A, side='left')

    if check_unitary:
        # (np.conj is harmless for real U)
        assert np.allclose(np.dot(U, np.conj(U.T)), np.eye(n))

    parameters = U.reshape(n*n)
    return parameters
Example #39
    def axisangle(self):
        """Return rotation part of ``DefGrad`` axis, angle tuple."""
        from scipy.linalg import polar

        R, _ = polar(self)
        w, W = np.linalg.eig(R.T)
        i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
        if not len(i):
            raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
        axis = Vec3(np.real(W[:, i[-1]]).squeeze())
        # rotation angle depending on direction
        cosa = (np.trace(R) - 1.0) / 2.0
        if abs(axis[2]) > 1e-8:
            sina = (R[1, 0] + (cosa - 1.0) * axis[0] * axis[1]) / axis[2]
        elif abs(axis[1]) > 1e-8:
            sina = (R[0, 2] + (cosa - 1.0) * axis[0] * axis[2]) / axis[1]
        else:
            sina = (R[2, 1] + (cosa - 1.0) * axis[1] * axis[2]) / axis[0]
        angle = np.rad2deg(np.arctan2(sina, cosa))
        return axis, angle
Example #40
def check_precomputed_polar(a, side, expected_u, expected_p):
    # Compare the result of the polar decomposition to a
    # precomputed result.
    u, p = polar(a, side=side)
    assert_allclose(u, expected_u, atol=1e-15)
    assert_allclose(p, expected_p, atol=1e-15)
Example #41
def psolve(a, b):
    u, p = sla.polar(a)
    return np.dot(np.dot(la.inv(p), la.inv(u)), b)
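psolve solves a x = b through the polar factors: since a = u p, the inverse is p^{-1} u^{-1}. A quick check against numpy.linalg.solve, restating the function with its assumed imports:

import numpy as np
import numpy.linalg as la
import scipy.linalg as sla

def psolve(a, b):
    u, p = sla.polar(a)
    return np.dot(np.dot(la.inv(p), la.inv(u)), b)

rng = np.random.default_rng(1)
a = rng.normal(size=(5, 5))
b = rng.normal(size=5)
print(np.allclose(psolve(a, b), np.linalg.solve(a, b)))  # True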
Example #42
 def polar_decomposition(self, side='right'):
     """
     calculates matrices for polar decomposition
     """
     return polar(self, side=side)
Example #43
    def _my_sqrtm_polar(self, X):
        tol = 1e-10

        L = np.linalg.cholesky(X + np.diag(np.ones(X.shape[0]) * tol))
        U, P = polar(L, side="left")  # left polar: P @ P equals L @ L.T (the regularized X)
        return P
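A quick numerical check of the Cholesky-plus-polar square root: for a symmetric positive definite X with lower Cholesky factor L, the Hermitian factor of the left polar decomposition of L squares back to X (the right polar factor does not, in general, which is why side="left" is used above):

import numpy as np
from scipy.linalg import polar

rng = np.random.default_rng(2)
M = rng.normal(size=(4, 4))
X = M @ M.T + 4 * np.eye(4)                     # symmetric positive definite test matrix

L = np.linalg.cholesky(X)                       # X = L @ L.T
_, P = polar(L, side="left")                    # L = P @ W with P symmetric positive definite
print(np.allclose(P @ P, X))                    # True: P is the matrix square root of X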
Example #44
 def recoverM(A):
     B = 0.5 * (A + A.T)
     H = polar(B)[1]
     M = 0.5 * (B+H)
     M = M / np.trace(M)
     return M
Example #45
    def evaluate_basis_at(self, grid, component, *, prefactor=False):
        r"""Evaluate the basis functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.

        :param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
        :type grid: A class having a :py:meth:`get_nodes(...)` method.
        :param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
        :param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
        :type prefactor: Boolean, default is ``False``.
        :return: A two-dimensional ndarray :math:`H` of shape :math:`(|\mathfrak{K}_i|, |\Gamma|)` where
                 the entry :math:`H[\mu(k), i]` is the value of :math:`\phi_k(\gamma_i)`.
        """
        D = self._dimension

        bas = self._basis_shapes[component]
        bs = self._basis_sizes[component]

        # The grid
        grid = self._grid_wrap(grid)
        nodes = grid.get_nodes()
        nn = grid.get_number_nodes(overall=True)

        # Allocate the storage array
        phi = zeros((bs, nn), dtype=complexfloating)

        # Precompute some constants
        Pi = self.get_parameters(component=component)
        q, p, Q, P, _ = Pi

        # Transformation to {w} basis
        _, PA = polar(Q, side='left')
        EW, EV = eigh(real(PA))
        Qinv = dot(diag(1.0 / EW), EV.T)
        QQ = identity(D)

        # Compute the ground state phi_0 via direct evaluation
        mu0 = bas[tuple(D * [0])]
        phi[mu0, :] = self._evaluate_phi0(component, nodes, prefactor=False)

        # Compute all higher order states phi_k via recursion
        for d in range(D):
            # Iterator for all valid index vectors k
            indices = bas.get_node_iterator(mode="chain", direction=d)

            for k in indices:
                # Current index vector
                ki = vstack(k)

                # Access predecessors
                phim = zeros((D, nn), dtype=complexfloating)

                for j, kpj in bas.get_neighbours(k, selection="backward"):
                    mukpj = bas[kpj]
                    phim[j, :] = phi[mukpj, :]

                # Compute 3-term recursion
                p1 = (nodes - q) * phi[bas[k], :]
                p2 = sqrt(ki) * phim

                t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
                t2 = dot(QQ[d, :], p2)

                # Find multi-index where to store the result
                kped = bas.get_neighbours(k, selection="forward", direction=d)

                # Did we find this k?
                if len(kped) > 0:
                    kped = kped[0]

                    # Store computed value
                    phi[bas[kped[1]], :] = (t1 - t2) / sqrt(ki[d] + 1.0)

        if prefactor is True:
            phi = phi / self._get_sqrt(component)(det(Q))

        return phi
Example #46
    def slim_recursion(self, grid, component, *, prefactor=False):
        r"""Evaluate the Hagedorn wavepacket :math:`\Psi` at the given nodes :math:`\gamma`.
        This routine is a slim version compared to the full basis evaluation. At every moment
        we store only the data we really need to compute the next step until we hit the highest
        order basis functions.

        :param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
        :type grid: A class having a :py:meth:`get_nodes(...)` method.
        :param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
        :param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
        :type prefactor: Boolean, default is ``False``.
        :return: A list of arrays or a single array containing the values of the :math:`\Phi_i`
                 at the nodes :math:`\gamma`.

        Note that this function does not include the global phase :math:`\exp(\frac{i S}{\varepsilon^2})`.
        """
        D = self._dimension

        # Precompute some constants
        Pi = self.get_parameters(component=component)
        q, p, Q, P, _ = Pi

        # Transformation to {w} basis
        _, PA = polar(Q, side='left')
        EW, EV = eigh(real(PA))
        Qinv = dot(diag(1.0 / EW), EV.T)
        QQ = identity(D)

        # The basis shape
        bas = self._basis_shapes[component]
        Z = tuple(D * [0])

        # Book keeping
        todo = []
        newtodo = [Z]
        olddelete = []
        delete = []
        tmp = {}

        # The grid nodes
        grid = self._grid_wrap(grid)
        nn = grid.get_number_nodes(overall=True)
        nodes = grid.get_nodes()

        # Evaluate phi0
        tmp[Z] = self._evaluate_phi0(component, nodes, prefactor=False)
        psi = self._coefficients[component][bas[Z], 0] * tmp[Z]

        # Iterate for higher order states
        while len(newtodo) != 0:
            # Delete results that never will be used again
            for d in olddelete:
                del tmp[d]

            # Exchange queues
            todo = newtodo
            newtodo = []
            olddelete = delete
            delete = []

            # Compute new results
            for k in todo:
                # Center stencil at node k
                ki = vstack(k)

                # Access predecessors
                phim = zeros((D, nn), dtype=complexfloating)
                for j, kpj in bas.get_neighbours(k, selection="backward"):
                    phim[j, :] = tmp[kpj]

                # Compute the neighbours
                for d, n in bas.get_neighbours(k, selection="forward"):
                    if n not in tmp.keys():
                        # Compute 3-term recursion
                        p1 = (nodes - q) * tmp[k]
                        p2 = sqrt(ki) * phim

                        t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
                        t2 = dot(QQ[d, :], p2)

                        # Store computed value
                        tmp[n] = (t1 - t2) / sqrt(ki[d] + 1.0)
                        # And update the result
                        psi = psi + self._coefficients[component][bas[n], 0] * tmp[n]

                        newtodo.append(n)
                delete.append(k)

        if prefactor is True:
            psi = psi / self._get_sqrt(component)(det(Q))

        return psi