Example #1
def to_realimag(z):
    """
    Convert a complex hermitian matrix to a real valued doubled up representation, i.e., a
    complex hermitian matrix ``Z`` with elementwise real and imaginary parts
    ``Z = Z_r + 1j * Z_i`` is isomorphically represented in doubled up form as::

        R(Z) = [ Z_r   Z_i]
               [-Z_i   Z_r]

        R(X)*R(Y) = [ (X_r*Y_r-X_i*Y_i)    (X_r*Y_i + X_i*Y_r)]
                    [-(X_r*Y_i + X_i*Y_r)  (X_r*Y_r-X_i*Y_i)  ]

                  = R(X*Y).

    In particular, ``Z`` is complex positive (semi-)definite iff ``R(Z)`` is real positive
    (semi-)definite.

    :param (qutip.Qobj|scipy.sparse.base.spmatrix) z:  The operator representation matrix.
    :returns: R(Z) the doubled up representation.
    :rtype: scipy.sparse.csr_matrix
    """
    if isinstance(z, qt.Qobj):
        z = z.data
    if not is_hermitian(z):  # pragma: no cover
        raise ValueError("Need a hermitian matrix z")
    # For a hermitian Z the imaginary part is antisymmetric, so z.imag.T == -z.imag;
    # this supplies the -Z_i block in the lower-left corner of R(Z).
    return spvstack([sphstack([z.real, z.imag]),
                     sphstack([z.imag.T, z.real])]).tocsr().real
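A minimal usage sketch of ``to_realimag`` (assuming ``qutip`` imported as ``qt``, ``numpy`` as ``np``, and a qutip version in which ``Qobj.data`` is a scipy sparse matrix, as the snippet above requires): the doubled-up matrix is real symmetric and carries every eigenvalue of ``Z`` twice, so positive definiteness is preserved.

import numpy as np
import qutip as qt

Z = qt.sigmay() + 2 * qt.qeye(2)          # Hermitian and positive definite
RZ = to_realimag(Z)                       # real 4x4 scipy.sparse.csr_matrix
print(np.linalg.eigvalsh(RZ.toarray()))   # -> [1. 1. 3. 3.], eigenvalues of Z doubled up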
Example #2
    def M(self, x, dx, t):
        r"""
        Returns the constrained mass matrix

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        M: csr_matrix
            Constrained mass matrix

        Notes
        -----
        In this formulation this returns

        .. math::
            \begin{bmatrix} L^T M_{raw} \\
            s B(u, t)
            \end{bmatrix}

        """
        u = self.u(x, t)
        du = self.du(x, dx, t)
        M = self._M_func(u, du, t)
        L = self.L(x, t)
        return spvstack((csr_matrix(L.T.dot(M)),
                         self._scaling * self._B_func(u, t)),
                        format='csr')
Example #3
    def test_K(self):
        x = np.arange(self.formulation.dimension, dtype=float)
        dx = x.copy()

        K_desired = spvstack((sphstack((self.K_unconstr,
                                        self.B_holo_func(x[:self.no_of_dofs_unconstrained],
                                                         0.0).T), format='csr'),
                             sphstack((self.B_holo_func(x[:self.no_of_dofs_unconstrained], 0.0),
                                       csr_matrix((1, 1))), format='csr')),
                             format='csr')

        K_actual = self.formulation.K(x, dx, 0.0)
        assert_array_equal(K_actual.todense(), K_desired.todense())
Example #4
    def test_M(self):
        x = np.arange(self.formulation.dimension, dtype=float)
        dx = x.copy()

        M_desired = spvstack((sphstack((self.M_unconstr,
                                        csr_matrix((self.no_of_dofs_unconstrained,
                                                    self.no_of_constraints))), format='csr'),
                             sphstack((csr_matrix((self.no_of_constraints,
                                                   self.no_of_dofs_unconstrained)),
                                       csr_matrix((1, 1))), format='csr')),
                             format='csr')

        M_actual = self.formulation.M(x, dx, 0.0)
        assert_array_equal(M_actual.todense(), M_desired.todense())
Example #5
    def K(self, x, dx, t):
        r"""
        Returns the constrained stiffness matrix

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        K: csr_matrix
            Constrained stiffness matrix

        Notes
        -----
        In this formulation this returns

        .. math::
            \begin{bmatrix} K_{raw} + psB^T B & sB^T \\
            sB & 0
            \end{bmatrix}

        Attention: d(B.T @ g)/dq is evaluated as B.T @ dg/dq, i.e. dB/dq is assumed to be zero.
        This is done because dB/dq could be expensive to evaluate.
        """
        u = self.u(x, t)
        B = self._B_func(u, t)
        K = self._jac_h_u(u, self.du(x, dx, t), t)
        if self._penalty is not None:
            K += self._penalty * self._scaling * B.T.dot(B)

        return spvstack(
            (sphstack((K, self._scaling * B.T), format='csr'),
             sphstack((self._scaling * B,
                       csr_matrix((self._no_of_constraints,
                                   self._no_of_constraints))),
                      format='csr')),
            format='csr')
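A small self-contained sketch of the saddle-point block layout that this return statement assembles, with made-up numbers and only ``numpy``/``scipy.sparse`` assumed:

import numpy as np
from scipy.sparse import csr_matrix, hstack as sphstack, vstack as spvstack

K = csr_matrix(np.array([[2.0, -1.0], [-1.0, 2.0]]))  # stand-in for K_raw (+ p*s*B.T @ B)
B = csr_matrix(np.array([[1.0, 0.0]]))                # a single holonomic constraint
s = 10.0                                              # scaling factor

K_constrained = spvstack(
    (sphstack((K, s * B.T), format='csr'),
     sphstack((s * B, csr_matrix((1, 1))), format='csr')),
    format='csr')
print(K_constrained.toarray())
# [[ 2. -1. 10.]
#  [-1.  2.  0.]
#  [10.  0.  0.]]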
Example #6
    def K(self, x, dx, t):
        r"""
        Returns the constrained stiffness matrix

        This is an approximation! The upper part, namely the B of the internal and external forces,
        is evaluated exactly, but the B of the a_function is not available and is set to zero.

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        K: csr_matrix
            Constrained stiffness matrix

        Notes
        -----
        In this formulation this returns

        .. math::
            \begin{bmatrix} L^T K_{raw}\\
            0
            \end{bmatrix}

        """
        u = self.u(x, t)
        du = self.du(x, dx, t)
        K = self._jac_h_u(u, du, t)
        if self._jac_p_u is not None:
            K -= self._jac_p_u(u, du, t)

        return spvstack(
            (self.L(x, t).T.dot(K),
             csr_matrix(
                 (self._no_of_constraints, self._no_of_dofs_unconstrained))),
            format='csr')
Example #7
def _prepare_b_jkl_mn(readout_povm, pauli_basis, pre_channel_ops, post_channel_ops, rho0):
    """
    Prepare the coefficient matrix for process tomography. This function uses sparse matrices
    for much greater efficiency. The coefficient matrix is defined as:

    .. math::

            B_{(jkl)(mn)}=\sum_{r,q}\pi_{jr}(\mathcal{R}_{k})_{rm} (\mathcal{R}_{l})_{nq} (\rho_0)_q

    where :math:`\mathcal{R}_{k}` is the transfer matrix of the quantum map corresponding to the
    k-th pre-measurement channel, while :math:`\mathcal{R}_{l}` is the transfer matrix of the l-th
    state preparation process. We also require the overlap
    between the (generalized) Pauli basis ops and the projection operators
    :math:`\pi_{jl} := \langle\langle \Pi_j | P_l \rangle\rangle = \mathrm{Tr}\left[\Pi_j P_l\right]`.

    See the grove documentation on tomography for detailed information.

    :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
    :param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
    :param list pre_channel_ops: The state preparation channel operators as `qutip.Qobj`
    :param list post_channel_ops: The pre-measurement (post circuit) channel operators as `qutip.Qobj`
    :param qutip.Qobj rho0: The initial state as a density matrix.
    :return: The coefficient matrix necessary to set up the binomial state tomography problem.
    :rtype: scipy.sparse.csr_matrix
    """
    c_jk_m = state_tomography._prepare_c_jk_m(readout_povm, pauli_basis, post_channel_ops)
    pre_channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek))
                                     for ek in pre_channel_ops]
    rho0_q = pauli_basis.project_op(rho0)

    # These next lines hide some very serious (sparse-)matrix index magic,
    # basically we exploit the same index math as in `qutip.sprepost()`
    # i.e., if a matrix X is linearly mapped `X -> A.dot(X).dot(B)`
    # then this can be rewritten as
    #           `np.kron(B.T, A).dot(X.T.ravel()).reshape((B.shape[1], A.shape[0])).T`
    # The extra matrix transpose operations are necessary because numpy by default
    # uses row-major storage, whereas these operations are conventionally defined for column-major
    # storage.
    d_ln = spvstack([(rlnq * rho0_q).T for rlnq in pre_channel_transfer_matrices]).tocoo()
    b_jkl_mn = spkron(d_ln, c_jk_m).real
    return b_jkl_mn
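The "(sparse-)matrix index magic" described in the comments above reduces to a vec/Kronecker identity for row-major arrays; a tiny dense check, independent of the tomography code:

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(2, 2))
X = rng.normal(size=(2, 3))
B = rng.normal(size=(3, 4))

direct = A.dot(X).dot(B)
via_kron = np.kron(B.T, A).dot(X.T.ravel()).reshape((B.shape[1], A.shape[0])).T
assert np.allclose(direct, via_kron)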
Example #8
 def initial(a, size, N=self.N, factory=factory):
     return spvstack(size * (factory(a, (1, N)), ))
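A throwaway sketch of the tuple-repetition idiom above: multiplying a one-element tuple by ``size`` hands ``spvstack`` that many copies of the same 1-by-N row (the ``factory`` below is a hypothetical stand-in, not the one from the original code):

import numpy as np
from scipy.sparse import csr_matrix, vstack as spvstack

factory = lambda a, shape: csr_matrix(a * np.ones(shape))  # hypothetical stand-in
row = factory(2.0, (1, 3))
stacked = spvstack(4 * (row,))  # four identical rows
print(stacked.shape)            # (4, 3)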
Example #9
def admm_for_dmd(P,
                 q,
                 s,
                 gamma_vec,
                 rho=1,
                 maxiter=10000,
                 eps_abs=1e-6,
                 eps_rel=1e-4):

    """
    ADMM solver for the gamma-parameterized sparsity-promoting DMD amplitude problem:
    for each gamma in ``gamma_vec`` it minimizes the quadratic objective
    ``J(x) = x'Px - q'x - x'q + s`` (with ' denoting the conjugate transpose) plus the
    penalty ``gamma * ||x||_1`` over the complex amplitude vector x, and then polishes
    the surviving non-zero amplitudes with the zero pattern fixed.
    """

    # blank return value
    answer = type('ADMMAnswer', (object, ), {})()

    # check input vars
    P = np.squeeze(P)
    q = np.squeeze(q)[:, np.newaxis]
    gamma_vec = np.squeeze(gamma_vec)
    if P.ndim != 2:
        raise ValueError('invalid P')
    if q.ndim != 2:
        raise ValueError('invalid q')
    if gamma_vec.ndim != 1:
        raise ValueError('invalid gamma_vec')

    # number of optimization variables
    n = len(q)

    # identity matrix
    I = np.eye(n)

    # allocate memory for gamma-dependent output variables
    answer.gamma = gamma_vec
    answer.Nz = np.zeros(len(gamma_vec))     # number of non-zero amplitudes
    answer.Jsp = np.zeros(len(gamma_vec))    # square of Frobenius norm (before polishing)
    answer.Jpol = np.zeros(len(gamma_vec))   # square of Frobenius norm (after polishing)
    answer.Ploss = np.zeros(len(gamma_vec))  # optimal performance loss (after polishing)
    answer.xsp = np.zeros([n, len(gamma_vec)],
                          dtype='complex')   # vector of amplitudes (before polishing)
    answer.xpol = np.zeros([n, len(gamma_vec)],
                           dtype='complex')  # vector of amplitudes (after polishing)

    # Cholesky factorization of matrix P + (rho/2)*I
    Prho = P + (rho / 2) * I
    Plow = cholesky(Prho)
    Plow_star = Plow.conj().T
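    # NOTE (assumption): `cholesky` is taken to return the lower-triangular factor
    # (as numpy.linalg.cholesky does), so Prho == Plow @ Plow_star and the nested
    # triangular solves in the x-minimization step invert P + (rho/2)*I.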

    # sparse P (for KKT system)
    Psparse = sparse(P)

    for i, gamma in enumerate(gamma_vec):

        # initial conditions
        y = np.zeros([n, 1], dtype='complex')  # Lagrange multiplier
        z = np.zeros([n, 1], dtype='complex')  # copy of x

        # Use ADMM to solve the gamma-parameterized problem
        for step in range(maxiter):

            # x-minimization step
            u = z - (1 / rho) * y
            # x = solve((P + (rho/2) * I), (q + (rho/2) * u))
            xnew = solve(Plow_star, solve(Plow, q + (rho / 2) * u))

            # z-minimization step
            a = (gamma / rho) * np.ones([n, 1])
            v = xnew + (1 / rho) * y
            # soft-thresholding of v: shrink |v| by a, keep the phase, zero out |v| <= a
            znew = multiply(multiply(1 - np.divide(a, np.abs(v)), v),
                            (np.abs(v) > a))

            # primal and dual residuals
            res_prim = norm(xnew - znew, 2)
            res_dual = rho * norm(znew - z, 2)

            # Lagrange multiplier update step
            y += rho * (xnew - znew)

            # stopping criteria
            eps_prim = np.sqrt(n) * eps_abs + eps_rel * np.max(
                [norm(xnew, 2), norm(znew, 2)])
            eps_dual = np.sqrt(n) * eps_abs + eps_rel * norm(y, 2)

            if (res_prim < eps_prim) and (res_dual < eps_dual):
                break
            else:
                z = znew

        # record output data
        answer.xsp[:, i] = z.squeeze()  # vector of amplitudes
        answer.Nz[i] = np.count_nonzero(
            answer.xsp[:, i])  # number of non-zero amplitudes
        answer.Jsp[i] = (np.real(dot(dot(z.conj().T, P), z)) -
                         2 * np.real(dot(q.conj().T, z)) + s
                         )  # square of Frobenius norm (before polishing)

        # polishing of the nonzero amplitudes
        # form the constraint matrix E for E^T x = 0
        ind_zero = np.flatnonzero(
            np.abs(z) < 1e-12)  # find indices of zero elements of z
        m = len(ind_zero)  # number of zero elements

        if m > 0:

            # form KKT system for the optimality conditions
            E = I[:, ind_zero]
            E = sparse(E, dtype='complex')
            KKT = spvstack([
                sphstack([Psparse, E], format='csc'),
                sphstack(
                    [E.conj().T, sparse((m, m), dtype='complex')],
                    format='csc'),
            ],
                           format='csc')
            rhs = np.vstack([q, np.zeros([m, 1],
                                         dtype='complex')])  # stack vertically

            # solve KKT system
            sol = spsolve(KKT, rhs)
        else:
            sol = solve(P, q)

        # vector of polished (optimal) amplitudes
        xpol = sol[:n]

        # record output data
        answer.xpol[:, i] = xpol.squeeze()

        # polished (optimal) least-squares residual
        answer.Jpol[i] = (np.real(dot(dot(xpol.conj().T, P), xpol)) -
                          2 * np.real(dot(q.conj().T, xpol)) + s)

        # polished (optimal) performance loss
        answer.Ploss[i] = 100 * np.sqrt(answer.Jpol[i] / s)

        print(i)

    return answer
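The z-minimization step above is elementwise complex soft-thresholding, i.e. the proximal operator of ``a * |z|``; a standalone sketch of that rule (names are illustrative, not taken from the code above):

import numpy as np

def complex_soft_threshold(v, a):
    """Shrink the magnitude of each entry of v by a, keep its phase, zero out |v| <= a."""
    out = np.zeros_like(v)
    mask = np.abs(v) > a
    out[mask] = (1 - a / np.abs(v[mask])) * v[mask]
    return out

v = np.array([3 + 4j, 0.1 + 0.1j])
print(complex_soft_threshold(v, 1.0))  # first entry shrunk from magnitude 5 to 4, second zeroed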
Example #10
def _solver_arc_length_riks(an, silent=False):
    r"""Arc-Length solver using the Riks method

    """
    msg('___________________________________________', level=1, silent=silent)
    msg('                                           ', level=1, silent=silent)
    msg('Arc-Length solver using Riks implementation', level=1, silent=silent)
    msg('___________________________________________', level=1, silent=silent)
    msg('Initializing...', level=1, silent=silent)
    lbd = 0.
    arc_length = an.initialInc
    length = arc_length
    dlbd = arc_length
    max_arc_length = an.maxArcLength

    modified_NR = an.modified_NR
    kC = an.calc_kC(silent=True)
    fext = an.calc_fext(inc=1., silent=True)
    kT = kC
    c = solve(kC, arc_length * fext, silent=True)
    fint = kC * c
    dc = c
    c_last = 0 * c

    step_num = 1

    if modified_NR:
        compute_NL_matrices = False
    else:
        compute_NL_matrices = True

    while step_num < 1000:
        msg('Step %d, lbd %1.5f, arc-length %1.5f' %
            (step_num, lbd, arc_length),
            level=1,
            silent=silent)
        min_Rmax = 1.e6
        prev_Rmax = 1.e6
        converged = False
        iteration = 0
        varlbd = 0
        varc = 0
        phi = 1  # spherical arc-length

        while True:
            iteration += 1
            if iteration > an.maxNumIter:
                warn('Maximum number of iterations achieved!',
                     level=2,
                     silent=silent)
                break
            q = fext
            TMP = sphstack((kT, -q[:, None]), format='lil')
            dcext = np.concatenate((dc, [0.]))
            TMP = spvstack((TMP, 2 * dcext[None, :]), format='lil')
            TMP[-1, -1] = 2 * phi**2 * dlbd * np.dot(q, q)
            TMP = TMP.tocsr()
            right_vec = np.zeros(q.shape[0] + 1, dtype=q.dtype)

            R = fint - (lbd + dlbd) * q
            A = -(np.dot(dc, dc) + phi**2 * dlbd**2 * np.dot(q, q) -
                  arc_length**2)
            right_vec[:-1] = -R
            right_vec[-1] = A
            solution = solve(TMP, right_vec, silent=True)
            varc = solution[:-1]
            varlbd = solution[-1]

            dlbd = dlbd + varlbd
            dc = dc + varc

            msg('iter %d, lbd+dlbd %1.5f' % (iteration, lbd + dlbd),
                level=2,
                silent=silent)

            # computing the Non-Linear matrices
            if compute_NL_matrices:
                kC = an.calc_kC(c=(c + dc), NLgeom=True, silent=True)
                kG = an.calc_kG(c=(c + dc), NLgeom=True, silent=True)
                kT = kC + kG
                if modified_NR:
                    compute_NL_matrices = False
            else:
                if not modified_NR:
                    compute_NL_matrices = True

            # calculating the residual
            fint = an.calc_fint(c + dc, silent=True)
            Rmax = np.abs((lbd + dlbd) * fext - fint).max()
            if iteration >= 2 and Rmax <= an.absTOL:
                converged = True
                break
            if (Rmax > min_Rmax and Rmax > prev_Rmax and iteration > 3):
                warn('Diverged - Rmax value significantly increased',
                     level=2,
                     silent=silent)
                break
            else:
                min_Rmax = min(min_Rmax, Rmax)
            change_rate_Rmax = abs(1 - Rmax / prev_Rmax)
            if (iteration > 2 and change_rate_Rmax < an.too_slow_TOL):
                warn('Diverged - convergence too slow', level=2, silent=silent)
                break
            prev_Rmax = Rmax

        if converged:
            step_num += 1
            msg('Converged at lbd+dlbd of %1.5f, total length %1.5f' %
                (lbd + dlbd, length),
                level=2,
                silent=silent)
            length += arc_length

            lbd = lbd + dlbd

            arc_length *= 1.1111

            dlbd = arc_length
            c_last = c.copy()
            c = c + dc

            an.increments.append(lbd)
            an.cs.append(c.copy())

        else:
            msg('Resetting step with reduced arc-length',
                level=2,
                silent=silent)
            arc_length *= 0.90

        if length >= max_arc_length:
            msg('Maximum specified arc-length of %1.5f achieved' %
                max_arc_length,
                level=2,
                silent=silent)
            break

        dc = c - c_last
        dlbd = arc_length

        kC = an.calc_kC(c=c, NLgeom=True, silent=True)
        kG = an.calc_kG(c=c, NLgeom=True, silent=True)
        kT = kC + kG
        fint = an.calc_fint(c=c, silent=True)
        compute_NL_matrices = False

    msg('Finished Non-Linear Static Analysis', silent=silent)
    msg('    total arc-length %1.5f' % length, level=1, silent=silent)
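The bordered system assembled inside the iteration loop above follows a reusable pattern: stack the tangent stiffness, the load column and the arc-length constraint row in 'lil' format so the corner entry can still be written, then convert to CSR for the solve. A toy sketch with made-up numbers:

import numpy as np
from scipy.sparse import csr_matrix, hstack as sphstack, vstack as spvstack

kT = csr_matrix(np.array([[4.0, -1.0], [-1.0, 3.0]]))  # toy tangent stiffness
q = np.array([1.0, 2.0])                               # toy load vector
dc = np.array([0.1, 0.2])                              # current solution increment
dlbd, phi = 0.05, 1.0                                  # load increment, arc-length shape factor

TMP = sphstack((kT, -q[:, None]), format='lil')        # [kT | -q]
dcext = np.concatenate((dc, [0.0]))
TMP = spvstack((TMP, 2 * dcext[None, :]), format='lil')
TMP[-1, -1] = 2 * phi**2 * dlbd * np.dot(q, q)         # arc-length constraint corner term
TMP = TMP.tocsr()
print(TMP.toarray())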