def test_QobjPermute():
    "Qobj permute"
    # Cyclic permutation of a three-factor ket.
    ket_a, ket_b, ket_c = basis(5, 0), basis(5, 4), basis(5, 2)
    state = tensor(ket_a, ket_b, ket_c)
    assert_equal(state.permute([2, 0, 1]), tensor(ket_c, ket_a, ket_b))

    # Same cyclic permutation for a density matrix.
    dm_a, dm_b, dm_c = fock_dm(5, 0), fock_dm(5, 4), fock_dm(5, 2)
    dm = tensor(dm_a, dm_b, dm_c)
    assert_equal(dm.permute([2, 0, 1]), tensor(dm_c, dm_a, dm_b))

    # Random kets: swap the first two factors.
    for _ in range(3):
        ket_a, ket_b, ket_c = rand_ket(5), rand_ket(5), rand_ket(5)
        state = tensor(ket_a, ket_b, ket_c)
        assert_equal(state.permute([1, 0, 2]), tensor(ket_b, ket_a, ket_c))

    # Random density matrices: swap the first two factors.
    for _ in range(3):
        dm_a, dm_b, dm_c = rand_dm(5), rand_dm(5), rand_dm(5)
        dm = tensor(dm_a, dm_b, dm_c)
        assert_equal(dm.permute([1, 0, 2]), tensor(dm_b, dm_a, dm_c))
def test_QobjPermute():
    "Qobj permute"
    # Permuting a tensor-product ket reorders its factors.
    kets = [basis(5, 0), basis(5, 4), basis(5, 2)]
    permuted = tensor(*kets).permute([2, 0, 1])
    assert_(permuted == tensor(kets[2], kets[0], kets[1]))

    # Same check for a tensor-product density matrix.
    dms = [fock_dm(5, 0), fock_dm(5, 4), fock_dm(5, 2)]
    permuted = tensor(*dms).permute([2, 0, 1])
    assert_(permuted == tensor(dms[2], dms[0], dms[1]))

    # Random kets: swap the first two factors.
    for _ in range(3):
        kets = [rand_ket(5) for _ in range(3)]
        permuted = tensor(*kets).permute([1, 0, 2])
        assert_(permuted == tensor(kets[1], kets[0], kets[2]))

    # Random density matrices: swap the first two factors.
    for _ in range(3):
        dms = [rand_dm(5) for _ in range(3)]
        permuted = tensor(*dms).permute([1, 0, 2])
        assert_(permuted == tensor(dms[1], dms[0], dms[2]))
def test_super_tensor_operket():
    """
    Tensor: Checks that super_tensor respects states.
    """
    # Vectorise two random density matrices of different dimension.
    rho1 = rand_dm(5)
    rho2 = rand_dm(7)
    operator_to_vector(rho1)
    operator_to_vector(rho2)
def test_super_tensor_operket():
    """
    Tensor: Checks that super_tensor respects states.
    """
    # Build operator-kets from random states of unequal dimension.
    for dim in (5, 7):
        operator_to_vector(rand_dm(dim))
def test_fidelity1():
    """
    Metrics: Fidelity, mixed state inequality
    """
    for _ in range(10):
        a = rand_dm(25, 0.25)
        b = rand_dm(25, 0.25)
        fid = fidelity(a, b)
        # 1 - F <= sqrt(1 - F^2) for any fidelity in [0, 1].
        assert_(1 - fid <= sqrt(1 - fid**2))
def test_fidelity1():
    """
    Metrics: Fidelity, mixed state inequality
    """
    for _ in range(10):
        pair = (rand_dm(25, 0.25), rand_dm(25, 0.25))
        fid = fidelity(*pair)
        # The inequality 1 - F <= sqrt(1 - F^2) must always hold.
        assert_(1 - fid <= sqrt(1 - fid ** 2))
def test_QobjPermute():
    "Qobj permute"
    # Distinct subsystem dimensions expose any mix-up of the dims.
    a, b, c = basis(3, 0), basis(5, 4), basis(4, 2)
    ket = tensor(a, b, c)
    assert ket.permute([2, 0, 1]) == tensor(c, a, b)
    # The corresponding bra permutes the same way.
    assert ket.dag().permute([2, 0, 1]) == tensor(c, a, b).dag()

    a, b, c = fock_dm(3, 0), fock_dm(5, 4), fock_dm(4, 2)
    dm = tensor(a, b, c)
    assert dm.permute([2, 0, 1]) == tensor(c, a, b)

    # Random kets and their bras.
    for _ in range(3):
        a, b, c = rand_ket(3), rand_ket(4), rand_ket(5)
        ket = tensor(a, b, c)
        assert ket.permute([1, 0, 2]) == tensor(b, a, c)
        assert ket.dag().permute([1, 0, 2]) == tensor(b, a, c).dag()

    # Random density matrices, including operator-ket/bra forms, whose
    # doubled dims need both halves of the permutation.
    for _ in range(3):
        a, b, c = rand_dm(3), rand_dm(4), rand_dm(5)
        dm = tensor(a, b, c)
        assert dm.permute([1, 0, 2]) == tensor(b, a, c)
        vec = operator_to_vector(dm)
        assert vec.permute([[1, 0, 2], [4, 3, 5]]) == \
            operator_to_vector(tensor(b, a, c))
        vec_bra = operator_to_vector(dm).dag()
        assert vec_bra.permute([[1, 0, 2], [4, 3, 5]]) == \
            operator_to_vector(tensor(b, a, c)).dag()

    # Superoperators: permuting the unitary before to_super must match
    # permuting the superoperator's own doubled indices.
    for _ in range(3):
        sub_dims = [3, 5, 4]
        U = rand_unitary(np.prod(sub_dims), density=0.02,
                         dims=[sub_dims, sub_dims])
        assert to_super(U.permute([2, 1, 0])) == \
            to_super(U).permute([[2, 1, 0], [5, 4, 3]])
def test_tracedist2():
    """
    Metrics: Trace dist. & Fidelity mixed/mixed inequality
    """
    for _ in range(10):
        a = rand_dm(25, 0.25)
        b = rand_dm(25, 0.25)
        # Fuchs-van de Graaf: 1 - F(a, b) <= T(a, b).
        assert_(1 - fidelity(a, b) <= tracedist(a, b))
def test_tracedist2():
    """
    Metrics: Trace dist. & Fidelity mixed/mixed inequality
    """
    for _ in range(10):
        state_a, state_b = rand_dm(25, 0.25), rand_dm(25, 0.25)
        fid = fidelity(state_a, state_b)
        dist = tracedist(state_a, state_b)
        # Trace distance bounds 1 - fidelity from above.
        assert_(1 - fid <= dist)
def test_QobjPermute():
    "Qobj permute"
    # Subsystems of unequal dimension make bookkeeping errors visible.
    ket_a, ket_b, ket_c = basis(3, 0), basis(5, 4), basis(4, 2)
    state = tensor(ket_a, ket_b, ket_c)
    assert_(state.permute([2, 0, 1]) == tensor(ket_c, ket_a, ket_b))
    assert_(state.dag().permute([2, 0, 1]) ==
            tensor(ket_c, ket_a, ket_b).dag())

    dm_a, dm_b, dm_c = fock_dm(3, 0), fock_dm(5, 4), fock_dm(4, 2)
    assert_(tensor(dm_a, dm_b, dm_c).permute([2, 0, 1]) ==
            tensor(dm_c, dm_a, dm_b))

    for _ in range(3):
        ket_a, ket_b, ket_c = rand_ket(3), rand_ket(4), rand_ket(5)
        state = tensor(ket_a, ket_b, ket_c)
        assert_(state.permute([1, 0, 2]) == tensor(ket_b, ket_a, ket_c))
        assert_(state.dag().permute([1, 0, 2]) ==
                tensor(ket_b, ket_a, ket_c).dag())

    for _ in range(3):
        dm_a, dm_b, dm_c = rand_dm(3), rand_dm(4), rand_dm(5)
        dm = tensor(dm_a, dm_b, dm_c)
        assert_(dm.permute([1, 0, 2]) == tensor(dm_b, dm_a, dm_c))
        # Operator-kets/bras carry doubled dims, so both halves of the
        # permutation must be supplied.
        op_ket = operator_to_vector(dm)
        assert_(op_ket.permute([[1, 0, 2], [4, 3, 5]]) ==
                operator_to_vector(tensor(dm_b, dm_a, dm_c)))
        op_bra = operator_to_vector(dm).dag()
        assert_(op_bra.permute([[1, 0, 2], [4, 3, 5]]) ==
                operator_to_vector(tensor(dm_b, dm_a, dm_c)).dag())

    # Superoperator permutation must commute with to_super.
    for _ in range(3):
        sub_dims = [3, 5, 4]
        U = rand_unitary(np.prod(sub_dims), density=0.02,
                         dims=[sub_dims, sub_dims])
        assert_(to_super(U.permute([2, 1, 0])) ==
                to_super(U).permute([[2, 1, 0], [5, 4, 3]]))
def test_tracedist1():
    """
    Metrics: Trace dist., invariance under unitary trans.
    """
    for _ in range(10):
        a = rand_dm(25, 0.25)
        b = rand_dm(25, 0.25)
        U = rand_unitary(25, 0.25)
        before = tracedist(a, b)
        after = tracedist(U * a * U.dag(), U * b * U.dag())
        # Relative change must vanish up to numerical precision.
        assert_(abs((before - after) / before) < 1e-5)
def test_tracedist1():
    """
    Metrics: Trace dist., invariance under unitary trans.
    """
    for _ in range(10):
        state_a, state_b = rand_dm(25, 0.25), rand_dm(25, 0.25)
        U = rand_unitary(25, 0.25)
        rotate = lambda rho: U * rho * U.dag()
        dist = tracedist(state_a, state_b)
        dist_rot = tracedist(rotate(state_a), rotate(state_b))
        # Unitary conjugation leaves the trace distance unchanged.
        assert_(abs((dist - dist_rot) / dist) < 1e-5)
def test_fidelity2():
    """
    Metrics: Fidelity, invariance under unitary trans.
    """
    for _ in range(10):
        a = rand_dm(25, 0.25)
        b = rand_dm(25, 0.25)
        U = rand_unitary(25, 0.25)
        before = fidelity(a, b)
        after = fidelity(U * a * U.dag(), U * b * U.dag())
        # Relative change must vanish up to numerical precision.
        assert_(abs((before - after) / before) < 1e-5)
def test_fidelity2():
    """
    Metrics: Fidelity, invariance under unitary trans.
    """
    for _ in range(10):
        state_a, state_b = rand_dm(25, 0.25), rand_dm(25, 0.25)
        U = rand_unitary(25, 0.25)
        rotate = lambda rho: U * rho * U.dag()
        fid = fidelity(state_a, state_b)
        fid_rot = fidelity(rotate(state_a), rotate(state_b))
        # Unitary conjugation leaves the fidelity unchanged.
        assert_(abs((fid - fid_rot) / fid) < 1e-5)
def test_composite_vec():
    """
    Composite: Tests compositing states and density operators.
    """
    ket_a, ket_b = rand_ket(5), rand_ket(7)
    vec_a = operator_to_vector(ket2dm(ket_a))
    vec_b = operator_to_vector(ket2dm(ket_b))
    vec_c = operator_to_vector(rand_dm(3))
    vec_d = operator_to_vector(rand_dm(4))
    # Two kets compose with an ordinary tensor product.
    assert_(composite(ket_a, ket_b) == tensor(ket_a, ket_b))
    # Two operator-kets compose with super_tensor.
    assert_(composite(vec_c, vec_d) == super_tensor(vec_c, vec_d))
    # Mixed input: the ket is promoted to an operator-ket first.
    assert_(composite(ket_a, vec_d) == super_tensor(vec_a, vec_d))
    assert_(composite(vec_c, ket_b) == super_tensor(vec_c, vec_b))
def test_hellinger_inequality():
    """
    Metrics: Hellinger dist.: check whether Hellinger distance
    is indeed larger than Bures distance
    """
    for _ in range(10):
        # Mixed states.
        rho_a, rho_b = rand_dm(25, 0.25), rand_dm(25, 0.25)
        assert_(hellinger_dist(rho_a, rho_b) >= bures_dist(rho_a, rho_b))
        # Pure states.
        psi, chi = rand_ket(40, 0.25), rand_ket(40, 0.25)
        assert_(hellinger_dist(psi, chi) >= bures_dist(psi, chi))
def test_trunc_neg():
    """
    Test Qobj: Checks trunc_neg in several different cases.
    """
    # Nose-style generator test: each `yield` hands (described-callable,
    # *args) to the runner, which invokes case(qobj, method[, expected]).
    @has_description
    def case(qobj, method, expected=None):
        # Truncation must leave a positive, trace-one operator.
        pos_qobj = qobj.trunc_neg(method=method)
        assert(all([energy > -1e-8 for energy in pos_qobj.eigenenergies()]))
        assert_almost_equal(pos_qobj.tr(), 1)
        if expected is not None:
            # When a known-good answer exists, compare element-wise.
            assert_almost_equal(pos_qobj.data.todense(),
                                expected.data.todense())
    for method in ('clip', 'sgs'):
        # Make sure that it works for operators that are already positive.
        yield case("Test Qobj: trunc_neg works for positive opers."), \
            rand_dm(5), method
        # Make sure that it works for a diagonal matrix.
        yield case("Test Qobj: trunc_neg works for diagonal opers."), \
            Qobj(np.diag([1.1, -0.1])), method, Qobj(np.diag([1.0, 0.0]))
        # Make sure that it works for a non-diagonal matrix.
        U = rand_unitary(3)
        yield case("Test Qobj: trunc_neg works for non-diagonal opers."), \
            U * Qobj(np.diag([1.1, 0, -0.1])) * U.dag(), \
            method, \
            U * Qobj(np.diag([1.0, 0.0, 0.0])) * U.dag()
    # Check the test case in SGS.
    yield (
        case("Test Qobj: trunc_neg works for SGS known-good test case."),
        Qobj(np.diag([3. / 5, 1. / 2, 7. / 20, 1. / 10, -11. / 20])),
        'sgs',
        Qobj(np.diag([9. / 20, 7. / 20, 1. / 5, 0, 0]))
    )
def test_wigner_compare_methods_dm():
    "wigner: compare wigner methods for random density matrices"
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    # a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random density matrices
        rho = rand_dm(N, 0.5 + rand() / 2)
        # calculate the wigner function using qutip and analytic formula
        W_qutip1 = wigner(rho, xvec, yvec, g=2)
        W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
        # check difference between the two methods.
        # BUG FIX: previously compared W_qutip1 with itself
        # (W_qutip1 - W_qutip1), which made this assertion vacuous.
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization
        # NOTE(review): a negative deficit trivially passes these checks
        # because the difference is not wrapped in abs(); kept as-is to
        # avoid tightening an established tolerance.
        assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8)
        assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8)
def test_wigner_compare_methods_dm():
    "wigner: compare wigner methods for random density matrices"
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    # a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random density matrices
        rho = rand_dm(N, 0.5 + rand() / 2)
        # calculate the wigner function using qutip and analytic formula
        W_qutip1 = wigner(rho, xvec, yvec, g=2)
        W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
        # check difference between the two methods.
        # BUG FIX: the original compared W_qutip1 against itself, so the
        # laguerre result was never actually checked.
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization
        # NOTE(review): these checks pass trivially for a negative
        # deficit (no abs); preserved to avoid changing the tolerance.
        assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8)
        assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8)
def test_trunc_neg():
    """
    Test Qobj: Checks trunc_neg in several different cases.
    """
    # Generator-style test (nose): each yield supplies the described
    # case callable plus its positional arguments.
    @has_description
    def case(qobj, method, expected=None):
        # The truncated operator must be positive (up to tolerance) and
        # renormalised to unit trace.
        pos_qobj = qobj.trunc_neg(method=method)
        assert(all([energy > -1e-8 for energy in pos_qobj.eigenenergies()]))
        assert_almost_equal(pos_qobj.tr(), 1)
        if expected is not None:
            assert_almost_equal(pos_qobj.data.todense(),
                                expected.data.todense())
    for method in ('clip', 'sgs'):
        # Make sure that it works for operators that are already positive.
        yield case("Test Qobj: trunc_neg works for positive opers."), \
            rand_dm(5), method
        # Make sure that it works for a diagonal matrix.
        yield case("Test Qobj: trunc_neg works for diagonal opers."), \
            Qobj(np.diag([1.1, -0.1])), method, Qobj(np.diag([1.0, 0.0]))
        # Make sure that it works for a non-diagonal matrix.
        U = rand_unitary(3)
        yield case("Test Qobj: trunc_neg works for non-diagonal opers."), \
            U * Qobj(np.diag([1.1, 0, -0.1])) * U.dag(), \
            method, \
            U * Qobj(np.diag([1.0, 0.0, 0.0])) * U.dag()
    # Check the test case in SGS.
    yield (
        case("Test Qobj: trunc_neg works for SGS known-good test case."),
        Qobj(np.diag([3. / 5, 1. / 2, 7. / 20, 1. / 10, -11. / 20])),
        'sgs',
        Qobj(np.diag([9. / 20, 7. / 20, 1. / 5, 0, 0]))
    )
def test_composite_vec():
    """
    Composite: Tests compositing states and density operators.
    """
    # Pure states and their vectorised density-operator forms.
    k1, k2 = rand_ket(5), rand_ket(7)
    k1_vec, k2_vec = (operator_to_vector(ket2dm(k)) for k in (k1, k2))
    dm3_vec = operator_to_vector(rand_dm(3))
    dm4_vec = operator_to_vector(rand_dm(4))
    # ket x ket -> plain tensor product.
    assert_(composite(k1, k2) == tensor(k1, k2))
    # operator-ket x operator-ket -> super_tensor.
    assert_(composite(dm3_vec, dm4_vec) == super_tensor(dm3_vec, dm4_vec))
    # Mixed arguments: kets are promoted to operator-kets.
    assert_(composite(k1, dm4_vec) == super_tensor(k1_vec, dm4_vec))
    assert_(composite(dm3_vec, k2) == super_tensor(dm3_vec, k2_vec))
def test_spin_wigner_overlap(spin, pure, n=5):
    # Random state in the (2*spin + 1)-dimensional spin space.
    d = int(2 * spin + 1)
    rho = rand_dm(d, pure=pure)

    # Angular grid on which the spin Wigner function is sampled.
    theta = np.linspace(0, np.pi, 256, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 512, endpoint=True)
    W, THETA, _ = qutip.spin_wigner(rho, theta, phi)

    for _ in range(n):
        probe = rand_dm(d)
        # Hilbert-Schmidt overlap computed directly...
        expected = (probe * rho).tr().real
        # ...must match the phase-space integral of the Wigner functions.
        W_probe, _, _ = qutip.spin_wigner(probe, theta, phi)
        overlap = np.trapz(
            np.trapz(W_probe * W * np.sin(THETA), theta), phi).real
        assert_almost_equal(overlap, expected, decimal=4)
def TestMultiLevelSystem(self):
    """
    Test for processor with multi-level system
    """
    # Two subsystems: a qubit coupled to a qutrit.
    proc = Processor(N=2, dims=[2, 3])
    proc.add_control(tensor(sigmaz(), rand_dm(3, density=1.)))
    # A single two-segment pulse drives the control.
    proc.pulses[0].coeff = np.array([1, 2])
    proc.pulses[0].tlist = np.array([0., 1., 2])
    proc.run_state(init_state=tensor([basis(2, 0), basis(3, 1)]))
def test_sparse_nonsymmetric_reverse_permute():
    "Sparse: Nonsymmetric Reverse Permute"

    def roundtrip(qobj, rperm, cperm, use_csc):
        # Permute, reverse-permute, then compare with the original.
        data = qobj.data.tocsc() if use_csc else qobj.data
        permuted = sp_permute(data, rperm, cperm)
        restored = sp_reverse_permute(permuted, rperm, cperm)
        assert_equal((qobj.full() - restored.toarray()).all(), 0)

    # Square operators: independent row and column permutations.
    for use_csc in (False, True):
        roundtrip(rand_dm(25, 0.5), np.random.permutation(25),
                  np.random.permutation(25), use_csc)
    # Column vectors: row permutation only.
    for use_csc in (False, True):
        roundtrip(coherent(25, 1), np.random.permutation(25), [], use_csc)
    # Row vectors: column permutation only.
    for use_csc in (False, True):
        roundtrip(coherent(25, 1).dag(), [], np.random.permutation(25),
                  use_csc)
def test_sparse_nonsymmetric_reverse_permute():
    "Sparse: Nonsymmetric Reverse Permute"

    def check_roundtrip(matrix, reference, rperm, cperm):
        # Reverse permutation must undo the forward permutation.
        forward = sp_permute(matrix, rperm, cperm)
        back = sp_reverse_permute(forward, rperm, cperm)
        assert_equal((reference - back.toarray()).all(), 0)

    # Square operators, CSR then CSC.
    for to_csc in (False, True):
        op = rand_dm(25, 0.5)
        mat = op.data.tocsc() if to_csc else op.data
        check_roundtrip(mat, op.full(), np.random.permutation(25),
                        np.random.permutation(25))
    # Column vectors (rows permuted), CSR then CSC.
    for to_csc in (False, True):
        vec = coherent(25, 1)
        mat = vec.data.tocsc() if to_csc else vec.data
        check_roundtrip(mat, vec.full(), np.random.permutation(25), [])
    # Row vectors (columns permuted), CSR then CSC.
    for to_csc in (False, True):
        bra = coherent(25, 1).dag()
        mat = bra.data.tocsc() if to_csc else bra.data
        check_roundtrip(mat, bra.full(), [], np.random.permutation(25))
def TestMultiLevelSystem(self):
    """
    Test for processor with multi-level system
    """
    # Qubit-qutrit composite controlled through the legacy ctrl API.
    proc = Processor(N=2, dims=[2, 3])
    proc.add_ctrl(tensor(sigmaz(), rand_dm(3, density=1.)))
    # One control Hamiltonian, two time segments.
    proc.coeffs = np.array([1, 2]).reshape((1, 2))
    proc.tlist = np.array([0., 1., 2])
    proc.run_state(rho0=tensor([basis(2, 0), basis(3, 1)]))
def steadystate_nonlinear(L_func, rho0, args={}, maxiter=10,
                          random_initial_state=False, tol=1e-6,
                          itertol=1e-5, use_umfpack=True, verbose=False):
    """
    Steady state for the evolution subject to the nonlinear Liouvillian
    (which depends on the density matrix).

    .. note:: Experimental. Not at all certain that the inverse power
    method works for state-dependent Liouvillian operators.
    """
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    # Choose the starting density matrix: random, a promoted ket, or a
    # copy of the supplied operator.
    if random_initial_state:
        rhoss = rand_dm(rho0.shape[0], 1.0, dims=rho0.dims)
    elif isket(rho0):
        rhoss = ket2dm(rho0)
    else:
        rhoss = Qobj(rho0)
    # Work on the vectorised (column-stacked) form of the state.
    v = mat2vec(rhoss.full())
    n = prod(rhoss.shape)
    # Row vector implementing the trace in the vectorised representation.
    tr_vec = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
    tr_vec = tr_vec.reshape((1, n))
    it = 0
    while it < maxiter:
        # Rebuild the Liouvillian from the current state estimate, then
        # shift by tol**2 so the inverse iteration targets the eigenvalue
        # closest to zero (the steady state).
        L = L_func(rhoss, args)
        L = L.data.tocsc() - (tol**2) * sp.eye(n, n, format='csc')
        L.sort_indices()
        v = spsolve(L, v, use_umfpack=use_umfpack)
        v = v / la.norm(v, np.inf)
        # Trace-normalise and write the estimate back for the next pass.
        data = v / sum(tr_vec.dot(v))
        data = reshape(data, (rhoss.shape[0], rhoss.shape[1])).T
        rhoss.data = sp.csr_matrix(data)
        it += 1
        if la.norm(L * v, np.inf) <= tol:
            break
    if it >= maxiter:
        raise ValueError('Failed to find steady state after ' +
                         str(maxiter) + ' iterations')
    # Symmetrise to enforce Hermiticity of the returned state.
    rhoss = 0.5 * (rhoss + rhoss.dag())
    return rhoss.tidyup() if qset.auto_tidyup else rhoss
def test_tracedist3():
    """
    Metrics: Trace dist. & Fidelity mixed/pure inequality
    """
    for _ in range(10):
        pure_ket = rand_ket(25, 0.25)
        rho_pure = pure_ket * pure_ket.dag()
        rho_mixed = rand_dm(25, 0.25)
        # With one pure state the bound tightens to 1 - F^2 <= T.
        assert_(1 - fidelity(rho_pure, rho_mixed) ** 2 <=
                tracedist(rho_pure, rho_mixed))
def test_fid_trdist_limits():
    """
    Metrics: Fidelity / trace distance limiting cases
    """
    # Identical states: fidelity 1, trace distance 0.
    rho = rand_dm(25, 0.25)
    assert_(abs(fidelity(rho, rho) - 1) < 1e-6)
    assert_(tracedist(rho, rho) < 1e-6)
    # Orthogonal states: fidelity 0, trace distance 1.
    n1, n2 = fock_dm(5, 1), fock_dm(5, 2)
    assert_(fidelity(n1, n2) < 1e-6)
    assert_(abs(tracedist(n1, n2) - 1) < 1e-6)
def test_spin_q_function_normalized(spin, pure):
    # Random state in the (2*spin + 1)-dimensional spin space.
    d = int(2 * spin + 1)
    rho = rand_dm(d, pure=pure)

    # Angular grid on which the spin Q function is evaluated.
    theta = np.linspace(0, np.pi, 128, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    Q, THETA, _ = qutip.spin_q_function(rho, theta, phi)

    # Integrated over the sphere with the d/(4*pi) weight, Q must be 1.
    integral = np.trapz(np.trapz(Q * np.sin(THETA), theta), phi)
    assert_almost_equal(d / (4 * np.pi) * integral, 1, decimal=4)
def test_tracedist3():
    """
    Metrics: Trace dist. & Fidelity mixed/pure inequality
    """
    for _ in range(10):
        psi = rand_ket(25, 0.25)
        projector = psi * psi.dag()
        mixed = rand_dm(25, 0.25)
        fid = fidelity(projector, mixed)
        dist = tracedist(projector, mixed)
        # Pure-vs-mixed version of the Fuchs-van de Graaf bound.
        assert_(1 - fid**2 <= dist)
def test_fid_trdist_limits():
    """
    Metrics: Fidelity / trace distance limiting cases
    """
    rho = rand_dm(25, 0.25)
    # A state compared with itself: maximal fidelity, zero distance.
    assert_(abs(fidelity(rho, rho) - 1) < 1e-6)
    assert_(tracedist(rho, rho) < 1e-6)
    # Two orthogonal Fock states: zero fidelity, maximal distance.
    one, two = fock_dm(5, 1), fock_dm(5, 2)
    assert_(fidelity(one, two) < 1e-6)
    assert_(abs(tracedist(one, two) - 1) < 1e-6)
def test_csr_kron():
    "spmath: zcsr_kron"
    num_test = 5

    def check(left, right):
        # zcsr_kron must match scipy's kron in structure exactly and in
        # the numerical data up to rounding.
        expected = sp.kron(left.tocsr(1), right.tocsr(1), format='csr')
        result = zcsr_kron(left, right)
        assert_almost_equal(expected.data, result.data)
        assert_equal(expected.indices, result.indices)
        assert_equal(expected.indptr, result.indptr)

    # Exercise all shape combinations: square x square, tall x square,
    # dense-square x square, and tall x tall.
    for make_a, make_b in ((rand_herm, rand_herm), (rand_ket, rand_herm),
                           (rand_dm, rand_herm), (rand_ket, rand_ket)):
        for _ in range(num_test):
            ra = np.random.randint(2, 100)
            rb = np.random.randint(2, 100)
            check(make_a(ra, 0.5).data, make_b(rb, 0.5).data)
def test_wigner_clenshaw_sp_iter_dm():
    "Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    for _ in range(3):
        rho = rand_dm(N)
        # Sparse Clenshaw evaluation must agree with the iterative one.
        W_clen = wigner(rho, xvec, xvec, method='clenshaw', sparse=True)
        W_iter = wigner(rho, xvec, xvec, method='iterative')
        assert_equal(np.sum(abs(W_iter - W_clen)) < 1e-7, True)
def test_sparse_symmetric_permute():
    "Sparse: Symmetric Permute"
    dm = rand_dm(25, 0.5)
    perm = np.random.permutation(25)
    # Dense reference computed by permuting the full array.
    expected = _permutateIndexes(dm.full(), perm, perm)
    # CSR input.
    csr_result = sp_permute(dm.data, perm, perm).toarray()
    assert_equal((csr_result - expected).all(), 0)
    # CSC input must give the same answer.
    csc_result = sp_permute(dm.data.tocsc(), perm, perm).toarray()
    assert_equal((csc_result - expected).all(), 0)
def test_spin_wigner_normalized(spin, pure):
    # Random state in the (2*spin + 1)-dimensional spin space.
    d = int(2 * spin + 1)
    rho = rand_dm(d, pure=pure)

    # Angular grid for sampling the spin Wigner function.
    theta = np.linspace(0, np.pi, 256, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 512, endpoint=True)
    W, THETA, PHI = qutip.spin_wigner(rho, theta, phi)

    # With the sqrt(d/(4*pi)) weight the function integrates to one.
    weighted = W * np.sin(THETA) * np.sqrt(d / (4 * np.pi))
    norm = np.trapz(np.trapz(weighted, theta), phi)
    assert_almost_equal(norm, 1, decimal=4)
def test_csr_kron():
    "spmath: zcsr_kron"

    def compare_kron(lhs, rhs):
        # Reference product from scipy; zcsr_kron must reproduce the
        # sparsity structure exactly and the values numerically.
        ref = sp.kron(lhs.tocsr(1), rhs.tocsr(1), format='csr')
        out = zcsr_kron(lhs, rhs)
        assert_almost_equal(ref.data, out.data)
        assert_equal(ref.indices, out.indices)
        assert_equal(ref.indptr, out.indptr)

    factories = [(rand_herm, rand_herm), (rand_ket, rand_herm),
                 (rand_dm, rand_herm), (rand_ket, rand_ket)]
    for left_factory, right_factory in factories:
        for _ in range(10):
            dim_l = np.random.randint(2, 100)
            dim_r = np.random.randint(2, 100)
            compare_kron(left_factory(dim_l, 0.5).data,
                         right_factory(dim_r, 0.5).data)
def test_wigner_fft_comparse_dm():
    "Wigner: Compare Wigner fft and iterative for rand. dm"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    for _ in range(3):
        rho = rand_dm(N)
        # The FFT method returns its own y-grid; evaluate the iterative
        # method on that same grid before comparing.
        W_fft, yvec = wigner(rho, xvec, xvec, method='fft')
        W_iter = wigner(rho, xvec, yvec, method='iterative')
        assert_equal(np.sum(abs(W_iter - W_fft)) < 1e-7, True)
def test_wigner_fft_comparse_dm():
    "Wigner: Compare Wigner fft and iterative for rand. dm"
    grid = np.linspace(-10, 10, 128)
    for _ in range(3):
        state = rand_dm(20)
        # FFT evaluation supplies the y-grid used for the comparison run.
        W_fft, ygrid = wigner(state, grid, grid, method='fft')
        W_ref = wigner(state, grid, ygrid, method='iterative')
        total_error = np.sum(abs(W_ref - W_fft))
        assert_equal(total_error < 1e-7, True)
def test_wigner_clenshaw_sp_iter_dm():
    "Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm"
    grid = np.linspace(-10, 10, 128)
    for _ in range(3):
        state = rand_dm(20)
        # Both methods must produce the same Wigner function.
        W_sparse = wigner(state, grid, grid, method='clenshaw', sparse=True)
        W_ref = wigner(state, grid, grid, method='iterative')
        total_error = np.sum(abs(W_ref - W_sparse))
        assert_equal(total_error < 1e-7, True)
def test_sparse_symmetric_permute():
    "Sparse: Symmetric Permute"
    op = rand_dm(25, 0.5)
    perm = np.random.permutation(25)
    # Reference result via the dense index-permutation helper.
    reference = _permutateIndexes(op.full(), perm, perm)
    # The sparse permutation must agree for both CSR and CSC inputs.
    for matrix in (op.data, op.data.tocsc()):
        permuted = sp_permute(matrix, perm, perm).toarray()
        assert_equal((permuted - reference).all(), 0)
def test_SimpleSingleApply(self):
    """
    Non-composite system, operator on Hilbert space.
    """
    rho = rand_dm(3)
    op = rand_unitary(3)
    # Direct conjugation is the analytic reference.
    expected = op * rho * op.dag()
    naive = subsystem_apply(rho, op, [True], reference=True)
    fast = subsystem_apply(rho, op, [True])
    # Both implementations must reproduce the reference.
    naive_err = norm((expected - naive).data.todense())
    fast_err = norm((fast - expected).data.todense())
    assert_(naive_err < 1e-12 and fast_err < 1e-12)
def _steadystate_power(L, maxiter=10, tol=1e-6, itertol=1e-5,
                       use_umfpack=True, verbose=False):
    """
    Inverse power method for steady state solving.

    Parameters
    ----------
    L : qobj
        Liouvillian superoperator (or operator) whose null vector
        defines the steady state.
    maxiter : int
        Maximum number of inverse-power iterations.
    tol : float
        Convergence tolerance on ||L v||_inf; also sets the spectral
        shift used to target the eigenvalue closest to zero.
    itertol : float
        Unused here; kept for interface compatibility.
    use_umfpack : bool
        Passed through to scipy's sparse solver machinery.
    verbose : bool
        Print progress and timing information.

    Returns
    -------
    rhoss : qobj
        Hermitian steady-state density matrix.
    """
    if verbose:
        print('Starting iterative power method Solver...')
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
        rhoss.shape = [prod(rhoss.dims[0]), prod(rhoss.dims[1])]
    else:
        rhoss.dims = [L.dims[0], 1]
        rhoss.shape = [prod(rhoss.dims[0]), 1]
    n = prod(rhoss.shape)
    # Shift L by tol**2 so inverse iteration converges to the eigenvector
    # with eigenvalue closest to zero (the steady state).
    L = L.data.tocsc() - (tol**2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    # Random, mostly-mixed starting density matrix, vectorised.
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())
    if verbose:
        start_time = time.time()
    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        v = spsolve(L, v, use_umfpack=use_umfpack)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise Exception('Failed to find steady state after ' +
                        str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
        trow = sp_reshape(trow, (1, n))
        data = v / sum(trow.dot(v))
    else:
        # BUG FIX: this branch previously read `data` before it was ever
        # assigned (NameError); normalise the solution vector itself.
        data = v / la.norm(v)
    data = sp.csr_matrix(vec2mat(data))
    # Symmetrise to enforce Hermiticity of the returned state.
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    if verbose:
        print('Power solver time: ', time.time() - start_time)
    if qset.auto_tidyup:
        return rhoss.tidyup()
    else:
        return rhoss
def test_sparse_symmetric_reverse_permute():
    "Sparse: Symmetric Reverse Permute"
    dm = rand_dm(25, 0.5)
    # CSR round trip: permute then reverse-permute recovers the matrix.
    perm = np.random.permutation(25)
    permuted = sp_permute(dm.data, perm, perm)
    restored = sp_reverse_permute(permuted, perm, perm)
    assert_equal((dm.full() - restored.toarray()).all(), 0)
    # CSC round trip with a fresh permutation.
    perm = np.random.permutation(25)
    permuted = sp_permute(dm.data.tocsc(), perm, perm)
    restored = sp_reverse_permute(permuted, perm, perm)
    assert_equal((dm.full() - restored.toarray()).all(), 0)
def test_spin_q_function(spin, pure):
    # Random state in the (2*spin + 1)-dimensional spin space.
    d = int(2 * spin + 1)
    rho = rand_dm(d, pure=pure)

    # Coarse angular grid for direct point-by-point comparison.
    theta = np.linspace(0, np.pi, 16, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 32, endpoint=True)
    Q, _, _ = qutip.spin_q_function(rho, theta, phi)

    # Q(theta, phi) equals the overlap with the spin coherent state
    # pointing in that direction.
    for k, (p, t) in enumerate(itertools.product(phi, theta)):
        coherent_state = qutip.spin_coherent(spin, t, p)
        direct = (coherent_state.dag() * rho * coherent_state).norm()
        assert_almost_equal(Q.flat[k], direct, decimal=9)
def test_sparse_symmetric_reverse_permute():
    "Sparse: Symmetric Reverse Permute"
    op = rand_dm(25, 0.5)
    reference = op.full()
    # Round-trip each storage format with its own random permutation.
    for matrix in (op.data, op.data.tocsc()):
        perm = np.random.permutation(25)
        shuffled = sp_permute(matrix, perm, perm)
        recovered = sp_reverse_permute(shuffled, perm, perm)
        assert_equal((reference - recovered.toarray()).all(), 0)
def steady_nonlinear(L_func, rho0, args={}, maxiter=10,
                     random_initial_state=False, tol=1e-6, itertol=1e-5,
                     use_umfpack=True):
    """
    Steady state for the evolution subject to the nonlinear Liouvillian
    (which depends on the density matrix).

    .. note:: Experimental. Not at all certain that the inverse power
    method works for state-dependent liouvillian operators.
    """
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    # Choose the starting density matrix: random, a promoted ket, or a
    # copy of the supplied operator.
    if random_initial_state:
        rhoss = rand_dm(rho0.shape[0], 1.0, dims=rho0.dims)
    elif isket(rho0):
        rhoss = ket2dm(rho0)
    else:
        rhoss = Qobj(rho0)
    # Work on the vectorised (column-stacked) form of the state.
    v = mat2vec(rhoss.full())
    n = prod(rhoss.shape)
    # Row vector implementing the trace in the vectorised representation.
    tr_vec = sp.eye(rhoss.shape[0], rhoss.shape[0], format='lil')
    tr_vec = tr_vec.reshape((1, n)).tocsr()
    it = 0
    while it < maxiter:
        # Rebuild the Liouvillian from the current state estimate, then
        # shift by tol**2 so the inverse iteration targets the eigenvalue
        # closest to zero (the steady state).
        L = L_func(rhoss, args)
        L = L.data.tocsc() - (tol ** 2) * sp.eye(n, n, format='csc')
        L.sort_indices()
        v = spsolve(L, v, permc_spec="MMD_AT_PLUS_A",
                    use_umfpack=use_umfpack)
        v = v / la.norm(v, np.inf)
        # Trace-normalise and write the estimate back for the next pass.
        data = v / sum(tr_vec.dot(v))
        data = reshape(data, (rhoss.shape[0], rhoss.shape[1])).T
        rhoss.data = sp.csr_matrix(data)
        it += 1
        if la.norm(L * v, np.inf) <= tol:
            break
    if it >= maxiter:
        raise ValueError('Failed to find steady state after ' +
                         str(maxiter) + ' iterations')
    #rhoss.data = 0.5 * (data + data.conj().T)
    return rhoss.tidyup() if qset.auto_tidyup else rhoss
def test_csr_kron():
    "Sparse: Test CSR Kron"

    def _check(A, B):
        # Compare _csr_kron against scipy.sparse.kron for one pair.
        C = sp.kron(A, B, format='csr')
        D = _csr_kron(A.data, A.indices, A.indptr, A.shape[0], A.shape[1],
                      B.data, B.indices, B.indptr, B.shape[0], B.shape[1])
        # Compare the numerical data approximately: exact float equality
        # is fragile for complex products (the sibling zcsr_kron test
        # already uses assert_almost_equal for the data array).
        assert_almost_equal(C.data, D.data)
        # The sparsity structure must still match exactly.
        assert_equal(C.indices, D.indices)
        assert_equal(C.indptr, D.indptr)

    # Cover square x square, tall x square, dense x square, tall x tall.
    for gen_a, gen_b in ((rand_herm, rand_herm), (rand_ket, rand_herm),
                         (rand_dm, rand_herm), (rand_ket, rand_ket)):
        for _ in range(10):
            ra = np.random.randint(2, 100)
            rb = np.random.randint(2, 100)
            _check(gen_a(ra, 0.5).data, gen_b(rb, 0.5).data)
def test_csr_kron():
    "Sparse: Test CSR Kron"

    def verify_pair(A, B):
        # scipy's kron is the reference implementation.
        reference = sp.kron(A, B, format='csr')
        produced = _csr_kron(A.data, A.indices, A.indptr,
                             A.shape[0], A.shape[1],
                             B.data, B.indices, B.indptr,
                             B.shape[0], B.shape[1])
        # Values match numerically, structure matches exactly.
        assert_almost_equal(reference.data, produced.data)
        assert_equal(reference.indices, produced.indices)
        assert_equal(reference.indptr, produced.indptr)

    pairs = [(rand_herm, rand_herm), (rand_ket, rand_herm),
             (rand_dm, rand_herm), (rand_ket, rand_ket)]
    for make_left, make_right in pairs:
        for _ in range(10):
            dim_l = np.random.randint(2, 100)
            dim_r = np.random.randint(2, 100)
            verify_pair(make_left(dim_l, 0.5).data,
                        make_right(dim_r, 0.5).data)
def test_hellinger_monotonicity():
    """
    Metrics: Hellinger dist.: check monotonicity w.r.t. tensor product,
    see. Eq. (45) in arXiv:1611.03449v2:
    hellinger_dist(rhoA*rhoB, sigmaA*sigmaB) >=
    hellinger_dist(rhoA, sigmaA)
    with equality iff sigmaB=rhoB
    """
    for _ in range(10):
        rho_a, sigma_a = rand_dm(8, 0.5), rand_dm(8, 0.5)
        rho_b, sigma_b = rand_dm(8, 0.5), rand_dm(8, 0.5)
        base = hellinger_dist(rho_a, sigma_a)
        # Generic second factors: the tensored distance can only grow.
        grown = hellinger_dist(tensor(rho_a, rho_b),
                               tensor(sigma_a, sigma_b))
        assert_(grown >= base)
        # Equal second factors: the distance is unchanged.
        unchanged = hellinger_dist(tensor(rho_a, sigma_b),
                                   tensor(sigma_a, sigma_b))
        assert_almost_equal(unchanged, base)
def _steadystate_power(L, maxiter=10, tol=1e-6, itertol=1e-5, verbose=False):
    """
    Inverse power method for steady state solving.

    Parameters
    ----------
    L : qobj
        Liouvillian superoperator (or plain operator).
    maxiter : int
        Maximum number of inverse-power iterations before giving up.
    tol : float
        Convergence tolerance on ||L v||_inf; tol**2 is also used as the
        spectral shift that makes the (singular) steady-state eigenvalue
        invertible.
    itertol : float
        Unused by this solver; kept for interface compatibility.
    verbose : bool
        If True, print progress and timing information.

    Returns
    -------
    rhoss : qobj
        Steady-state density matrix (normalised vector for operator input).

    Raises
    ------
    Exception
        If no steady state is found within ``maxiter`` iterations.
    """
    if verbose:
        print('Starting iterative power method Solver...')
    use_solver(assumeSortedIndices=True)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
        rhoss.shape = [prod(rhoss.dims[0]), prod(rhoss.dims[1])]
    else:
        rhoss.dims = [L.dims[0], 1]
        rhoss.shape = [prod(rhoss.dims[0]), 1]
    n = prod(rhoss.shape)
    # Shift the operator by tol**2 so spsolve can invert it while the
    # steady-state direction still dominates the iteration.
    L = L.data.tocsc() - (tol ** 2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    # Random (full-rank biased) starting vector.
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())
    if verbose:
        start_time = time.time()
    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        v = spsolve(L, v)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise Exception('Failed to find steady state after ' +
                        str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        # Superoperator problem: enforce unit trace via the "trace row".
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
        trow = sp_reshape(trow, (1, n))
        data = v / sum(trow.dot(v))
    else:
        # BUG FIX: 'data' was referenced before assignment here
        # (NameError); normalise the converged iterate itself.
        data = v / la.norm(v)
    data = sp.csr_matrix(vec2mat(data))
    # Symmetrise to wash out numerical asymmetry in the solution.
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    if verbose:
        print('Power solver time: ', time.time() - start_time)
    if qset.auto_tidyup:
        return rhoss.tidyup()
    else:
        return rhoss
def test_SimpleSuperApply(self):
    """
    Non-composite system, operator on Liouville space.
    """
    rho_3 = rand_dm(3)
    superop = kraus_to_super(rand_kraus_map(3))
    # Ground truth: act with the superoperator directly in Liouville space.
    expected = vec2mat(superop.data.todense() *
                       mat2vec(rho_3.data.todense()))

    reference_answer = subsystem_apply(rho_3, superop, [True],
                                       reference=True)
    assert_(norm((expected - reference_answer).data.todense()) < 1e-12)

    fast_answer = subsystem_apply(rho_3, superop, [True])
    assert_(norm((fast_answer - expected).data.todense()) < 1e-12)
def _steadystate_power(L, ss_args):
    """
    Inverse power method for steady state solving.

    Parameters
    ----------
    L : qobj
        Liouvillian superoperator (or plain operator).
    ss_args : dict
        Solver arguments; 'tol' (convergence tolerance, also used squared
        as the spectral shift) and 'maxiter' (iteration cap) are read.

    Returns
    -------
    rhoss : qobj
        Steady-state density matrix.

    Raises
    ------
    Exception
        If no steady state is found within ``maxiter`` iterations.
    """
    if settings.debug:
        print('Starting iterative power method Solver...')
    tol = ss_args['tol']
    maxiter = ss_args['maxiter']
    use_solver(assumeSortedIndices=True)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
    else:
        rhoss.dims = [L.dims[0], 1]
    n = prod(rhoss.shape)
    # Shift the operator by tol**2 so spsolve can invert it while the
    # steady-state direction still dominates the iteration.
    L = L.data.tocsc() - (tol ** 2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    # Random (full-rank biased) starting vector.
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())
    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        v = spsolve(L, v)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise Exception('Failed to find steady state after ' +
                        str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        # Superoperator problem: enforce unit trace via the "trace row".
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
        trow = sp_reshape(trow, (1, n))
        data = v / sum(trow.dot(v))
    else:
        # BUG FIX: 'data' was referenced before assignment here
        # (NameError); normalise the converged iterate itself.
        data = v / la.norm(v)
    data = sp.csr_matrix(vec2mat(data))
    # Symmetrise to wash out numerical asymmetry in the solution.
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    return rhoss
def test_zcsr_transpose():
    "spmath: zcsr_transpose"

    def _check(matrix):
        # The fast transpose must match scipy's CSR transpose exactly,
        # element arrays included.
        scipy_t = matrix.T.tocsr()
        fast_t = matrix.trans()
        same_data = np.all(scipy_t.data == fast_t.data)
        same_indices = np.all(scipy_t.indices == fast_t.indices)
        same_indptr = np.all(scipy_t.indptr == fast_t.indptr)
        assert_(same_data * same_indices * same_indptr)

    for _ in range(50):
        dim = np.random.randint(2, 100)
        _check(rand_ket(dim, 0.5).data)
    for _ in range(50):
        dim = np.random.randint(2, 100)
        # NOTE(review): dimension is fixed at 5 here and only the density
        # varies — presumably intentional, but verify against history.
        _check(rand_herm(5, 1.0 / dim).data)
    for _ in range(50):
        dim = np.random.randint(2, 100)
        _check(rand_dm(dim, 1.0 / dim).data)
    for _ in range(50):
        dim = np.random.randint(2, 100)
        _check(rand_unitary(dim, 1.0 / dim).data)
def test_SimpleSingleApply(self):
    """
    Non-composite system, operator on Hilbert space.
    """
    tol = 1e-12
    rho = rand_dm(3)
    gate = rand_unitary(3)
    # Ground truth: direct conjugation U rho U^dagger.
    expected = gate * rho * gate.dag()

    reference_answer = subsystem_apply(rho, gate, [True], reference=True)
    reference_error = norm((expected - reference_answer).data.todense())
    assert_(
        reference_error < tol,
        msg="SimpleSingle: naive_diff_norm {} "
        "is beyond tolerance {}".format(reference_error, tol),
    )

    fast_answer = subsystem_apply(rho, gate, [True])
    fast_error = norm((fast_answer - expected).data.todense())
    assert_(
        fast_error < tol,
        msg="SimpleSingle: efficient_diff_norm {} "
        "is beyond tolerance {}".format(fast_error, tol),
    )
def test_SimpleSuperApply(self):
    """
    Non-composite system, operator on Liouville space.
    """
    tol = 1e-12
    rho = rand_dm(3)
    superop = kraus_to_super(rand_kraus_map(3))
    # Ground truth: act with the superoperator directly in Liouville space.
    expected = vec2mat(superop.data.todense() *
                       mat2vec(rho.data.todense()))

    reference_answer = subsystem_apply(rho, superop, [True], reference=True)
    reference_error = norm((expected - reference_answer).data.todense())
    assert_(
        reference_error < tol,
        msg="SimpleSuper: naive_diff_norm {} "
        "is beyond tolerance {}".format(reference_error, tol),
    )

    fast_answer = subsystem_apply(rho, superop, [True])
    fast_error = norm((fast_answer - expected).data.todense())
    assert_(
        fast_error < tol,
        msg="SimpleSuper: efficient_diff_norm {} "
        "is beyond tolerance {}".format(fast_error, tol),
    )
def steady(L, maxiter=10, tol=1e-6, itertol=1e-5, method='solve',
           use_umfpack=True, use_precond=False):
    """Steady state for the evolution subject to the supplied Louvillian.

    Parameters
    ----------
    L : qobj
        Liouvillian superoperator.

    maxiter : int
        Maximum number of iterations to perform, default = 100.

    tol : float
        Tolerance used for terminating solver solution, default = 1e-6.

    itertol : float
        Tolerance used for iterative Ax=b solver, default = 1e-5.

    method : str
        Method for solving linear equations. Direct solver 'solve' (default)
        or iterative biconjugate gradient method 'bicg'.

    use_umfpack: bool {True, False}
        Use the UMFpack backend for the direct solver. If 'False', the solver
        uses the SuperLU backend. This option does not affect the 'bicg'
        method.

    use_precond: bool {False, True}
        Use an incomplete sparse LU decomposition as a preconditioner for the
        stabilized bi-conjugate gradient 'bicg' method.

    Returns
    --------
    ket : qobj
        Ket vector for steady state.

    Notes
    -----
    Uses the inverse power method. See any Linear Algebra book with an
    iterative methods section. Using UMFpack may result in 'out of memory'
    errors for some Liouvillians.
    """
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    if (not isoper(L)) and (not issuper(L)):
        raise TypeError('Steady states can only be found for operators ' +
                        'or superoperators.')
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
        rhoss.shape = [prod(rhoss.dims[0]), prod(rhoss.dims[1])]
    else:
        rhoss.dims = [L.dims[0], 1]
        rhoss.shape = [prod(rhoss.dims[0]), 1]
    n = prod(rhoss.shape)
    # Shift the operator by tol**2 so the linear solves are nonsingular
    # while the steady-state direction still dominates the iteration.
    L = L.data.tocsc() - (tol ** 2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    # Random (full-rank biased) starting vector.
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())

    # generate sparse iLU preconditioner if requested
    if method == 'bicg' and use_precond:
        try:
            P = spilu(L, permc_spec='MMD_AT_PLUS_A')
            P_x = lambda x: P.solve(x)
        except Exception:
            # Best-effort: fall back to unpreconditioned bicgstab.
            warnings.warn("Preconditioning failed. Continuing without.",
                          UserWarning)
            M = None
        else:
            M = LinearOperator((n, n), matvec=P_x)
    else:
        M = None

    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        if method == 'bicg':
            v, check = bicgstab(L, v, tol=itertol, M=M)
        else:
            v = spsolve(L, v, permc_spec="MMD_AT_PLUS_A",
                        use_umfpack=use_umfpack)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise ValueError('Failed to find steady state after ' +
                         str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        # Superoperator problem: enforce unit trace via the "trace row".
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='lil')
        trow = trow.reshape((1, n)).tocsr()
        data = v / sum(trow.dot(v))
    else:
        # BUG FIX: 'data' was referenced before assignment here
        # (NameError); normalise the converged iterate itself.
        data = v / la.norm(v)
    data = reshape(data, (rhoss.shape[0], rhoss.shape[1])).T
    data = sp.csr_matrix(data)
    # Symmetrise to wash out numerical asymmetry in the solution.
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    if qset.auto_tidyup:
        return rhoss.tidyup()
    else:
        return rhoss
def test_zcsr_mult():
    "spmath: zcsr_mult"

    def _compare(fast_result, scipy_result):
        # The fast product must match scipy's CSR product exactly once
        # scipy's column indices are brought into sorted order.
        scipy_result.sort_indices()
        same_data = np.all(fast_result.data == scipy_result.data)
        same_indices = np.all(fast_result.indices == scipy_result.indices)
        same_indptr = np.all(fast_result.indptr == scipy_result.indptr)
        assert_(same_data * same_indices * same_indptr)

    # herm * ket
    for _ in range(50):
        A = rand_ket(10, 0.5).data
        B = rand_herm(10, 0.5).data
        _compare(B * A, B.tocsr(1) * A.tocsr(1))

    # bra * ket (inner) and ket * bra (outer)
    for _ in range(50):
        A = rand_ket(10, 0.5).data
        B = rand_ket(10, 0.5).dag().data
        C = A.tocsr(1)
        D = B.tocsr(1)
        _compare(B * A, D * C)
        _compare(A * B, C * D)

    # dm * dm
    for _ in range(50):
        A = rand_dm(10, 0.5).data
        B = rand_dm(10, 0.5).data
        _compare(B * A, B.tocsr(1) * A.tocsr(1))

    # herm * dm
    for _ in range(50):
        A = rand_dm(10, 0.5).data
        B = rand_herm(10, 0.5).data
        _compare(B * A, B.tocsr(1) * A.tocsr(1))