def test_diag_raises():
  """`diag` must raise ValueError for unsupported inputs.

  Covered cases: a rank-3 `BlockSparseTensor` and a rank-2 `ChargeArray`
  (diag only accepts rank-1/rank-2 BlockSparseTensors).
  """
  np.random.seed(10)
  dims = [8, 9, 10]
  num_legs = len(dims)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-2, 3, (1, dims[n])), charge_types=[U1Charge]),
          False) for n in range(num_legs)
  ]
  rank3_tensor = BlockSparseTensor.random(indices)
  matrix_chargearray = ChargeArray.random([indices[0], indices[1]])
  with pytest.raises(ValueError):
    diag(rank3_tensor)
  with pytest.raises(ValueError):
    diag(matrix_chargearray)
def test_get_diag(dtype, num_charges, Ds, flow):
  """Check `diag` of a rank-2 tensor against manually extracted diagonals.

  The diagonal data returned by `diag` must match the per-block diagonals
  obtained via `_find_diagonal_sparse_blocks`, and the resulting charges
  must match the unique charges of the symmetric (zero-fused-charge)
  diagonal entries.
  """
  np.random.seed(10)
  # Map the boolean flow to a sign (False -> +1, True -> -1).
  # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
  # the builtin `int` is the documented replacement and is equivalent here.
  np_flow = -int((int(flow) - 0.5) * 2)
  indices = [
      Index(
          BaseCharge(
              np.random.randint(-2, 3, (num_charges, Ds[n])),
              charge_types=[U1Charge] * num_charges), flow)
      for n in range(2)
  ]
  arr = BlockSparseTensor.random(indices, dtype=dtype)
  fused = fuse_charges(arr.flat_charges, arr.flat_flows)
  # Linear positions where the fused charge is the zero charge, i.e. the
  # symmetric entries that survive in the block-diagonal structure.
  inds = np.nonzero(fused == np.zeros((num_charges, 1), dtype=np.int16))[0]
  # pylint: disable=no-member
  left, _ = np.divmod(inds, Ds[1])
  unique = np.unique(
      np_flow * (indices[0]._charges[0].charges[:, left]), axis=1)
  diagonal = diag(arr)
  sparse_blocks, _, block_shapes = _find_diagonal_sparse_blocks(
      arr.flat_charges, arr.flat_flows, 1)
  # Manually collect the diagonal of every dense sub-block.
  data = np.concatenate([
      np.diag(np.reshape(arr.data[sparse_blocks[n]], block_shapes[:, n]))
      for n in range(len(sparse_blocks))
  ])
  np.testing.assert_allclose(data, diagonal.data)
  np.testing.assert_allclose(unique, diagonal.flat_charges[0].unique_charges)
def test_get_empty_diag(dtype, num_charges, Ds):
  """`diag` of a tensor with no data yields an empty diagonal.

  Both the diagonal's data array and all of its flat charges must be empty.
  """
  np.random.seed(10)
  indices = []
  for n in range(2):
    leg_charge = BaseCharge(
        np.random.randint(-2, 3, (num_charges, Ds[n])),
        charge_types=[U1Charge] * num_charges)
    indices.append(Index(leg_charge, False))
  arr = BlockSparseTensor.random(indices, dtype=dtype)
  diagonal = diag(arr)
  np.testing.assert_allclose([], diagonal.data)
  assert all(len(c) == 0 for c in diagonal.flat_charges)
def test_eig_prod(dtype, Ds, num_charges):
  """Eigendecomposition round-trip: A == V @ diag(E) @ inv(V)."""
  np.random.seed(10)
  rank = len(Ds)
  charges = []
  for n in range(rank):
    charges.append(
        BaseCharge(
            np.random.randint(-5, 6, (num_charges, Ds[n]), dtype=np.int16),
            charge_types=[U1Charge] * num_charges))
  flows = [False] * rank
  inds = [Index(c, f) for c, f in zip(charges, flows)]
  # Pair each leg with a flipped-flow copy so the tensor can be reshaped
  # into a square matrix.
  A = BlockSparseTensor.random(
      inds + [i.copy().flip_flow() for i in inds], dtype=dtype)
  dim = np.prod(Ds)
  A = A.reshape([dim, dim])
  E, V = eig(A)
  reconstructed = V @ diag(E) @ inv(V)
  np.testing.assert_allclose(A.data, reconstructed.data)
def test_svd_prod(dtype, Ds, R1, num_charges):
  """SVD round-trip: A == U @ diag(S) @ V with full_matrices=False.

  Also verifies that dtype and the per-leg charges of the reconstruction
  match the original matrix.
  """
  np.random.seed(10)
  rank = len(Ds)
  charges = [
      BaseCharge(
          np.random.randint(-5, 6, (num_charges, Ds[n])),
          charge_types=[U1Charge] * num_charges) for n in range(rank)
  ]
  flows = [True] * rank
  A = BlockSparseTensor.random(
      [Index(c, f) for c, f in zip(charges, flows)], dtype=dtype)
  # Split the legs into two groups at R1 to form a matrix.
  rows = np.prod(Ds[:R1])
  cols = np.prod(Ds[R1:])
  A = A.reshape([rows, cols])
  U, S, V = svd(A, full_matrices=False)
  reconstructed = U @ diag(S) @ V
  assert reconstructed.dtype == A.dtype
  np.testing.assert_allclose(A.data, reconstructed.data)
  for orig_charge, rec_charge in zip(A._charges, reconstructed._charges):
    assert charge_equal(rec_charge, orig_charge)
def test_eigh_prod(dtype, Ds, num_charges):
  """Hermitian eigendecomposition round-trip: B == V @ diag(E) @ V^dagger.

  B = A + A^dagger is Hermitian by construction; the reconstruction must
  reproduce both its data and its per-leg charges.
  """
  np.random.seed(10)
  rank = len(Ds)
  charges = []
  for n in range(rank):
    charges.append(
        BaseCharge(
            np.random.randint(-5, 6, (num_charges, Ds[n]), dtype=np.int16),
            charge_types=[U1Charge] * num_charges))
  flows = [False] * rank
  inds = [Index(c, f) for c, f in zip(charges, flows)]
  A = BlockSparseTensor.random(
      inds + [i.copy().flip_flow() for i in inds], dtype=dtype)
  dim = np.prod(Ds)
  A = A.reshape([dim, dim])
  B = A + A.T.conj()
  E, V = eigh(B)
  reconstructed = V @ diag(E) @ V.conj().T
  np.testing.assert_allclose(B.data, reconstructed.data)
  for orig_charge, rec_charge in zip(B._charges, reconstructed._charges):
    assert charge_equal(rec_charge, orig_charge)
def test_create_diag(dtype, num_charges):
  """`diag` of a rank-1 ChargeArray builds the expected diagonal matrix.

  The nonzero entries of the dense diagonal matrix must equal the sparse
  data, and each diagonal sub-block must hold exactly the input entries
  belonging to that block's charge.
  """
  np.random.seed(10)
  dim = 200
  index = Index(
      BaseCharge(
          np.random.randint(-2, 3, (num_charges, dim)),
          charge_types=[U1Charge] * num_charges), False)
  vector = ChargeArray.random([index], dtype=dtype)
  diag_matrix = diag(vector)
  dense = np.ravel(diag_matrix.todense())
  np.testing.assert_allclose(
      np.sort(dense[dense != 0.0]),
      np.sort(diag_matrix.data[diag_matrix.data != 0.0]))
  sparse_blocks, charges, block_shapes = _find_diagonal_sparse_blocks(
      diag_matrix.flat_charges, diag_matrix.flat_flows, 1)
  for n, block in enumerate(sparse_blocks):
    block_diagonal = np.diag(
        np.reshape(diag_matrix.data[block], block_shapes[:, n]))
    # The block diagonal must equal the input entries carrying this charge.
    np.testing.assert_allclose(
        vector.data[np.squeeze(index._charges[0] == charges[n])],
        block_diagonal)
assert np.allclose(en_even0, en_even1) assert np.allclose(en_odd0, en_odd1) """ Example 2: compute truncated eigendecomposition of a reduced density matrix, keeping only the eigenvalues above some cut-off threshold """ rho_temp = BT.fromdense([ind_chib1] + [ind_chib0], np.array([[1, 0], [0, 0]], dtype=float)) V = V.reshape([2**(n_sites // 2), 2**(n_sites // 2), 2]) rho_half = tn.ncon([V, rho_temp, V.conj()], [[-1, 1, 2], [2, 3], [-2, 1, 3]]) # decomp with evalues sorted by magnitude E2, V2 = eigh(rho_half, which='LM', full_sort=False, threshold=1e-10, max_kept=15) rho_recover = V2 @ BLA.diag(E2) @ V2.T.conj() assert np.allclose(rho_half.todense(), rho_recover.todense()) # decomp with evalues sorted by magnitude within each charge block E2, V2 = eigh(rho_half, which='LM', threshold=1e-10, full_sort=False) rho_recover = V2 @ BLA.diag(E2) @ V2.T.conj() assert np.allclose(rho_half.todense(), rho_recover.todense()) # decomp with no truncation E2, V2 = eigh(rho_half, which='LM') rho_recover = V2 @ BLA.diag(E2) @ V2.T.conj() assert np.allclose(rho_half.todense(), rho_recover.todense())