def make_real_sand_matrix(self, x, y):
    r"""Make the superoperator matrix representation of

        N[X,Y](rho) = (1/2) ( X rho Y† + Y rho X† )

    In the basis {Λ_j},

        N[X,Y](rho) = N(x,y)_jk rho_k Λ_j

    where

        N(x,y)_jk = Re[ x_m (D_mlj + iF_mlj) (y*)_n (D_knl + iF_knl) ]
        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

    x and y are vectorized representations of the operators X and Y stored
    in sparse format.

    `sparse.tensordot` might decide to return something dense, so the user
    should be aware of that.

    """
    result_A = sparse.tensordot(x, self.struct, ([0], [0]))
    result_B = sparse.tensordot(np.conj(y), self.struct, ([0], [1]))
    # sparse.tensordot fails if both arguments are numpy ndarrays, so we
    # force the intermediate arrays to be sparse
    if type(result_B) == np.ndarray:
        result_B = COO.from_numpy(result_B)
    if type(result_A) == np.ndarray:
        result_A = COO.from_numpy(result_A)
    result = sparse_real(sparse.tensordot(result_A, result_B, ([0], [1])))
    # We want our result to be dense, to make things predictable from the
    # outside.
    if type(result) == sparse.coo.COO:
        result = result.todense()
    return result.real
def make_real_comm_matrix(self, x, y):
    r"""Make the superoperator matrix representation of

        M[X,Y](rho) = (1/2) ( [X rho, Y†] + [Y, rho X†] )

    In the basis {Λ_j},

        M[X,Y](rho) = M(x,y)_jk rho_k Λ_j

    where

        M(x,y)_jk = -2 Im[ (y*)_n F_lnj x_m (D_mkl + iF_mkl) ]
        Λ_j Λ_k = (D_jkl + iF_jkl) Λ_l

    x and y are vectorized representations of the operators X and Y stored
    in sparse format.

    `sparse.tensordot` might decide to return something dense, so the user
    should be aware of that.

    """
    struct_imag = sparse_imag(self.struct)
    # sparse.tensordot fails if both arguments are numpy ndarrays, so we
    # force the intermediate arrays to be sparse
    result_A = sparse.tensordot(np.conj(y), struct_imag, ([0], [1]))
    result_B = sparse.tensordot(x, self.struct, ([0], [0]))
    if type(result_B) == np.ndarray:
        result_B = COO.from_numpy(result_B)
    if type(result_A) == np.ndarray:
        result_A = COO.from_numpy(result_A)
    result = -2 * sparse_imag(sparse.tensordot(result_A, result_B, ([0], [1])))
    # We want our result to be dense, to make things predictable from the
    # outside.
    if type(result) == sparse.coo.COO:
        result = result.todense()
    return result.real
def vectorize(self, op):
    """Return the components of `op` in the basis, computed as Tr[dual_j op]
    and stored as a sparse COO vector."""
    sparse_op = COO.from_numpy(op)
    result = sparse.tensordot(self.dual, sparse_op, ([1, 2], [1, 0]))
    if type(result) == np.ndarray:
        # I want the result stored in a sparse format even if it isn't
        # sparse.
        result = COO.from_numpy(result)
    return result
def __init__(self, dim):
    self.dim = dim
    self.basis = COO.from_numpy(np.array(gm.get_basis(dim)))
    self.sq_norms = COO.from_numpy(
        np.einsum('jmn,jnm->j', self.basis.todense(), self.basis.todense()))
    sq_norms_inv = COO.from_numpy(1 / self.sq_norms.todense())
    self.dual = self.basis * sq_norms_inv[:, None, None]
    # struct_{jkl} encodes products of basis elements:
    # Λ_j Λ_k = struct_{jkl} Λ_l (i.e. struct = D + iF in the notation of the
    # docstrings above).
    self.struct = sparse.tensordot(
        sparse.tensordot(self.basis, self.basis, ([2], [1])),
        self.dual, ([1, 3], [2, 1]))
    if type(self.struct) == np.ndarray:
        # Sometimes sparse.tensordot returns numpy arrays. We want to force
        # it to be sparse, since sparse.tensordot fails when passed two
        # numpy arrays.
        self.struct = COO.from_numpy(self.struct)
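# The make_real_sand_matrix and make_real_comm_matrix methods above call
# `sparse_real` and `sparse_imag`, which are not defined in this excerpt.
# A minimal sketch of what they might look like, assuming they take the
# elementwise real/imaginary part of a COO (or ndarray) argument; the helper
# names follow the calls above, but the implementation is a guess:
def sparse_real(a):
    # (a + a*) / 2; the zero fill value is preserved, so a COO stays sparse.
    return (a + np.conj(a)) / 2


def sparse_imag(a):
    # (a - a*) / 2i; real-valued entries, but returned with a complex dtype.
    return (a - np.conj(a)) / 2j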
def check_sparse_hamil_comm(sparse_basis, H, rho):
    h_vec = sparse_basis.vectorize(H)
    rho_vec = sparse_basis.vectorize(rho)
    dense_hamil_comm = -1j * mf.comm(H, rho)
    hamil_comm_matrix = sparse_basis.make_hamil_comm_matrix(h_vec)
    hamil_comm_vec = COO.from_numpy(hamil_comm_matrix @ rho_vec.todense())
    assert_almost_equal(
        np.abs(dense_hamil_comm - sparse_basis.matrize(hamil_comm_vec)).max(),
        0.0, 7)
def check_sparse_real_sand(sparse_basis, X, rho, Y):
    x_vec = sparse_basis.vectorize(X)
    y_vec = sparse_basis.vectorize(Y)
    rho_vec = sparse_basis.vectorize(rho)
    dense_real_sand = (X @ rho @ Y.conj().T + Y @ rho @ X.conj().T) / 2
    real_sand_matrix = sparse_basis.make_real_sand_matrix(x_vec, y_vec)
    real_sand_vec = COO.from_numpy(real_sand_matrix @ rho_vec.todense())
    assert_almost_equal(
        np.abs(dense_real_sand - sparse_basis.matrize(real_sand_vec)).max(),
        0.0, 7)
def check_sparse_real_comm(sparse_basis, X, rho, Y):
    x_vec = sparse_basis.vectorize(X)
    y_vec = sparse_basis.vectorize(Y)
    rho_vec = sparse_basis.vectorize(rho)
    dense_real_comm = (mf.comm(X @ rho, Y.conj().T) +
                       mf.comm(Y, rho @ X.conj().T)) / 2
    real_comm_matrix = sparse_basis.make_real_comm_matrix(x_vec, y_vec)
    real_comm_vec = COO.from_numpy(real_comm_matrix @ rho_vec.todense())
    assert_almost_equal(
        np.abs(dense_real_comm - sparse_basis.matrize(real_comm_vec)).max(),
        0.0, 7)
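# A hedged sketch of how the checks above might be driven. The basis class
# name `SparseBasis` is an assumption (any class exposing the constructor and
# methods shown earlier would do); the random operators are only illustrative.
def test_sparse_superoperator_matrices():
    dim = 4
    sparse_basis = SparseBasis(dim)  # hypothetical class name
    rng = np.random.default_rng(0)
    A = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
    H = (A + A.conj().T) / 2        # Hermitian "Hamiltonian"
    rho = A @ A.conj().T
    rho = rho / np.trace(rho)       # normalized density matrix
    X = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
    Y = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
    check_sparse_hamil_comm(sparse_basis, H, rho)
    check_sparse_real_sand(sparse_basis, X, rho, Y)
    check_sparse_real_comm(sparse_basis, X, rho, Y)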
def test_original_is_copied(self, shift, ax):
    xs = sparse.random((10, 10), density=0.5)
    xc = COO(np.copy(xs.coords), np.copy(xs.data), shape=xs.shape)
    sparse.roll(xs, shift, axis=ax)
    assert_eq(xs, xc)
def test_coord_dtype():
    s = sparse.random((2, 3, 4), density=0.5)
    assert s.coords.dtype == np.uint8

    s = COO.from_numpy(np.zeros(1000))
    assert s.coords.dtype == np.uint16
def test_empty_reduction():
    x = np.zeros((2, 3, 4), dtype=np.float_)
    xs = COO.from_numpy(x)
    assert_eq(x.sum(axis=(0, 2)), xs.sum(axis=(0, 2)))
def test_add_many_sparse_arrays():
    x = COO({(1, 1): 1})
    y = sum([x] * 100)
    assert y.nnz < np.prod(y.shape)
def test_single_dimension():
    x = COO([1, 3], [1.0, 3.0])
    assert x.shape == (4,)
    assert_eq(x, np.array([0, 1.0, 0, 3.0]))
def test_empty_shape():
    x = COO(np.empty((0, 1), dtype=np.int8), [1.0])
    assert x.shape == ()
    assert ((2 * x).todense() == np.array(2.0)).all()
def test_reshape(a, b):
    x = random_x(a)
    s = COO.from_numpy(x)
    assert_eq(x.reshape(b), s.reshape(b))
def test_slicing(index):
    x = random_x((2, 3, 4))
    s = COO.from_numpy(x)
    assert_eq(x[index], s[index])
import pytest
import random

import numpy as np
import scipy.sparse

from sparse import COO
import sparse

x = np.zeros(shape=(2, 3, 4), dtype=np.float32)
for i in range(10):
    x[random.randint(0, x.shape[0] - 1),
      random.randint(0, x.shape[1] - 1),
      random.randint(0, x.shape[2] - 1)] = random.randint(0, 100)
y = COO.from_numpy(x)


def random_x(shape, dtype=float):
    x = np.zeros(shape=shape, dtype=dtype)
    for i in range(max(5, np.prod(x.shape) // 10)):
        x[tuple(random.randint(0, d - 1) for d in x.shape)] = random.randint(0, 100)
    return x


def assert_eq(x, y):
    assert x.shape == y.shape
    assert x.dtype == y.dtype
    if hasattr(x, 'todense'):
        xx = x.todense()
    else:
        xx = x
    if hasattr(y, 'todense'):
        yy = y.todense()
    else:
        yy = y
    assert np.allclose(xx, yy)
def test_all_nan_reduction_warning(reduction, axis):
    x = random_value_array(np.nan, 1.0)(2 * 3 * 4).reshape(2, 3, 4)
    s = COO.from_numpy(x)

    with pytest.warns(RuntimeWarning):
        getattr(sparse, reduction)(s, axis=axis)
def calculate_third(atoms, replicated_atoms, third_order_delta,
                    distance_threshold=None, is_verbose=False):
    """
    Compute third order force constant matrices by using the central
    difference formula for the approximation
    """
    n_atoms = len(atoms.numbers)
    n_replicas = int(replicated_atoms.positions.shape[0] / n_atoms)
    # Lattice translation of each replica, used below for the
    # distance-threshold check. `list_of_replicas` was not defined in the
    # original excerpt; it is reconstructed here assuming the replicated
    # atoms are ordered replica by replica.
    list_of_replicas = (replicated_atoms.positions.reshape(
        (n_replicas, n_atoms, 3))[:, 0, :] - atoms.positions[0])

    i_at_sparse = []
    i_coord_sparse = []
    jat_sparse = []
    j_coord_sparse = []
    k_sparse = []
    value_sparse = []
    n_forces_to_calculate = n_replicas * (n_atoms * 3) ** 2
    n_forces_done = 0
    n_forces_skipped = 0
    for iat in range(n_atoms):
        for jat in range(n_replicas * n_atoms):
            is_computing = True
            m, j_small = np.unravel_index(jat, (n_replicas, n_atoms))
            if distance_threshold is not None:
                dxij = atoms.positions[iat] - (list_of_replicas[m] + atoms.positions[j_small])
                if np.linalg.norm(dxij) > distance_threshold:
                    is_computing = False
                    n_forces_skipped += 9
            if is_computing:
                if is_verbose:
                    logging.info('calculating forces on atoms: ' + str(iat) + ',' + str(jat))
                for icoord in range(3):
                    for jcoord in range(3):
                        value = calculate_single_third(atoms, replicated_atoms, iat, icoord,
                                                       jat, jcoord, third_order_delta)
                        for id in range(value.shape[0]):
                            i_at_sparse.append(iat)
                            i_coord_sparse.append(icoord)
                            jat_sparse.append(jat)
                            j_coord_sparse.append(jcoord)
                            k_sparse.append(id)
                            value_sparse.append(value[id])
                n_forces_done += 9
            if (n_forces_done + n_forces_skipped) % 300 == 0:
                logging.info('Calculate third derivatives ' + str(
                    int((n_forces_done + n_forces_skipped) / n_forces_to_calculate * 100)) + '%')

    logging.info('total forces to calculate third : ' + str(n_forces_to_calculate))
    logging.info('forces calculated : ' + str(n_forces_done))
    logging.info('forces skipped (outside distance threshold) : ' + str(n_forces_skipped))
    coords = np.array([i_at_sparse, i_coord_sparse, jat_sparse, j_coord_sparse, k_sparse])
    shape = (n_atoms, 3, n_replicas * n_atoms, 3, n_replicas * n_atoms * 3)
    phifull = COO(coords, np.array(value_sparse), shape)
    phifull = phifull.reshape(
        (n_atoms * 3, n_replicas * n_atoms * 3, n_replicas * n_atoms * 3))
    return phifull
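# A hedged sketch of the central-difference helper used above.
# `calculate_single_third` is not shown in this excerpt; the version below is
# an assumption: it treats `replicated_atoms` as an ASE-style Atoms object
# with a calculator attached, and returns the mixed second derivative of the
# forces with respect to the two displacements, flattened over
# (atom, coordinate) of the replicated system.
def calculate_single_third(atoms, replicated_atoms, iat, icoord, jat, jcoord, delta):
    phi = np.zeros(replicated_atoms.positions.shape[0] * 3)
    for sign_i, sign_j in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        # Displace the two atoms, evaluate the forces, then restore positions.
        replicated_atoms.positions[iat, icoord] += sign_i * delta
        replicated_atoms.positions[jat, jcoord] += sign_j * delta
        phi += sign_i * sign_j * replicated_atoms.get_forces().flatten()
        replicated_atoms.positions[iat, icoord] -= sign_i * delta
        replicated_atoms.positions[jat, jcoord] -= sign_j * delta
    # Central difference: d^2 F / (du_i du_j) ~ the four signed force
    # evaluations divided by 4*delta^2; the leading minus sign converts forces
    # into derivatives of the potential energy (sign convention may differ).
    return -phi / (4 * delta ** 2)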