def test_init_weighting(exponent):
    """`Fn` must pick the matching weighting class for each weight type."""
    const = 1.5
    vec_weight = _pos_array(Rn(3, float))
    mat_weight = _dense_matrix(Rn(3, float))

    # (weight argument, expected weighting instance)
    cases = [
        (const, FnConstWeighting(const, exponent=exponent)),
        (vec_weight, FnVectorWeighting(vec_weight, exponent=exponent)),
        (mat_weight, FnMatrixWeighting(mat_weight, exponent=exponent)),
    ]
    for weight, expected in cases:
        space = Fn(3, complex, exponent=exponent, weight=weight)
        assert space.weighting == expected
def _test_setslice(slice):
    """Slice assignment on an Rn element must match Python list semantics."""
    space = Rn(6)
    source = [7, 8, 9, 10, 11, 10]
    reference = [0, 1, 2, 3, 4, 5]
    elem = space.element(reference)

    # Apply the same slice assignment to both containers
    elem[slice] = source[slice]
    reference[slice] = source[slice]
    assert all_equal(elem, reference)
def test_vector_vector():
    """`FnVectorWeighting.vector` keeps the container type it was given."""
    space = Rn(5)
    arr = _pos_array(space)
    elem = space.element(arr)

    from_array = FnVectorWeighting(arr)
    from_elem = FnVectorWeighting(elem)
    assert isinstance(from_array.vector, np.ndarray)
    assert isinstance(from_elem.vector, FnVector)
def test_matrix_matrix():
    """`FnMatrixWeighting.matrix` preserves sparse/dense storage."""
    space = Rn(5)
    from_sparse = FnMatrixWeighting(_sparse_matrix(space))
    from_dense = FnMatrixWeighting(_dense_matrix(space))
    assert isinstance(from_sparse.matrix, sp.sparse.spmatrix)
    assert isinstance(from_dense.matrix, np.ndarray)
def test_matrix_norm(self):
    """Compute matrix norm of forward/backward projector via power method."""
    geom = Geometry(2)
    proj_vec = Rn(geom.proj_size).element(1)

    # Compute norm for simple least squares
    cp = ODLChambollePock(geom, proj_vec)
    self.assertEqual(cp.adj_scal_fac, 1)
    mat_norm0 = cp.matrix_norm(iterations=4, vol_init=1,
                               intermediate_results=True)
    self.assertTrue(mat_norm0[-1] > 0)

    # Resume computation: 3 iterations plus one resumed iteration must
    # reproduce the 4th intermediate result of the 4-iteration run.
    mat_norm1, vol = cp.matrix_norm(iterations=3, vol_init=1,
                                    intermediate_results=True,
                                    return_volume=True)
    mat_norm2 = cp.matrix_norm(iterations=4, vol_init=vol,
                               intermediate_results=True)
    self.assertNotEqual(mat_norm0[0], mat_norm2[0])
    self.assertEqual(mat_norm0[3], mat_norm2[0])

    # Compute norm for TV (K = (A, grad)); must differ from plain LS
    mat_norm3 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=True,
                               intermediate_results=True)
    self.assertFalse(np.array_equal(mat_norm2, mat_norm3))
    print('LS unit init volume:', mat_norm2)
    print('TV unit init volume:', mat_norm3)

    # Use non-homogeneous initial volume
    v0 = np.random.rand(geom.vol_size)
    mat_norm4 = cp.matrix_norm(iterations=4, vol_init=v0, tv_norm=False,
                               intermediate_results=True)
    mat_norm5 = cp.matrix_norm(iterations=4, vol_init=v0, tv_norm=True,
                               intermediate_results=True)
    print('LS random init volume:', mat_norm4)
    print('TV random init volume:', mat_norm5)

    # Test with adjoint scaling factor for backprojector
    self.assertEqual(cp.adj_scal_fac, 1)
    cp.adjoint_scaling_factor()
    self.assertFalse(cp.adj_scal_fac == 1)
    print('adjoint scaling factor:', cp.adj_scal_fac)
    mat_norm6 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=False,
                               intermediate_results=True)
    mat_norm7 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=True,
                               intermediate_results=True)
    print('LS init volume, adjoint rescaled:', mat_norm6)
    print('TV init volume, adjoint rescaled:', mat_norm7)
def test_vector_is_valid():
    """A weighting vector is valid only while all entries stay positive."""
    space = Rn(5)
    arr = _pos_array(space)
    assert FnVectorWeighting(arr).is_valid()

    # A zero entry invalidates the weighting
    arr[0] = 0
    assert not FnVectorWeighting(arr).is_valid()
def test_matvec_simple_properties():
    """Dense inputs are stored as ndarray, sparse input as spmatrix."""
    rect = 2 * np.eye(2, 3)
    dom, ran = Rn(3), Rn(2)

    # Every dense-like input (ndarray, numpy.matrix, nested list) is
    # normalized to a plain ndarray.
    for dense_input in (rect, np.asmatrix(rect), rect.tolist()):
        op = MatVecOperator(dense_input, dom, ran)
        assert isinstance(op.matrix, np.ndarray)
    assert not op.matrix_issparse

    sparse_op = MatVecOperator(_sparse_matrix(Rn(5)), Rn(5), Rn(5))
    assert isinstance(sparse_op.matrix, sp.sparse.spmatrix)
    assert sparse_op.matrix_issparse
def test_matrix_equiv():
    """`equiv` compares weightings by mathematical equivalence."""
    space = Rn(5)
    sparse = _sparse_matrix(space)
    dense = _dense_matrix(space)
    perturbed = dense.copy()
    perturbed[0, 0] = -10

    w_sp = FnMatrixWeighting(sparse)
    w_sp2 = FnMatrixWeighting(sparse)
    w_sp_as_dense = FnMatrixWeighting(sparse.todense())
    w_dn = FnMatrixWeighting(dense)
    w_dn_copy = FnMatrixWeighting(dense.copy())
    w_perturbed = FnMatrixWeighting(perturbed)

    # Identical or equal matrices -> equivalent
    assert w_sp.equiv(w_sp)
    assert w_sp.equiv(w_sp2)
    assert w_sp.equiv(w_sp_as_dense)
    assert w_dn.equiv(w_dn_copy)
    # A perturbed entry breaks equivalence
    assert not w_dn.equiv(w_perturbed)

    # Shortcut paths: identity-like matrices and other weighting kinds
    eye = sp.sparse.eye(5)
    w_eye = FnMatrixWeighting(eye)
    w_eye_dense = FnMatrixWeighting(eye.todense())
    w_eye_vec = FnVectorWeighting(np.ones(5))
    w_eye_wrong_exp = FnMatrixWeighting(eye, exponent=1)
    w_small_eye = FnMatrixWeighting(sp.sparse.eye(4))
    w_shift_eye = FnMatrixWeighting(sp.sparse.eye(5, k=1))
    w_almost_eye = FnMatrixWeighting(
        sp.sparse.dia_matrix((np.ones(4), [0]), (5, 5)))

    assert w_eye.equiv(w_eye_dense)
    assert w_eye_dense.equiv(w_eye)
    assert w_eye.equiv(w_eye_vec)
    assert not w_eye.equiv(w_eye_wrong_exp)
    assert not w_eye.equiv(w_small_eye)
    assert not w_eye.equiv(w_shift_eye)
    assert not w_small_eye.equiv(w_shift_eye)
    assert not w_eye.equiv(w_almost_eye)

    # Bogus (non-weighting) input is never equivalent
    assert not w_eye.equiv(True)
    assert not w_eye.equiv(object)
    assert not w_eye.equiv(None)
def test_creation_of_vector_in_rn(self):
    """Rn spaces and their elements expose the expected types and field."""
    geom = Geometry(2)
    space = Rn(geom.proj_size)
    self.assertEqual(type(space).__name__, 'Rn')

    zero_vec = space.element(np.zeros(geom.proj_size))
    self.assertEqual(type(zero_vec).__name__, 'Vector')
    self.assertEqual(space.dtype, 'float')
    self.assertEqual(space.field, odl.RealNumbers())

    # Constructing the solver from the geometry alone must also succeed
    ODLChambollePock(geom)
def test_lincomb_exceptions(fn):
    """`lincomb` rejects foreign-space vectors and non-scalar factors."""
    # Pick a space guaranteed to differ from `fn`
    otherfn = Rn(2) if fn.size == 1 else Rn(1)
    foreign = otherfn.zero()
    x, y, z = fn.zero(), fn.zero(), fn.zero()

    # Foreign vector in any slot, or a non-scalar factor -> type error
    bad_calls = (
        (1, foreign, 1, y, z),
        (1, y, 1, foreign, z),
        (1, y, 1, z, foreign),
        ([], x, 1, y, z),
        (1, x, [], y, z),
    )
    for args in bad_calls:
        with pytest.raises(LinearSpaceTypeError):
            fn.lincomb(*args)
def test_astype():
    """Casting between real/complex dtypes yields the matching weighted space.

    ``astype`` to the space's own dtype must return the space itself, and
    the cached ``_real_space`` / ``_complex_space`` must agree with the
    explicitly constructed counterparts.
    """
    rn = Rn(3, weight=1.5)
    cn = Cn(3, weight=1.5)
    rn_s = Rn(3, weight=1.5, dtype='float32')
    cn_s = Cn(3, weight=1.5, dtype='complex64')

    # Real space
    assert rn.astype('float32') == rn_s
    assert rn.astype('float64') is rn
    assert rn._real_space is rn
    assert rn.astype('complex64') == cn_s
    assert rn.astype('complex128') == cn
    assert rn._complex_space == cn

    # Complex space
    assert cn.astype('complex64') == cn_s
    assert cn.astype('complex128') is cn
    assert cn._complex_space is cn
    assert cn.astype('float32') == rn_s
    assert cn.astype('float64') == rn
    # BUG FIX: this previously re-asserted ``cn._complex_space is cn``;
    # the real-dtype casts above populate the *real*-space cache, which
    # is what must be checked here (mirrors the real-space branch).
    assert cn._real_space == rn
def _get_volume(self):
    """Return an Rn vector containing the 2D or 3D volume data.

    The raw ASTRA volume is fetched, scaled by ``self.scaling`` and
    flattened with ``np.ravel`` (C order).

    Returns
    -------
    :rtype: odl.space.cartesian.Rn
    :returns: Vector in Rn containing the 2D or 3D volume data.

    Raises
    ------
    Exception
        If ``geom.geom_type`` is neither a 2D nor a 3D type.
    """
    geom = self.geom
    if geom.geom_type in self.type2d:
        return Rn(geom.vol_size).element(
            self.scaling * np.ravel(astra.data2d.get(self.vol_id)))
    elif geom.geom_type in self.type3d:
        return Rn(geom.vol_size).element(
            self.scaling * np.ravel(astra.data3d.get(self.vol_id)))
    else:
        raise Exception('Unknown geometry type.')
def __init__(self, geometry_obj=None, volume_space=None,
             projections_space=None, gpu_index=0):
    """Set up ASTRA geometries and allocate volume/projection memory.

    Parameters
    ----------
    geometry_obj : Geometry, optional
        Scan geometry; a default ``Geometry()`` is created if omitted.
    volume_space : Rn, optional
        Space of volume vectors; defaults to ``Rn(geom.vol_size)`` of the
        actual geometry in use.
    projections_space : Rn, optional
        Space of projection vectors; defaults to ``Rn(geom.proj_size)``.
    gpu_index : int, optional
        Index of the GPU used by ASTRA.

    Raises
    ------
    Exception
        If the geometry type is unknown or the number of volume
        dimensions is not 2 or 3.
    """
    # BUG FIX: the previous defaults (``Geometry()`` and
    # ``Rn(Geometry().vol_size)`` in the signature) were evaluated once at
    # definition time and were based on a *default* geometry even when the
    # caller passed a different one. Build defaults per instance from the
    # geometry actually in use.
    self.geom = Geometry() if geometry_obj is None else geometry_obj
    self.vol_space = (Rn(self.geom.vol_size) if volume_space is None
                      else volume_space)
    self.proj_space = (Rn(self.geom.proj_size) if projections_space is None
                       else projections_space)
    self.gpu_index = gpu_index
    self.bp_id = None
    self.fp_id = None

    # Create volume geometry
    self.vol_geom = astra.create_vol_geom(self.geom.vol_shape)

    # Create projection geometry
    if self.geom.geom_type == 'cone':
        self.proj_geom = astra.create_proj_geom(
            self.geom.geom_type,
            self.geom.detector_spacing_x, self.geom.detector_spacing_y,
            self.geom.det_row_count, self.geom.det_col_count,
            self.geom.angles,
            self.geom.source_origin, self.geom.origin_detector)
    elif self.geom.geom_type == 'parallel':
        self.proj_geom = astra.create_proj_geom(
            'parallel',
            self.geom.detector_spacing_x,
            self.geom.det_col_count,
            self.geom.angles)
    else:
        # BUG FIX: an unknown type previously left ``self.proj_geom``
        # undefined, failing later with an obscure AttributeError.
        raise Exception('Unknown geometry type.')

    # Allocate ASTRA memory for volume data and projection data
    if self.geom.vol_ndim == 2:
        self.volume_id = astra.data2d.create('-vol', self.vol_geom)
        self.proj_id = astra.data2d.create('-sino', self.proj_geom)
    elif self.geom.vol_ndim == 3:
        self.volume_id = astra.data3d.create('-vol', self.vol_geom)
        self.proj_id = astra.data3d.create('-sino', self.proj_geom)
    else:
        raise Exception("Invalid number of dimensions 'ndim'.")

    # Rescale the discrete projector toward the continuous line integral:
    # voxel width over the number of projection angles.
    self.scal_fac = self.geom.voxel_size[0] / self.geom.angles.size
def test_adjoint_scaling_factor(self):
    """Class-computed adjoint scaling factor must match the manual one."""
    # x and y: all-ones elements of volume and projection space
    vol_space = Rn(self.geom.vol_size)
    proj_space = Rn(self.geom.proj_size)
    ones_vol = vol_space.element(1)
    ones_proj = proj_space.element(1)

    # A: the projector pair
    projector = ODLProjector(self.geom, vol_space, proj_space)
    fwd = projector.forward(ones_vol)        # A x
    bwd = projector.backward(ones_proj)      # B y (unscaled adjoint)

    # Factor for x = 1, y = 1: <A x, y> / <x, B y>
    s0 = fwd.inner(ones_proj) / ones_vol.inner(bwd)

    # Factor for x = 1, y = A x: ||A x||^2 / <B A x, x>
    bwd_fwd = projector.backward(fwd)
    s1 = fwd.norm() ** 2 / vol_space._inner(bwd_fwd, ones_vol)

    cp = self.cp_class(self.geom, self.proj_vec)
    self.assertEqual(cp.adj_scal_fac, 1)
    cp.adjoint_scaling_factor()
    s2 = cp.adj_scal_fac
    self.assertFalse(s2 == 1)
    self.assertEqual(s0, s2)

    print('Test adjoint')
    print(' Scaling factor for backprojector', s0, s1, s2)
    projector.clear_astra_memory()
def adjoint_scaling_factor(self):
    """Compute and store the scaling factor of the adjoint projector.

    Consider A x = y; the adjoint A* of A is defined through

        <A x, y>_D = <x, A* y>_I.

    Assuming A* = s B with B the ASTRA backprojector, the factor is
    estimated at x = 1, y = 1 as s = <A x, y> / <x, B y>.

    The result is stored in ``self.adj_scal_fac``; nothing is returned
    (the old docstring wrongly advertised a return value).
    """
    vol_rn = Rn(self.geom.vol_size)
    proj_rn = Rn(self.geom.proj_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn_ones = proj_rn.element(1)

    # projector = Projector(self.geom, vol_rn, proj_rn)
    projector = Projector(self.geom)
    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj_rn_ones)

    self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)
    # Alternative estimate using y = A x instead of y = 1:
    # self.adj_scal_fac = proj.norm()**2 / vol_rn.inner(vol, vol_rn_ones)
    # return proj.norm()**2 / vol_rn._inner(vol, vol_rn_ones)
    projector.clear_astra_memory()
def test_matrix_is_valid():
    """Validity works for dense matrices; sparse is not implemented."""
    space = Rn(5)
    singular = np.eye(5)
    singular[0, 0] = 0

    w_sparse = FnMatrixWeighting(_sparse_matrix(space))
    w_dense = FnMatrixWeighting(_dense_matrix(space))
    w_singular = FnMatrixWeighting(singular)

    # Sparse validity testing is explicitly unsupported
    with pytest.raises(NotImplementedError):
        w_sparse.is_valid()
    assert w_dense.is_valid()
    assert not w_singular.is_valid()
def test_vector_equals():
    """Equality of vector weightings compares data, container and exponent."""
    space = Rn(5)
    arr = _pos_array(space)
    elem = space.element(arr)

    w_arr = FnVectorWeighting(arr)
    w_arr2 = FnVectorWeighting(arr)
    w_elem = FnVectorWeighting(elem)
    w_elem2 = FnVectorWeighting(elem)
    w_other_data = FnVectorWeighting(arr - 1)
    w_other_exp = FnVectorWeighting(arr - 1, exponent=1)

    assert w_arr == w_arr2
    assert w_arr != w_elem        # ndarray vs. space element
    assert w_elem == w_elem2
    assert w_arr != w_other_data
    assert w_arr != w_other_exp
def setUp(self):
    """Load a CT data set and build geometry, projection vector and solver."""
    # Timing
    self.start_time = time.time()

    # Data
    d = ctdata.sets[14]
    # d.normalize = 10000
    d.load()
    det_row_count, num_proj, det_col_count = d.shape

    # Voxel size so the cubic ROI spans the detector columns
    voxel_size_mm = 2 * d.roi_cubic_width_mm / det_col_count
    self.geom = Geometry(
        volume_shape=(det_col_count, det_col_count, det_row_count),
        det_row_count=det_row_count,
        det_col_count=det_col_count,
        angles=d.angles_rad,
        source_origin=d.distance_source_origin_mm / voxel_size_mm,
        origin_detector=d.distance_origin_detector_mm / voxel_size_mm,
        det_col_spacing=d.detector_width_mm/det_col_count/voxel_size_mm,
        det_row_spacing=d.detector_width_mm/det_row_count/voxel_size_mm,
        voxel_size=voxel_size_mm
    )
    self.voxel_size = voxel_size_mm

    # Rn vector with projection data, rescaled to meters
    self.proj_vec = Rn(self.geom.proj_size).element(
        d.projections.ravel() * (voxel_size_mm * 1e-3))

    # Class under test and Lipschitz constant estimate
    self.cp_class = ODLChambollePock
    # self.L = 271.47  # for data set 13 before projector rescaling
    self.L = 1.5  # TV for data set 13

    print('Set up unit test')
    print(' Data set:', d.filename)
    print(' Raw data: min, max, mean = ', d.raw_data_min,
          d.raw_data_max, d.raw_data_mean)
    print(' g: min: %g, max: %g' % (self.proj_vec.data.min(),
                                    self.proj_vec.data.max()))
    print(' Voxel size:', voxel_size_mm)
    # BUG FIX: message previously read "Dector pixels"
    print(' Detector pixels:', self.geom.det_col_count,
          self.geom.det_row_count)
    # BUG FIX: previously printed detector_spacing_x twice; report both
    # the x and y relative pixel sizes.
    print(' Rel. pixel size:', self.geom.detector_spacing_x,
          self.geom.detector_spacing_y)
def _store_volume(self, rn_vector=None):
    """Store volume data of an Rn vector in ASTRA memory.

    Parameters
    ----------
    :type rn_vector: odl.space.cartesian.Rn element
    :param rn_vector: Vector in Rn containing 2D or 3D volume data; its
        size must match ``geom.vol_shape``.

    Raises
    ------
    ValueError
        If ``rn_vector`` is not provided.
    Exception
        If the geometry type is unknown.
    """
    # BUG FIX: the old default ``Rn(1).element(1)`` was created once at
    # function-definition time and could never be reshaped to
    # ``vol_shape``; require an explicit argument instead.
    if rn_vector is None:
        raise ValueError("'rn_vector' must be provided.")
    geom = self.geom
    if geom.geom_type in self.type2d:
        astra.data2d.store(self.vol_id,
                           rn_vector.data.reshape(geom.vol_shape))
    elif geom.geom_type in self.type3d:
        astra.data3d.store(self.vol_id,
                           rn_vector.data.reshape(geom.vol_shape))
    else:
        raise Exception('Unknown geometry type.')
def test_vector_equiv():
    """`equiv` recognizes equivalent vector/matrix/constant weightings."""
    space = Rn(5)
    arr = _pos_array(space)

    w_arr = FnVectorWeighting(arr)
    w_elem = FnVectorWeighting(space.element(arr))
    w_diag = FnMatrixWeighting(arr * np.eye(5))
    w_other = FnVectorWeighting(arr - 1)

    # Equal data -> True, regardless of container
    assert w_arr.equiv(w_arr)
    assert w_arr.equiv(w_elem)
    # Equivalent diagonal matrix -> True
    assert w_arr.equiv(w_diag)
    # Different data -> False
    assert not w_arr.equiv(w_other)

    # Shortcut paths against constant weightings
    w_const_vec = FnVectorWeighting(np.ones(5) * 1.5)
    assert w_const_vec.equiv(FnConstWeighting(1.5))
    assert not w_const_vec.equiv(FnConstWeighting(1))
    assert not w_const_vec.equiv(FnConstWeighting(1.5, exponent=1))

    # Bogus (non-weighting) input is never equivalent
    assert not w_const_vec.equiv(True)
    assert not w_const_vec.equiv(object)
    assert not w_const_vec.equiv(None)
def _store_projections(self, rn_vector=None):
    """Store projection data of an Rn vector in ASTRA memory.

    2D data is reshaped to ``(angles, det_cols)``; 3D data to
    ``(det_rows, angles, det_cols)``.

    Parameters
    ----------
    :type rn_vector: odl.space.cartesian.Rn element
    :param rn_vector: Vector in Rn containing 2D or 3D projection data.

    Raises
    ------
    ValueError
        If ``rn_vector`` is not provided.
    Exception
        If the geometry type is unknown.
    """
    # BUG FIX: the old default ``Rn(1).element(1)`` was created once at
    # function-definition time and could never match the projection
    # shape; require an explicit argument instead.
    if rn_vector is None:
        raise ValueError("'rn_vector' must be provided.")
    geom = self.geom
    if geom.geom_type in self.type2d:
        astra.data2d.store(
            self.proj_id,
            rn_vector.data.reshape(geom.angles.size, geom.det_col_count))
    elif geom.geom_type in self.type3d:
        astra.data3d.store(
            self.proj_id,
            rn_vector.data.reshape(geom.det_row_count, geom.angles.size,
                                   geom.det_col_count))
    else:
        raise Exception('Unknown geometry type.')
def test_matvec_init(fn):
    """Construction of `MatVecOperator` from various matrix inputs.

    Checks default domain/range inference from the matrix dtype, shape
    validation for rectangular matrices, and casting rules between real
    and complex spaces.
    """
    # Square matrices, sparse and dense
    MatVecOperator(_sparse_matrix(fn), fn, fn)
    MatVecOperator(_dense_matrix(fn), fn, fn)

    # Test defaults: spaces inferred from the matrix dtype.
    # BUG FIX: the range assertions below previously re-checked
    # ``op.domain`` (copy-paste); they now actually test ``op.range``.
    op_float = MatVecOperator([[1.0, 2], [-1, 0.5]])
    assert isinstance(op_float.domain, Fn)
    assert op_float.domain.is_rn
    assert isinstance(op_float.range, Fn)
    assert op_float.range.is_rn

    op_complex = MatVecOperator([[1.0, 2 + 1j], [-1 - 1j, 0.5]])
    assert isinstance(op_complex.domain, Fn)
    assert op_complex.domain.is_cn
    assert isinstance(op_complex.range, Fn)
    assert op_complex.range.is_cn

    op_int = MatVecOperator([[1, 2], [-1, 0]])
    assert isinstance(op_int.domain, Fn)
    assert op_int.domain.dtype == int
    assert isinstance(op_int.range, Fn)
    assert op_int.range.dtype == int

    # Rectangular: domain/range sizes must match the matrix shape
    rect_mat = 2 * np.eye(2, 3)
    r2 = Rn(2)
    r3 = Rn(3)
    MatVecOperator(rect_mat, r3, r2)
    for bad_dom, bad_ran in ((r2, r2), (r3, r3), (r2, r3)):
        with pytest.raises(ValueError):
            MatVecOperator(rect_mat, bad_dom, bad_ran)

    # Rn -> Cn is a safe cast; Cn -> Rn is not
    MatVecOperator(rect_mat, r3, Cn(2))
    with pytest.raises(TypeError):
        MatVecOperator(rect_mat, Cn(3), r2)

    # Complex matrix between real spaces is rejected
    with pytest.raises(TypeError):
        MatVecOperator(rect_mat + 1j, r3, r2)

    # Init with array-like structure (including numpy.matrix)
    MatVecOperator(rect_mat.tolist(), r3, r2)
    MatVecOperator(np.asmatrix(rect_mat), r3, r2)
def matrix_norm(self, iterations, vol_init=1.0, tv_norm=False,
                return_volume=False, intermediate_results=False):
    """The matrix norm ||K||_2 of 'K', defined here as the largest
    singular value of 'K'.

    Employs the generic power method to obtain a scalar 's' which tends
    to ||K||_2 as the number of iterations increases.

    Parameters
    ----------
    :type iterations: int
    :param iterations: Number of iterations of the generic power method.
    :type vol_init: float | ndarray (default 1.0)
    :param vol_init: Initial image in I to start the iteration with.
    :type tv_norm: bool
    :param tv_norm: Use K = (A, grad) instead of K = A.
    :type intermediate_results: bool
    :param intermediate_results: Return the per-iteration estimates
        instead of a single scalar.
    :type return_volume: bool
    :param return_volume: Also return the final volume, so the iteration
        can be resumed by passing it as ``vol_init``.

    Returns
    -------
    :rtype: float | numpy.ndarray, numpy.ndarray (optional)
    :returns: s, vol
        s: scalar of the final iteration, or array of all intermediate
        results. vol: the final volume data (only if requested).
    """
    geom = self.geom
    vol = self.recon_space.element(vol_init)
    proj = Rn(geom.proj_size).zero()
    # projector = Projector(geom, vol.space, proj.space)
    projector = Projector(geom)

    tmp = None
    if intermediate_results:
        s = np.zeros(iterations)
    else:
        s = 0

    # Power method loop
    for n in range(iterations):
        # step 4: x_{n+1} <- K^T K x_n
        if tv_norm:
            # K = (A, grad) instead of K = A: compute -div grad x_n
            # as a sum over a generator expression.
            # NOTE(review): ``partial`` appears to be a finite-difference
            # derivative helper (array, axis, step) — not
            # functools.partial; confirm against its definition.
            tmp = -reduce(add, (partial(
                partial(vol.data.reshape(geom.vol_shape), dim,
                        geom.voxel_width[dim]), dim,
                geom.voxel_width[dim])
                for dim in range(geom.vol_ndim)))
        # x_n <- A^T (A x_n), rescaled by the adjoint factor
        vol = projector.backward(projector.forward(vol))
        vol *= self.adj_scal_fac
        if tv_norm:
            # x_n <- x_n - div grad x_n
            vol.data[:] += tmp.ravel()
        # step 5: x_n <- x_n / ||x_n||_2
        vol /= vol.norm()
        # step 6: s_n <- ||K x||_2
        if intermediate_results:
            # proj <- A x_n
            proj = projector.forward(vol)
            s[n] = proj.norm()
            if tv_norm:
                # Include the gradient component of K in the norm
                s[n] = np.sqrt(s[n] ** 2 + reduce(add, (np.linalg.norm(
                    partial(vol.data.reshape(geom.vol_shape), dim,
                            geom.voxel_width[dim])) ** 2
                    for dim in range(geom.vol_ndim))))

    # step 6: final ||K x||_2 when no intermediate results were kept
    if not intermediate_results:
        proj = projector.forward(vol)
        s = proj.norm()
        if tv_norm:
            s = np.sqrt(s ** 2 + reduce(add, (np.linalg.norm(
                partial(vol.data.reshape(geom.vol_shape), dim,
                        geom.voxel_width[dim])) ** 2
                for dim in range(geom.vol_ndim))))

    # Clear ASTRA memory
    projector.clear_astra_memory()

    # Returns
    if not return_volume:
        return s
    else:
        return s, vol.data
def test_least_squares_method(self):
    """The least-squares solver runs for a few iterations without error."""
    geom = Geometry(2)
    data = Rn(geom.proj_size).element(1)
    solver = ODLChambollePock(geom, data)
    num_iter = 3
    solver.least_squares(num_iter, verbose=False)
def test_vector_init(exponent):
    """Vector weightings accept both ndarrays and space elements."""
    space = Rn(5)
    arr = _pos_array(space)
    FnVectorWeighting(arr, exponent=exponent)
    FnVectorWeighting(space.element(arr), exponent=exponent)
    def run_tests(self):
        """Run all tests on this space."""
        print('\n== RUNNING ALL TESTS ==\n')
        print('Space = {}'.format(self.space))
        self.field()
        self.element()
        self.linearity()
        # NOTE(review): element() is invoked a second time here —
        # possibly an intentional re-check after linearity; confirm.
        self.element()
        self.inner()
        self.norm()
        self.dist()
        self.multiply()
        self.equals()
        self.contains()
        self.vector()

    def __str__(self):
        """Return ``str(self)``."""
        return 'SpaceTest({})'.format(self.space)

    def __repr__(self):
        """Return ``repr(self)``."""
        return 'SpaceTest({!r})'.format(self.space)


# Run the diagnostics on two example spaces when executed as a script
if __name__ == '__main__':
    from odl import Rn, uniform_discr
    SpaceTest(Rn(10)).run_tests()
    SpaceTest(uniform_discr([0, 0], [1, 1], [5, 5])).run_tests()
def test_reductions():
    """Yield one reduction-test case per entry in REDUCTIONS."""
    space = Rn(3)
    for reduction_name, _ in REDUCTIONS:
        yield _impl_test_reduction, space, reduction_name
    def __init__(self, n):
        # Operator maps Rn(n) to itself: domain and range share the size n.
        self._domain = Rn(n)
        self._range = Rn(n)
                   copy=False)
    # Make symmetric and positive definite
    return mat + mat.conj().T + fn.size * np.eye(fn.size, dtype=fn.dtype)


def _sparse_matrix(fn):
    """Create a sparse positive definite Hermitian matrix for `fn`."""
    return sp.sparse.coo_matrix(_dense_matrix(fn))


# Pytest fixtures
# Simply modify spc_params to modify the fixture
spc_params = [Rn(10, np.float64), Rn(10, np.float32),
              Cn(10, np.complex128), Cn(10, np.complex64),
              Rn(100)]
spc_ids = [' {!r} '.format(spc) for spc in spc_params]
spc_fixture = pytest.fixture(scope="module", ids=spc_ids,
                             params=spc_params)


@spc_fixture
def fn(request):
    """Return the space instance for the current fixture parameter."""
    return request.param


# Simply modify exp_params to modify the fixture