Example #1
File: ntuples_test.py Project: rajmund/odl
def test_matvec_adjoint(fn):
    # Square cases
    sparse_mat = _sparse_matrix(fn)
    dense_mat = _dense_matrix(fn)

    op_sparse = MatVecOperator(sparse_mat, fn, fn)
    op_dense = MatVecOperator(dense_mat, fn, fn)

    # Just test if it runs, nothing interesting to test here
    op_sparse.adjoint
    op_dense.adjoint

    # Rectangular case
    rect_mat = 2 * np.eye(2, 3)
    r2, r3 = Rn(2), Rn(3)
    c2 = Cn(2)

    op = MatVecOperator(rect_mat, r3, r2)
    op_adj = op.adjoint
    assert op_adj.domain == op.range
    assert op_adj.range == op.domain
    assert np.array_equal(op_adj.matrix, op.matrix.conj().T)
    assert np.array_equal(op_adj.adjoint.matrix, op.matrix)

    # The operator Rn -> Cn has no adjoint
    op_noadj = MatVecOperator(rect_mat, r3, c2)
    with pytest.raises(NotImplementedError):
        op_noadj.adjoint
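The rectangular-case assertions above rest on the defining identity of the adjoint, <A x, y> = <x, A^H y>. As a minimal, self-contained check of that identity with plain NumPy (no ODL; the same 2 x 3 matrix, vectors chosen arbitrarily):

import numpy as np

rect_mat = 2 * np.eye(2, 3)                # maps R^3 -> R^2
x = np.arange(3, dtype=float)              # element of the domain R^3
y = np.array([1.0, -2.0])                  # element of the range R^2

lhs = np.dot(rect_mat.dot(x), y)           # <A x, y> in R^2
rhs = np.dot(x, rect_mat.conj().T.dot(y))  # <x, A^H y> in R^3
assert np.isclose(lhs, rhs)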
Example #2
File: pyastra.py Project: moosmann/astra
    def __init__(self,
                 geometry_obj=Geometry(1),
                 vol_vector=None,
                 proj_vector=None,
                 gpu_index=0):
        self.geom = geometry_obj
        if vol_vector is None:
            self.vol = Rn(self.geom.vol_size)
        else:
            self.vol = vol_vector
        if proj_vector is None:
            self.proj = Rn(self.geom.proj_size)
        else:
            self.proj = proj_vector
        self.gpu_index = gpu_index
        self.bp_id = None
        self.fp_id = None

        # Create volume geometry
        self.vol_geom = astra.create_vol_geom(self.geom.vol_shape)

        # Create projection geometry
        self.proj_geom = astra.create_proj_geom(
            self.geom.geom_type, self.geom.detector_spacing_x,
            self.geom.detector_spacing_y, self.geom.det_row_count,
            self.geom.det_col_count, self.geom.angles, self.geom.source_origin,
            self.geom.origin_detector)

        # Allocate ASTRA memory for volume data
        self.volume_id = astra.data3d.create('-vol', self.vol_geom)

        # Allocate ASTRA memory for projection data
        self.proj_id = astra.data3d.create('-sino', self.proj_geom)
Example #3
File: pyastra.py Project: moosmann/astra
 def __init__(self,
              geometry_type='cone',
              num_voxel=(100, 100, 100),
              det_row_count=100,
              det_col_count=100,
              angles=np.linspace(0, 2 * np.pi, 180, endpoint=False),
              det_col_spacing=1.0,
              det_row_spacing=1.0,
              source_origin=100.0,
              origin_detector=10.0,
              alg_string='FP3D_CUDA',
              gpu_index=0):
     self.geometry_type = geometry_type
     self.num_voxel = num_voxel
     self._domain = Rn(np.prod(num_voxel))
     self._range = Rn(det_col_count * det_row_count * np.size(angles))
     self.detector_spacing_x = det_col_spacing
     self.detector_spacing_y = det_row_spacing
     self.det_row_count = det_row_count
     self.det_col_count = det_col_count
     self.angles = angles
     self.source_origin = source_origin
     self.origin_detector = origin_detector
     self.alg_string = alg_string
     self.gpu_index = gpu_index
     self._adjoint = BackwardProjector(geometry_type=geometry_type,
                                       num_voxel=num_voxel,
                                       det_row_count=det_row_count,
                                       det_col_count=det_col_count,
                                       angles=angles,
                                       det_col_spacing=det_col_spacing,
                                       det_row_spacing=det_row_spacing,
                                       source_origin=source_origin,
                                       origin_detector=origin_detector,
                                       gpu_index=gpu_index)
Example #4
File: ntuples_test.py Project: wjp/odl
def _test_getslice(slice):
    # Validate get against python list behaviour
    r6 = Rn(6)
    y = [0, 1, 2, 3, 4, 5]
    x = r6.element(y)

    assert all_equal(x[slice].data, y[slice])
Example #5
File: ntuples_test.py Project: rajmund/odl
def _test_getslice(slice):
    # Validate get against python list behaviour
    r6 = Rn(6)
    y = [0, 1, 2, 3, 4, 5]
    x = r6.element(y)

    assert all_equal(x[slice].data, y[slice])
Example #6
    def adjoint_scaling_factor(self):
        """Compute scaling factor of adjoint projector. Consider A x = y,
        the adjoint A* of A is defined as:

             <A x, y>_D = <x, A* y>_I

         Assume A* = s B with B being the ASTRA backprojector, then:

             s = <A x, A x> / <B A x, x>

        Returns
        -------
        :rtype: float
        :returns: s
        """

        vol_rn = Rn(self.geom.vol_size)
        proj_rn = Rn(self.geom.proj_size)

        vol_rn_ones = vol_rn.element(1)
        proj_rn_ones = proj_rn.element(1)

        projector = ODLProjector(self.geom, vol_rn, proj_rn)

        proj = projector.forward(vol_rn_ones)
        vol = projector.backward(proj_rn_ones)

        # print vol.data.min(), vol.data.max()
        # print proj.data.min(), proj.data.max()

        self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)
        # self.adj_scal_fac = proj.norm()**2 / vol_rn.inner(vol, vol_rn_ones)
        # return proj.norm()**2 / vol_rn._inner(vol, vol_rn_ones)

        projector.clear_astra_memory()
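The formula s = <A x, A x> / <B A x, x> in the docstring can be sanity-checked on a small dense problem where the true adjoint is known. A sketch with a toy NumPy matrix, assuming (purely for illustration) that the un-scaled backprojector B differs from A^T by a constant factor:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))      # toy forward projector R^3 -> R^4
B = 2.5 * A.T                        # assumed backprojector, off by a constant

x = np.ones(3)                       # x = ones(), as in the method above
Ax = A.dot(x)
s = Ax.dot(Ax) / B.dot(Ax).dot(x)    # s = <A x, A x> / <B A x, x>

# With B = 2.5 * A^T the recovered factor is 1 / 2.5, so s * B equals A^T
assert np.isclose(s, 1 / 2.5)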
Example #7
 def __init__(self,
              geometry=Geometry(),
              projections_vector=Rn(Geometry().proj_size).zero()):
     self.geom = geometry
     self.proj = projections_vector
     self.recon_space = Rn(geometry.vol_size)
     self.adj_scal_fac = 1
     self.forward_proj_scal = 1
Example #8
File: ntuples_test.py Project: rajmund/odl
def test_norm_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.norm(otherx)
Example #9
File: ntuples_test.py Project: wjp/odl
def test_norm_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.norm(otherx)
Example #10
File: ntuples_test.py Project: rajmund/odl
def _test_setslice(slice):
    # Validate set against python list behaviour
    r6 = Rn(6)
    z = [7, 8, 9, 10, 11, 10]
    y = [0, 1, 2, 3, 4, 5]
    x = r6.element(y)

    x[slice] = z[slice]
    y[slice] = z[slice]
    assert all_equal(x, y)
Example #11
File: ntuples_test.py Project: rajmund/odl
def test_vector_vector():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)

    weighting_vec = FnVectorWeighting(weight_vec)
    weighting_elem = FnVectorWeighting(weight_elem)

    assert isinstance(weighting_vec.vector, np.ndarray)
    assert isinstance(weighting_elem.vector, FnVector)
Example #12
File: ntuples_test.py Project: wjp/odl
def _test_setslice(slice):
    # Validate set against python list behaviour
    r6 = Rn(6)
    z = [7, 8, 9, 10, 11, 10]
    y = [0, 1, 2, 3, 4, 5]
    x = r6.element(y)

    x[slice] = z[slice]
    y[slice] = z[slice]
    assert all_equal(x, y)
Example #13
File: ntuples_test.py Project: wjp/odl
def test_vector_vector():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)

    weighting_vec = FnVectorWeighting(weight_vec)
    weighting_elem = FnVectorWeighting(weight_elem)

    assert isinstance(weighting_vec.vector, np.ndarray)
    assert isinstance(weighting_elem.vector, FnVector)
Example #14
    def test_creation_of_vector_in_rn(self):

        geom = Geometry(2)

        rn = Rn(geom.proj_size)
        self.assertEqual(type(rn).__name__, 'Rn')
        rn_vec = rn.element(np.zeros(geom.proj_size))
        self.assertEqual(type(rn_vec).__name__, 'Vector')
        self.assertEqual(rn.dtype, 'float')
        self.assertEqual(rn.field, odl.RealNumbers())

        ODLChambollePock(geom)
Example #15
File: ntuples_test.py Project: rajmund/odl
def test_init():
    # Test run
    Ntuples(3, int)
    Ntuples(3, float)
    Ntuples(3, complex)
    Ntuples(3, 'S1')

    # Fn
    Fn(3, int)
    Fn(3, float)
    Fn(3, complex)

    # Fn only works on scalars
    with pytest.raises(TypeError):
        Fn(3, 'S1')

    # Rn
    Rn(3, float)

    # Rn only works on reals
    with pytest.raises(TypeError):
        Rn(3, complex)
    with pytest.raises(TypeError):
        Rn(3, 'S1')
    with pytest.raises(TypeError):
        Rn(3, int)

    # Cn
    Cn(3, complex)

    # Cn only works on complex
    with pytest.raises(TypeError):
        Cn(3, float)
    with pytest.raises(TypeError):
        Cn(3, 'S1')

    # Backported int from future fails (not recognized by numpy.dtype())
    # (Python 2 only)
    from builtins import int as future_int
    import sys
    if sys.version_info.major != 3:
        with pytest.raises(TypeError):
            Fn(3, future_int)

    # Init with weights or custom space functions
    const = 1.5
    weight_vec = _pos_array(Rn(3, float))
    weight_mat = _dense_matrix(Rn(3, float))

    Rn(3, weight=const)
    Rn(3, weight=weight_vec)
    Rn(3, weight=weight_mat)

    # Different exponents
    exponents = [0.5, 1.0, 2.0, 5.0, float('inf')]
    for exponent in exponents:
        Cn(3, exponent=exponent)
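The weight arguments accepted above change the space's inner product. A rough NumPy illustration of what a constant and a vector weight mean for <x, y>, using the usual weighted Euclidean products (whether Rn follows exactly these conventions should be checked against the ODL documentation):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 5.0, 6.0])

const = 1.5
weight_vec = np.array([0.5, 1.0, 2.0])

inner_const = const * np.dot(x, y)     # constant weighting: c * sum(x * y)
inner_vec = np.dot(weight_vec * x, y)  # vector weighting: sum(w * x * y)

assert np.isclose(inner_const, 1.5 * 32.0)
assert np.isclose(inner_vec, 2.0 + 10.0 + 36.0)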
Example #16
File: ntuples_test.py Project: wjp/odl
def test_multiply_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()
    x, y = fn.zero(), fn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(otherx, x, y)

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(x, otherx, y)

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(x, y, otherx)
Example #17
File: ntuples_test.py Project: rajmund/odl
def test_multiply_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()
    x, y = fn.zero(), fn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(otherx, x, y)

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(x, otherx, y)

    with pytest.raises(LinearSpaceTypeError):
        fn.multiply(x, y, otherx)
Example #18
 def __init__(self, geometry=Geometry(),
              projections_vector=Rn(Geometry().proj_size).zero()):
     self.geom = geometry
     self.proj = projections_vector
     self.recon_space = Rn(geometry.vol_size)
     self.adj_scal_fac = 1
     self.forward_proj_scal = 1
Example #19
 def test_tv(self):
     geom = Geometry(2)
     proj_vec = Rn(geom.proj_size).element(1)
     cp = ODLChambollePock(geom, proj_vec)
     cp.least_squares(3, L=131.0, non_negativiy_constraint=False,
                      tv_norm=1,
                      verbose=False)
Example #20
File: ntuples_test.py Project: rajmund/odl
def test_pnorm(exponent):
    for fn in (Rn(3, exponent=exponent), Cn(3, exponent=exponent)):
        xarr, x = example_vectors(fn)
        correct_norm = np.linalg.norm(xarr, ord=exponent)

        assert almost_equal(fn.norm(x), correct_norm)
        assert almost_equal(x.norm(), correct_norm)
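The test compares the space norm against np.linalg.norm with ord=exponent. A quick standalone reminder of how that p-norm behaves for typical exponents (1, 2, inf) on an arbitrary vector:

import numpy as np

xarr = np.array([3.0, -4.0, 0.0])

assert np.isclose(np.linalg.norm(xarr, ord=1), 7.0)       # sum of |x_i|
assert np.isclose(np.linalg.norm(xarr, ord=2), 5.0)       # Euclidean norm
assert np.isclose(np.linalg.norm(xarr, ord=np.inf), 4.0)  # max |x_i|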
Example #21
File: ntuples_test.py Project: wjp/odl
def test_matvec_call(fn):
    # Square cases
    sparse_mat = _sparse_matrix(fn)
    dense_mat = _dense_matrix(fn)
    xarr, x = _vectors(fn)

    op_sparse = MatVecOperator(sparse_mat, fn, fn)
    op_dense = MatVecOperator(dense_mat, fn, fn)

    yarr_sparse = sparse_mat.dot(xarr)
    yarr_dense = dense_mat.dot(xarr)

    # Out-of-place
    y = op_sparse(x)
    assert all_almost_equal(y, yarr_sparse)

    y = op_dense(x)
    assert all_almost_equal(y, yarr_dense)

    # In-place
    y = fn.element()
    op_sparse(x, out=y)
    assert all_almost_equal(y, yarr_sparse)

    y = fn.element()
    op_dense(x, out=y)
    assert all_almost_equal(y, yarr_dense)

    # Rectangular case
    rect_mat = 2 * np.eye(2, 3)
    r2, r3 = Rn(2), Rn(3)

    op = MatVecOperator(rect_mat, r3, r2)
    xarr = np.arange(3, dtype=float)
    x = r3.element(xarr)

    yarr = rect_mat.dot(xarr)

    # Out-of-place
    y = op(x)
    assert all_almost_equal(y, yarr)

    # In-place
    y = r2.element()
    op(x, out=y)
    assert all_almost_equal(y, yarr)
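The out-of-place / in-place pattern above has a direct NumPy analogue: a plain product allocates a new result array, while passing out= writes into a preallocated one. A small sketch without ODL:

import numpy as np

rect_mat = 2 * np.eye(2, 3)
x = np.arange(3, dtype=float)

# Out-of-place: a new result array is allocated
y_new = rect_mat.dot(x)

# In-place: the result is written into a preallocated buffer
y_out = np.empty(2)
np.matmul(rect_mat, x, out=y_out)

assert np.array_equal(y_new, y_out)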
Example #22
File: ntuples_test.py Project: rajmund/odl
def test_matvec_call(fn):
    # Square cases
    sparse_mat = _sparse_matrix(fn)
    dense_mat = _dense_matrix(fn)
    xarr, x = example_vectors(fn)

    op_sparse = MatVecOperator(sparse_mat, fn, fn)
    op_dense = MatVecOperator(dense_mat, fn, fn)

    yarr_sparse = sparse_mat.dot(xarr)
    yarr_dense = dense_mat.dot(xarr)

    # Out-of-place
    y = op_sparse(x)
    assert all_almost_equal(y, yarr_sparse)

    y = op_dense(x)
    assert all_almost_equal(y, yarr_dense)

    # In-place
    y = fn.element()
    op_sparse(x, out=y)
    assert all_almost_equal(y, yarr_sparse)

    y = fn.element()
    op_dense(x, out=y)
    assert all_almost_equal(y, yarr_dense)

    # Rectangular case
    rect_mat = 2 * np.eye(2, 3)
    r2, r3 = Rn(2), Rn(3)

    op = MatVecOperator(rect_mat, r3, r2)
    xarr = np.arange(3, dtype=float)
    x = r3.element(xarr)

    yarr = rect_mat.dot(xarr)

    # Out-of-place
    y = op(x)
    assert all_almost_equal(y, yarr)

    # In-place
    y = r2.element()
    op(x, out=y)
    assert all_almost_equal(y, yarr)
Example #23
File: ntuples_test.py Project: rajmund/odl
def test_vector_equals():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)

    weighting_vec = FnVectorWeighting(weight_vec)
    weighting_vec2 = FnVectorWeighting(weight_vec)
    weighting_elem = FnVectorWeighting(weight_elem)
    weighting_elem2 = FnVectorWeighting(weight_elem)
    weighting_other_vec = FnVectorWeighting(weight_vec - 1)
    weighting_other_exp = FnVectorWeighting(weight_vec - 1, exponent=1)

    assert weighting_vec == weighting_vec2
    assert weighting_vec != weighting_elem
    assert weighting_elem == weighting_elem2
    assert weighting_vec != weighting_other_vec
    assert weighting_vec != weighting_other_exp
Example #24
File: ntuples_test.py Project: rajmund/odl
def test_pdist(exponent):
    for fn in (Rn(3, exponent=exponent), Cn(3, exponent=exponent)):
        [xarr, yarr], [x, y] = example_vectors(fn, n=2)

        correct_dist = np.linalg.norm(xarr - yarr, ord=exponent)

        assert almost_equal(fn.dist(x, y), correct_dist)
        assert almost_equal(x.dist(y), correct_dist)
Example #25
File: ntuples_test.py Project: wjp/odl
def test_vector_equals():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)

    weighting_vec = FnVectorWeighting(weight_vec)
    weighting_vec2 = FnVectorWeighting(weight_vec)
    weighting_elem = FnVectorWeighting(weight_elem)
    weighting_elem2 = FnVectorWeighting(weight_elem)
    weighting_other_vec = FnVectorWeighting(weight_vec - 1)
    weighting_other_exp = FnVectorWeighting(weight_vec - 1, exponent=1)

    assert weighting_vec == weighting_vec2
    assert weighting_vec != weighting_elem
    assert weighting_elem == weighting_elem2
    assert weighting_vec != weighting_other_vec
    assert weighting_vec != weighting_other_exp
Example #26
File: ntuples_test.py Project: rajmund/odl
def test_init_weighting(exponent):
    const = 1.5
    weight_vec = _pos_array(Rn(3, float))
    weight_mat = _dense_matrix(Rn(3, float))

    spaces = [
        Fn(3, complex, exponent=exponent, weight=const),
        Fn(3, complex, exponent=exponent, weight=weight_vec),
        Fn(3, complex, exponent=exponent, weight=weight_mat)
    ]
    weightings = [
        FnConstWeighting(const, exponent=exponent),
        FnVectorWeighting(weight_vec, exponent=exponent),
        FnMatrixWeighting(weight_mat, exponent=exponent)
    ]

    for spc, weight in zip(spaces, weightings):
        assert spc.weighting == weight
Example #27
File: ntuples_test.py Project: rajmund/odl
def test_matrix_matrix():
    fn = Rn(5)
    sparse_mat = _sparse_matrix(fn)
    dense_mat = _dense_matrix(fn)

    w_sparse = FnMatrixWeighting(sparse_mat)
    w_dense = FnMatrixWeighting(dense_mat)

    assert isinstance(w_sparse.matrix, sp.sparse.spmatrix)
    assert isinstance(w_dense.matrix, np.ndarray)
Example #28
    def test_matrix_norm(self):
        """Compute matrix norm of forward/backward projector using power
        norm. """

        geom = Geometry(2)
        proj_vec = Rn(geom.proj_size).element(1)

        # Compute norm for simple least squares
        cp = ODLChambollePock(geom, proj_vec)
        self.assertEqual(cp.adj_scal_fac, 1)
        mat_norm0 = cp.matrix_norm(iterations=4,
                                   vol_init=1,
                                   intermediate_results=True)
        self.assertTrue(mat_norm0[-1] > 0)

        # Resume computation
        mat_norm1, vol = cp.matrix_norm(iterations=3,
                                        vol_init=1, intermediate_results=True,
                                        return_volume=True)
        mat_norm2 = cp.matrix_norm(iterations=4, vol_init=vol,
                                   intermediate_results=True)
        self.assertNotEqual(mat_norm0[0], mat_norm2[0])

        self.assertEqual(mat_norm0[3], mat_norm2[0])

        # Compute norm for TV
        mat_norm3 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=True,
                                   intermediate_results=True)

        self.assertFalse(np.array_equal(mat_norm2, mat_norm3))
        print('LS unit init volume:', mat_norm2)
        print('TV unit init volume:', mat_norm3)

        # Use non-homogeneous initial volume
        v0 = np.random.rand(geom.vol_size)
        mat_norm4 = cp.matrix_norm(iterations=4, vol_init=v0, tv_norm=False,
                                   intermediate_results=True)
        mat_norm5 = cp.matrix_norm(iterations=4, vol_init=v0, tv_norm=True,
                                   intermediate_results=True)
        print('LS random init volume:', mat_norm4)
        print('TV random init volume:', mat_norm5)

        # test with adjoint scaling factor for backprojector
        self.assertEqual(cp.adj_scal_fac, 1)
        cp.adjoint_scaling_factor()
        self.assertFalse(cp.adj_scal_fac == 1)
        print('adjoint scaling factor:', cp.adj_scal_fac)

        mat_norm6 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=False,
                                   intermediate_results=True)
        mat_norm7 = cp.matrix_norm(iterations=4, vol_init=1, tv_norm=True,
                                   intermediate_results=True)

        print('LS init volume, adjoint rescaled:', mat_norm6)
        print('TV init volume, adjoint rescaled:', mat_norm7)
Example #29
File: ntuples_test.py Project: rajmund/odl
def test_matvec_simple_properties():
    # Matrix is always an ndarray for dense input, a scipy.sparse.spmatrix otherwise
    rect_mat = 2 * np.eye(2, 3)
    r2 = Rn(2)
    r3 = Rn(3)

    op = MatVecOperator(rect_mat, r3, r2)
    assert isinstance(op.matrix, np.ndarray)

    op = MatVecOperator(np.asmatrix(rect_mat), r3, r2)
    assert isinstance(op.matrix, np.ndarray)

    op = MatVecOperator(rect_mat.tolist(), r3, r2)
    assert isinstance(op.matrix, np.ndarray)
    assert not op.matrix_issparse

    sparse_mat = _sparse_matrix(Rn(5))
    op = MatVecOperator(sparse_mat, Rn(5), Rn(5))
    assert isinstance(op.matrix, sp.sparse.spmatrix)
    assert op.matrix_issparse
Example #30
File: ntuples_test.py Project: rajmund/odl
def test_vector_is_valid():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weighting_vec = FnVectorWeighting(weight_vec)

    assert weighting_vec.is_valid()

    # Invalid
    weight_vec[0] = 0
    weighting_vec = FnVectorWeighting(weight_vec)
    assert not weighting_vec.is_valid()
Example #31
File: ntuples_test.py Project: rajmund/odl
def test_matrix_equiv():
    fn = Rn(5)
    sparse_mat = _sparse_matrix(fn)
    sparse_mat_as_dense = sparse_mat.todense()
    dense_mat = _dense_matrix(fn)
    different_dense_mat = dense_mat.copy()
    different_dense_mat[0, 0] = -10

    w_sparse = FnMatrixWeighting(sparse_mat)
    w_sparse2 = FnMatrixWeighting(sparse_mat)
    w_sparse_as_dense = FnMatrixWeighting(sparse_mat_as_dense)
    w_dense = FnMatrixWeighting(dense_mat)
    w_dense_copy = FnMatrixWeighting(dense_mat.copy())
    w_different_dense = FnMatrixWeighting(different_dense_mat)

    # Equal -> True
    assert w_sparse.equiv(w_sparse)
    assert w_sparse.equiv(w_sparse2)
    # Equivalent matrices -> True
    assert w_sparse.equiv(w_sparse_as_dense)
    assert w_dense.equiv(w_dense_copy)
    # Different matrices -> False
    assert not w_dense.equiv(w_different_dense)

    # Test shortcuts
    sparse_eye = sp.sparse.eye(5)
    w_eye = FnMatrixWeighting(sparse_eye)
    w_dense_eye = FnMatrixWeighting(sparse_eye.todense())
    w_eye_vec = FnVectorWeighting(np.ones(5))

    w_eye_wrong_exp = FnMatrixWeighting(sparse_eye, exponent=1)

    sparse_smaller_eye = sp.sparse.eye(4)
    w_smaller_eye = FnMatrixWeighting(sparse_smaller_eye)

    sparse_shifted_eye = sp.sparse.eye(5, k=1)
    w_shifted_eye = FnMatrixWeighting(sparse_shifted_eye)

    sparse_almost_eye = sp.sparse.dia_matrix((np.ones(4), [0]), (5, 5))
    w_almost_eye = FnMatrixWeighting(sparse_almost_eye)

    assert w_eye.equiv(w_dense_eye)
    assert w_dense_eye.equiv(w_eye)
    assert w_eye.equiv(w_eye_vec)
    assert not w_eye.equiv(w_eye_wrong_exp)
    assert not w_eye.equiv(w_smaller_eye)
    assert not w_eye.equiv(w_shifted_eye)
    assert not w_smaller_eye.equiv(w_shifted_eye)
    assert not w_eye.equiv(w_almost_eye)

    # Bogus input
    assert not w_eye.equiv(True)
    assert not w_eye.equiv(object)
    assert not w_eye.equiv(None)
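The "equivalent matrices" checks reflect that two weightings are equivalent when they induce the same inner product <x, y>_W = y^T W x. A short NumPy/SciPy sketch of why a sparse identity weighting, its dense copy and an all-ones vector weighting agree (the plain weighted Euclidean product is assumed here for illustration):

import numpy as np
from scipy import sparse

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([5.0, 4.0, 3.0, 2.0, 1.0])

sparse_eye = sparse.eye(5)       # sparse identity weighting
dense_eye = np.eye(5)            # equivalent dense matrix
ones_vec = np.ones(5)            # equivalent vector weighting

inner_sparse = y.dot(sparse_eye.dot(x))
inner_dense = y.dot(dense_eye.dot(x))
inner_vec = y.dot(ones_vec * x)

assert np.isclose(inner_sparse, inner_dense)
assert np.isclose(inner_dense, inner_vec)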
Example #32
File: ntuples_test.py Project: rajmund/odl
def test_lincomb_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()
    x, y, z = fn.zero(), fn.zero(), fn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, otherx, 1, y, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, y, 1, otherx, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, y, 1, z, otherx)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb([], x, 1, y, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, x, [], y, z)
Example #33
File: pyastra.py Project: moosmann/astra
    def _get_volume(self):
        """Returns Rn vector containing the 2D or 3D volume data.

        Add description of order of dimensions.

        Returns
        -------
        :rtype: odl.space.cartesian.Rn
        :returns:  Vector in Rn containing 2D or 3D volume data.
        """

        geom = self.geom

        if geom.geom_type in self.type2d:
            return Rn(geom.vol_size).element(
                self.scaling * np.ravel(astra.data2d.get(self.vol_id)))
        elif geom.geom_type in self.type3d:
            return Rn(geom.vol_size).element(
                self.scaling * np.ravel(astra.data3d.get(self.vol_id)))
        else:
            raise Exception('Unknown geometry type.')
Example #34
File: ntuples_test.py Project: wjp/odl
def test_lincomb_exceptions(fn):
    # Hack to make sure otherfn is different
    otherfn = Rn(1) if fn.size != 1 else Rn(2)

    otherx = otherfn.zero()
    x, y, z = fn.zero(), fn.zero(), fn.zero()

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, otherx, 1, y, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, y, 1, otherx, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, y, 1, z, otherx)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb([], x, 1, y, z)

    with pytest.raises(LinearSpaceTypeError):
        fn.lincomb(1, x, [], y, z)
Example #35
    def adjoint_scaling_factor(self):
        """Compute scaling factor of adjoint projector. Consider A x = y,
        the adjoint A* of A is defined as:

             <A x, y>_D = <x, A* y>_I

         Assume A* = s B with B being the ASTRA backprojector, then:

             s = <A x, A x> / <B A x, x>

        Returns
        -------
        :rtype: float
        :returns: s
        """

        vol_rn = Rn(self.geom.vol_size)
        proj_rn = Rn(self.geom.proj_size)

        vol_rn_ones = vol_rn.element(1)
        proj_rn_ones = proj_rn.element(1)

        projector = ODLProjector(self.geom, vol_rn, proj_rn)

        proj = projector.forward(vol_rn_ones)
        vol = projector.backward(proj_rn_ones)

        # print vol.data.min(), vol.data.max()
        # print proj.data.min(), proj.data.max()

        self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)
        # self.adj_scal_fac = proj.norm()**2 / vol_rn.inner(vol, vol_rn_ones)
        # return proj.norm()**2 / vol_rn._inner(vol, vol_rn_ones)

        projector.clear_astra_memory()
Example #36
File: pyastra.py Project: moosmann/astra
    def __init__(self,
                 geometry_obj=Geometry(),
                 volume_space=Rn(Geometry().vol_size),
                 projections_space=Rn(Geometry().proj_size),
                 gpu_index=0):
        self.geom = geometry_obj
        self.vol_space = volume_space
        self.proj_space = projections_space
        self.gpu_index = gpu_index
        self.bp_id = None
        self.fp_id = None

        # Create volume geometry
        self.vol_geom = astra.create_vol_geom(self.geom.vol_shape)

        # Create projection geometry
        if self.geom.geom_type == 'cone':
            self.proj_geom = astra.create_proj_geom(
                self.geom.geom_type, self.geom.detector_spacing_x,
                self.geom.detector_spacing_y, self.geom.det_row_count,
                self.geom.det_col_count, self.geom.angles,
                self.geom.source_origin, self.geom.origin_detector)
        elif self.geom.geom_type == 'parallel':
            self.proj_geom = astra.create_proj_geom(
                'parallel', self.geom.detector_spacing_x,
                self.geom.det_col_count, self.geom.angles)

        # Allocate ASTRA memory for volume data and projection data
        if self.geom.vol_ndim == 2:
            self.volume_id = astra.data2d.create('-vol', self.vol_geom)
            self.proj_id = astra.data2d.create('-sino', self.proj_geom)
        elif self.geom.vol_ndim == 3:
            self.volume_id = astra.data3d.create('-vol', self.vol_geom)
            self.proj_id = astra.data3d.create('-sino', self.proj_geom)
        else:
            raise Exception("Invalid number of dimensions 'ndim'.")

        # self.scal_fac = self.geom.full_angle_rad / self.geom.angles.size
        # self.scal_fac = 1.0 / self.geom.angles.size
        self.scal_fac = self.geom.voxel_size[0] / self.geom.angles.size
Example #37
    def test_adjoint_scaling_factor(self):
        """Test if back-projector A^* is adjoint of forward projector A:

            <A x, y>_D = <x,A^* y>_I .

        Define the scaling factor via A^* = s B, where B is the implemented
        back-projector. Thus,

            s = <A x, y>_D / <x,B y>_I ,

        or using y = A x

            s = <A x, A x>_D / <x,B A x>_I .
        """
        geom = Geometry(2)

        # x = ones() and y = A x
        vol_rn = Rn(geom.vol_size)
        vol_rn_ones = vol_rn.element(1)
        proj_rn = Rn(geom.proj_size)
        projector = ODLProjector(geom, vol_rn, proj_rn)

        proj = projector.forward(vol_rn_ones)
        vol = projector.backward(proj)

        s0 = proj.norm() ** 2 / vol_rn._inner(vol, vol_rn_ones)

        # x = ones(), y = ones()
        vol_rn = Rn(geom.vol_size)
        vol_rn_ones = vol_rn.element(1)
        proj_rn = Rn(geom.proj_size)
        proj_rn_ones = proj_rn.element(1)

        projector = ODLProjector(geom, vol_rn, proj_rn)

        proj = projector.forward(vol_rn_ones)
        vol = projector.backward(proj_rn_ones)

        s1 = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

        # implemented function
        proj_vec = Rn(geom.proj_size).element(1)
        cp = ODLChambollePock(geom, proj_vec)
        cp.adjoint_scaling_factor()
        s2 = cp.adj_scal_fac
        self.assertEqual(s1, s2)

        print('Scaling factors:', s0, s1, s2)

        projector.clear_astra_memory()
Example #38
    def test_adjoint_scaling_factor(self):

        # x
        vol_rn = Rn(self.geom.vol_size)
        vol_rn_ones = vol_rn.element(1)

        # y
        proj_rn = Rn(self.geom.proj_size)
        proj_rn_ones = proj_rn.element(1)

        # A
        projector = ODLProjector(self.geom, vol_rn, proj_rn)

        # A x
        proj = projector.forward(vol_rn_ones)
        # A^* y
        vol = projector.backward(proj_rn_ones)

        # scaling factor for x[:] = 1 and y[:] = 1
        s0 = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

        # A^* A x
        volp = projector.backward(proj)

        # scaling factor for x[:] = 1 and y = A x
        s1 = proj.norm() ** 2 / vol_rn._inner(volp, vol_rn_ones)

        cp = self.cp_class(self.geom, self.proj_vec)
        self.assertEqual(cp.adj_scal_fac, 1)

        cp.adjoint_scaling_factor()
        s2 = cp.adj_scal_fac
        self.assertFalse(s2 == 1)
        self.assertEqual(s0, s2)

        print ('Test adjoint')
        print ('  Scaling factor for backprojector', s0, s1, s2)

        projector.clear_astra_memory()
Example #39
    def test_projector(self):

        data = ctdata.sets['parallel']
        data.load()
        print(' Data detector width:', data.detector_width_mm)
        print(' Data projections shape:', data.projections.shape)

        geom = self.geom_class(data, 2*(100,), 2*(100,))
        print(' Detector pixel width:', geom.det_col_spacing, geom.det_row_spacing)

        projector = self.projector_class(geom)

        # Adjoint <Ax,y>=<x,Ad y> with x[:]=1 and y[:]=1
        rn_vol0 = Rn(geom.vol_size).element(1)
        rn_proj0 = Rn(geom.proj_size).element(1)
        rn_proj = projector.forward(rn_vol0)
        rn_vol = projector.backward(rn_proj0)
        l = rn_proj.inner(rn_proj0)
        r = rn_vol0.inner(rn_vol)
        print(' Adjoint with x[:]=1 and y[:]=1:')
        print('  <Ax,y> = <x,Ad y> : {0} = {1}'.format(l, r))
        print('  |<Ax,y> - <x,Ad y>| = {0}'.format(np.abs(l - r)))
        print('  <Ax,y> / <x,Ad y>  -1 = {0}'.format(l / r -1))

        # Back-project phantom data
        rn_proj = Rn(geom.proj_size).element(data.projections.ravel())
        rn_bp = projector.backward(rn_proj)

        # FBP
        rn_fbp = projector.fbp(rn_proj)
        rec = np.reshape(rn_fbp.data, geom.vol_shape)

        # import scipy.io as sio
        # sio.savemat(self.matfile, {'parallel_fbp': rec})
        # plt.imshow(rec, cmap=plt.cm.Greys)
        # plt.show()

        projector.clear_astra_memory()
Example #40
File: ntuples_test.py Project: rajmund/odl
def test_vector_equiv():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)
    diag_mat = weight_vec * np.eye(5)
    different_vec = weight_vec - 1

    w_vec = FnVectorWeighting(weight_vec)
    w_elem = FnVectorWeighting(weight_elem)
    w_diag_mat = FnMatrixWeighting(diag_mat)
    w_different_vec = FnVectorWeighting(different_vec)

    # Equal -> True
    assert w_vec.equiv(w_vec)
    assert w_vec.equiv(w_elem)
    # Equivalent matrix -> True
    assert w_vec.equiv(w_diag_mat)
    # Different vector -> False
    assert not w_vec.equiv(w_different_vec)

    # Test shortcuts
    const_vec = np.ones(5) * 1.5

    w_vec = FnVectorWeighting(const_vec)
    w_const = FnConstWeighting(1.5)
    w_wrong_const = FnConstWeighting(1)
    w_wrong_exp = FnConstWeighting(1.5, exponent=1)

    assert w_vec.equiv(w_const)
    assert not w_vec.equiv(w_wrong_const)
    assert not w_vec.equiv(w_wrong_exp)

    # Bogus input
    assert not w_vec.equiv(True)
    assert not w_vec.equiv(object)
    assert not w_vec.equiv(None)
Example #41
File: ntuples_test.py Project: wjp/odl
def test_vector_equiv():
    rn = Rn(5)
    weight_vec = _pos_array(rn)
    weight_elem = rn.element(weight_vec)
    diag_mat = weight_vec * np.eye(5)
    different_vec = weight_vec - 1

    w_vec = FnVectorWeighting(weight_vec)
    w_elem = FnVectorWeighting(weight_elem)
    w_diag_mat = FnMatrixWeighting(diag_mat)
    w_different_vec = FnVectorWeighting(different_vec)

    # Equal -> True
    assert w_vec.equiv(w_vec)
    assert w_vec.equiv(w_elem)
    # Equivalent matrix -> True
    assert w_vec.equiv(w_diag_mat)
    # Different vector -> False
    assert not w_vec.equiv(w_different_vec)

    # Test shortcuts
    const_vec = np.ones(5) * 1.5

    w_vec = FnVectorWeighting(const_vec)
    w_const = FnConstWeighting(1.5)
    w_wrong_const = FnConstWeighting(1)
    w_wrong_exp = FnConstWeighting(1.5, exponent=1)

    assert w_vec.equiv(w_const)
    assert not w_vec.equiv(w_wrong_const)
    assert not w_vec.equiv(w_wrong_exp)

    # Bogus input
    assert not w_vec.equiv(True)
    assert not w_vec.equiv(object)
    assert not w_vec.equiv(None)
Example #42
File: ntuples_test.py Project: rajmund/odl
def test_matrix_is_valid():
    fn = Rn(5)
    sparse_mat = _sparse_matrix(fn)
    dense_mat = _dense_matrix(fn)
    bad_mat = np.eye(5)
    bad_mat[0, 0] = 0

    w_sparse = FnMatrixWeighting(sparse_mat)
    w_dense = FnMatrixWeighting(dense_mat)
    w_bad = FnMatrixWeighting(bad_mat)

    with pytest.raises(NotImplementedError):
        w_sparse.is_valid()

    assert w_dense.is_valid()
    assert not w_bad.is_valid()
Example #43
    def setUp(self):
        # Timing
        self.start_time = time.time()

        # DATA
        d = ctdata.sets[14]
        # d.normalize = 10000
        d.load()
        det_row_count, num_proj, det_col_count = d.shape
        voxel_size_mm = 2 * d.roi_cubic_width_mm / det_col_count
        self.geom = Geometry(
            volume_shape=(det_col_count, det_col_count, det_row_count),
            det_row_count=det_row_count,
            det_col_count=det_col_count,
            angles=d.angles_rad,
            source_origin=d.distance_source_origin_mm / voxel_size_mm,
            origin_detector=d.distance_origin_detector_mm / voxel_size_mm,
            det_col_spacing=d.detector_width_mm/det_col_count/voxel_size_mm,
            det_row_spacing=d.detector_width_mm/det_row_count/voxel_size_mm,
            voxel_size=voxel_size_mm
        )
        self.voxel_size = voxel_size_mm

        # Rn vector
        self.proj_vec = Rn(self.geom.proj_size).element(
            d.projections.ravel() * (voxel_size_mm * 1e-3))

        # Class
        self.cp_class = ODLChambollePock
        # self.L = 271.47  # for data set 13 before projector rescaling
        self.L = 1.5  # TV for data set 13

        print ('Set up unit test')
        print ('  Data set:', d.filename)
        print ('  Raw data: min, max, mean = ', d.raw_data_min,
               d.raw_data_max, d.raw_data_mean)
        print('  g: min: %g, max: %g' % (self.proj_vec.data.min(),
                                         self.proj_vec.data.max()))
        print ('  Voxel size:', voxel_size_mm)
        print ('  Detector pixels:', self.geom.det_col_count,
               self.geom.det_row_count)
        print ('  Rel. pixel size:', self.geom.detector_spacing_x,
               self.geom.detector_spacing_y)
Example #44
File: pyastra.py Project: moosmann/astra
    def _store_volume(self, rn_vector=Rn(1).element(1)):
        """Store volume data of Rn vector in ASTRA memory.

        Parameters
        ----------
        :type rn_vector: odl.space.cartesian.Rn
        :param rn_vector: Vector in Rn containing 2D or 3D volume data.
        """

        geom = self.geom

        if geom.geom_type in self.type2d:
            astra.data2d.store(self.vol_id,
                               rn_vector.data.reshape(geom.vol_shape))

        elif geom.geom_type in self.type3d:
            astra.data3d.store(self.vol_id,
                               rn_vector.data.reshape(geom.vol_shape))
        else:
            raise Exception('Unknown geometry type.')
Example #45
    def matrix_norm(self, iterations, vol_init=1.0,
                    tv_norm=False, return_volume=False,
                    intermediate_results=False):
        """The matrix norm || K ||_2  of 'K' defined here as largest
        singular value of 'K'. Employs the generic power method to obtain a
        scalar 's' which tends to || K ||_2 as the iterations N increase.

        To be implemented: optionally return volume 'x', such that it can be
        re-used as initializer to continue the iteration.

        Parameters
        ----------
        :type iterations: int
        :param iterations: Number of iterations of the generic power method.
        :type vol_init: float | ndarray (default 1.0)
        :param vol_init: Initial image in I to start with.
        :type intermediate_results: bool
        :param intermediate_results: Returns list of intermediate results
        instead of scalar.
        :type return_volume: bool
        :param return_volume: Return volume in order to resume iteration via
        passing it over as initial volume.

        Returns
        -------
        :rtype: float | numpy.ndarray, numpy.ndarray (optional)
        :returns: s, vol
         s: Scalar of final iteration or numpy.ndarray containing all
         results during iteration.
         vol: Volume vector
        """

        geom = self.geom
        vol = self.recon_space.element(vol_init)
        proj = Rn(geom.proj_size).zero()
        # projector = Projector(geom, vol.space, proj.space)
        projector = Projector(geom)
        # print 'projector scaling factor', projector.scal_fac
        tmp = None

        if intermediate_results:
            s = np.zeros(iterations)
        else:
            s = 0

        # Power method loop
        for n in range(iterations):

            # step 4: x_{n+1} <- K^T K x_n
            if tv_norm:
                # K = (A, grad) instead of K = A
                # Compute: - div grad x_n
                # use sum over generator expression
                tmp = -reduce(add,
                              (partial(
                                  partial(vol.data.reshape(geom.vol_shape),
                                          dim, geom.voxel_width[dim]),
                                  dim, geom.voxel_width[dim]) for dim in
                               range(geom.vol_ndim)))

            # x_n <- A^T (A x_n)
            vol = projector.backward(projector.forward(vol))
            vol *= self.adj_scal_fac

            if tv_norm:
                # x_n <- x_n - div grad x_n
                # print 'n: {2}. vol: min = {0}, max = {1}'.format(
                #     vol.data.min(), vol.data.max(), n)
                # print 'n: {2}. tv: min = {0}, max = {1}'.format(tmp.min(),
                #                                            tmp.max(), n)
                vol.data[:] += tmp.ravel()

            # step 5:
            # x_n <- x_n/||x_n||_2
            vol /= vol.norm()

            # step 6:
            # s_n <-|| K x ||_2
            if intermediate_results:
                # proj <- A x_n
                proj = projector.forward(vol)
                s[n] = proj.norm()
                if tv_norm:
                    s[n] = np.sqrt(s[n] ** 2 +
                                   reduce(add,
                                          (np.linalg.norm(
                                              partial(vol.data.reshape(
                                                  geom.vol_shape), dim,
                                                  geom.voxel_width[dim])) ** 2
                                           for dim in range(geom.vol_ndim))))

        # step 6: || K x ||_2
        if not intermediate_results:
            proj = projector.forward(vol)
            s = proj.norm()
            if tv_norm:
                s = np.sqrt(s ** 2 + reduce(add,
                                            (np.linalg.norm(partial(
                                                vol.data.reshape(
                                                    geom.vol_shape), dim,
                                                geom.voxel_width[dim])) ** 2
                                             for dim in range(geom.vol_ndim))))

        # Clear ASTRA memory
        projector.clear_astra_memory()

        # Returns
        if not return_volume:
            return s
        else:
            return s, vol.data
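The power method used in matrix_norm can be demonstrated without ASTRA on a plain matrix: iterate x <- K^T K x, renormalize, and read off ||K x|| as the estimate of the largest singular value. A minimal NumPy-only sketch with a toy operator whose largest singular value is known to be 5:

import numpy as np

K = np.diag([5.0, 2.0, 1.0])     # toy operator with singular values 5, 2, 1

x = np.ones(3)                   # vol_init = 1.0
for _ in range(20):              # 'iterations' in the method above
    x = K.T.dot(K.dot(x))        # step 4: x <- K^T K x
    x /= np.linalg.norm(x)       # step 5: x <- x / ||x||_2

s = np.linalg.norm(K.dot(x))     # step 6: s -> || K ||_2
assert np.isclose(s, 5.0)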
Example #46
class TomODLChambollePock(object):
    """See docstring of class ChambollePock."""

    def __init__(self, geometry=Geometry(),
                 projections_vector=Rn(Geometry().proj_size).zero()):
        self.geom = geometry
        self.proj = projections_vector
        self.recon_space = Rn(geometry.vol_size)
        self.adj_scal_fac = 1
        self.forward_proj_scal = 1

    def adjoint_scaling_factor(self):
        """Compute scaling factor of adjoint projector. Consider A x = y,
        the adjoint A* of A is defined as:

             <A x, y>_D = <x, A* y>_I

         Assume A* = s B with B being the ASTRA backprojector, then:

             s = <A x, A x> / <B A x, x>

        Returns
        -------
        :rtype: float
        :returns: s
        """

        vol_rn = Rn(self.geom.vol_size)
        proj_rn = Rn(self.geom.proj_size)

        vol_rn_ones = vol_rn.element(1)
        proj_rn_ones = proj_rn.element(1)

        # projector = Projector(self.geom, vol_rn, proj_rn)
        projector = Projector(self.geom)

        proj = projector.forward(vol_rn_ones)
        vol = projector.backward(proj_rn_ones)

        # print vol.data.min(), vol.data.max()
        # print proj.data.min(), proj.data.max()

        self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)
        # self.adj_scal_fac = proj.norm()**2 / vol_rn.inner(vol, vol_rn_ones)
        # return proj.norm()**2 / vol_rn._inner(vol, vol_rn_ones)

        projector.clear_astra_memory()

    def matrix_norm(self, iterations, vol_init=1.0,
                    tv_norm=False, return_volume=False,
                    intermediate_results=False):
        """The matrix norm || K ||_2  of 'K' defined here as largest
        singular value of 'K'. Employs the generic power method to obtain a
        scalar 's' which tends to || K ||_2 as the iterations N increase.

        To be implemented: optionally return volume 'x', such that it can be
        re-used as initializer to continue the iteration.

        Parameters
        ----------
        :type iterations: int
        :param iterations: Number of iterations of the generic power method.
        :type vol_init: float | ndarray (default 1.0)
        :param vol_init: Initial image in I to start with.
        :type intermediate_results: bool
        :param intermediate_results: Returns list of intermediate results
        instead of scalar.
        :type return_volume: bool
        :param return_volume: Return volume in order to resume iteration via
        passing it over as initial volume.

        Returns
        -------
        :rtype: float | numpy.ndarray, numpy.ndarray (optional)
        :returns: s, vol
         s: Scalar of final iteration or numpy.ndarray containing all
         results during iteration.
         vol: Volume vector
        """

        geom = self.geom
        vol = self.recon_space.element(vol_init)
        proj = Rn(geom.proj_size).zero()
        # projector = Projector(geom, vol.space, proj.space)
        projector = Projector(geom)
        # print 'projector scaling factor', projector.scal_fac
        tmp = None

        if intermediate_results:
            s = np.zeros(iterations)
        else:
            s = 0

        # Power method loop
        for n in range(iterations):

            # step 4: x_{n+1} <- K^T K x_n
            if tv_norm:
                # K = (A, grad) instead of K = A
                # Compute: - div grad x_n
                # use sum over generator expression
                tmp = -reduce(add,
                              (partial(
                                  partial(vol.data.reshape(geom.vol_shape),
                                          dim, geom.voxel_width[dim]),
                                  dim, geom.voxel_width[dim]) for dim in
                               range(geom.vol_ndim)))

            # x_n <- A^T (A x_n)
            vol = projector.backward(projector.forward(vol))
            vol *= self.adj_scal_fac

            if tv_norm:
                # x_n <- x_n - div grad x_n
                # print 'n: {2}. vol: min = {0}, max = {1}'.format(
                #     vol.data.min(), vol.data.max(), n)
                # print 'n: {2}. tv: min = {0}, max = {1}'.format(tmp.min(),
                #                                            tmp.max(), n)
                vol.data[:] += tmp.ravel()

            # step 5:
            # x_n <- x_n/||x_n||_2
            vol /= vol.norm()

            # step 6:
            # s_n <-|| K x ||_2
            if intermediate_results:
                # proj <- A x_n
                proj = projector.forward(vol)
                s[n] = proj.norm()
                if tv_norm:
                    s[n] = np.sqrt(s[n] ** 2 +
                                   reduce(add,
                                          (np.linalg.norm(
                                              partial(vol.data.reshape(
                                                  geom.vol_shape), dim,
                                                  geom.voxel_width[dim])) ** 2
                                           for dim in range(geom.vol_ndim))))

        # step 6: || K x ||_2
        if not intermediate_results:
            proj = projector.forward(vol)
            s = proj.norm()
            if tv_norm:
                s = np.sqrt(s ** 2 + reduce(add,
                                            (np.linalg.norm(partial(
                                                vol.data.reshape(
                                                    geom.vol_shape), dim,
                                                geom.voxel_width[dim])) ** 2
                                             for dim in range(geom.vol_ndim))))

        # Clear ASTRA memory
        projector.clear_astra_memory()

        # Returns
        if not return_volume:
            return s
        else:
            return s, vol.data

    def least_squares(self, iterations=1, L=None, tau=None, sigma=None,
                      theta=None, non_negativiy_constraint=False,
                      tv_norm=False,
                      verbose=True):
        """Least-squares problem with optional TV-regularisation and/or
        non-negativity constraint.

        Parameters
        ----------
        :type iterations: int (default 1)
        :param iterations: Number of iterations the optimization should
        run for.
        :type L: float (default: None)
        :param L: Matrix norm of forward projector. If 'None' matrix_norm is
        called with 20 iterations.
        :type tau: float (default 1/L)
        :param tau:
        :type sigma: float (default 1/L)
        :param sigma:
        :type theta: float (default 1)
        :param theta:
        :type non_negativiy_constraint: bool (default False)
        :param non_negativiy_constraint: Add non-negativity constraint to
        optimization problem (via indicator function).
        :type tv_norm: bool | float (default False)
        :param tv_norm: Unless False, coincides with the numerical value of
        the parameter lambda for TV-Regularisation.
        :type verbose: bool (default False)
        :param verbose: Show intermediate reconstructions and
        convergence measures during iteration.

        Returns
        -------
        :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
        :returns: u, p, cpd, l2_du
         u: vector of reconstructed volume
         p: vector of dual projection variable
         cpd: conditional primal-dual gap (convergence measure)
         l2_du: l2-norm of constraint-induced convergence measure
        """

        # step 1:
        if L is None:
            L = self.matrix_norm(20)
        if tau is None:
            tau = 1 / L
        if sigma is None:
            sigma = 1 / L
        if theta is None:
            theta = 1

        # print 'tau:', tau
        # print 'sigma:', sigma
        # print 'theta:', theta

        geom = self.geom
        g = self.proj  # domain: D

        # l2-norm of (volume update / tau)
        l2_du = np.zeros(iterations)
        # conditional primal-dual gap
        cpd = np.zeros(iterations)

        # step 2: initialize u and p with zeros
        u = self.recon_space.zero()  # domain: I
        p = g.space.zero()  # domain: D
        # q: spatial vector = list of ndarrays in I (not Rn vectors)
        if tv_norm:
            ndim = geom.vol_ndim
            # domain of q: V = [I, I, ...]
            q = [np.zeros(geom.vol_shape, dtype=u.data.dtype) for _ in range(
                ndim)]

        # step 3: ub <- u
        ub = u.copy()  # domain: I

        # initialize projector
        # A = Projector(geom, u.space, p.space)
        A = Projector(geom)

        # visual output instance
        disp = DisplayIntermediates(verbose=verbose, vol=u.data.reshape(
            geom.vol_shape), cpd=cpd, l2_du=l2_du)

        # step 4: repeat
        for n in range(iterations):

            # step 5: p_{n+1} <- (p_n + sigma(A ub_n - g)) / (1 + sigma)
            if n >= 0:
                # with(Timer('proj:')):
                #     # p_tmp <- A ub
                #     p_tmp = A.forward(ub)
                #     # p_tmp <- p_tmp - g
                #     p_tmp -= g
                #     # p <- p + sigma * p_tmp
                #     p += sigma * p_tmp
                # p_n <- p_n + sigma(A ub -g )
                tmp = A.forward(ub)
                # print 'p:', p.data.shape, 'Au:', tmp.data.shape, 'g:', \
                #     g.data.shape
                p += sigma * (A.forward(ub) - g)
            else:
                p -= sigma * g
            # p <- p / (1 + sigma)
            p /= 1 + sigma

            # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            if tv_norm:

                for dim in range(ndim):
                    # q_n <- q_n + sigma * grad ub_n
                    q[dim] += sigma * partial(ub.data.reshape(
                        self.geom.vol_shape), dim, geom.voxel_width[dim])

                # |q_n|: isotropic TV
                # use div_q to save memory, q = [qi] where qi are ndarrays
                div_q = np.sqrt(reduce(add, (qi ** 2 for qi in q)))

                # max(lambda 1_I, |q_n + sigma diff ub_n|)
                # print 'q_mag:', div_q.min(), div_q.max()
                div_q[div_q < tv_norm] = tv_norm

                # q_n <- lambda * q_n / |q_n|
                for dim in range(ndim):
                    q[dim] /= div_q
                    q[dim] *= tv_norm

                # div q_{n+1}
                div_q = reduce(add, (partial(qi, dim, geom.voxel_width[dim])
                                     for (dim, qi) in enumerate(q)))
                div_q *= tau

            # step 6: u_{n+1} <- u_{n} - tau * A^T p_{n+1}
            # TV step 7: u_{n+1} <- u_{n} - tau * A^T p_{n+1} + div q_{n+1}
            # ub_tmp <- A^T p
            ub_tmp = A.backward(p)
            ub_tmp *= tau
            ub_tmp *= self.adj_scal_fac
            # l2-norm per voxel of ub_tmp = A^T p
            l2_du[n:] = ub_tmp.norm()  # / u.data.size
            if tv_norm:
                l2_du[n:] += np.linalg.norm(div_q.ravel())  # / u.data.size
            # store current u_n temporarily in ub_n
            ub = -u.copy()
            # u <- u - tau ub_tmp
            u -= ub_tmp
            # TV: u <- u + tau div q
            if tv_norm:
                print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                    n, u.data.min(), u.data.max()))
                print('{0}: div q: min = {1}, max = {2}'.format(
                    n, div_q.min(), div_q.max()))
                u.data[:] += div_q.ravel()

            # Positivity constraint
            if non_negativiy_constraint:
                u.data[u.data < 0] = 0
                # print '\nu:', u.data.min(), u.data.max()

            # conditional primal-dual gap for current u and p
            # 1/2||A u - g||_2^2 + 1/2||p||_2^2 + <p,g>_D
            # p_tmp <- A u
            # p_tmp = A.forward(u)
            # p_tmp -= g
            # cpd[n:] = (0.5 * p_tmp.norm() ** 2 +
            cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g) ** 2 +
                       0.5 * p.norm() ** 2 +
                       p.inner(g))  # / p.data.size
            if tv_norm:
                cpd[n:] += tv_norm * np.linalg.norm(
                    reduce(add, (partial(u.data.reshape(geom.vol_shape),
                                         dim, geom.voxel_width[dim]) for dim
                                 in range(geom.vol_ndim))
                           ).ravel(), ord=1)  # / u.data.size

            # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
            # ub <- ub + u_{n+1}, remember ub = -u_n
            ub += u
            # ub <- theta * ub
            ub *= theta
            # ub <- ub + u_{n+1}
            ub += u

            # visual output
            disp.update()

        A.clear_astra_memory()

        # Should avoid window freezing
        disp.show()

        return u, p, cpd, l2_du
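least_squares above is a Chambolle-Pock primal-dual loop for 1/2 ||A u - g||^2 (plus optional TV and positivity). Its core update, stripped of the TV branch, the positivity constraint and ASTRA, fits in a few lines of NumPy; a sketch assuming a plain matrix A and the same resolvent p <- (p + sigma(A ub - g)) / (1 + sigma):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 10))       # toy forward projector
g = A.dot(np.ones(10))                  # synthetic, consistent data

L = np.linalg.norm(A, 2)                # matrix norm ||A||_2
tau = sigma = 1.0 / L
theta = 1.0

u = np.zeros(10)                        # primal variable (volume)
p = np.zeros(50)                        # dual variable (projections)
ub = u.copy()

for _ in range(200):
    p = (p + sigma * (A.dot(ub) - g)) / (1.0 + sigma)  # step 5: dual prox
    u_old = u
    u = u - tau * A.T.dot(p)                           # step 6: primal step
    ub = u + theta * (u - u_old)                       # step 7: over-relaxation

# The residual ||A u - g|| is driven towards zero for this consistent problem
assert np.linalg.norm(A.dot(u) - g) < 1e-2 * np.linalg.norm(g)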
Example #47
File: ntuples_test.py Project: wjp/odl
def test_vector_init(exponent):
    rn = Rn(5)
    weight_vec = _pos_array(rn)

    FnVectorWeighting(weight_vec, exponent=exponent)
    FnVectorWeighting(rn.element(weight_vec), exponent=exponent)