Example #1
def test_scale_property_exact():
    # Here, we test the same scaling property, but for a subset of
    # cases, which enables checking the numerical equality of the
    # results with smaller tolerances. Specifically:
    # 1. We convert pg to a vector geometry immediately.
    # 2. We only scale with "even" fractions (exactly representable in
    #    base 2); see the illustration after this test.
    # 3. We check with torch's default tolerances rtol=1e-5, atol=1e-8.

    vg = ts.volume(shape=(1, 64, 64))
    pg = ts.parallel(angles=96, shape=(1, 96)).to_vec()  # <-- vec already
    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape).cuda()
    x[:, 20:50, 20:50] = 1.0  # box
    x[:, 30:40, 30:40] = 0.0  # and hollow

    y = A(x)
    bp = A.T(y)

    for s in [1.0, 0.5, 2.0, 4.0]:  # only fractions exactly representable in base 2
        print(s)
        S = ts.scale(s)
        A_s = ts.operator(S * A.volume_geometry, S * A.projection_geometry)

        assert torch.allclose(y * s, A_s(x))     # smaller tolerances
        assert torch.allclose(bp, A_s.T(y / s))  # smaller tolerances
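The restriction to "even" fractions matters because only scale factors with a finite base-2 expansion are stored exactly as IEEE-754 floats, so the scaled geometries carry no representation error. A minimal illustration (not part of the test suite):

from fractions import Fraction

# "Even" fractions such as 0.5 are stored exactly as binary floats, so
# scaling geometry coordinates by them is lossless:
print(Fraction(0.5))    # 1/2
print(Fraction(4.0))    # 4
# 1/3 has no finite base-2 expansion, so the stored value is already a
# rounded approximation before any geometry arithmetic happens:
print(Fraction(1 / 3))  # 6004799503160661/18014398509481984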
Example #2
def tv_min2d(A, y, lam, num_iterations=500, L=None, non_negativity=False):
    """Computes the total-variation minimization using Chambolle-Pock

    Assumes that the data is a single 2D slice. A 3D version with 3D
    gradients is work in progress.

    :param A: `tomosipo.operator`
    :param y: `torch.tensor`
    :param lam: `float`
        regularization parameter lambda.
    :param num_iterations: `int`
    :param L: `float`
        estimate of the operator norm of the stacked operator
        `(A, grad)`. Estimated with `operator_norm_plus_grad` when
        `None`.
    :param non_negativity: `bool`
        clamp the iterates to be non-negative after each update.
    :returns: the reconstruction
    :rtype: `torch.tensor`

    """

    dev = y.device

    # It is preferable that the operator norm of `A` is roughly equal
    # to one. First, this makes the `lam` parameter comparable between
    # different geometries. Second, without this trick I cannot get
    # the algorithm to converge.

    # The operator norm of `A` scales with the scale of the
    # geometry. Therefore, it is easiest to rescale the geometry and
    # to divide the measurement y by the scale to preserve the
    # intensity. The validity of this approach is checked in the
    # tests, see `test_scale_property` in test_tv_min.py. A sketch of
    # `operator_norm` follows after this function.
    scale = operator_norm(A)
    S = ts.scale(1 / scale, pos=A.volume_geometry.pos)
    A = ts.operator(S * A.volume_geometry, S * A.projection_geometry.to_vec())
    y = y / scale

    if L is None:
        L = operator_norm_plus_grad(A, num_iter=100)
    t = 1.0 / L      # primal step size
    s = 1.0 / L      # dual step size
    theta = 1        # over-relaxation parameter

    u = torch.zeros(A.domain_shape, device=dev)
    p = torch.zeros(A.range_shape, device=dev)
    q = grad_2D(u)  # contains zeros (and has correct shape)
    u_avg = torch.clone(u)

    for n in range(num_iterations):
        # Dual update for the data term: proximal step for the convex
        # conjugate of the squared L2 distance to y.
        p = (p + s * (A(u_avg) - y)) / (1 + s)
        # Dual update for the TV term: `clip` projects pointwise onto
        # the ball of radius lam.
        q = clip(q + s * grad_2D(u_avg), lam)
        # Primal update: gradient step using the adjoint operators.
        u_new = u - (t * A.T(p) + t * grad_2D_T(q))
        if non_negativity:
            u_new = torch.clamp(u_new, min=0.0, max=None)
        # Over-relaxation of the primal variable.
        u_avg = u_new + theta * (u_new - u)
        u = u_new

    return u
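The helpers `operator_norm` and `operator_norm_plus_grad` are imported elsewhere and not shown here. A minimal sketch of what `operator_norm` might look like, assuming a plain power iteration on `A.T(A(.))` (the name and signature mirror the call above; the body is illustrative, not the library's actual implementation):

def operator_norm(A, num_iter=10):
    # Power iteration: repeatedly applying A.T(A(.)) converges to the
    # dominant eigenvector of A^T A. For the normalized iterate x, the
    # largest singular value of A is then approximately ||A(x)||.
    x = torch.randn(*A.domain_shape)
    for _ in range(num_iter):
        x = A.T(A(x))
        x /= torch.norm(x)
    return torch.norm(A(x)).item()

`operator_norm_plus_grad` presumably does the same for the stacked operator `(A, grad_2D)`, whose norm determines the step sizes `t` and `s`; `grad_2D`, `grad_2D_T`, and `clip` are presumably forward differences, their adjoint, and the pointwise projection onto the `lam`-ball.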
Example #3
def test_devices():
    vg = ts.volume(shape=(1, 64, 64))
    pg = ts.parallel(angles=96, shape=(1, 96))
    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape)
    x[:, 20:50, 20:50] = 1.0  # box
    x[:, 30:40, 30:40] = 0.0  # and hollow

    y = A(x)

    tm.tv_min2d(A, y.cuda(), 0.1, num_iterations=10)
    tm.tv_min2d(A, y, 0.1, num_iterations=10)
Example #4
def test_sirt():
    vg = ts.volume(shape=32)
    pg = ts.parallel(angles=32, shape=48)

    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape)
    x[4:28, 4:28, 4:28] = 1.0
    x[12:22, 12:22, 12:22] = 0.0

    y = A(x)

    sirt(A, y, 10)
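`sirt` is imported from the module under test. A minimal sketch of the classic SIRT update it is expected to perform, assuming the usual row- and column-sum preconditioning (illustrative, not the module's actual implementation):

def sirt(A, y, num_iterations):
    # Classic SIRT iteration: x <- x + C * A.T(R * (y - A(x))), where
    # R and C hold reciprocal row and column sums of A, obtained by
    # projecting volumes/sinograms of ones through the operator.
    dev = y.device
    eps = 1e-8  # guard against division by zero
    R = 1 / torch.clamp(A(torch.ones(A.domain_shape, device=dev)), min=eps)
    C = 1 / torch.clamp(A.T(torch.ones(A.range_shape, device=dev)), min=eps)
    x = torch.zeros(A.domain_shape, device=dev)
    for _ in range(num_iterations):
        x += C * A.T(R * (y - A(x)))
    return x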
Example #5
def test_fbp():
    vg = ts.volume(shape=32)
    pg = ts.parallel(angles=32, shape=48)

    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape)
    x[4:28, 4:28, 4:28] = 1.0
    x[12:22, 12:22, 12:22] = 0.0

    y = A(x)

    rec = fbp(A, y)
    rec = fbp(A, y, padded=False)
Example #6
def test_scale_property():
    vg = ts.volume(shape=(1, 64, 64))
    pg = ts.parallel(angles=96, shape=(1, 96))
    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape).cuda()
    x[:, 20:50, 20:50] = 1.0  # box
    x[:, 30:40, 30:40] = 0.0  # and hollow

    y = A(x)
    bp = A.T(y)

    for s in [1.0, 0.5, 2.0, 1/3, 3.0, 4.0]:
        print(s)
        S = ts.scale(s)
        A_s = ts.operator(
            S * A.volume_geometry,
            S * A.projection_geometry.to_vec())

        # Relatively large tolerances, because
        # 1. converting to a vector geometry incurs a loss of accuracy;
        # 2. scalings by factors that are not powers of two (such as
        #    1/3 and 3.0) incur heavy floating point inaccuracies; see
        #    the illustration after test_scale_property_exact above.
        assert torch.allclose(y * s, A_s(x), atol=1e-4, rtol=1e-3)
        assert torch.allclose(bp, A_s.T(y / s), atol=1e-4, rtol=1e-3)
Example #7
def test_fbp_devices():
    vg = ts.volume(shape=32)
    pg = ts.parallel(angles=32, shape=48)
    A = ts.operator(vg, pg)

    x = torch.zeros(*A.domain_shape)
    x[4:28, 4:28, 4:28] = 1.0
    x[12:22, 12:22, 12:22] = 0.0

    y = A(x)

    devices = [torch.device("cpu"), torch.device("cuda")]
    for dev in devices:
        fbp(A, y.to(dev))
        fbp(A, y.to(dev), padded=False)
Example #8
def get_torch_ray_trafo_parallel_2d_adjoint(ray_trafo, z_shape=1):
    """
    Create a torch autograd-enabled function from a 2D parallel-beam
    :class:`odl.tomo.RayTransform` using tomosipo. The returned function
    calls the direct backward projection routine of astra, which avoids
    copying between GPU and CPU (available in astra 1.9.9.dev4).

    Parameters
    ----------
    ray_trafo : :class:`odl.tomo.RayTransform`
        Ray transform
    z_shape : int, optional
        Batch dimension.
        Default: ``1``.

    Returns
    -------
    torch_ray_trafo_adjoint : callable
        Torch autograd-enabled function applying the parallel-beam backward
        projection.
        Input and output have a trivial leading batch dimension and a channel
        dimension specified by `z_shape` (default ``1``), i.e. the
        input shape is ``(1, z_shape) + ray_trafo.range.shape`` and the
        output shape is ``(1, z_shape) + ray_trafo.domain.shape``.
    """
    if not TOMOSIPO_AVAILABLE:
        raise ImportError(MISSING_TOMOSIPO_MESSAGE)
    if not ASTRA_AVAILABLE:
        raise RuntimeError('Astra is not available.')
    if not astra.use_cuda():
        raise RuntimeError('Astra is not able to use CUDA.')
    vg = from_odl(discretized_space_2d_to_3d(ray_trafo.domain,
                                             z_shape=z_shape))
    pg = from_odl(
        parallel_2d_to_3d_geometry(ray_trafo.geometry, det_z_shape=z_shape))
    ts_op = ts.operator(vg, pg)
    torch_ray_trafo_adjoint_ts = to_autograd(ts_op.T)
    scaling_factor = astra_cuda_bp_scaling_factor(ray_trafo.range,
                                                  ray_trafo.domain,
                                                  ray_trafo.geometry)

    def torch_ray_trafo_adjoint(y):
        return scaling_factor * torch_ray_trafo_adjoint_ts(y)

    return torch_ray_trafo_adjoint
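A brief usage sketch (the ODL space and geometry below are made up for illustration; a CUDA tensor is used so that everything stays on the GPU, which is the point of the direct astra routine):

# Illustrative usage; the space and geometry parameters are invented.
import odl
import torch

space = odl.uniform_discr([-1, -1], [1, 1], (64, 64), dtype='float32')
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=96)
ray_trafo = odl.tomo.RayTransform(space, geometry)

adjoint = get_torch_ray_trafo_parallel_2d_adjoint(ray_trafo, z_shape=1)
# Leading dimensions are (batch=1, channel=z_shape), as documented above.
y = torch.zeros((1, 1) + ray_trafo.range.shape, device='cuda')
x = adjoint(y)  # shape: (1, 1) + ray_trafo.domain.shape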