Code Example #1
def op_test(static_image, output_path):
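    """Warp static_image forward and adjoint through a NiftyResample set up
    with a diagonal (1.25) affine transformation, writing the warped and
    difference images to output_path."""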
    static_image_path = "{0}/temp_static.nii".format(output_path)

    static_image.write(static_image_path)

    temp_static = reg.NiftiImageData(static_image_path)

    temp_at = reg.AffineTransformation()
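    # (the default constructor gives the identity transformation)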

    temp_at_array = temp_at.as_array()
    temp_at_array[0][0] = 1.25
    temp_at_array[1][1] = 1.25
    temp_at_array[2][2] = 1.25
    temp_at_array[3][3] = 1.25

    temp_at = reg.AffineTransformation(temp_at_array)

    resampler = reg.NiftyResample()

    resampler.set_reference_image(temp_static)
    resampler.set_floating_image(temp_static)
    resampler.add_transformation(temp_at)

    resampler.set_interpolation_type_to_linear()
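
    # warp_image_forward / warp_image_adjoint are helper functions assumed to
    # be defined elsewhere in this test script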

    warp = warp_image_forward(resampler, temp_static)

    warped_image = static_image.clone()
    warped_image.fill(warp)

    warped_image.write("{0}/op_test_warp_forward.nii".format(output_path))

    difference = temp_static.as_array().astype(np.double) - warp

    difference_image = temp_static.clone()
    difference_image.fill(difference)

    difference_image.write(
        "{0}/op_test_warp_forward_difference.nii".format(output_path))

    warp = warp_image_adjoint(resampler, temp_static)

    warped_image = temp_static.clone()
    warped_image.fill(warp)

    warped_image.write("{0}/op_test_warp_adjoint.nii".format(output_path))

    difference = temp_static.as_array().astype(np.double) - warp

    difference_image = temp_static.clone()
    difference_image.fill(difference)

    difference_image.write(
        "{0}/warp_adjoint_difference.nii".format(output_path))

    return True
Code Example #2
def transform_image(fixed_im_name, moving_im_name, reg_resample):
    """
    Randomly transform 2D image by translation or rotation.
    fixed_im_name   =
    """

    trans_file = 'temp_trans_file.txt'

    angle = random.uniform(-1, 1)
    tr_x = random.uniform(-1, 1)
    tr_y = random.uniform(-1, 1)

    theta = angle * (math.pi / 2)

    # 2D rigid transformation in homogeneous coordinates: rotation of up to
    # +/-90 degrees and translation of up to +/-25 (in the transform's
    # spatial units)
    transform = Reg.AffineTransformation(
        np.array([[math.cos(theta), -math.sin(theta), 0, tr_x * 25],
                  [math.sin(theta), math.cos(theta), 0, tr_y * 25],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]))

    transform.write(trans_file)

    args = [
        reg_resample, "-ref", fixed_im_name + ".nii", "-flo",
        fixed_im_name + ".nii", "-res", moving_im_name + ".nii", "-trans",
        trans_file
    ]
    popen = subprocess.Popen(args, stdout=subprocess.PIPE)
    popen.wait()

    os.remove(trans_file)

    return [tr_x, tr_y, angle]
Code Example #3
def resample_attn_image(image):
    """Resample the attenuation image."""
    if trans_type == 'tm':
        transformation = reg.AffineTransformation(trans)
    elif trans_type == 'disp':
        transformation = reg.NiftiImageData3DDisplacement(trans)
    elif trans_type == 'def':
        transformation = reg.NiftiImageData3DDeformation(trans)
    else:
        raise ValueError("Unknown transformation type.")

    resampler = reg.NiftyResample()
    resampler.set_reference_image(image)
    resampler.set_floating_image(image)
    resampler.set_interpolation_type_to_linear()
    resampler.set_padding_value(0.0)
    resampler.add_transformation(transformation)
    return resampler.forward(image)
Code Example #4
def transform_image(fixed_im_name, moving_im_name):
    """
    Randomly transform 2D image by translation or rotation.
    fixed_im_name   =
    """

    trans_file = 'temp_trans_file.txt'

    angle = random.uniform(0, 1)
    tr_x = random.uniform(0, 1)
    tr_y = random.uniform(0, 1)

    theta = (angle - 0.5) * (math.pi / 2)

    #print('angle: {}\ntr_x: {}\ntr_y: {}\ntheta: {}'.format(angle, tr_x, tr_y,
    #                                                        theta))

    transform = Reg.AffineTransformation(
        np.array([[math.cos(theta), -math.sin(theta), 0, (tr_x - 0.5) * 50],
                  [math.sin(theta), math.cos(theta), 0, (tr_y - 0.5) * 50],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]))

    transform.write(trans_file)

    #    args = ["reg_resample", "-ref", "training_data/fixed/fixed_000.nii",
    #            "-flo", "training_data/fixed/fixed_000.nii", "-res", "test",
    #            "-trans", "temp_trans_file.txt"]
    #
    #    popen = subprocess.Popen(args, stdout=subprocess.PIPE)
    #    popen.wait()
    #

    args = [
        "reg_resample", "-ref", fixed_im_name + '.nii', "-flo",
        fixed_im_name + '.nii', "-res", moving_im_name + '.nii', "-trans",
        trans_file
    ]
    popen = subprocess.Popen(args, stdout=subprocess.PIPE)
    popen.wait()

    os.remove(trans_file)

    return [-tr_x, -tr_y, -angle]
Code Example #5
def get_and_save_tm(i):
    """Get and save affine transformation matrix"""
    if i == 0:
        [r, t_x, t_y] = [0., 0., 0.]
    elif i == 1:
        [r, t_x, t_y] = [10., -10., 0.]
    elif i == 2:
        [r, t_x, t_y] = [20., -5., 5.]
    elif i == 3:
        [r, t_x, t_y] = [-10., 10., 5.]
    else:
        [r, t_x, t_y] = [randint(-20, 20), randint(-20, 20), randint(-20, 20)]

    r = radians(r)
    tm = reg.AffineTransformation(
        np.array([[cos(r), sin(r), 0, t_x],
                  [-sin(r), cos(r), 0, t_y],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]))
    tm.write('fwd_tm_ms_' + str(i))
    return tm
Code Example #6
resamplers_attn.set_padding_value(0)
resamplers_attn.set_interpolation_type_to_linear()

i = 0
for num in num_tm:
    print('Begin resampling mu-Maps: {}'.format(path_EPI + 'tm_epi_inv_' +
                                                str(num) + '.txt'))

    # read tm-matrix as numpy array
    matrix = numpy.loadtxt(path_EPI + 'tm_epi_inv_' + str(num) + '.txt')

    # transform tm from EPI space into the reconstruction (NAC/PET) space by
    # conjugation: T_nac = tm_fwd * T_epi * tm_inv
    matrix1 = numpy.matmul(tm_fwd, matrix)
    matrix2 = numpy.matmul(matrix1, tm_inv)

    # create affine transformation from numpy array
    tm = Reg.AffineTransformation(matrix2)

    resamplers_attn.clear_transformations()
    resamplers_attn.add_transformation(tm)
    new_attn = resamplers_attn.forward(attn_image)
    new_attn.write(path_mu + 'stir_mu_map_in_recon_space_' + str(i))
    Reg.ImageData(new_attn).write(path_mu + 'mu_' + str(i) + '.nii')

    print('Finish resampling mu-Maps: {}'.format(i))

    i += 1

tprint('Finish Resampling')
Code Example #7
    #%% resample mu-Map into NAC space and move it to each position

    ref_file = path_NAC + 'NAC_0.nii'
    ref = Eng_ref.ImageData(ref_file)
    flo = Eng_flo.ImageData(attn_image)
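    # Eng_ref / Eng_flo are the image-engine modules chosen elsewhere in the
    # script (assumed to be e.g. sirf.Reg or sirf.STIR)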

    tprint('Start Resampling')

    for i in range(30):
        print('Begin resampling mu-Maps: {}, with {}'.format(
            i, path_tm + 'tm_nac_inv_' + str(i) + '.txt'))

        matrix = numpy.loadtxt(path_tm + 'tm_nac_inv_' + str(i) + '.txt')

        tm_inv = Reg.AffineTransformation(matrix)

        resampler = Reg.NiftyResample()
        resampler.set_reference_image(ref)
        resampler.set_floating_image(flo)
        resampler.add_transformation(tm_inv)
        resampler.set_padding_value(0)
        resampler.set_interpolation_type_to_linear()
        mu_map_in_ith_NAC_space = resampler.forward(flo)

        mu_map_in_ith_NAC_space.write(path_mu + 'stir_mu_map_in_recon_space_' +
                                      str(i))
        Reg.ImageData(mu_map_in_ith_NAC_space).write(path_mu + 'mu_' + str(i) +
                                                     '.nii')

        print('Finish resampling mu-Maps: {}'.format(i))
Code Example #8
def try_complex_resample(raw_mr_filename):
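    """Reconstruct a complex MR image, resample it with a small translation,
    and check that resampling the complex image matches resampling its real
    and imaginary parts separately."""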
    time.sleep(0.5)
    sys.stderr.write(
        '\n# --------------------------------------------------------------------------------- #\n'
    )
    sys.stderr.write(
        '#                             Starting complex resampling test...\n')
    sys.stderr.write(
        '# --------------------------------------------------------------------------------- #\n'
    )
    time.sleep(0.5)

    raw_mr = mr.AcquisitionData(raw_mr_filename)

    recon_gadgets = [
        'NoiseAdjustGadget', 'AsymmetricEchoAdjustROGadget',
        'RemoveROOversamplingGadget',
        'AcquisitionAccumulateTriggerGadget(trigger_dimension=repetition)',
        'BucketToBufferGadget(split_slices=true, verbose=false)',
        'SimpleReconGadget', 'ImageArraySplitGadget'
    ]

    recon = mr.Reconstructor(recon_gadgets)
    recon.set_input(raw_mr)
    recon.process()

    ismrmrd_im = recon.get_output()

    if ismrmrd_im.is_real():
        raise AssertionError("Expected output of reconstruction to be complex")

    # Complex component may be empty, so use imag = real / 2
    image_data_arr = ismrmrd_im.as_array()
    image_data_arr.imag = image_data_arr.real / 2
    ismrmrd_im.fill(image_data_arr)

    # Convert the complex image to two niftis
    [real, imag] = reg.ImageData.construct_from_complex_image(ismrmrd_im)
    real.write("results/real")
    imag.write("results/imag")

    # Create affine transformation
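    # (identity with a translation of 2 along x)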
    tm = reg.AffineTransformation()
    tm_ = tm.as_array()
    tm_[0][3] = 2.
    tm = reg.AffineTransformation(tm_)

    # Resample the complex data
    res_complex = reg.NiftyResampler()
    res_complex.set_reference_image(ismrmrd_im)
    res_complex.set_floating_image(ismrmrd_im)
    res_complex.set_interpolation_type_to_linear()
    res_complex.add_transformation(tm)
    forward_cplx_sptr = res_complex.forward(ismrmrd_im)
    adjoint_cplx_sptr = res_complex.adjoint(ismrmrd_im)

    # Get the output
    [forward_cplx_real, forward_cplx_imag] = \
        reg.ImageData.construct_from_complex_image(forward_cplx_sptr)
    [adjoint_cplx_real, adjoint_cplx_imag] = \
        reg.ImageData.construct_from_complex_image(adjoint_cplx_sptr)

    forward_cplx_real.write("results/forward_cplx_real")
    forward_cplx_imag.write("results/forward_cplx_imag")
    adjoint_cplx_real.write("results/adjoint_cplx_real")
    adjoint_cplx_imag.write("results/adjoint_cplx_imag")

    # Now resample each of the components individually
    res_real = reg.NiftyResampler()
    res_real.set_reference_image(real)
    res_real.set_floating_image(real)
    res_real.set_interpolation_type_to_linear()
    res_real.add_transformation(tm)
    forward_real = res_real.forward(real)
    adjoint_real = res_real.adjoint(real)

    res_imag = reg.NiftyResampler()
    res_imag.set_reference_image(imag)
    res_imag.set_floating_image(imag)
    res_imag.set_interpolation_type_to_linear()
    res_imag.add_transformation(tm)
    forward_imag = res_imag.forward(imag)
    adjoint_imag = res_imag.adjoint(imag)

    forward_real.write("results/forward_real")
    forward_imag.write("results/forward_imag")
    adjoint_real.write("results/adjoint_real")
    adjoint_imag.write("results/adjoint_imag")

    # Compare that the real and imaginary parts match regardless
    # of whether they were resampled separately or together.
    if forward_real != forward_cplx_real or forward_imag != forward_cplx_imag:
        raise AssertionError("NiftyResampler::forward failed for complex data")
    if adjoint_real != adjoint_cplx_real or adjoint_imag != adjoint_cplx_imag:
        raise AssertionError("NiftyResampler::adjoint failed for complex data")

    time.sleep(0.5)
    sys.stderr.write(
        '\n# --------------------------------------------------------------------------------- #\n'
    )
    sys.stderr.write(
        '#                             Finished complex resampling test.\n')
    sys.stderr.write(
        '# --------------------------------------------------------------------------------- #\n'
    )
    time.sleep(0.5)
Code Example #9
resamplers_attn = Reg.NiftyResample()
resamplers_attn.set_reference_image(epi)
#resamplers_attn.set_floating_image(attn_image)
resamplers_attn.set_padding_value(0)
resamplers_attn.set_interpolation_type_to_linear()

i = 0
for num in num_tm:
    print('Begin resampling mu-Maps: {}'.format(path_EPI + 'tm_epi_inv_' +
                                                str(num) + '.txt'))

    # read tm-matrix as numpy array
    matrix = numpy.loadtxt(path_EPI + 'tm_epi_inv_' + str(num) + '.txt')

    # create affine transformations from numpy arrays
    # (tm_shift is a shift matrix assumed to be defined earlier in the script)
    tm = Reg.AffineTransformation(matrix)
    tm2 = Reg.AffineTransformation(tm_shift)

    resamplers_attn.clear_transformations()
    resamplers_attn.set_floating_image(attn_image)
    resamplers_attn.add_transformation(tm2)
    new_attn = resamplers_attn.forward(attn_image)

    resamplers_attn.clear_transformations()
    resamplers_attn.set_floating_image(new_attn)
    resamplers_attn.add_transformation(tm)
    new_attn2 = resamplers_attn.forward(new_attn)

    new_attn2.write(path_mu + 'stir_mu_map_in_recon_space_' + str(i))
    Reg.ImageData(new_attn2).write(path_mu + 'mu_' + str(i) + '.nii')
Code Example #10
# temp = register.get_output()
# temp.write(working_folder + 'UTE_PET_space.nii')
#
# resampler = Reg.NiftyResample()
# resampler.set_reference_image(flo)
# resampler.set_floating_image(ref)
# resampler.add_transformation(tm_inv)
# resampler.set_padding_value(0)
# resampler.set_interpolation_type_to_linear()
# res_pet = resampler.forward(ref)
# res_pet.write(working_folder + '/new_pet')

# use a manually created tm (only trans in z direction)
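# ('tm_trans.txt' is assumed to contain a plain-text 4x4 affine matrix)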
matrix = numpy.loadtxt('tm_trans.txt')
inverse = numpy.linalg.inv(matrix)
tm_fwd2 = Reg.AffineTransformation(matrix)
tm_inv2 = Reg.AffineTransformation(inverse)
numpy.savetxt('tm_trans_inv.txt', inverse)

# resample every NAC image to UTE image (UTE space)
# and apply the tm to correct the shift between the images
nac_dir = '/home/rich/Documents/ReCo/working_NAC2/recon/NAC/nii'
for i, image in enumerate(sorted_alphanumeric(os.listdir(nac_dir))):
    print(image)
    resampler = Reg.NiftyResample()
    #ref = Eng_ref.ImageData(working_folder + '/new_pet.nii')
    ref = Eng_ref.ImageData(working_folder + '/UTE.nii')
    resampler.set_reference_image(ref)
    flo = Eng_ref.ImageData(os.path.join(nac_dir, image))
    resampler.set_floating_image(flo)
    resampler.add_transformation(tm_fwd2)
    resampler.set_padding_value(0)
Code Example #11
def main():
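    """Parse the input data, set up per-motion-state resamplers and
    acquisition models, and reconstruct with PDHG."""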

    ###########################################################################
    # Parse input files
    ###########################################################################

    if trans_pattern is None:
        raise AssertionError("--trans missing")
    if sino_pattern is None:
        raise AssertionError("--sino missing")
    trans_files = sorted(glob(trans_pattern))
    sino_files = sorted(glob(sino_pattern))
    attn_files = sorted(glob(attn_pattern))
    rand_files = sorted(glob(rand_pattern))

    num_ms = len(sino_files)
    # Check some sinograms found
    if num_ms == 0:
        raise AssertionError("No sinograms found!")
    # Should have as many trans as sinos
    if num_ms != len(trans_files):
        raise AssertionError("#trans should match #sinos. "
                             "#sinos = " + str(num_ms) + ", #trans = " +
                             str(len(trans_files)))
    # If any rand, check num == num_ms
    if len(rand_files) > 0 and len(rand_files) != num_ms:
        raise AssertionError("#rand should match #sinos. "
                             "#sinos = " + str(num_ms) + ", #rand = " +
                             str(len(rand_files)))

    # For attn, there should be 0, 1 or num_ms images
    if len(attn_files) > 1 and len(attn_files) != num_ms:
        raise AssertionError("#attn should be 0, 1 or #sinos")

    ###########################################################################
    # Read input
    ###########################################################################

    if trans_type == "tm":
        trans = [reg.AffineTransformation(file) for file in trans_files]
    elif trans_type == "disp":
        trans = [
            reg.NiftiImageData3DDisplacement(file) for file in trans_files
        ]
    elif trans_type == "def":
        trans = [reg.NiftiImageData3DDeformation(file) for file in trans_files]
    else:
        raise error("Unknown transformation type")

    sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
    attns = [pet.ImageData(file) for file in attn_files]
    rands = [pet.AcquisitionData(file) for file in rand_files]

    # Loop over all sinograms
    sinos = [0] * num_ms
    for ind in range(num_ms):
        # If any sinograms contain negative values
        # (shouldn't be the case), set them to 0
        sino_arr = sinos_raw[ind].as_array()
        if (sino_arr < 0).any():
            print("Input sinogram " + str(ind) +
                  " contains -ve elements. Setting to 0...")
            sinos[ind] = sinos_raw[ind].clone()
            sino_arr[sino_arr < 0] = 0
            sinos[ind].fill(sino_arr)
        else:
            sinos[ind] = sinos_raw[ind]
        # If rebinning is desired
        segs_to_combine = 1
        if args['--numSegsToCombine']:
            segs_to_combine = int(args['--numSegsToCombine'])
        views_to_combine = 1
        if args['--numViewsToCombine']:
            views_to_combine = int(args['--numViewsToCombine'])
        if segs_to_combine * views_to_combine > 1:
            sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
            # only print first time
            if ind == 0:
                print(f"Rebinned sino dimensions: {sinos[ind].dimensions()}")

    ###########################################################################
    # Initialise recon image
    ###########################################################################

    if initial_estimate:
        image = pet.ImageData(initial_estimate)
    else:
        # Create image based on ProjData
        image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
        # If using GPU, need to make sure that image is right size.
        if use_gpu:
            dim = (127, 320, 320)
            spacing = (2.03125, 2.08626, 2.08626)
        # elif non-default spacing desired
        elif args['--dxdy']:
            dim = image.dimensions()
            dxdy = float(args['--dxdy'])
            spacing = (image.voxel_sizes()[0], dxdy, dxdy)
        if use_gpu or args['--dxdy']:
            image.initialise(dim=dim, vsize=spacing)
            image.fill(0.0)

    ###########################################################################
    # Set up resamplers
    ###########################################################################

    resamplers = [get_resampler(image, trans=tran) for tran in trans]

    ###########################################################################
    # Resample attenuation images (if necessary)
    ###########################################################################

    resampled_attns = None
    if len(attns) > 0:
        resampled_attns = [0] * num_ms
        # if using GPU, dimensions of attn and recon images have to match
        ref = image if use_gpu else None
        for i in range(num_ms):
            # if we only have 1 attn image, then we need to resample into
            # space of each gate. However, if we have num_ms attn images, then
            # assume they are already in the correct position, so use None as
            # transformation.
            tran = trans[i] if len(attns) == 1 else None
            # If only 1 attn image, then resample that. If we have num_ms attn
            # images, then use each attn image of each frame.
            attn = attns[0] if len(attns) == 1 else attns[i]
            resam = get_resampler(attn, ref=ref, trans=tran)
            resampled_attns[i] = resam.forward(attn)

    ###########################################################################
    # Set up acquisition models
    ###########################################################################

    print("Setting up acquisition models...")
    if not use_gpu:
        # one acquisition model per motion state (a list comprehension avoids
        # aliasing a single model num_ms times)
        acq_models = [pet.AcquisitionModelUsingRayTracingMatrix()
                      for _ in range(num_ms)]
    else:
        acq_models = [pet.AcquisitionModelUsingNiftyPET()
                      for _ in range(num_ms)]
        for acq_model in acq_models:
            acq_model.set_use_truncation(True)
            acq_model.set_cuda_verbosity(verbosity)

    # If present, create ASM from ECAT8 normalisation data
    asm_norm = None
    if norm_file:
        asm_norm = pet.AcquisitionSensitivityModel(norm_file)

    # Loop over each motion state
    for ind in range(num_ms):
        # Create attn ASM if necessary
        asm_attn = None
        if resampled_attns:
            asm_attn = get_asm_attn(sinos[ind], resampled_attns[ind],
                                    acq_models[ind])

        # Get ASM dependent on attn and/or norm
        asm = None
        if asm_norm and asm_attn:
            if ind == 0:
                print("ASM contains norm and attenuation...")
            asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
        elif asm_norm:
            if ind == 0:
                print("ASM contains norm...")
            asm = asm_norm
        elif asm_attn:
            if ind == 0:
                print("ASM contains attenuation...")
            asm = asm_attn
        if asm:
            acq_models[ind].set_acquisition_sensitivity(asm)

        if len(rands) > 0:
            acq_models[ind].set_background_term(rands[ind])

        # Set up
        acq_models[ind].set_up(sinos[ind], image)

    ###########################################################################
    # Set up reconstructor
    ###########################################################################

    print("Setting up reconstructor...")

    # Create composition operators containing acquisition models and resamplers
    C = [
        CompositionOperator(am, res, preallocate=True)
        for am, res in zip(acq_models, resamplers)
    ]

    # Configure the PDHG algorithm
    if args['--normK'] and not args['--onlyNormK']:
        normK = float(args['--normK'])
    else:
        kl = [KullbackLeibler(b=sino, eta=(sino * 0 + 1e-5)) for sino in sinos]
        f = BlockFunction(*kl)
        K = BlockOperator(*C)
        # Calculate normK
        print("Calculating norm of the block operator...")
        normK = K.norm(iterations=10)
        print("Norm of the BlockOperator ", normK)
        if args['--onlyNormK']:
            exit(0)

    # Optionally rescale sinograms and BlockOperator using normK
    scale_factor = 1. / normK if args['--normaliseDataAndBlock'] else 1.0
    kl = [
        KullbackLeibler(b=sino * scale_factor, eta=(sino * 0 + 1e-5))
        for sino in sinos
    ]
    f = BlockFunction(*kl)
    K = BlockOperator(*C) * scale_factor

    # If preconditioned
    if precond:

        def get_nonzero_recip(data):
            """Get the reciprocal of a datacontainer. Voxels where input == 0
            will have their reciprocal set to 1 (instead of infinity)"""
            inv_np = data.as_array()
            inv_np[inv_np == 0] = 1
            inv_np = 1. / inv_np
            data.fill(inv_np)

        tau = K.adjoint(K.range_geometry().allocate(1))
        get_nonzero_recip(tau)

        tmp_sigma = K.direct(K.domain_geometry().allocate(1))
        sigma = 0. * tmp_sigma
        get_nonzero_recip(sigma[0])

        def precond_proximal(self, x, tau, out=None):
            """Modify proximal method to work with preconditioned tau"""
            pars = {
                'algorithm': FGP_TV,
                'input': np.asarray(x.as_array() / tau.as_array(),
                                    dtype=np.float32),
                'regularization_parameter': self.lambdaReg,
                'number_of_iterations': self.iterationsTV,
                'tolerance_constant': self.tolerance,
                'methodTV': self.methodTV,
                'nonneg': self.nonnegativity,
                'printingOut': self.printing
            }

            res, info = regularisers.FGP_TV(pars['input'],
                                            pars['regularization_parameter'],
                                            pars['number_of_iterations'],
                                            pars['tolerance_constant'],
                                            pars['methodTV'], pars['nonneg'],
                                            self.device)
            if out is not None:
                out.fill(res)
            else:
                out = x.copy()
                out.fill(res)
            out *= tau
            return out

        FGP_TV.proximal = precond_proximal
        print("Will run proximal with preconditioned tau...")

    # If not preconditioned
    else:
        sigma = float(args['--sigma'])
        # If we need to calculate default tau
        if args['--tau']:
            tau = float(args['--tau'])
        else:
            tau = 1 / (sigma * normK**2)

    if regularisation == 'none':
        G = IndicatorBox(lower=0)
    elif regularisation == 'FGP_TV':
        r_iterations = float(args['--reg_iters'])
        r_tolerance = 1e-7
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        device = 'gpu' if use_gpu else 'cpu'
        G = FGP_TV(r_alpha, r_iterations, r_tolerance, r_iso, r_nonneg,
                   r_printing, device)
    else:
        raise error("Unknown regularisation")

    if precond:

        def PDHG_new_update(self):
            """Modify the PDHG update to allow preconditioning"""
            # save previous iteration
            self.x_old.fill(self.x)
            self.y_old.fill(self.y)

            # Gradient ascent for the dual variable
            self.operator.direct(self.xbar, out=self.y_tmp)
            self.y_tmp *= self.sigma
            self.y_tmp += self.y_old

            self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)

            # Gradient descent for the primal variable
            self.operator.adjoint(self.y, out=self.x_tmp)
            self.x_tmp *= -1 * self.tau
            self.x_tmp += self.x_old

            self.g.proximal(self.x_tmp, self.tau, out=self.x)

            # Update
            self.x.subtract(self.x_old, out=self.xbar)
            self.xbar *= self.theta
            self.xbar += self.x

        PDHG.update = PDHG_new_update

    # Get filename
    outp_file = outp_prefix
    if descriptive_fname:
        if len(attn_files) > 0:
            outp_file += "_wAC"
        if norm_file:
            outp_file += "_wNorm"
        if use_gpu:
            outp_file += "_wGPU"
        outp_file += "_Reg-" + regularisation
        if regularisation == 'FGP_TV':
            outp_file += "-alpha" + str(r_alpha)
            outp_file += "-riters" + str(r_iterations)
        if args['--normK']:
            outp_file += '_userNormK' + str(normK)
        else:
            outp_file += '_calcNormK' + str(normK)
        if args['--normaliseDataAndBlock']:
            outp_file += '_wDataScale'
        else:
            outp_file += '_noDataScale'
        if not precond:
            outp_file += "_sigma" + str(sigma)
            outp_file += "_tau" + str(tau)
        else:
            outp_file += "_wPrecond"
        outp_file += "_nGates" + str(len(sino_files))
        if resamplers is None:
            outp_file += "_noMotion"

    pdhg = PDHG(f=f,
                g=G,
                operator=K,
                sigma=sigma,
                tau=tau,
                max_iteration=num_iters,
                update_objective_interval=update_obj_fn_interval,
                x_init=image,
                log_file=outp_file + ".log")

    def callback_save(iteration, objective_value, solution):
        """Callback function to save images"""
        if (iteration + 1) % save_interval == 0:
            out = solution if not nifti else reg.NiftiImageData(solution)
            out.write(outp_file + "_iters" + str(iteration + 1))

    pdhg.run(iterations=num_iters,
             callback=callback_save,
             verbose=True,
             very_verbose=True)

    if visualisations:
        # show reconstructed image
        out = pdhg.get_output()
        out_arr = out.as_array()
        z = out_arr.shape[0] // 2
        show_2D_array('Reconstructed image', out_arr[z, :, :])
        pylab.show()
Code Example #12
                        [0, 1, 0, 0],
                        [yS, 0, yC, 0],
                        [0, 0, 0, 1]])

Rotate_Z = numpy.array([[zC, -zS, 0, 0],
                        [zS, zC, 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]])

TM = numpy.dot(Rotate_Z, numpy.dot(Rotate_Y, numpy.dot(Rotate_X, Translate)))
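# TM applies the translation first, then the rotations about x, y and z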


#%% start simulation motion
tprint('Start TM')

# define tm matrix
tm = Reg.AffineTransformation(TM)
tm_inv = tm.get_inverse()

# apply TM to ref image and resample an image with this informations
print('Begin resampling')
resampler = Reg.NiftyResample()
resampler.set_reference_image(ref)
resampler.set_floating_image(flo)
resampler.add_transformation(tm)
resampler.set_padding_value(0)
resampler.set_interpolation_type_to_linear()
output = resampler.forward(flo)
output.write(working_folder + '/floates/test_motion.nii')

print('Resampling successful')
Code Example #13
# tm_attn2nac = reg_attn2nac.get_transformation_matrix_forward()
# =============================================================================

#%% resample mu-Map into each NAC space

ref_file = path_NAC + 'NAC_0.nii'
ref = Eng_ref.ImageData(ref_file)
flo = Eng_flo.ImageData(attn_image)

tprint('Start Resampling')

for i in range(30):
    print('Begin resampling mu-Maps: {}, with {}'.format(
        i, path_tm + 'tm_nac_inv_' + str(i) + '.txt'))
    tm = numpy.loadtxt(path_tm + 'tm_nac_inv_' + str(i) + '.txt')
    tm_fwd = Reg.AffineTransformation(tm)
    #tm_attn2ithNAC = tm_attn2nac * tm_fwd

    resampler = Reg.NiftyResample()
    resampler.set_reference_image(ref)
    resampler.set_floating_image(flo)
    resampler.add_transformation(tm_fwd)
    resampler.set_padding_value(0)
    resampler.set_interpolation_type_to_linear()
    mu_map_in_ith_NAC_space = resampler.forward(flo)

    mu_map_in_ith_NAC_space.write(path_mu + 'stir_mu_map_in_recon_space_' +
                                  str(i))
    Reg.ImageData(mu_map_in_ith_NAC_space).write(path_mu + 'mu_' + str(i) +
                                                 '.nii')