Example #1
def setupPar(par):
    par["Data"] = {}
    par["Data"]["overgridfactor"] = 2
    par["Data"]["DTYPE"] = DTYPE
    par["Data"]["DTYPE_real"] = DTYPE_real

    par["FFT"] = {}
    par["FFT"]["kernelwidth"] = 5
    par["FFT"]["kernellength"] = 5000

    par["Data"]["num_coils"] = 5
    par["Data"]["image_dim"] = 256
    par["Data"]["num_proj"] = 34
    par["Data"]["num_reads"] = 512
    file = h5py.File(
        '.' + os.sep + 'python' + os.sep + 'test' + os.sep + 'smalltest.h5',
        'r')

    par["traj"] = np.array((file['real_traj'][0].astype(DTYPE_real),
                            file['imag_traj'][0].astype(DTYPE_real)))
    par["traj"] = np.transpose(par["traj"], (1, 2, 0))

    par["Data"]["coils"] = (
        np.random.randn(par["Data"]["num_coils"], par["Data"]["image_dim"],
                        par["Data"]["image_dim"]) + 1j *
        np.random.randn(par["Data"]["num_coils"], par["Data"]["image_dim"],
                        par["Data"]["image_dim"]))

    FFT = linop.NUFFT(par=par, trajectory=par["traj"])
    par["FFT"]["gridding_matrix"] = FFT.gridding_mat
    par["FFT"]["dens_cor"] = np.sqrt(
        get_density_from_gridding(par["Data"], par["FFT"]["gridding_matrix"]))


def estimate_coil_sensitivities_SOS(data, trajectory, par):
    """
    Estimate complex coil sensitivities using a sum-of-squares approach.

    Estimate complex coil sensitivities by dividing each coil channel
    by the SoS reconstruction. A Hann window is used to filter out high
    k-space frequencies.

    Args
    ----
        data (numpy.array):
            complex k-space data
        trajectory (numpy.array):
            trajectory information
        par (dict):
            A python dict containing the necessary information to
            set up the object. Needs to contain the number of slices
            (num_slc), number of scans (num_scans),
            image dimensions (dimX, dimY), number of coils (num_coils),
            sampling positions (num_reads) and read outs (num_proj).
    """
    par["Data"]["coils"] = np.zeros(
        (par["Data"]["num_coils"], par["Data"]["image_dim"],
         par["Data"]["image_dim"]),
        dtype=par["Data"]["DTYPE"])

    par["Data"]["phase_map"] = np.zeros(
        (par["Data"]["image_dim"], par["Data"]["image_dim"]),
        dtype=par["Data"]["DTYPE"])

    FFT = linop.NUFFT(par=par, trajectory=trajectory)

    # Low-pass filter along the read-out direction: a centered Hann window,
    # zero-padded to num_reads samples, keeps only low k-space frequencies.
    windowsize = 50
    window = np.hanning(windowsize)
    window = np.pad(window, int((par["Data"]["num_reads"] - windowsize) / 2))

    lowres_data = data * window

    coil_images = FFT.adjoint(lowres_data * par["FFT"]["dens_cor"])
    # Root-mean-square (sum-of-squares) combination across the coil axis.
    combined_image = np.sqrt(1 / coil_images.shape[0] *
                             np.sum(np.abs(coil_images)**2, 0))

    coils = coil_images / combined_image

    par["Data"]["coils"] = coils

    # normalize coil sensitivity profiles to unit sum-of-squares
    sumSqrC = np.sqrt(
        np.sum((par["Data"]["coils"] * np.conj(par["Data"]["coils"])), 0))
    par["Data"]["in_scale"] = sumSqrC
    if par["Data"]["num_coils"] == 1:
        par["Data"]["coils"] = sumSqrC
    else:
        par["Data"]["coils"] = (par["Data"]["coils"] / sumSqrC)
Example #3
def compute_density_compensation(parameter, trajectory):
    """
    Compensate for non uniform sampling density.

    This function computes the sampling density via gridding of ones and
    the correct intensity normalization of the NUFFT operator.

    Args
    ----
        parameter (dict):
            A dictionary storing reconstruction related parameters like
           number of coils and image dimension in 2D.
        trajectory (np.array):
            The associated trajectory data
    """
    # First set up a NUFFT with the given trajectory
    FFT = linop.NUFFT(par=parameter, trajectory=trajectory)
    # Extract the gridding matrix
    parameter["FFT"]["gridding_matrix"] = FFT.gridding_mat
    # Grid a k-space of all ones to get an estimated density
    # and use it as density compensation
    parameter["FFT"]["dens_cor"] = np.sqrt(
        get_density_from_gridding(parameter["Data"],
                                  parameter["FFT"]["gridding_matrix"]))
Example #4
    def setUp(self):
        parser = tmpArgs()
        parser.streamed = False
        parser.devices = [0]
        parser.use_GPU = True

        par = {}
        setupPar(par)

        self.op = linop.NUFFT(par, par["traj"])

        self.opinfwd = (
            np.random.randn(par["Data"]["num_coils"], par["Data"]["image_dim"],
                            par["Data"]["image_dim"]) + 1j *
            np.random.randn(par["Data"]["num_coils"], par["Data"]["image_dim"],
                            par["Data"]["image_dim"]))
        self.opinadj = (
            np.random.randn(par["Data"]["num_coils"], par["Data"]["num_proj"],
                            par["Data"]["num_reads"]) + 1j *
            np.random.randn(par["Data"]["num_coils"], par["Data"]["num_proj"],
                            par["Data"]["num_reads"]))

        self.opinfwd = self.opinfwd.astype(DTYPE)
        self.opinadj = self.opinadj.astype(DTYPE)
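
The random image-space (opinfwd) and k-space (opinadj) inputs built in setUp
suggest an adjointness check. A hypothetical companion test (op.adjoint
appears in the examples above; op.forward is assumed):

    def test_adjointness(self):
        # A correct pair must satisfy <A x, y> == <x, A^H y>; np.vdot
        # conjugates its first argument and flattens both inputs.
        forward_out = self.op.forward(self.opinfwd)
        adjoint_out = self.op.adjoint(self.opinadj)
        lhs = np.vdot(forward_out, self.opinadj)
        rhs = np.vdot(self.opinfwd, adjoint_out)
        np.testing.assert_allclose(lhs, rhs, rtol=1e-3)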
Example #5
def setup_parameter_dict(configfile, rawdata, trajectory):
    """
    Parameter dict generation.

    Args
    ----
        configfile (str):
            path to configuration file
        rawdata (np.complex64):
            The raw k-space data
        trajectory (np.array):
            The associated trajectory data

    Returns
    -------
        parameter (dict):
            A dictionary storing reconstruction related parameters like
            number of coils and image dimension in 2D.
    """
    # Create empty dict
    parameter = {}
    config = configparser.ConfigParser()
    ext = os.path.splitext(configfile)[-1]
    if ext != "txt":
        configfile = configfile + '.txt'

    config.read(configfile)
    for section_key in config.sections():
        parameter[section_key] = {}
        for value_key in config[section_key].keys():
            if "do_" in value_key:
                try:
                    parameter[section_key][value_key] = config.getboolean(
                        section_key, value_key)
                except:
                    parameter[section_key][value_key] = config.get(
                        section_key, value_key)
            else:
                try:
                    parameter[section_key][value_key] = config.getint(
                        section_key, value_key)
                except:
                    try:
                        parameter[section_key][value_key] = config.getfloat(
                            section_key, value_key)
                    except:
                        parameter[section_key][value_key] = config.get(
                            section_key, value_key)

    if parameter["Data"]["precision"].lower() == "single":
        parameter["Data"]["DTYPE"] = np.complex64
        parameter["Data"]["DTYPE_real"] = np.float32
    elif parameter["Data"]["precision"].lower() == "double":
        parameter["Data"]["DTYPE"] = np.complex128
        parameter["Data"]["DTYPE_real"] = np.float64
    else:
        raise ValueError("precision needs to be set to single or double.")

    n_ch, n_spokes, num_reads = rawdata.shape

    parameter["Data"]["num_coils"] = n_ch
    parameter["Data"]["image_dim"] = int(num_reads /
                                         parameter["Data"]["overgridfactor"])
    parameter["Data"]["num_reads"] = num_reads
    parameter["Data"]["num_proj"] = n_spokes

    # Calculate density compensation for non-Cartesian data.
    if parameter["Data"]["do_density_correction"]:
        FFT = linop.NUFFT(par=parameter, trajectory=trajectory)
        parameter["FFT"]["gridding_matrix"] = FFT.gridding_mat
        parameter["FFT"]["dens_cor"] = np.sqrt(
            get_density_from_gridding(parameter["Data"],
                                      parameter["FFT"]["gridding_matrix"]))
    else:
        parameter["FFT"]["dens_cor"] = np.ones(
            trajectory.shape[:-1], dtype=parameter["Data"]["DTYPE_real"])
    return parameter
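
setup_parameter_dict expects an INI-style text file. A hypothetical minimal
config covering the keys read above, written and parsed in place (density
correction disabled so the sketch needs no trajectory setup):

example_config = """
[Data]
precision = single
overgridfactor = 2
do_density_correction = False

[FFT]
kernelwidth = 5
kernellength = 5000
"""
with open('example_config.txt', 'w') as f:
    f.write(example_config)

rawdata = np.zeros((5, 34, 512), dtype=np.complex64)
trajectory = np.zeros((34, 512, 2), dtype=np.float32)
parameter = setup_parameter_dict('example_config', rawdata, trajectory)
# parameter["Data"]["image_dim"] == 256 (num_reads / overgridfactor)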


def estimate_coil_sensitivities_NLINV(data, trajectory, par):
    """
    Estimate complex coil sensitivities using NLINV from Martin Uecker et al.

    Non-uniform data is first regridded and subsequently transformed back to
    k-space using a standard FFT. The result is stored in the parameter (par)
    dict. Internally the nlinvns function is called.

    This is just a workaround for now to allow for fast coil estimation.
    The final script will most likely use precomputed profiles from an
    ESPIRiT reconstruction.

    Args
    ----
        data (numpy.array):
            complex k-space data
        trajectory (numpy.array):
            trajectory information
        par (dict):
            A python dict containing the necessary information to
            set up the object. Needs to contain the number of slices
            (num_slc), number of scans (num_scans),
            image dimensions (dimX, dimY), number of coils (num_coils),
            sampling positions (num_reads) and read outs (num_proj).
    """
    nlinv_newton_steps = 6
    nlinv_real_constr = False

    par["Data"]["coils"] = np.zeros(
        (par["Data"]["num_coils"], par["Data"]["image_dim"],
         par["Data"]["image_dim"]),
        dtype=par["Data"]["DTYPE"])

    par["Data"]["phase_map"] = np.zeros(
        (par["Data"]["image_dim"], par["Data"]["image_dim"]),
        dtype=par["Data"]["DTYPE"])

    FFT = linop.NUFFT(par=par, trajectory=trajectory)

    combined_data = FFT.adjoint(data * par["FFT"]["dens_cor"])
    combined_data = np.fft.fft2(combined_data, norm='ortho')

    sys.stdout.write("Computing coil sensitivity map\n")
    sys.stdout.flush()

    result = nlinvns.nlinvns(np.squeeze(combined_data), nlinv_newton_steps,
                             True, nlinv_real_constr)

    par["Data"]["coils"] = result[2:, -1]

    if not nlinv_real_constr:
        par["Data"]["phase_map"] = np.exp(1j * np.angle(result[0, -1]))

    # normalize coil sensitivity profiles to unit sum-of-squares
    sumSqrC = np.sqrt(
        np.sum((par["Data"]["coils"] * np.conj(par["Data"]["coils"])), 0))
    par["Data"]["in_scale"] = sumSqrC
    if par["Data"]["num_coils"] == 1:
        par["Data"]["coils"] = sumSqrC
    else:
        par["Data"]["coils"] = (par["Data"]["coils"] / sumSqrC)