Example #1
    def process(self, acq, data, *args):
        if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
            self.noise_data.append((acq, data))
        else:
            if len(self.noise_data):
                profiles = len(self.noise_data)
                channels = self.noise_data[0][1].shape[0]
                samples_per_profile = self.noise_data[0][1].shape[1]
                noise = np.zeros((channels, profiles * samples_per_profile),
                                 dtype=np.complex64)
                counter = 0
                for p in self.noise_data:
                    noise[:, counter *
                          samples_per_profile:(counter * samples_per_profile +
                                               samples_per_profile)] = p[1]
                    counter = counter + 1

                scale = (acq.sample_time_us /
                         self.noise_data[0][0].sample_time_us) * 0.79
                self.noise_dmtx = coils.calculate_prewhitening(
                    noise, scale_factor=scale)

                # Sanity check: apply the new prewhitening matrix to the first
                # noise profile (the result is not used further)
                d = self.noise_data[0][1]
                d2 = coils.apply_prewhitening(d, self.noise_dmtx)
                self.noise_data = list()

            if self.noise_dmtx is not None:
                data2 = coils.apply_prewhitening(data, self.noise_dmtx)
            else:
                data2 = data

            self.put_next(acq, data2)
        return 0
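For reference, here is a minimal NumPy sketch of what the prewhitening step above amounts to: estimate the channel noise covariance from the noise-only samples, Cholesky-factor it, and multiply the data by the inverse factor. This only illustrates the idea behind coils.calculate_prewhitening / coils.apply_prewhitening; the library's exact scaling conventions may differ, and the *_sketch names are made up.

import numpy as np

def calculate_prewhitening_sketch(noise, scale_factor=1.0):
    # noise: (channels, samples) complex noise-only data
    cov = (noise @ noise.conj().T) / (noise.shape[1] - 1)   # channel covariance
    L = np.linalg.cholesky(cov)                             # cov = L @ L^H
    return np.sqrt(scale_factor) * np.linalg.inv(L)         # whitening matrix

def apply_prewhitening_sketch(data, dmtx):
    # data: (channels, ...) -> same shape with decorrelated channels
    s = data.shape
    return (dmtx @ data.reshape(s[0], -1)).reshape(s)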
Example #3
def reconstruct_gre(filename, datasetname, noise):

    # Handle the imaging data
    dset = ismrmrd.Dataset(filename, datasetname)
    header = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())
    enc = header.encoding[0]
    # Matrix size
    eNx = enc.encodedSpace.matrixSize.x
    eNy = enc.encodedSpace.matrixSize.y
    rNx = enc.reconSpace.matrixSize.x
    rNy = enc.reconSpace.matrixSize.y
    # Number of Slices
    if enc.encodingLimits.slice is not None:
        nslices = enc.encodingLimits.slice.maximum + 1
    else:
        nslices = 1

    # Loop through the acquisitions ignoring the noise scans
    firstscan = 0
    while True:
        acq = dset.read_acquisition(firstscan)
        if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
            firstscan += 1
        else:
            break

    acq = dset.read_acquisition(firstscan)
    ncoils = acq.active_channels
    gre_bw = header.acquisitionSystemInformation.relativeReceiverNoiseBandwidth

    # Initialize a storage array for the data
    data = np.zeros((nslices, ncoils, eNy, eNx), dtype=np.complex64)
    # Loop over the acquisitions: prewhiten and stuff into the buffer
    nacq = dset.number_of_acquisitions()
    for scan in range(firstscan, nacq):
        acq = dset.read_acquisition(scan)
        slice = acq.idx.slice
        ky = acq.idx.kspace_encode_step_1
        data[slice, :,
             ky, :] = coils.apply_prewhitening(acq.data, noise.preWMtx)

    # Reconstruct calibration images
    # 2D FFT
    im = transform.transform_kspace_to_image(data, [2, 3])  # [slice,coil,x,y]

    # Remove oversampling if needed
    if (eNx != rNx):
        x0 = (eNx - rNx) // 2
        x1 = eNx - (eNx - rNx) // 2
        im = im[:, :, :, x0:x1]

    # close the data set
    dset.close()

    return im
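A hypothetical usage sketch (file and dataset names are invented): reconstruct the GRE data and collapse the coil dimension with a root-sum-of-squares combination for a quick magnitude check. It assumes noise is an object exposing the prewhitening matrix as noise.preWMtx, as the function above expects.

im = reconstruct_gre('meas_gre.h5', 'dataset', noise)  # coil images per slice
rss = np.sqrt(np.sum(np.abs(im) ** 2, axis=1))         # root-sum-of-squares over coils
show.imshow(rss[0])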
(data,pat) = simulation.sample_data(phan,csm,acc_factor,ref_lines)

#%%
#Add noise
noise = np.random.standard_normal(data.shape) + 1j*np.random.standard_normal(data.shape)
noise = (5.0/matrix_size)*noise
kspace = np.logical_or(pat==1,pat==3).astype('float32')*(data + noise)
data = (pat>0).astype('float32')*(data + noise)

#%%
#Calculate the noise prewhitening matrix
dmtx = coils.calculate_prewhitening(noise)

#%%
# Apply prewhitening
kspace = coils.apply_prewhitening(kspace, dmtx) 
data = coils.apply_prewhitening(data, dmtx) 


#%%
#Reconstruct aliased images
alias_img = transform.transform_kspace_to_image(kspace,dim=(1,2)) * np.sqrt(acc_factor)
show.imshow(abs(alias_img))


#%%
from importlib import reload  # reload is not a builtin in Python 3
reload(sense)
(unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor,csm)
show.imshow(abs(gmap_sense),colorbar=True)
recon_sense = np.squeeze(np.sum(alias_img * unmix_sense,0))
show.imshow(abs(recon_sense),colorbar=True)
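For intuition, a per-pixel sketch of the SENSE unmixing used above (illustrative only, not the sense module's implementation): with acceleration R, each aliased pixel is a coil-weighted sum of R true pixels, so the unmixing weights come from the pseudo-inverse of the small sensitivity matrix, and the g-factor follows from the same matrix.

import numpy as np

def sense_unmix_pixel(S):
    # S: (ncoils, R) coil sensitivities at the R locations that alias together
    unmix = np.linalg.pinv(S)          # (R, ncoils) unmixing weights
    SHS = S.conj().T @ S
    g = np.sqrt(np.abs(np.diag(np.linalg.inv(SHS)) * np.diag(SHS)))  # g-factor per location
    return unmix, g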
Example #6
def reconstruct_calibration(filename, datasetname, noise=None):
    
    # Handle the imaging data
    dset = ismrmrd.Dataset(filename, datasetname)
    header = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())
    enc = header.encoding[0]
    # Matrix size
    eNx = enc.encodedSpace.matrixSize.x
    eNy = enc.encodedSpace.matrixSize.y
    rNx = enc.reconSpace.matrixSize.x
    rNy = enc.reconSpace.matrixSize.y
    # Number of Slices
    if enc.encodingLimits.slice is not None:
        nslices = enc.encodingLimits.slice.maximum + 1
    else:
        nslices = 1

    # Loop through the acquisitions ignoring the noise scans
    firstscan = 0
    while True:
        acq = dset.read_acquisition(firstscan)
        if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
            firstscan += 1
        else:
            break

    acq = dset.read_acquisition(firstscan)
    ncoils = acq.active_channels
    gre_bw = header.acquisitionSystemInformation.relativeReceiverNoiseBandwidth
    # The calibration data may have fewer readout points than the full k-space line
    refNx = acq.number_of_samples
    x0 = (eNx - refNx)// 2
    x1 = eNx - (eNx - refNx)//2
    
    # Reconstruct the parallel imaging calibration scans, which are GRE based
    # Initialize a storage array for the reference data
    ref_data = np.zeros((nslices, ncoils, eNy, eNx), dtype=np.complex64)
    # Loop over the calibration acquisitions: prewhiten and stuff into the buffer
    scan = firstscan
    while True:
        acq = dset.read_acquisition(scan)
        if acq.isFlagSet(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION):
            slice = acq.idx.slice
            ky = acq.idx.kspace_encode_step_1
            if noise:
                ref_data[slice, :, ky, x0:x1] = coils.apply_prewhitening(acq.data,noise.preWMtx)
            else:
                ref_data[slice, :, ky, x0:x1] = acq.data
            scan += 1
        else:
            break

    # Reconstruct calibration images
    # 2D FFT
    im_ref = transform.transform_kspace_to_image(ref_data, [2,3]) # [slice,coil,x,y]

    # Remove oversampling if needed
    if (eNx != rNx):
        x0 = (eNx - rNx)//2
        x1 = eNx - (eNx - rNx)//2
        im_ref = im_ref[:,:,:,x0:x1]

    # close the data set
    dset.close()

    return im_ref
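A hypothetical follow-up sketch (file and dataset names invented): turn the calibration images into per-slice coil sensitivity maps with the same coils.calculate_csm_inati_iter call that the EPI example below relies on, unpacking its return values the same way.

im_ref = reconstruct_calibration('meas_epi.h5', 'dataset', noise)
csm = np.zeros(im_ref.shape, dtype=np.complex128)
for z in range(im_ref.shape[0]):
    csm[z], _, _ = coils.calculate_csm_inati_iter(im_ref[z])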
Example #7
def reconstruct_epi(filename, datasetname, noise, gre):
    
    # Read the epi data
    dset = ismrmrd.Dataset(filename,datasetname)

    ##############################
    # Scan Parameters and Layout #
    ##############################
    header = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())
    enc = header.encoding[0]
    nkx = enc.encodedSpace.matrixSize.x
    nky = enc.encodedSpace.matrixSize.y
    ncoils = header.acquisitionSystemInformation.receiverChannels
    epi_noise_bw = header.acquisitionSystemInformation.relativeReceiverNoiseBandwidth
    acc_factor = enc.parallelImaging.accelerationFactor.kspace_encoding_step_1
    
    # Number of Slices
    if enc.encodingLimits.slice is not None:
        nslices = enc.encodingLimits.slice.maximum + 1
    else:
        nslices = 1

    # Loop through the acquisitions ignoring the noise scans and the
    # parallel imaging calibration scans which are EPI based
    firstscan = 0
    while True:
        acq = dset.read_acquisition(firstscan)
        if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) or acq.isFlagSet(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION):
            firstscan += 1
        else:
            break

    #print('First imaging scan at:', firstscan)
    nsamp = acq.number_of_samples
    ncoils = acq.active_channels
    sampletime = acq.sample_time_us

    # The lines are labeled with flags as follows:
    # - Noise or Imaging using ACQ_IS_NOISE_MEASUREMENT
    # - Parallel calibration using ACQ_IS_PARALLEL_CALIBRATION
    # - Forward or Reverse using the ACQ_IS_REVERSE flag
    # - EPI navigator using ACQ_IS_PHASECORR_DATA
    # - First or last in a slice using ACQ_FIRST_IN_SLICE and ACQ_LAST_IN_SLICE
    # - The first navigator in a shot is labeled as first in slice
    # - The first imaging line in a shot is labeled as first in slice
    # - The last imaging line in a shot is labeled as last in slice
    # for n in range(firstscan-1,firstscan+60):
    #   acq = dset.read_acquisition(n)
    #   print(acq.idx.kspace_encode_step_1)
    #   if acq.isFlagSet(ismrmrd.ACQ_FIRST_IN_SLICE):
    #       print('First')
    #   elif acq.isFlagSet(ismrmrd.ACQ_LAST_IN_SLICE):
    #       print('Last')
    #   else:
    #       print('Middle')
    #   if acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE):
    #       print('Reverse')
    #   else:
    #       print('Forward')
    #   if acq.isFlagSet(ismrmrd.ACQ_IS_PHASECORR_DATA):
    #       print('Navigator')

    # The EPI trajectory is described in the XML header
    # for o in enc.trajectoryDescription.userParameterLong:
    #    print(o.name, o.value_)
    #    
    # for o in enc.trajectoryDescription.userParameterDouble:
    #     print(o.name, o.value_)
    tup = tdown = tflat = tdelay = nsamp = nnav = etl = 0
    for o in enc.trajectoryDescription.userParameterLong:
        if o.name == 'rampUpTime':
            tup = o.value_
        if o.name == 'rampDownTime':
            tdown = o.value_
        if o.name == 'flatTopTime':
            tflat = o.value_
        if o.name == 'acqDelayTime':
            tdelay = o.value_
        if o.name == 'numSamples':
            nsamp = o.value_
        if o.name == 'numberOfNavigators':
            nnav = o.value_
        if o.name == 'etl':
            etl = o.value_

    #print(tup, tdown, tflat, tdelay, nsamp, nnav, etl)

    ####################################
    # Calculate the gridding operators #
    ####################################
    nkx = enc.encodedSpace.matrixSize.x
    nx = enc.reconSpace.matrixSize.x
    t = tdelay + sampletime*np.arange(nsamp)
    x = np.arange(nx)/nx-0.5
    up = t<=tup
    flat = (t>tup)*(t<(tup+tflat))
    down = t>=(tup+tflat)

    #Integral of trajectory (Gmax=1.0)
    k = np.zeros(nsamp)
    k[up] = 0.5/tup*t[up]**2
    k[flat] = 0.5*tup + (t[flat] - tup)
    k[down] = 0.5*tup + tflat + 0.5*tdown-0.5/tdown*(tup+tflat+tdown-t[down])**2
    #Scale to match resolution
    k *= nkx/(k[-1]-k[0])
    #Center
    k -= k[nsamp//2]
    kpos = k
    kneg = -1.0*k
    #Corresponding even range
    keven = np.arange(nkx)
    keven -= keven[nkx//2]
    #Forward model
    Qpos = np.zeros([nsamp,nkx])
    Qneg = np.zeros([nsamp,nkx])
    for p in range(nsamp):
        Qpos[p,:] = np.sinc(kpos[p]-keven)
        Qneg[p,:] = np.sinc(kneg[p]-keven)
    #Inverse
    Rpos = np.linalg.pinv(Qpos)
    Rneg = np.linalg.pinv(Qneg)
    #Take transpose because we apply from the right
    Rpos = Rpos.transpose()
    Rneg = Rneg.transpose()

    #################################
    # Calculate the kspace filter   #
    # Hanning filter after gridding #
    #################################
    import scipy.signal
    # Hann window via scipy.signal.windows.hann (the old scipy.signal.hann
    # alias has been removed in recent SciPy releases)
    kfiltx = scipy.signal.windows.hann(nkx)
    kfilty = scipy.signal.windows.hann(nky)
    Rpos = np.dot(Rpos, np.diag(kfiltx))
    Rneg = np.dot(Rneg, np.diag(kfiltx))

    ####################################
    # Calculate SENSE unmixing weights #
    ####################################
    # Some basic checks
    if gre.shape[0] != nslices:
        raise ValueError('Calibration and EPI data have different number of slices')
    if gre.shape[1] != ncoils:
        raise ValueError('Calibration and EPI data have different number of coils')

    # Estimate coil sensitivities from the GRE data
    csm_orig = np.zeros(gre.shape,dtype=np.complex128)
    for z in range(nslices):
        (csmtmp, actmp, rhotmp) = coils.calculate_csm_inati_iter(gre[z,:,:,:])
        weight = rhotmp**2 / (rhotmp**2 + .01*np.median(rhotmp.ravel())**2)
        csm_orig[z,:,:,:] = csmtmp*weight
 
    # Deal with difference in resolution
    # Up/down sample the coil sensitivities to the resolution of the EPI
    xcsm = np.arange(gre.shape[3])/gre.shape[3]
    ycsm = np.arange(gre.shape[2])/gre.shape[2]
    xepi = np.arange(nx)/nx
    yepi = np.arange(nky)/nky
    csm = np.zeros([nslices,ncoils,nky,nx],dtype=np.complex128)
    for z in range(nslices):
        for c in range(ncoils):
            # interpolate the real part and imaginary part separately
            i_real = interp.RectBivariateSpline(ycsm,xcsm,np.real(csm_orig[z,c,:,:]))
            i_imag = interp.RectBivariateSpline(ycsm,xcsm,np.imag(csm_orig[z,c,:,:]))
            csm[z,c,:,:] = i_real(yepi,xepi) + 1j*i_imag(yepi,xepi)

    # SENSE weights
    unmix = np.zeros(csm.shape,dtype=np.complex128)
    for z in range(nslices):
        unmix[z,:,:,:] = sense.calculate_sense_unmixing(acc_factor, csm[z,:,:,:])[0]
    
    ###############
    # Reconstruct #
    ###############
    # Initialize the array for a volume's worth of data
    H = np.zeros([nslices, ncoils, nky, nx],dtype=np.complex128)
    # Loop over the slices
    scan = firstscan
    for z in range(nslices):
        #print('Slice %d starts at scan %d.'%(z,scan))
        # Navigator 1
        acq = dset.read_acquisition(scan)
        #print(scan,acq.idx.slice,acq.idx.kspace_encode_step_1,acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE))
        currslice = acq.idx.slice # keep track of the slice number
        data = coils.apply_prewhitening(acq.data,noise.preWMtx)
        if acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE):
            rnav1 = transform.transform_kspace_to_image(np.dot(data, Rneg),dim=[1])
            sgn = -1.0
        else:
            rnav1 = transform.transform_kspace_to_image(np.dot(data, Rpos),dim=[1])
            sgn = 1.0
        scan += 1

        # Navigator 2
        acq = dset.read_acquisition(scan)
        #print(scan,acq.idx.slice,acq.idx.kspace_encode_step_1,acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE))
        data = coils.apply_prewhitening(acq.data,noise.preWMtx)
        if acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE):
            rnav2 = transform.transform_kspace_to_image(np.dot(data, Rneg),dim=[1])
        else:
            rnav2 = transform.transform_kspace_to_image(np.dot(data, Rpos),dim=[1])
        scan += 1

        # Navigator 3
        acq = dset.read_acquisition(scan)
        #print(scan,acq.idx.slice,acq.idx.kspace_encode_step_1,acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE))
        data = coils.apply_prewhitening(acq.data,noise.preWMtx)
        if acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE):
            rnav3 = transform.transform_kspace_to_image(np.dot(data, Rneg),dim=[1])
        else:
            rnav3 = transform.transform_kspace_to_image(np.dot(data, Rpos),dim=[1])
        scan += 1

        # Phase correction
        delta = np.conj(rnav1+rnav3) * rnav2
        fdelta = np.tile(np.mean(delta,axis=0),[ncoils,1])
        corr = np.exp(sgn*1j*np.angle(np.sqrt(fdelta)))

        for j in range(nky):
            acq = dset.read_acquisition(scan)
            slice = acq.idx.slice              
            if slice != currslice:
                # end of this slice
                break

            ky = acq.idx.kspace_encode_step_1
            data = coils.apply_prewhitening(acq.data,noise.preWMtx)
            if acq.isFlagSet(ismrmrd.ACQ_IS_REVERSE):
                rho = transform.transform_kspace_to_image(np.dot(data, Rneg),dim=[1])
                H[slice,:,ky,:] = kfilty[ky]*np.conj(corr)*rho
            else:
                rho = transform.transform_kspace_to_image(np.dot(data, Rpos),dim=[1])
                H[slice,:,ky,:] = kfilty[ky]*corr*rho        
            scan += 1

    # Close the data set
    dset.close()
    
    # Reconstruct along y (the phase-encode direction)
    H = transform.transform_kspace_to_image(H,dim=[2])
    
    # Combine with SENSE weights
    epi_im = np.abs(np.squeeze(np.sum(H*unmix,axis=1)))
    
    return epi_im
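A hypothetical end-to-end sketch chaining the functions above (file names, the noise_samples array, and the Noise container are all illustrative): build a prewhitening matrix from noise-only samples, reconstruct the GRE-based calibration volume, then reconstruct the EPI data with it.

class Noise:
    # minimal container matching the noise.preWMtx attribute used above
    def __init__(self, preWMtx):
        self.preWMtx = preWMtx

noise = Noise(coils.calculate_prewhitening(noise_samples))  # noise_samples: (coils, samples)
gre = reconstruct_calibration('meas_epi.h5', 'dataset', noise)
epi_im = reconstruct_epi('meas_epi.h5', 'dataset', noise, gre)
show.imshow(epi_im[0])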
Example #10
    def compute(self):

        do_squeeze = self.getVal('Squeeze')
        do_remos = self.getVal('Remove Oversampling')
        do_zeropad = self.getVal('Zeropad')
        do_noiseadj = self.getVal('Noise Adjust')
        receiver_noise_bw = self.getVal('Receiver Noise BW Ratio')

        #Get the file name from the file browser widget
        fname = gpi.TranslateFileURI(self.getVal('File Browser'))

        #Check if the file exists
        if not os.path.exists(fname):
            self.log.node("Path does not exist: "+str(fname))
            return 0
        
        dset = ismrmrd.Dataset(fname, 'dataset', create_if_needed=False)

        xml_header = dset.read_xml_header()
        header = ismrmrd.xsd.CreateFromDocument(xml_header)
        self.setData('ISMRMRDHeader', str(xml_header))

        enc = header.encoding[0]

        # Matrix size
        eNx = enc.encodedSpace.matrixSize.x
        eNy = enc.encodedSpace.matrixSize.y
        eNz = enc.encodedSpace.matrixSize.z
        rNx = enc.reconSpace.matrixSize.x
        rNy = enc.reconSpace.matrixSize.y
        rNz = enc.reconSpace.matrixSize.z

        # Field of View
        eFOVx = enc.encodedSpace.fieldOfView_mm.x
        eFOVy = enc.encodedSpace.fieldOfView_mm.y
        eFOVz = enc.encodedSpace.fieldOfView_mm.z
        rFOVx = enc.reconSpace.fieldOfView_mm.x
        rFOVy = enc.reconSpace.fieldOfView_mm.y
        rFOVz = enc.reconSpace.fieldOfView_mm.z

        # Number of Slices, Reps, Contrasts, etc.
        ncoils = header.acquisitionSystemInformation.receiverChannels
        if enc.encodingLimits.slice is not None:
            nslices = enc.encodingLimits.slice.maximum + 1
        else:
            nslices = 1

        if enc.encodingLimits.repetition is not None:
            nreps = enc.encodingLimits.repetition.maximum + 1
        else:
            nreps = 1

        if enc.encodingLimits.contrast is not None:
            ncontrasts = enc.encodingLimits.contrast.maximum + 1
        else:
            ncontrasts = 1


        # In case there are noise scans in the actual dataset, we will skip them.
        noise_data = list()
        noise_dmtx = None
        
        firstacq=0
        for acqnum in range(dset.number_of_acquisitions()):
            acq = dset.read_acquisition(acqnum)
            
            if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
                noise_data.append((acq.getHead(),acq.data))
                continue
            else:
                firstacq = acqnum
                break    

        if len(noise_data):
            profiles = len(noise_data)
            channels = noise_data[0][1].shape[0]
            samples_per_profile = noise_data[0][1].shape[1]
            noise = np.zeros((channels,profiles*samples_per_profile),dtype=np.complex64)
            counter = 0
            for p in noise_data:
                noise[:,counter*samples_per_profile:(counter*samples_per_profile+samples_per_profile)] = p[1]
                counter = counter + 1
                
            self.setData('noise',noise)
            
            scale = (acq.sample_time_us/noise_data[0][0].sample_time_us)*receiver_noise_bw
            noise_dmtx = coils.calculate_prewhitening(noise,scale_factor=scale)
            noise_data = list()
            
        # Empty array for the output data
        acq = dset.read_acquisition(firstacq)
        ro_length = acq.number_of_samples
        padded_ro_length = (acq.number_of_samples-acq.center_sample)*2

        
        size_nx = 0
        if do_remos:
            size_nx = rNx
            do_zeropad = True
        elif do_zeropad:
            size_nx = padded_ro_length
        else:
            size_nx = ro_length
            
        all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, size_nx), dtype=np.complex64)

        # Loop through the rest of the acquisitions and stuff
        for acqnum in range(firstacq,dset.number_of_acquisitions()):
            acq = dset.read_acquisition(acqnum)

            acq_data_prw = np.zeros(acq.data.shape,dtype=np.complex64)
            acq_data_prw[:] = acq.data[:]
            
            if do_noiseadj and (noise_dmtx is not None):
                acq_data_prw = coils.apply_prewhitening(acq_data_prw, noise_dmtx)
 
            data2 = None
            
            if (padded_ro_length != ro_length) and do_zeropad: #partial fourier
                data2 = np.zeros((acq_data_prw.shape[0], padded_ro_length),dtype=np.complex64)
                offset = (padded_ro_length>>1)  - acq.center_sample
                data2[:,0+offset:offset+ro_length] = acq_data_prw
            else:
                data2 = acq_data_prw

            if do_remos:
                data2=transform.transform_kspace_to_image(data2,dim=(1,))
                data2=data2[:,(padded_ro_length>>2):(padded_ro_length>>2)+(padded_ro_length>>1)]
                data2=transform.transform_image_to_kspace(data2,dim=(1,)) * np.sqrt(float(padded_ro_length)/ro_length)
                
            # Stuff into the buffer
            rep = acq.idx.repetition
            contrast = acq.idx.contrast
            slice = acq.idx.slice
            y = acq.idx.kspace_encode_step_1
            z = acq.idx.kspace_encode_step_2
            
            all_data[rep, contrast, slice, :, z, y, :] = data2
                
        all_data = all_data.astype('complex64')

        if do_squeeze:
            all_data = np.squeeze(all_data)

        
        self.setData('data',all_data)
        
        return 0
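The oversampling-removal branch above can also be read as a small standalone helper; a sketch using the same transform helpers, with the same behavior as the inline code:

def remove_readout_oversampling(kspace_line, padded_ro_length, ro_length):
    # FFT to image space along the readout, keep the central half of the FOV,
    # FFT back, and rescale so the noise level is preserved
    x = transform.transform_kspace_to_image(kspace_line, dim=(1,))
    x = x[:, (padded_ro_length >> 2):(padded_ro_length >> 2) + (padded_ro_length >> 1)]
    return transform.transform_image_to_kspace(x, dim=(1,)) * np.sqrt(
        float(padded_ro_length) / ro_length)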
#Calculate prewhitener taking receiver bandwidths into consideration
a = dset.read_acquisition(firstacq)
data_dwell_time = a.sample_time_us
noise_receiver_bw_ratio = 0.79
dmtx = coils.calculate_prewhitening(noise,scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio)

    
#%%
# Process the actual data
all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64)

# Loop through the rest of the acquisitions and stuff
for acqnum in range(firstacq,dset.number_of_acquisitions()):
    acq = dset.read_acquisition(acqnum)

    acq_data_prw = coils.apply_prewhitening(acq.data,dmtx)

    # Remove oversampling if needed
    if eNx != rNx:
        xline = transform.transform_kspace_to_image(acq_data_prw, [1])
        # integer division: slice indices and center_sample must be ints
        x0 = (eNx - rNx) // 2
        x1 = (eNx - rNx) // 2 + rNx
        xline = xline[:,x0:x1]
        acq.resize(rNx,acq.active_channels,acq.trajectory_dimensions)
        acq.center_sample = rNx // 2
        # need to use the [:] notation here to fill the data
        acq.data[:] = transform.transform_image_to_kspace(xline, [1])
  
    # Stuff into the buffer
    rep = acq.idx.repetition
    contrast = acq.idx.contrast