Example No. 1
    def compute(self):
        
        all_data = self.getData('data')
        xml_header = self.getData('ISMRMRDHeader')

        header = ismrmrd.xsd.CreateFromDocument(xml_header)
        enc = header.encoding[0]

        #Parallel imaging factor
        acc_factor = 1
        if enc.parallelImaging:
            acc_factor = enc.parallelImaging.accelerationFactor.kspace_encoding_step_1
        
        # Coil combination
        print "Calculating coil images and CSM"
        coil_images = transform.transform_kspace_to_image(np.squeeze(np.mean(all_data,0)),(1,2))
        (csm,rho) = coils.calculate_csm_walsh(coil_images)
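        # Sum-of-squares of the estimated coil sensitivities; entries below machine epsilon
        # are bumped to ~1.0 so that any later normalization by csm_ss cannot divide by zero.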
        csm_ss = np.sum(csm * np.conj(csm),0)
        csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32')
        
        if acc_factor > 1:
            coil_data = np.squeeze(np.mean(all_data,0))
            
            if self.getVal('Parallel Imaging Method') == 0:
                (unmix,gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor,csm=csm)
            elif self.getVal('Parallel Imaging Method') == 1:
                (unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm)
            else:
                raise Exception('Unknown parallel imaging method')

        recon = np.zeros((all_data.shape[-4],all_data.shape[-2],all_data.shape[-1]), dtype=np.complex64)
        
        for r in range(0,all_data.shape[-4]):
            recon_data = transform.transform_kspace_to_image(np.squeeze(all_data[r,:,:,:]),(1,2))*np.sqrt(acc_factor)
            if acc_factor > 1:
                recon[r,:,:] = np.sum(unmix * recon_data,0)
            else:
                recon[r,:,:] = np.sum(np.conj(csm) * recon_data,0)

        print "Reconstruction done"
        
        self.setData('recon', recon)
        
        if acc_factor == 1:
            gmap = np.ones((all_data.shape[-2],all_data.shape[-1]),dtype=np.float32)
            
        self.setData('gmap',gmap)

        return 0
import numpy as np
import scipy as sp
import scipy.io
from ismrmrdtools import sense, grappa, show, simulation, transform, coils
from importlib import reload

#%%
#import some data
exercise_data = sp.io.loadmat('hansen_exercises2.mat')
csm = np.transpose(exercise_data['smaps'])
pat = np.transpose(exercise_data['sp'])
data = np.transpose(exercise_data['data'])
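# 'pat' encodes the sampling pattern; judging from its use here and in the GRAPPA call below
# (data_mask=pat>1), values 1/3 appear to mark acquired imaging lines and 2/3 calibration lines.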
kspace = np.logical_or(pat==1,pat==3).astype('float32')*(data)

acc_factor = 4
alias_img = transform.transform_kspace_to_image(kspace,dim=(1,2)) * np.sqrt(acc_factor)
show.imshow(abs(alias_img))

(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(4,5))
#(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1)
show.imshow(abs(gmap_grappa),colorbar=True)
recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa,0))
show.imshow(abs(recon_grappa),colorbar=True)

sp.io.savemat('tmp_data.mat',{'pat_py': pat,'data_py': data,'csm_py': csm,'alias_img_py':alias_img,'unmix_grappa_py':unmix_grappa})

#%%
#Reload some modules
reload(show)
reload(sense)
reload(grappa)
reload(simulation)
reload(transform)
reload(coils)
Example No. 3
    def process(self, acq, data,*args):

        if self.buffer is None:
            # Matrix size
            eNx = self.enc.encodedSpace.matrixSize.x
            eNy = self.enc.encodedSpace.matrixSize.y
            eNz = self.enc.encodedSpace.matrixSize.z
            rNx = self.enc.reconSpace.matrixSize.x
            rNy = self.enc.reconSpace.matrixSize.y
            rNz = self.enc.reconSpace.matrixSize.z

            # Field of View
            eFOVx = self.enc.encodedSpace.fieldOfView_mm.x
            eFOVy = self.enc.encodedSpace.fieldOfView_mm.y
            eFOVz = self.enc.encodedSpace.fieldOfView_mm.z
            rFOVx = self.enc.reconSpace.fieldOfView_mm.x
            rFOVy = self.enc.reconSpace.fieldOfView_mm.y
            rFOVz = self.enc.reconSpace.fieldOfView_mm.z
        
            channels = acq.active_channels

            if data.shape[1] != rNx:
                raise("Error, Recon gadget expects data to be on correct matrix size in RO direction")
                
            if rNz != 1:
                raise Exception("Recon gadget only supports 2D for now")
                
            self.buffer = np.zeros((channels, rNy, rNx),dtype=np.complex64)
            self.samp_mask = np.zeros(self.buffer.shape[1:])
            self.header_proto = ismrmrd.ImageHeader()
            self.header_proto.matrix_size[0] = rNx
            self.header_proto.matrix_size[1] = rNy
            self.header_proto.matrix_size[2] = rNz
            self.header_proto.field_of_view[0] = rFOVx
            self.header_proto.field_of_view[1] = rFOVy
            self.header_proto.field_of_view[2] = rFOVz
        
        #Now put data in buffer
        # Map the acquired phase-encode line index into the buffer, which is centered at Ny/2
        line_offset = self.buffer.shape[1] // 2 - self.enc.encodingLimits.kspace_encoding_step_1.center
        self.buffer[:, acq.idx.kspace_encode_step_1 + line_offset, :] = data
        self.samp_mask[acq.idx.kspace_encode_step_1 + line_offset, :] = 1
        
        #If last scan in buffer, do FFT and fill image header
        if acq.isFlagSet(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1) or acq.isFlagSet(ismrmrd.ACQ_LAST_IN_SLICE):
            img_head = copy.deepcopy(self.header_proto)
            img_head.position = acq.position                                                                                                                               
            img_head.read_dir = acq.read_dir                                                                                                                               
            img_head.phase_dir = acq.phase_dir                                                                                                                             
            img_head.slice_dir = acq.slice_dir                                                                                                                             
            img_head.patient_table_position = acq.patient_table_position                                                                                                   
            img_head.acquisition_time_stamp = acq.acquisition_time_stamp                                                                                                   
            img_head.slice = acq.idx.slice
            img_head.channels = 1
            
            # Scale factor compensating for the fraction of k-space actually sampled
            scale = self.samp_mask.size / (1.0 * np.sum(self.samp_mask[:]))

            #We have not yet calculated unmixing coefficients
            if self.unmix is None:
                self.calib_buffer.append((img_head,self.buffer.copy()))
                self.buffer[:] = 0
                self.samp_mask[:] = 0
                
                if len(self.calib_buffer) >= self.calib_frames:
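                    # Sum the buffered frames; assuming the undersampling is interleaved across
                    # frames (TSENSE/TGRAPPA style), this yields calibration k-space that is
                    # (close to) fully sampled for estimating the CSM and unmixing coefficients.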
                    cal_data = np.zeros(self.calib_buffer[0][1].shape, dtype=np.complex64)
                    for c in self.calib_buffer:
                        cal_data = cal_data + c[1]
                        
                    mask = np.squeeze(np.sum(np.abs(cal_data),0))
                    mask = np.ones(mask.shape)*(np.abs(mask)>0.0)
                    target = None #cal_data[0:8,:,:]
                    
                    coil_images = transform.transform_kspace_to_image(cal_data,dim=(1,2))
                    (csm,rho) = coils.calculate_csm_walsh(coil_images)
                    
                    if self.method == 'grappa':
                        self.unmix, self.gmap = grappa.calculate_grappa_unmixing(cal_data, 
                                                                                 self.acc_factor, 
                                                                                 data_mask=mask, 
                                                                                 kernel_size=(4,5), 
                                                                                 csm=csm)
                    elif self.method == 'sense':
                        self.unmix, self.gmap = sense.calculate_sense_unmixing(self.acc_factor, csm)
                    else:
                        raise Exception('Unknown parallel imaging method: ' + str(self.method))
                        
                    for c in self.calib_buffer:
                        recon = transform.transform_kspace_to_image(c[1],dim=(1,2))*np.sqrt(scale)
                        recon = np.squeeze(np.sum(recon * self.unmix,0))
                        self.put_next(c[0], recon,*args)
                        
                return 0
                
            if self.unmix is None:
                raise Exception("We should never reach this point without unmixing coefficients")
                
            recon = transform.transform_kspace_to_image(self.buffer,dim=(1,2))*np.sqrt(scale)
            recon = np.squeeze(np.sum(recon * self.unmix,0))
            self.buffer[:] = 0
            self.samp_mask[:] = 0
            self.put_next(img_head,recon,*args)
        return 0
Example No. 4
    def process(self, acq, data, *args):

        if self.buffer is None:
            # Matrix size
            eNx = self.enc.encodedSpace.matrixSize.x
            eNy = self.enc.encodedSpace.matrixSize.y
            eNz = self.enc.encodedSpace.matrixSize.z
            rNx = self.enc.reconSpace.matrixSize.x
            rNy = self.enc.reconSpace.matrixSize.y
            rNz = self.enc.reconSpace.matrixSize.z

            # Field of View
            eFOVx = self.enc.encodedSpace.fieldOfView_mm.x
            eFOVy = self.enc.encodedSpace.fieldOfView_mm.y
            eFOVz = self.enc.encodedSpace.fieldOfView_mm.z
            rFOVx = self.enc.reconSpace.fieldOfView_mm.x
            rFOVy = self.enc.reconSpace.fieldOfView_mm.y
            rFOVz = self.enc.reconSpace.fieldOfView_mm.z

            channels = acq.active_channels

            if data.shape[1] != rNx:
                raise Exception(
                    "Recon gadget expects data to be on the correct matrix size in the RO direction"
                )

            if rNz != 1:
                raise Exception("Recon gadget only supports 2D for now")

            self.buffer = np.zeros((channels, rNy, rNx), dtype=np.complex64)
            self.samp_mask = np.zeros(self.buffer.shape[1:])
            self.header_proto = ismrmrd.ImageHeader()
            self.header_proto.matrix_size[0] = rNx
            self.header_proto.matrix_size[1] = rNy
            self.header_proto.matrix_size[2] = rNz
            self.header_proto.field_of_view[0] = rFOVx
            self.header_proto.field_of_view[1] = rFOVy
            self.header_proto.field_of_view[2] = rFOVz

        #Now put data in buffer
        line_offset = (self.buffer.shape[1] // 2 -
                       self.enc.encodingLimits.kspace_encoding_step_1.center)
        self.buffer[:, acq.idx.kspace_encode_step_1 + line_offset, :] = data
        self.samp_mask[acq.idx.kspace_encode_step_1 + line_offset, :] = 1

        #If last scan in buffer, do FFT and fill image header
        if acq.isFlagSet(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1) or acq.isFlagSet(
                ismrmrd.ACQ_LAST_IN_SLICE):
            img_head = copy.deepcopy(self.header_proto)
            img_head.position = acq.position
            img_head.read_dir = acq.read_dir
            img_head.phase_dir = acq.phase_dir
            img_head.slice_dir = acq.slice_dir
            img_head.patient_table_position = acq.patient_table_position
            img_head.acquisition_time_stamp = acq.acquisition_time_stamp
            img_head.slice = acq.idx.slice
            img_head.channels = 1

            scale = self.samp_mask.size / (1.0 * np.sum(self.samp_mask[:]))

            #We have not yet calculated unmixing coefficients
            if self.unmix is None:
                self.calib_buffer.append((img_head, self.buffer.copy()))
                self.buffer[:] = 0
                self.samp_mask[:] = 0

                if len(self.calib_buffer) >= self.calib_frames:
                    cal_data = np.zeros(self.calib_buffer[0][1].shape,
                                        dtype=np.complex64)
                    for c in self.calib_buffer:
                        cal_data = cal_data + c[1]

                    mask = np.squeeze(np.sum(np.abs(cal_data), 0))
                    mask = np.ones(mask.shape) * (np.abs(mask) > 0.0)
                    target = None  #cal_data[0:8,:,:]

                    coil_images = transform.transform_kspace_to_image(cal_data,
                                                                      dim=(1,
                                                                           2))
                    (csm, rho) = coils.calculate_csm_walsh(coil_images)

                    if self.method == 'grappa':
                        self.unmix, self.gmap = grappa.calculate_grappa_unmixing(
                            cal_data,
                            self.acc_factor,
                            data_mask=mask,
                            kernel_size=(4, 5),
                            csm=csm)
                    elif self.method == 'sense':
                        self.unmix, self.gmap = sense.calculate_sense_unmixing(
                            self.acc_factor, csm)
                    else:
                        raise Exception('Unknown parallel imaging method: ' +
                                        str(self.method))

                    for c in self.calib_buffer:
                        recon = transform.transform_kspace_to_image(
                            c[1], dim=(1, 2)) * np.sqrt(scale)
                        recon = np.squeeze(np.sum(recon * self.unmix, 0))
                        self.put_next(c[0], recon, *args)

                return 0

            if self.unmix is None:
                raise Exception(
                    "We should never reach this point without unmixing coefficients"
                )

            recon = transform.transform_kspace_to_image(
                self.buffer, dim=(1, 2)) * np.sqrt(scale)
            recon = np.squeeze(np.sum(recon * self.unmix, 0))
            self.buffer[:] = 0
            self.samp_mask[:] = 0
            self.put_next(img_head, recon, *args)
        return 0
alias_img = transform.transform_kspace_to_image(kspace, dim=(1, 2)) * np.sqrt(acc_factor)
show.imshow(abs(alias_img))

#%%
reload(sense)
(unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor, csm)
show.imshow(abs(gmap_sense), colorbar=True)
recon_sense = np.squeeze(np.sum(alias_img * unmix_sense, 0))
show.imshow(abs(recon_sense), colorbar=True)

#%%
reload(grappa)
#(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1, csm=csm)
(unmix_grappa,
 gmap_grappa) = grappa.calculate_grappa_unmixing(data,
                                                 acc_factor,
                                                 data_mask=pat > 1)
show.imshow(abs(gmap_grappa), colorbar=True)
recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0))
show.imshow(abs(recon_grappa), colorbar=True)

#%%
#Pseudo replica example
reps = 255
reps_sense = np.zeros((reps, recon_grappa.shape[0], recon_grappa.shape[1]),
                      dtype=np.complex64)
reps_grappa = np.zeros((reps, recon_grappa.shape[0], recon_grappa.shape[1]),
                       dtype=np.complex64)
for r in range(0, reps):
    noise_r = np.random.standard_normal(
        kspace.shape) + 1j * np.random.standard_normal(kspace.shape)
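    # The listing is truncated here. A minimal sketch of how a pseudo-replica loop of this
    # kind typically continues (assumed, not from the source): add the synthetic noise to the
    # sampled k-space, reconstruct with the SENSE and GRAPPA unmixing coefficients computed
    # above, and store each replica for the noise/SNR statistics.
    kspace_r = kspace + noise_r * np.logical_or(pat == 1, pat == 3).astype('float32')
    alias_img_r = transform.transform_kspace_to_image(kspace_r, dim=(1, 2)) * np.sqrt(acc_factor)
    reps_sense[r, :, :] = np.squeeze(np.sum(alias_img_r * unmix_sense, 0))
    reps_grappa[r, :, :] = np.squeeze(np.sum(alias_img_r * unmix_grappa, 0))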
Example No. 6
    all_data[rep, contrast, slice, :, z, y, :] = acq.data

all_data = all_data.astype('complex64')

#%%
# Coil combination
coil_images = transform.transform_kspace_to_image(
    np.squeeze(np.mean(all_data, 0)), (1, 2))
(csm, rho) = coils.calculate_csm_walsh(coil_images)
csm_ss = np.sum(csm * np.conj(csm), 0)
csm_ss = csm_ss + 1.0 * (csm_ss < np.spacing(1)).astype('float32')

if acc_factor > 1:
    coil_data = np.squeeze(np.mean(all_data, 0))
    reload(grappa)
    (unmix, gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor)
    #(unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm)
    show.imshow(abs(gmap), colorbar=True, scale=(1, 2))

recon = np.zeros((nreps, ncontrasts, nslices, eNz, eNy, rNx),
                 dtype=np.complex64)
for r in range(0, nreps):
    recon_data = transform.transform_kspace_to_image(
        np.squeeze(all_data[r, :, :, :, :, :, :]),
        (1, 2)) * np.sqrt(acc_factor)
    if acc_factor > 1:
        recon[r, :, :, :, :] = np.sum(unmix * recon_data, 0)
    else:
        recon[r, :, :, :, :] = np.sum(np.conj(csm) * recon_data, 0)

show.imshow(np.squeeze(np.std(np.abs(recon), 0)), colorbar=True, scale=(1, 2))
import numpy as np
import scipy as sp
import scipy.io
from importlib import reload
from ismrmrdtools import sense, grappa, show, simulation, transform, coils

#%%
# import some data
exercise_data = sp.io.loadmat("hansen_exercises2.mat")
csm = np.transpose(exercise_data["smaps"])
pat = np.transpose(exercise_data["sp"])
data = np.transpose(exercise_data["data"])
kspace = np.logical_or(pat == 1, pat == 3).astype("float32") * (data)

acc_factor = 4
alias_img = transform.transform_kspace_to_image(kspace, dim=(1, 2)) * np.sqrt(acc_factor)
show.imshow(abs(alias_img))

(unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing(
    data, acc_factor, data_mask=pat > 1, csm=csm, kernel_size=(4, 5)
)
# (unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1)
show.imshow(abs(gmap_grappa), colorbar=True)
recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0))
show.imshow(abs(recon_grappa), colorbar=True)

sp.io.savemat(
    "tmp_data.mat",
    {"pat_py": pat, "data_py": data, "csm_py": csm, "alias_img_py": alias_img, "unmix_grappa_py": unmix_grappa},
)

#%%
# Reload some modules
reload(show)
reload(sense)
    y = acq.idx.kspace_encode_step_1
    z = acq.idx.kspace_encode_step_2
    all_data[rep, contrast, slice, :, z, y, :] = acq.data

all_data = all_data.astype('complex64')

#%%
# Coil combination
coil_images = transform.transform_kspace_to_image(np.squeeze(np.mean(all_data,0)),(1,2))
(csm,rho) = coils.calculate_csm_walsh(coil_images)
csm_ss = np.sum(csm * np.conj(csm),0)
csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32')

if acc_factor > 1:
    coil_data = np.squeeze(np.mean(all_data,0))
    reload(grappa)
    (unmix,gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor)
    #(unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm)
    show.imshow(abs(gmap),colorbar=True,scale=(1,2))
    
recon = np.zeros((nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.complex64)
for r in range(0,nreps):
    recon_data = transform.transform_kspace_to_image(np.squeeze(all_data[r,:,:,:,:,:,:]),(1,2))*np.sqrt(acc_factor)
    if acc_factor > 1:
        recon[r,:,:,:,:] = np.sum(unmix * recon_data,0)
    else:
        recon[r,:,:,:,:] = np.sum(np.conj(csm) * recon_data,0)
    
show.imshow(np.squeeze(np.std(np.abs(recon),0)),colorbar=True,scale=(1,2))