Example #1
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D']     = np.zeros( (len(self.d_perps),181,181,nS), dtype=np.float32 )
        KERNELS['CSF']   = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for i in range(len(self.d_perps)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
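
A minimal, self-contained sketch (plain NumPy, made-up index values) of what the doMergeB0 branch above does: when the b0 volumes are merged, only the first b0 index is kept in front of the DWI indices, so each resampled kernel ends up with 1 + dwi_count samples instead of nS.

import numpy as np

# Hypothetical scheme layout: 3 b0 volumes followed by 6 DWI volumes (nS = 9).
b0_idx  = np.array([0, 1, 2])
dwi_idx = np.array([3, 4, 5, 6, 7, 8])
nS      = 9

doMergeB0 = True
if doMergeB0:
    # keep a single b0 sample in front of the DWI samples -> 1 + dwi_count entries
    merge_idx = np.hstack((b0_idx[0], dwi_idx))
else:
    merge_idx = np.arange(nS)

signal = np.arange(nS, dtype=np.float32)   # stand-in for one resampled kernel
print(signal[merge_idx])                   # -> [0. 3. 4. 5. 6. 7. 8.]
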
Example #2
    def resample( self, in_path, idx_out, Ylm_out ):
        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1

        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wm']    = np.zeros( (nATOMS-1,181,181,self.scheme.nS), dtype=np.float32 )
        KERNELS['iso']   = np.zeros( self.scheme.nS, dtype=np.float32 )
        KERNELS['kappa'] = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['icvf']  = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['norms'] = np.zeros( (self.scheme.dwi_count, nATOMS-1) )

        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Coupled contributions
        for i in range( len(self.IC_ODs) ):
            for j in range( len(self.IC_VFs) ):
                lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
                idx = progress.i - 1
                KERNELS['wm'][idx,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
                KERNELS['kappa'][idx] = 1.0 / np.tan( self.IC_ODs[i]*np.pi/2.0 )
                KERNELS['icvf'][idx]  = self.IC_VFs[j]
                KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,0,self.scheme.dwi_idx] ) # norm of coupled atoms (for l1 minimization)
                progress.update()

        # Isotropic
        lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
        KERNELS['iso'] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )
        progress.update()

        return KERNELS
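
The 'norms' entry above stores 1/||atom|| for each coupled atom so that the DWI part of the dictionary can be rescaled to unit l2 norm before the l1 minimization (see the inline comment). An illustrative, stand-alone NumPy sketch of that column normalization (made-up sizes, not AMICO's solver):

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((30, 5))                  # 30 measurements, 5 atoms (made-up sizes)
norms = 1.0 / np.linalg.norm(A, axis=0)  # one scale factor per atom, as stored above
A_unit = A * norms                       # broadcasting rescales each column

print(np.linalg.norm(A_unit, axis=0))    # -> all ones
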
Example #3
    def generate( self, out_path, aux, idx_in, idx_out ):
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale = 1 )
        protocolHR = self.scheme2noddi( scheme_high )

        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Coupled contributions
        IC_KAPPAs = 1 / np.tan(self.IC_ODs*np.pi/2)
        for kappa in IC_KAPPAs:
            signal_ic = self.synth_meas_watson_SH_cyl_neuman_PGSE( np.array([self.dPar*1E-6, 0, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]), 0 )

            for v_ic in self.IC_VFs:
                dPerp = self.dPar*1E-6 * (1 - v_ic)
                signal_ec = self.synth_meas_watson_hindered_diffusion_PGSE( np.array([self.dPar*1E-6, dPerp, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]) )

                signal = v_ic*signal_ic + (1-v_ic)*signal_ec
                lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
                np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
                progress.update()

        # Isotropic
        signal = self.synth_meas_iso_GPD( self.dIso*1E-6, protocolHR)
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
        np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
        progress.update()
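
The kappa values used above come from the orientation dispersion indices via IC_KAPPAs = 1/tan(OD*pi/2). A small, self-contained check of that mapping and its inverse, OD = (2/pi)*arctan(1/kappa), with example OD values only:

import numpy as np

OD = np.array([0.04, 0.16, 0.5, 0.84])         # example dispersion values
kappa = 1.0 / np.tan(OD * np.pi / 2.0)         # as in IC_KAPPAs above
OD_back = 2.0 / np.pi * np.arctan(1.0 / kappa)

print(np.allclose(OD, OD_back))                # -> True
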
Example #4
    def resample( self, in_path, idx_out, Ylm_out ) :
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,self.scheme.nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,self.scheme.nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_ISOs),self.scheme.nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for i in range(len(self.Rs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.ICVFs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
            progress.update()

        # Ball(s)
        for i in range(len(self.d_ISOs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )
            progress.update()

        return KERNELS
Example #5
    def generate(self, out_path, aux, idx_in, idx_out):
        scheme_high = amico.lut.create_high_resolution_scheme(self.scheme,
                                                              b_scale=1)
        gtab = gradient_table(scheme_high.b, scheme_high.raw[:, 0:3])

        nATOMS = 1 + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar(n=nATOMS, prefix="   ", erase=True)

        # Stick
        signal = single_tensor(gtab, evals=[0, 0, self.d_par])
        lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, False)
        np.save(pjoin(out_path, 'A_001.npy'), lm)
        progress.update()

        # Zeppelin(s)
        for d in [self.d_par * (1.0 - ICVF) for ICVF in self.ICVFs]:
            signal = single_tensor(gtab, evals=[d, d, self.d_par])
            lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, False)
            np.save(pjoin(out_path, 'A_%03d.npy' % progress.i), lm)
            progress.update()

        # Ball(s)
        for d in self.d_ISOs:
            signal = single_tensor(gtab, evals=[d, d, d])
            lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, True)
            np.save(pjoin(out_path, 'A_%03d.npy' % progress.i), lm)
            progress.update()
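
The zeppelin eigenvalues above follow the usual tortuosity approximation, d_perp = d_par * (1 - ICVF), while the stick uses d_perp = 0. A short stand-alone sketch of the eigenvalue lists that get passed to single_tensor (example values only):

d_par = 1.7e-3                       # mm^2/s, example value
ICVFs = [0.5, 0.7, 0.9]

print([0, 0, d_par])                 # stick: no perpendicular diffusion
for ICVF in ICVFs:
    d_perp = d_par * (1.0 - ICVF)
    print([d_perp, d_perp, d_par])   # zeppelin evals, ordering as in the code above
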
Example #6
def debiasRician(DWI,SNR,mask,scheme):
    debiased_DWI = np.zeros(DWI.shape)
    t = time.time()
    progress = ProgressBar( n=mask.sum(), prefix="   ", erase=True )
    for ix in range(DWI.shape[0]):
        for iy in range(DWI.shape[1]):
            for iz in range(DWI.shape[2]):
                if mask[ix,iy,iz]:
                    b0 = DWI[ix,iy,iz,scheme.b0_idx].mean()
                    sigma_diff = b0/SNR
                    init_guess = DWI[ix,iy,iz,:].copy()
                    tmp = minimize(F_norm_Diff_K, init_guess, args=(init_guess,sigma_diff), method = 'L-BFGS-B', jac=der_Diff)
                    debiased_DWI[ix,iy,iz] = tmp.x
                    progress.update()
    print('   [ %s ]' % ( time.strftime("%Hh %Mm %Ss", time.gmtime(time.time()-t) ) ))
    return debiased_DWI
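
A quick, self-contained illustration (synthetic NumPy data, not AMICO code) of the Rician bias that debiasRician removes: the magnitude of a complex signal plus Gaussian noise has a mean larger than the true signal, especially at low SNR.

import numpy as np

rng = np.random.default_rng(42)
S, sigma, n = 100.0, 50.0, 200000     # true signal, noise sd, samples (SNR = 2)
noisy = np.abs(S + rng.normal(0, sigma, n) + 1j * rng.normal(0, sigma, n))

print(noisy.mean())                   # noticeably larger than 100 -> the bias
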
Example #7
    def resample( self, in_path, idx_out, Ylm_out ) :
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,self.scheme.nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,self.scheme.nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_ISOs),self.scheme.nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for i in range(len(self.Rs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.ICVFs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
            progress.update()

        # Ball(s)
        for i in range(len(self.d_ISOs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )
            progress.update()

        return KERNELS
Example #8
    def generate( self, out_path, aux, idx_in, idx_out ) :
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1 )
        gtab = gradient_table( scheme_high.b, scheme_high.raw[:,0:3] )

        nATOMS = 1 + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Stick
        signal = single_tensor( gtab, evals=[0, 0, self.d_par] )
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
        np.save( pjoin( out_path, 'A_001.npy' ), lm )
        progress.update()

        # Zeppelin(s)
        for d in [ self.d_par*(1.0-ICVF) for ICVF in self.ICVFs] :
            signal = single_tensor( gtab, evals=[d, d, self.d_par] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Ball(s)
        for d in self.d_ISOs :
            signal = single_tensor( gtab, evals=[d, d, d] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
Example #9
    def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
        if self.scheme.version != 1 :
            ERROR( 'This model requires a "VERSION: STEJSKALTANNER" scheme' )

        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1E6 )
        filename_scheme = pjoin( out_path, 'scheme.txt' )
        np.savetxt( filename_scheme, scheme_high.raw, fmt='%15.8e', delimiter=' ', header='VERSION: STEJSKALTANNER', comments='' )

        # temporary file where to store "datasynth" output
        filename_signal = pjoin( tempfile._get_default_tempdir(), next(tempfile._get_candidate_names())+'.Bfloat' )

        nATOMS = len(self.Rs) + len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=False )

        # Cylinder(s)
        for R in self.Rs :
            CMD = 'datasynth -synthmodel compartment 1 CYLINDERGPD %E 0 0 %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( self.d_par*1E-6, R, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                ERROR( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Zeppelin(s)
        for d in self.d_perps :
            CMD = 'datasynth -synthmodel compartment 1 ZEPPELIN %E 0 0 %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( self.d_par*1E-6, d*1e-6, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                ERROR( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Ball(s)
        for d in self.d_isos :
            CMD = 'datasynth -synthmodel compartment 1 BALL %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( d*1e-6, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                ERROR( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
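
The dtype='>f4' above reads the '.Bfloat' output of datasynth as big-endian 32-bit floats. A tiny in-memory demonstration of the same decoding (simulated bytes, no external tool needed):

import numpy as np

raw = np.array([1.0, 0.5, 0.25], dtype='>f4').tobytes()   # simulated .Bfloat content
signal = np.frombuffer(raw, dtype='>f4')
print(signal)                                              # -> [1.   0.5  0.25]
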
Example #10
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_ISOs),nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for i in xrange(len(self.Rs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Zeppelin(s)
        for i in xrange(len(self.ICVFs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Ball(s)
        for i in xrange(len(self.d_ISOs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Example #11
def debiasRician(DWI, SNR, mask, scheme):
    debiased_DWI = np.zeros(DWI.shape)
    progress = ProgressBar(n=mask.sum(), prefix="   ", erase=True)
    for ix in range(DWI.shape[0]):
        for iy in range(DWI.shape[1]):
            for iz in range(DWI.shape[2]):
                if mask[ix, iy, iz]:
                    b0 = DWI[ix, iy, iz, scheme.b0_idx].mean()
                    sigma_diff = b0 / SNR
                    init_guess = DWI[ix, iy, iz, :].copy()
                    tmp = minimize(F_norm_Diff_K,
                                   init_guess,
                                   args=(init_guess, sigma_diff),
                                   method='L-BFGS-B',
                                   jac=der_Diff)
                    debiased_DWI[ix, iy, iz] = tmp.x
                    progress.update()
    return debiased_DWI
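
The objective F_norm_Diff_K and its gradient der_Diff are defined elsewhere in AMICO. A minimal, self-contained sketch of the same scipy.optimize.minimize calling pattern (L-BFGS-B with an analytic Jacobian), using a toy quadratic instead of the actual Rician objective:

import numpy as np
from scipy.optimize import minimize

target = np.array([1.0, -2.0, 3.0])

def objective(x, target):
    return 0.5 * np.sum((x - target) ** 2)   # toy objective, not F_norm_Diff_K

def gradient(x, target):
    return x - target                        # analytic Jacobian, not der_Diff

res = minimize(objective, np.zeros(3), args=(target,),
               method='L-BFGS-B', jac=gradient)
print(res.x)                                 # -> approximately [ 1. -2.  3.]
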
Example #12
    def resample(self, in_path, idx_out, Ylm_out, doMergeB0):
        if doMergeB0:
            nS = 1 + self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0], self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros((len(self.Rs), 181, 181, nS), dtype=np.float32)
        KERNELS['wmh'] = np.zeros((len(self.ICVFs), 181, 181, nS), dtype=np.float32)
        KERNELS['iso'] = np.zeros((len(self.d_ISOs), nS), dtype=np.float32)

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar(n=nATOMS, prefix="   ", erase=True)

        # Cylinder(s)
        for i in range(len(self.Rs)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['wmr'][i, :, :, :] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, False)[:, :, merge_idx]
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.ICVFs)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['wmh'][i, :, :, :] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, False)[:, :, merge_idx]
            progress.update()

        # Ball(s)
        for i in range(len(self.d_ISOs)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['iso'][i, :] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, True)[merge_idx]
            progress.update()

        return KERNELS
Example #13
    def generate( self, out_path, aux, idx_in, idx_out ) :
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1 )
        gtab = gradient_table( scheme_high.b, scheme_high.raw[:,0:3] )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for d in self.d_perps :
            signal = single_tensor( gtab, evals=[d, d, self.d_par] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Isotropic compartment(s)
        for d in self.d_isos :
            signal = single_tensor( gtab, evals=[d, d, d] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
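
gradient_table and single_tensor here appear to come from dipy (dipy.core.gradients and dipy.sims.voxel). A minimal stand-alone version of the simulation step, assuming dipy is installed and using a made-up acquisition of one b=0 plus three b=1000 s/mm^2 directions:

import numpy as np
from dipy.core.gradients import gradient_table
from dipy.sims.voxel import single_tensor

bvals = np.array([0., 1000., 1000., 1000.])
bvecs = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.],
                  [0., 0., 1.]])
gtab = gradient_table(bvals, bvecs)

d_par, d_perp = 1.7e-3, 0.4e-3                                # mm^2/s, example values
signal = single_tensor(gtab, evals=[d_perp, d_perp, d_par])   # principal axis along z
print(signal)
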
Example #14
    def resample(self, in_path, idx_out, Ylm_out, doMergeB0):
        if doMergeB0:
            nS = 1 + self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0], self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D'] = np.zeros((len(self.d_perps), 181, 181, nS),
                                dtype=np.float32)
        KERNELS['CSF'] = np.zeros((len(self.d_isos), nS), dtype=np.float32)

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar(n=nATOMS, prefix="   ", erase=True)

        # Tensor compartment(s)
        for i in range(len(self.d_perps)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['D'][i, ...] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, False)[:, :, merge_idx]
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['CSF'][i, ...] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, True)[merge_idx]
            progress.update()

        return KERNELS
Example #15
    def generate( self, out_path, aux, idx_in, idx_out ) :
        if self.scheme.version != 1 :
            raise RuntimeError( 'This model requires a "VERSION: STEJSKALTANNER" scheme.' )

        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1E6 )
        filename_scheme = pjoin( out_path, 'scheme.txt' )
        np.savetxt( filename_scheme, scheme_high.raw, fmt='%15.8e', delimiter=' ', header='VERSION: STEJSKALTANNER', comments='' )

        # temporary file where to store "datasynth" output
        filename_signal = pjoin( tempfile._get_default_tempdir(), next(tempfile._get_candidate_names())+'.Bfloat' )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for R in self.Rs :
            CMD = 'datasynth -synthmodel compartment 1 CYLINDERGPD %E 0 0 %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( self.d_par*1E-6, R, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                raise RuntimeError( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Zeppelin(s)
        for d in [ self.d_par*(1.0-ICVF) for ICVF in self.ICVFs] :
            CMD = 'datasynth -synthmodel compartment 1 ZEPPELIN %E 0 0 %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( self.d_par*1E-6, d*1e-6, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                raise RuntimeError( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Ball(s)
        for d in self.d_ISOs :
            CMD = 'datasynth -synthmodel compartment 1 BALL %E -schemefile %s -voxels 1 -outputfile %s 2> /dev/null' % ( d*1e-6, filename_scheme, filename_signal )
            subprocess.call( CMD, shell=True )
            if not exists( filename_signal ) :
                raise RuntimeError( 'Problems generating the signal with "datasynth"' )
            signal  = np.fromfile( filename_signal, dtype='>f4' )
            if exists( filename_signal ) :
                remove( filename_signal )

            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
Example #16
    def resample( self, in_path, idx_out, Ylm_out ):
        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1

        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wm']    = np.zeros( (nATOMS-1,181,181,self.scheme.nS), dtype=np.float32 )
        KERNELS['iso']   = np.zeros( self.scheme.nS, dtype=np.float32 )
        KERNELS['kappa'] = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['icvf']  = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['norms'] = np.zeros( (self.scheme.dwi_count, nATOMS-1) )

        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Coupled contributions
        for i in range( len(self.IC_ODs) ):
            for j in range( len(self.IC_VFs) ):
                lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
                idx = progress.i - 1
                KERNELS['wm'][idx,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
                KERNELS['kappa'][idx] = 1.0 / np.tan( self.IC_ODs[i]*np.pi/2.0 )
                KERNELS['icvf'][idx]  = self.IC_VFs[j]
                KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,0,self.scheme.dwi_idx] ) # norm of coupled atoms (for l1 minimization)
                progress.update()

        # Isotropic
        lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
        KERNELS['iso'] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )
        progress.update()

        return KERNELS
Example #17
    def generate( self, out_path, aux, idx_in, idx_out, ndirs ):
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale = 1 )
        protocolHR = self.scheme2noddi( scheme_high )

        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=False )

        # Coupled contributions
        IC_KAPPAs = 1 / np.tan(self.IC_ODs*np.pi/2)
        for kappa in IC_KAPPAs:
            signal_ic = self.synth_meas_watson_SH_cyl_neuman_PGSE( np.array([self.dPar*1E-6, 0, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]), 0 )

            for v_ic in self.IC_VFs:
                dPerp = self.dPar*1E-6 * (1 - v_ic)
                signal_ec = self.synth_meas_watson_hindered_diffusion_PGSE( np.array([self.dPar*1E-6, dPerp, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]) )

                signal = v_ic*signal_ic + (1-v_ic)*signal_ec
                lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
                np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
                progress.update()

        # Isotropic
        signal = self.synth_meas_iso_GPD( self.dIso*1E-6, protocolHR)
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
        np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
        progress.update()
Example #18
    def resample(self, in_path, idx_out, Ylm_out):
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D'] = np.zeros((len(self.d_perps), 181, 181, self.scheme.nS),
                                dtype=np.float32)
        KERNELS['CSF'] = np.zeros((len(self.d_isos), self.scheme.nS),
                                  dtype=np.float32)

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar(n=nATOMS, prefix="   ", erase=True)

        # Tensor compartment(s)
        for i in range(len(self.d_perps)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['D'][i, ...] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, False)
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)):
            lm = np.load(pjoin(in_path, 'A_%03d.npy' % progress.i))
            KERNELS['CSF'][i, ...] = amico.lut.resample_kernel(
                lm, self.scheme.nS, idx_out, Ylm_out, True)
            progress.update()

        return KERNELS
Example #19
    def resample( self, in_path, idx_out, Ylm_out ) :
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D']     = np.zeros( (len(self.d_perps),181,181,self.scheme.nS), dtype=np.float32 )
        KERNELS['CSF']   = np.zeros( (len(self.d_isos),self.scheme.nS), dtype=np.float32 )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for i in range(len(self.d_perps)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )
            progress.update()

        return KERNELS
Example #20
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),ndirs,nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.d_perps),ndirs,nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_isos),nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=False )

        # Cylinder(s)
        for i in range(len(self.Rs)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            if lm.shape[0] != ndirs:
                ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
            KERNELS['wmr'][i,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.d_perps)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            if lm.shape[0] != ndirs:
                ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
            KERNELS['wmh'][i,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
            progress.update()

        # Ball(s)
        for i in range(len(self.d_isos)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
            progress.update()

        return KERNELS
Example #21
    def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale=1 )
        gtab = gradient_table( scheme_high.b, scheme_high.raw[:,0:3] )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=False )

        # Tensor compartment(s)
        for d in self.d_perps :
            signal = single_tensor( gtab, evals=[d, d, self.d_par] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()

        # Isotropic compartment(s)
        for d in self.d_isos :
            signal = single_tensor( gtab, evals=[d, d, d] )
            lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
            np.save( pjoin( out_path, 'A_%03d.npy'%progress.i ), lm )
            progress.update()
Example #22
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ):
        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wm']    = np.zeros( (nATOMS-1,ndirs,nS), dtype=np.float32 )
        KERNELS['iso']   = np.zeros( nS, dtype=np.float32 )
        KERNELS['kappa'] = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['icvf']  = np.zeros( nATOMS-1, dtype=np.float32 )
        KERNELS['norms'] = np.zeros( (self.scheme.dwi_count, nATOMS-1) )

        progress = ProgressBar( n=nATOMS, prefix="   ", erase=False )

        # Coupled contributions
        for i in range( len(self.IC_ODs) ):
            for j in range( len(self.IC_VFs) ):
                lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
                if lm.shape[0] != ndirs:
                    ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
                idx = progress.i - 1
                KERNELS['wm'][idx,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
                KERNELS['kappa'][idx] = 1.0 / np.tan( self.IC_ODs[i]*np.pi/2.0 )
                KERNELS['icvf'][idx]  = self.IC_VFs[j]
                if doMergeB0:
                    KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,1:] ) # norm of coupled atoms (for l1 minimization)
                else:
                    KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,self.scheme.dwi_idx] ) # norm of coupled atoms (for l1 minimization)
                progress.update()

        # Isotropic
        lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
        KERNELS['iso'] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
        progress.update()

        return KERNELS
Example #23
    def fit( self ) :
        """Fit the model to the data iterating over all voxels (in the mask) one after the other.
        Call the appropriate fit() method of the actual model used.
        """
        if self.niiDWI is None :
            raise RuntimeError( 'Data not loaded; call "load_data()" first.' )
        if self.model is None :
            raise RuntimeError( 'Model not set; call "set_model()" first.' )
        if self.KERNELS is None :
            raise RuntimeError( 'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.' )
        if self.KERNELS['model'] != self.model.id :
            raise RuntimeError( 'Response functions were not created with the same model.' )

        self.set_config('fit_time', None)
        totVoxels = np.count_nonzero(self.niiMASK_img)
        print( '\n-> Fitting "%s" model to %d voxels:' % ( self.model.name, totVoxels ) )

        # setup fitting directions
        peaks_filename = self.get_config('peaks_filename')
        if peaks_filename is None :
            DIRs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], 3], dtype=np.float32 )
            nDIR = 1
            if self.get_config('doMergeB0'):
                gtab = gradient_table( np.hstack((0,self.scheme.b[self.scheme.dwi_idx])), np.vstack((np.zeros((1,3)),self.scheme.raw[self.scheme.dwi_idx,:3])) )
            else:
                gtab = gradient_table( self.scheme.b, self.scheme.raw[:,:3] )
            DTI = dti.TensorModel( gtab )
        else :
            niiPEAKS = nibabel.load( pjoin( self.get_config('DATA_path'), peaks_filename) )
            DIRs = niiPEAKS.get_data().astype(np.float32)
            nDIR = np.floor( DIRs.shape[3]/3 )
            print( '\t* peaks dim = %d x %d x %d x %d' % DIRs.shape[:4] )
            if DIRs.shape[:3] != self.niiMASK_img.shape[:3] :
                raise ValueError( 'PEAKS geometry does not match with DWI data' )

        # setup other output files
        MAPs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], 
                          self.get_config('dim')[2], len(self.model.maps_name)], dtype=np.float32 )

        if self.get_config('doComputeNRMSE') :
            NRMSE = np.zeros( [self.get_config('dim')[0], 
                               self.get_config('dim')[1], self.get_config('dim')[2]], dtype=np.float32 )
            
        if self.get_config('doSaveCorrectedDWI') :
            DWI_corrected = np.zeros(self.niiDWI.shape, dtype=np.float32)


        # fit the model to the data
        # =========================
        t = time.time()
        progress = ProgressBar( n=totVoxels, prefix="   ", erase=True )
        for iz in range(self.niiMASK_img.shape[2]) :
            for iy in range(self.niiMASK_img.shape[1]) :
                for ix in range(self.niiMASK_img.shape[0]) :
                    if self.niiMASK_img[ix,iy,iz]==0 :
                        continue

                    # prepare the signal
                    y = self.niiDWI_img[ix,iy,iz,:].astype(np.float64)
                    y[ y < 0 ] = 0 # [NOTE] this should not happen!

                    # fitting directions
                    if peaks_filename is None :
                        dirs = DTI.fit( y ).directions[0]
                    else :
                        dirs = DIRs[ix,iy,iz,:]

                    # dispatch to the right handler for each model
                    MAPs[ix,iy,iz,:], DIRs[ix,iy,iz,:], x, A = self.model.fit( y, dirs.reshape(-1,3), self.KERNELS, self.get_config('solver_params') )

                    # compute fitting error
                    if self.get_config('doComputeNRMSE') :
                        y_est = np.dot( A, x )
                        den = np.sum(y**2)
                        NRMSE[ix,iy,iz] = np.sqrt( np.sum((y-y_est)**2) / den ) if den > 1e-16 else 0
                                            
                    if self.get_config('doSaveCorrectedDWI') :

                        if self.model.name == 'Free-Water' :
                            n_iso = len(self.model.d_isos)
                            x[-1*n_iso:] = 0

                            #print(y, x, b0, A.shape)
                            if self.get_config('doNormalizeSignal') and self.scheme.b0_count > 0 :
                                y_fw_corrected = np.dot( A, x ) * self.mean_b0s[ix,iy,iz]
                            else :
                                y_fw_corrected = np.dot( A, x )

                            if self.get_config('doKeepb0Intact') and self.scheme.b0_count > 0 :
                                # put original b0 data back in. 
                                y_fw_corrected[self.scheme.b0_idx] = y[self.scheme.b0_idx]*self.mean_b0s[ix,iy,iz]

                            DWI_corrected[ix,iy,iz,:] = y_fw_corrected

                            
                    progress.update()

        self.set_config('fit_time', time.time()-t)
        print( '   [ %s ]' % ( time.strftime("%Hh %Mm %Ss", time.gmtime(self.get_config('fit_time')) ) ) )

        # store results
        self.RESULTS = {}
        self.RESULTS['DIRs']  = DIRs
        self.RESULTS['MAPs']  = MAPs
        if self.get_config('doComputeNRMSE') :
            self.RESULTS['NRMSE'] = NRMSE
        if self.get_config('doSaveCorrectedDWI') :
            self.RESULTS['DWI_corrected'] = DWI_corrected
Example #24
    def fit( self ) :
        """Fit the model to the data iterating over all voxels (in the mask) one after the other.
        Call the appropriate fit() method of the actual model used.
        """
        if self.niiDWI is None :
            raise RuntimeError( 'Data not loaded; call "load_data()" first.' )
        if self.model is None :
            raise RuntimeError( 'Model not set; call "set_model()" first.' )
        if self.KERNELS is None :
            raise RuntimeError( 'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.' )
        if self.KERNELS['model'] != self.model.id :
            raise RuntimeError( 'Response functions were not created with the same model.' )

        self.set_config('fit_time', None)
        totVoxels = np.count_nonzero(self.niiMASK_img)
        print( '\n-> Fitting "%s" model to %d voxels:' % ( self.model.name, totVoxels ) )

        # setup fitting directions
        peaks_filename = self.get_config('peaks_filename')
        if peaks_filename is None :
            DIRs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], 3], dtype=np.float32 )
            nDIR = 1
            gtab = gradient_table( self.scheme.b, self.scheme.raw[:,:3] )
            DTI = dti.TensorModel( gtab )
        else :
            niiPEAKS = nibabel.load( pjoin( self.get_config('DATA_path'), peaks_filename) )
            DIRs = niiPEAKS.get_data().astype(np.float32)
            nDIR = np.floor( DIRs.shape[3]/3 )
            print( '\t* peaks dim = %d x %d x %d x %d' % DIRs.shape[:4] )
            if DIRs.shape[:3] != self.niiMASK_img.shape[:3] :
                raise ValueError( 'PEAKS geometry does not match with DWI data' )

        # setup other output files
        MAPs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], len(self.model.maps_name)], dtype=np.float32 )
        if self.get_config('doComputeNRMSE') :
            NRMSE = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2]], dtype=np.float32 )

        # fit the model to the data
        # =========================
        t = time.time()
        progress = ProgressBar( n=totVoxels, prefix="   ", erase=True )
        for iz in range(self.niiMASK_img.shape[2]) :
            for iy in range(self.niiMASK_img.shape[1]) :
                for ix in range(self.niiMASK_img.shape[0]) :
                    if self.niiMASK_img[ix,iy,iz]==0 :
                        continue

                    # prepare the signal
                    y = self.niiDWI_img[ix,iy,iz,:].astype(np.float64)
                    y[ y < 0 ] = 0 # [NOTE] this should not happen!

                    if self.get_config('doNormalizeSignal') and self.scheme.b0_count > 0 :
                        b0 = np.mean( y[self.scheme.b0_idx] )
                        if b0 > 1e-3 :
                            y = y / b0

                    # fitting directions
                    if peaks_filename is None :
                        dirs = DTI.fit( y ).directions[0]
                    else :
                        dirs = DIRs[ix,iy,iz,:]

                    # dispatch to the right handler for each model
                    MAPs[ix,iy,iz,:], DIRs[ix,iy,iz,:], x, A = self.model.fit( y, dirs.reshape(-1,3), self.KERNELS, self.get_config('solver_params') )

                    # compute fitting error
                    if self.get_config('doComputeNRMSE') :
                        y_est = np.dot( A, x )
                        den = np.sum(y**2)
                        NRMSE[ix,iy,iz] = np.sqrt( np.sum((y-y_est)**2) / den ) if den > 1e-16 else 0

                    progress.update()

        self.set_config('fit_time', time.time()-t)
        print( '   [ %s ]' % ( time.strftime("%Hh %Mm %Ss", time.gmtime(self.get_config('fit_time')) ) ) )

        # store results
        self.RESULTS = {}
        self.RESULTS['DIRs']  = DIRs
        self.RESULTS['MAPs']  = MAPs
        if self.get_config('doComputeNRMSE') :
            self.RESULTS['NRMSE'] = NRMSE
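
The fitting error above reads as NRMSE = ||y - y_est||_2 / ||y||_2, with a guard for an all-zero signal. A small stand-alone helper (illustrative, not part of AMICO) that mirrors that computation:

import numpy as np

def nrmse(y, y_est, eps=1e-16):
    den = np.sum(y ** 2)
    return np.sqrt(np.sum((y - y_est) ** 2) / den) if den > eps else 0.0

y     = np.array([1.0, 0.8, 0.6])
y_est = np.array([0.9, 0.85, 0.55])
print(nrmse(y, y_est))
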
Example #25
    def fit(self):
        """Fit the model to the data iterating over all voxels (in the mask) one after the other.
        Call the appropriate fit() method of the actual model used.
        """
        if self.niiDWI is None:
            raise RuntimeError('Data not loaded; call "load_data()" first.')
        if self.model is None:
            raise RuntimeError('Model not set; call "set_model()" first.')
        if self.KERNELS is None:
            raise RuntimeError(
                'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.'
            )
        if self.KERNELS["model"] != self.model.id:
            raise RuntimeError("Response functions were not created with the same model.")

        self.set_config("fit_time", None)
        totVoxels = np.count_nonzero(self.niiMASK_img)
        print('\n-> Fitting "%s" model to %d voxels:' % (self.model.name, totVoxels))

        # setup fitting directions
        peaks_filename = self.get_config("peaks_filename")
        if peaks_filename is None:
            DIRs = np.zeros(
                [self.get_config("dim")[0], self.get_config("dim")[1], self.get_config("dim")[2], 3], dtype=np.float32
            )
            nDIR = 1
            gtab = gradient_table(self.scheme.b, self.scheme.raw[:, :3])
            DTI = dti.TensorModel(gtab)
        else:
            niiPEAKS = nibabel.load(pjoin(self.get_config("DATA_path"), peaks_filename))
            DIRs = niiPEAKS.get_data().astype(np.float32)
            nDIR = np.floor(DIRs.shape[3] / 3)
            print("\t* peaks dim = %d x %d x %d x %d" % DIRs.shape[:4])
            if DIRs.shape[:3] != self.niiMASK_img.shape[:3]:
                raise ValueError("PEAKS geometry does not match with DWI data")

        # setup other output files
        MAPs = np.zeros(
            [
                self.get_config("dim")[0],
                self.get_config("dim")[1],
                self.get_config("dim")[2],
                len(self.model.maps_name),
            ],
            dtype=np.float32,
        )

        if self.get_config("doComputeNRMSE"):
            NRMSE = np.zeros(
                [self.get_config("dim")[0], self.get_config("dim")[1], self.get_config("dim")[2]], dtype=np.float32
            )

        if self.get_config("doSaveCorrectedDWI"):
            DWI_corrected = np.zeros(self.niiDWI.shape, dtype=np.float32)

        # fit the model to the data
        # =========================
        t = time.time()
        progress = ProgressBar(n=totVoxels, prefix="   ", erase=True)
        for iz in range(self.niiMASK_img.shape[2]):
            for iy in range(self.niiMASK_img.shape[1]):
                for ix in range(self.niiMASK_img.shape[0]):
                    if self.niiMASK_img[ix, iy, iz] == 0:
                        continue

                    # prepare the signal
                    y = self.niiDWI_img[ix, iy, iz, :].astype(np.float64)
                    y[y < 0] = 0  # [NOTE] this should not happen!

                    if self.scheme.b0_count > 0:
                        b0 = np.mean(y[self.scheme.b0_idx])

                    if self.get_config("doNormalizeSignal") and self.scheme.b0_count > 0:
                        if b0 > 1e-3:
                            y = y / b0

                    # fitting directions
                    if peaks_filename is None:
                        dirs = DTI.fit(y).directions[0]
                    else:
                        dirs = DIRs[ix, iy, iz, :]

                    # dispatch to the right handler for each model
                    MAPs[ix, iy, iz, :], DIRs[ix, iy, iz, :], x, A = self.model.fit(
                        y, dirs.reshape(-1, 3), self.KERNELS, self.get_config("solver_params")
                    )

                    # compute fitting error
                    if self.get_config("doComputeNRMSE"):
                        y_est = np.dot(A, x)
                        den = np.sum(y ** 2)
                        NRMSE[ix, iy, iz] = np.sqrt(np.sum((y - y_est) ** 2) / den) if den > 1e-16 else 0

                    if self.get_config("doSaveCorrectedDWI"):

                        if self.model.name == "Free-Water":
                            n_iso = len(self.model.d_isos)
                            x[-1 * n_iso :] = 0

                            # print(y, x, b0, A.shape)
                            if self.get_config("doNormalizeSignal") and self.scheme.b0_count > 0:
                                y_fw_corrected = np.dot(A, x) * b0
                            else:
                                y_fw_corrected = np.dot(A, x)

                            if self.get_config("doKeepb0Intact") and self.scheme.b0_count > 0:
                                # put original b0 data back in.
                                y_fw_corrected[self.scheme.b0_idx] = y[self.scheme.b0_idx] * b0

                            DWI_corrected[ix, iy, iz, :] = y_fw_corrected

                    progress.update()

        self.set_config("fit_time", time.time() - t)
        print("   [ %s ]" % (time.strftime("%Hh %Mm %Ss", time.gmtime(self.get_config("fit_time")))))

        # store results
        self.RESULTS = {}
        self.RESULTS["DIRs"] = DIRs
        self.RESULTS["MAPs"] = MAPs
        if self.get_config("doComputeNRMSE"):
            self.RESULTS["NRMSE"] = NRMSE
        if self.get_config("doSaveCorrectedDWI"):
            self.RESULTS["DWI_corrected"] = DWI_corrected
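
The arrays collected in self.RESULTS are typically written to disk as NIfTI images afterwards. A minimal, hedged sketch of that step using nibabel (placeholder data, identity affine, and a made-up filename; in practice the affine would come from the input DWI header, and this is not AMICO's own save routine):

import numpy as np
import nibabel

MAPs = np.zeros((2, 2, 2, 3), dtype=np.float32)   # placeholder fitted maps
affine = np.eye(4)                                # hypothetical affine
nibabel.save(nibabel.Nifti1Image(MAPs[..., 0], affine), 'FIT_map0.nii.gz')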