Example #1
def objectiveFunctionDataCons(x, N, ph, data, k, kmask=None):
    #import pdb; pdb.set_trace()
   
    if len(k.shape)==len(N):
        ph0 = ph.reshape(N)
        obj_data = k*(tf.fft2c(x,ph,axes=(-2,-1),kmask=kmask)-data)
    else:
        ph0 = ph.reshape(np.hstack([-1,N]))
        obj_data = k*(tf.fft2c(x,ph,axes=(-2,-1),kmask=kmask)-data)
        obj_data = np.sum(obj_data,axis=0)

    return obj_data*obj_data.conj() #L2 Norm
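The value returned above is the elementwise squared magnitude of the masked k-space residual, |k * (F_ph(x) - data)|^2; summing it gives the L2 cost. A minimal NumPy-only sketch of the same quantity, substituting a plain orthonormal centered FFT for the repository's tf.fft2c helper (the helper's phase and kmask handling are omitted here):

import numpy as np

def dc_residual_sq(x, data, k):
    # Centered, orthonormal 2D FFT as a stand-in for tf.fft2c (no phase correction).
    Fx = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(x), norm='ortho'))
    r = k * (Fx - data)
    return r * r.conj()  # elementwise |residual|^2; np.sum(...) gives the scalar L2 cost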
Example #2
def gObj(x,N,ph,
         data_from_scanner,
         samp_mask):
    '''
    Compute the gradient of the data-consistency objective: the gradient
    measures how the k-space of the current estimate compares to the data
    recorded by the scanner, so that data consistency can be enforced.
    
    Inputs:
    [np.array] x - current image estimate
    [np.array] N - image dimensions
    [np.array] ph - phase map used by the centered FFT
    [np.array] data_from_scanner - the original k-space data from the scanner
    [np.array] samp_mask - mask so we only compare the regions of k-space that were sampled
    
    Outputs:
    [np.array] grad - the gradient of the data-consistency term
    
    '''
    if len(N) == 2:
        N = np.hstack([1, N])

    #grad = np.zeros([x.shape])

    # Here we're going to convert the data into k-space, then subtract off the
    # original data from the scanner. Finally, we will convert the result back
    # into image space.
    x0 = x.reshape(N)
    data_from_scanner.shape = N
    x_data = np.fft.fftshift(samp_mask)*tf.fft2c(x0,ph); # Issue, feeding in 3D data to a 2D fft alg...
    
    grad = -2*tf.ifft2c(data_from_scanner - x_data,ph).real; # -1* & ,real
    
    return grad
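The gradient returned by gObj has the form -2*Re{ IFFT[ mask*(data - F(x)) ] } (the scanner data is already masked, so masking the residual is equivalent). A small self-contained check of that form against a finite difference, using a plain orthonormal centered FFT in place of tf.fft2c/tf.ifft2c (all names here are illustrative):

import numpy as np

def fft2c(x):
    # Orthonormal, centered 2D FFT (stand-in for tf.fft2c without phase handling).
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(x), norm='ortho'))

def ifft2c(X):
    # Inverse of fft2c above (stand-in for tf.ifft2c).
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(X), norm='ortho'))

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 8))
data = fft2c(rng.standard_normal((8, 8)))
mask = (rng.random((8, 8)) > 0.5).astype(float)

def cost(im):
    # Summed squared residual of the masked data-consistency term.
    return np.sum(np.abs(mask * (fft2c(im) - data)) ** 2)

grad = -2 * ifft2c(mask * (data - fft2c(x))).real  # same form as the return value of gObj

eps = 1e-6
e = np.zeros_like(x); e[3, 4] = eps
fd = (cost(x + e) - cost(x - e)) / (2 * eps)   # central finite difference
print(np.isclose(fd, grad[3, 4]))              # expect True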
Example #3
def gDataCons(x, N, ph, data, k, kmask=None):
    '''
    Compute the gradient of the data-consistency objective: the gradient
    measures how the k-space of the current estimate compares to the data
    recorded by the scanner, so that data consistency can be enforced.
    
    Inputs:
    [np.array] x - current image estimate
    [np.array] N - image dimensions
    [np.array] ph - phase map used by the centered FFT
    [np.array] data - the original k-space data from the scanner
    [np.array] k - sampling mask so we only compare the regions of k-space that were sampled
    [np.array] kmask - optional k-space mask passed through to the FFT helpers
    
    Outputs:
    [np.array] grad - the gradient of the data-consistency term
    
    '''
    if len(N) == 2:
        N = np.hstack([1,N])
    #grad = np.zeros([x.shape])

    # Here we're going to convert the data into k-space, then subtract off the
    # original data from the scanner. Finally, we will convert the result back
    # into image space.
    x0 = x.reshape(N)
    data.shape = N
    grad = np.zeros(N)
    
    if len(k.shape)==len(N):
        ph0 = ph.reshape(N)
        xdata = tf.fft2c(x0,ph0,axes=(-2,-1),kmask=kmask)
        grad = tf.ifft2c(2 * k * (xdata - data),ph0,axes=(-2,-1),kmask=kmask).real
    else:
        ph0 = ph.reshape(np.hstack([-1,N]))
        xdata = tf.fft2c(x0,ph0,axes=(-2,-1),kmask=kmask)
        grad = tf.ifft2c(2 * k * (xdata - data),ph0,axes=(-2,-1),kmask=kmask).real
        grad = np.sum(grad,axis=0)
    
    return grad
def loRes(im,pctg):
    N = im.shape
    ph_ones=np.ones(N)
    [x,y] = np.meshgrid(np.linspace(-1,1,N[1]),np.linspace(-1,1,N[0]))
    rsq = x**2 + y**2
    loResMaskLocs = np.where(rsq < pctg)
    loResMask = np.zeros(N)
    loResMask[loResMaskLocs] = 1
    loResMask = sp.ndimage.filters.gaussian_filter(loResMask,3)
    data = np.fft.fftshift(loResMask)*tf.fft2c(im, ph=ph_ones)
    im_lr_wph = tf.ifft2c(data,ph=ph_ones)
    ph_lr = tf.matlab_style_gauss2D(im_lr_wph,shape=(5,5))
    ph_lr = np.exp(1j*ph_lr)
    im_lr = tf.ifft2c(data, ph=ph_lr)
    return im_lr
Beispiel #5
0
def loRes(im, pctg):
    N = im.shape
    ph_ones = np.ones(N)
    [x, y] = np.meshgrid(np.linspace(-1, 1, N[1]), np.linspace(-1, 1, N[0]))
    rsq = x**2 + y**2
    loResMaskLocs = np.where(rsq < pctg)
    loResMask = np.zeros(N)
    loResMask[loResMaskLocs] = 1
    loResMask = sp.ndimage.filters.gaussian_filter(loResMask, 3)
    data = np.fft.fftshift(loResMask) * tf.fft2c(im, ph=ph_ones)
    im_lr_wph = tf.ifft2c(data, ph=ph_ones)
    ph_lr = tf.matlab_style_gauss2D(im_lr_wph, shape=(5, 5))
    ph_lr = np.exp(1j * ph_lr)
    im_lr = tf.ifft2c(data, ph=ph_lr)
    return im_lr
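Both versions of loRes above build a circular low-pass mask at the center of k-space (rsq < pctg in normalized coordinates), feather it with a Gaussian filter, reconstruct a low-resolution image, estimate a smooth phase map from that image, and reapply the inverse FFT with the estimated phase. A hypothetical call, assuming im is a 2D image array that has already been loaded:

im_lr = loRes(im, pctg=0.25)  # low-resolution, phase-corrected reference image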
    def perform(self, x, k0, mask, sensitivity, coil_dim=1):
        k = T.fft2c(D.project_all_coils(x, sensitivity, coil_dim))
        k = k.view(k.size(0),
                   k.size(1) * k.size(2), k.size(3), k.size(4), k.size(5))
        k = self.conv(k)
        k = k.view(k.size(0), 12, 8, k.size(2), k.size(3), k.size(4))

        if self.noise_lvl is not None:  # noisy case
            v = torch.sigmoid(self.noise_lvl)  # Normalize to 0~1
            v = v.unsqueeze(0).unsqueeze(1).unsqueeze(3).unsqueeze(
                4).unsqueeze(5)

            k = (1 - mask) * k + mask * (v * k + (1 - v) * k0)
        else:  # noiseless case
            k = (1 - mask) * k + mask * k0
        return D.combine_all_coils(T.ifft2c(k), sensitivity, coil_dim)
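In the noisy branch above, sampled k-space locations are replaced by a convex combination of the network prediction and the measured data, weighted by a learned noise level, while unsampled locations keep the prediction. A minimal sketch of just that update rule, assuming k, k0 and mask are same-shaped torch tensors and v is a scalar in (0, 1):

import torch

def soft_data_consistency(k, k0, mask, v):
    # Unsampled entries keep the prediction; sampled entries blend prediction and measurement.
    return (1 - mask) * k + mask * (v * k + (1 - v) * k0)

k_out = soft_data_consistency(torch.randn(4, 4), torch.randn(4, 4),
                              (torch.rand(4, 4) > 0.5).float(), 0.3)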
Example #7
def gDataCons(x, N, ph, data_from_scanner, samp_mask):
    '''
    Compute the gradient of the data-consistency objective: the gradient
    measures how the k-space of the current estimate compares to the data
    recorded by the scanner, so that data consistency can be enforced.
    
    Inputs:
    [np.array] x - current image estimate
    [np.array] N - image dimensions
    [np.array] ph - phase map used by the centered FFT
    [np.array] data_from_scanner - the original k-space data from the scanner
    [np.array] samp_mask - mask so we only compare the regions of k-space that were sampled
    
    Outputs:
    [np.array] grad - the gradient of the data-consistency term
    
    '''
    if len(N) == 2:
        N = np.hstack([1, N])
    #grad = np.zeros([x.shape])

    # Here we're going to convert the data into k-space, then subtract off the
    # original data from the scanner. Finally, we will convert the result back
    # into image space.
    x0 = x.reshape(N)
    data_from_scanner.shape = N
    grad = np.zeros(N)
    ph0 = ph.reshape(N)
    #samp_mask = samp_mask.reshape(N)

    for kk in range(N[0]):
        x_data = tf.fft2c(x0[kk, :, :], ph0[kk, :, :])
        # Issue, feeding in 3D data to a 2D fft alg...

        grad[kk, :, :] = -2 * tf.ifft2c(
            samp_mask[kk, :, :] *
            (data_from_scanner[kk, :, :] - x_data), ph0[kk, :, :]).real
        # -1* & ,real
    #import pdb; pdb.set_trace()
    return grad
Example #8
def optfun(x, N, lam1, lam2, data, k, strtag, ph, dirWeight=0, dirs=None,
           dirInfo=[None,None,None,None], nmins=0,wavelet='db4',mode="per",a=1.0):
    '''
    This is the objective function we are minimizing. We optimize over x, evaluating the data-consistency, TV, and wavelet (XFM) terms through the helper functions we have created.
    '''
    #dirInfo[0] is M
    #import pdb; pdb.set_trace()
    tv = 0
    xfm = 0
    data.shape = N
    x.shape = N
    obj_data = np.fft.fftshift(k)*(data - tf.fft2c(x,ph))
    obj = np.sum(obj_data*obj_data.conj()) #L2 Norm
    #tv = np.sum(abs(tf.TV(x,N,strtag,dirWeight,dirs,nmins,M))) #L1 Norm
    if lam1:
        tv = np.sum((1/a)*np.log(np.cosh(a*tf.TV(x,N,strtag,dirWeight,dirs,nmins,dirInfo))))
    #xfm cost calc
    if lam2:
        if len(N) > 2:
            xfm=0
            for kk in range(N[0]):
                wvlt = tf.xfm(x[kk,:,:],wavelet=wavelet,mode=mode)
                xfm += np.sum((1/a)*np.log(np.cosh(a*wvlt[0])))
                for i in range(1,len(wvlt)):
                    xfm += np.sum([np.sum((1/a)*np.log(np.cosh(a*wvlt[i][j]))) for j in range(3)])
        else:
            wvlt = tf.xfm(x,wavelet=wavelet,mode=mode)
            xfm = np.sum((1/a)*np.log(np.cosh(a*wvlt[0])))
            for i in range(1,len(wvlt)):
                xfm += np.sum([np.sum((1/a)*np.log(np.cosh(a*wvlt[i][j]))) for j in range(3)])
    
    x.shape = (x.size,) # Not the most efficient way to do this, but we need the shape to reset.
    data.shape = (data.size,)
    #output
    #print('obj: %.2f' % abs(obj))
    #print('tv: %.2f' % abs(lam1*tv))
    #print('xfm: %.2f' % abs(lam2*xfm))
    return abs(obj + lam1*tv + lam2*xfm)
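The (1/a)*np.log(np.cosh(a*t)) terms above are a smooth surrogate for |t| used for both the TV and wavelet (XFM) penalties; the surrogate is differentiable at zero and approaches |t| as a grows. A quick numeric illustration:

import numpy as np

t = np.linspace(-1, 1, 201)
for a in (1.0, 10.0, 100.0):
    gap = np.max(np.abs((1 / a) * np.log(np.cosh(a * t)) - np.abs(t)))
    print(a, gap)  # the gap to |t| shrinks roughly like log(2)/a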
Example #9
                                      pctg,
                                      radius=radius,
                                      cyl=np.hstack([1, N]),
                                      style='mult')
                    break
                except:
                    radius = 0.5 * radius
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)
            if len(N) == 2:
                N = np.hstack([1, N])
                k = k.reshape(N)

            # Here is where we build the undersampled data
            ph_ones = np.ones(im.shape, complex)
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph_ones)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            #ph_ones = np.ones(im.shape, complex)
            im_scan_wph = tf.ifft2c(data, ph=ph_ones)
            ph_scan = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
            #ph_scan = tf.matlab_style_gauss2D(im,shape=(5,5))

            #for i in range(phIter):
            #ph_scan = tf.laplacianUnwrap(ph_scan,N,[75,75])

            ph_scan = np.exp(1j * ph_scan)
            im_scan = tf.ifft2c(data, ph=ph_scan)
def runCSAlgorithm(fromfid=False,
                   filename='/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/fullySampledBrain.npy',
                   sliceChoice=150,
                   strtag = ['','spatial', 'spatial'],
                   xtol = [1e-2, 1e-3, 5e-4, 5e-4],
                   TV = [0.01, 0.005, 0.002, 0.001],
                   XFM = [0.01,.005, 0.002, 0.001],
                   dirWeight=0,
                   pctg=0.25,
                   radius=0.2,
                   P=2,
                   pft=False,
                   ext=0.5,
                   wavelet='db4',
                   mode='per',
                   method='CG',
                   ItnLim=30,
                   lineSearchItnLim=30,
                   alpha_0=0.6,
                   c=0.6,
                   a=10.0,
                   kern = np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                    [[0., 0., 0.], [0., -1., 0.], [0., 1., 0.]],
                                    [[0., 0., 0.], [0., -1., 1.], [0., 0., 0.]]]),
                   dirFile = None,
                   nmins = None,
                   dirs = None,
                   M = None,
                   dirInfo = [None]*4,
                   saveNpy=False,
                   saveNpyFile=None,
                   saveImsPng=False,
                   saveImsPngFile=None,
                   saveImDiffPng=False,
                   saveImDiffPngFile=None,
                   disp=False):
    ##import pdb; pdb.set_trace()
    if fromfid==True:
        inputdirectory=filename[0]
        petable=filename[1]
        fullImData = rff.getDataFromFID(petable,inputdirectory,2)[0,:,:,:]
        fullImData = fullImData/np.max(abs(fullImData))
        im = fullImData[:,:,sliceChoice]
    else:
        im = np.load(filename)[sliceChoice,:,:]
        
    N = np.array(im.shape)  # image Size

    pdf = samp.genPDF(N[-2:], P, pctg, radius=radius, cyl=np.hstack([1, N[-2:]]), style='mult', pft=pft, ext=ext)
    if pft:
        print('Partial Fourier sampling method used')
    k = samp.genSampling(pdf, 50, 2)[0].astype(int)
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif (len(N) == 3) and ('dir' not in strtag):
        k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N,complex)
    im_scan = np.zeros(N,complex)
    for i in range(N[0]):
        k[i,:,:] = np.fft.fftshift(k[i,:,:])
        data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

        # IMAGE from the "scanner data"
        im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
        ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
        ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
        im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
        #im_lr = samp.loRes(im,pctg)
    
    # ------------------------------------------------------------------ #
    # A quick way to look at the PSF of the sampling pattern that we use #
    delta = np.zeros(N[-2:])
    delta[int(N[-2]/2),int(N[-1]/2)] = 1
    psf = tf.ifft2c(tf.fft2c(delta,ph_ones)*k,ph_ones)
    # ------------------------------------------------------------------ #


    ## ------------------------------------------------------------------ #
    ## -- Currently broken - Need to figure out what's happening here. -- #
    ## ------------------------------------------------------------------ #
    #if pft:
        #for i in xrange(N[0]):
            #dataHold = np.fft.fftshift(data[i,:,:])
            #kHold = np.fft.fftshift(k[i,:,:])
            #loc = 98
            #for ix in xrange(N[-2]):
                #for iy in xrange(loc,N[-1]):
                    #dataHold[-ix,-iy] = dataHold[ix,iy].conj()
                    #kHold[-ix,-iy] = kHold[ix,iy]
    ## ------------------------------------------------------------------ #
    
    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf==0)
    pdfDiv[pdfZeros] = 1
    #im_scan_imag = im_scan.imag
    #im_scan = im_scan.real

    N_im = N.copy()
    hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real,wavelet,mode)
    N = np.hstack([N_im[0], hld.shape])

    w_scan = np.zeros(N)
    w_full = np.zeros(N)
    im_dc = np.zeros(N_im)
    w_dc = np.zeros(N)

    for i in range(N[0]):
        w_scan[i,:,:] = tf.wt(im_scan.real[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)[0]
        w_full[i,:,:] = tf.wt(abs(im[i,:,:]),wavelet,mode,dims,dimOpt,dimLenOpt)[0]

        im_dc[i,:,:] = tf.ifft2c(data[i,:,:] / np.fft.ifftshift(pdfDiv), ph=ph_scan[i,:,:]).real.copy()
        w_dc[i,:,:] = tf.wt(im_dc[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)[0]

    w_dc = w_dc.flatten()
    im_sp = im_dc.copy().reshape(N_im)
    minval = np.min(abs(im))
    maxval = np.max(abs(im))
    data = np.ascontiguousarray(data)

    imdcs = [im_dc,np.zeros(N_im),np.ones(N_im),np.random.randn(np.prod(N_im)).reshape(N_im)]
    imdcs[-1] = imdcs[-1] - np.min(imdcs[-1])
    imdcs[-1] = imdcs[-1]/np.max(abs(imdcs[-1]))
    mets = ['Density Corrected','Zeros','1/2''s','Gaussian Random Shift (0,1)']
    wdcs = []
    for i in range(len(imdcs)):
        wdcs.append(tf.wt(imdcs[i][0],wavelet,mode,dims,dimOpt,dimLenOpt)[0].reshape(N))

    ims = []
    #print('Starting the CS Algorithm')
    for kk in range(len(wdcs)):
        w_dc = wdcs[kk]
        print(mets[kk])
        for i in range(len(TV)):
            args = (N, N_im, dims, dimOpt, dimLenOpt, TV[i], XFM[i], data, k, strtag, ph_scan, kern, dirWeight, dirs, dirInfo, nmins, wavelet, mode, a)
            w_result = opt.minimize(f, w_dc, args=args, method=method, jac=df, 
                                        options={'maxiter': ItnLim, 'lineSearchItnLim': lineSearchItnLim, 'gtol': 0.01, 'disp': 1, 'alpha_0': alpha_0, 'c': c, 'xtol': xtol[i], 'TVWeight': TV[i], 'XFMWeight': XFM[i], 'N': N})
            if np.any(np.isnan(w_result['x'])):
                print("Some nan's found. Dropping TV and XFM values")
            elif w_result['status'] != 0:
                print('TV and XFM values too high -- no solution found. Dropping...')
            else:
                w_dc = w_result['x']
                
        w_res = w_dc.reshape(N)
        im_res = np.zeros(N_im)
        for i in range(N[0]):
            im_res[i,:,:] = tf.iwt(w_res[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)
        ims.append(im_res)
    
    if saveNpy:
        if saveNpyFile is None:
            np.save('./holdSave_im_res_' + str(int(pctg*100)) + 'p_all_SP',ims)
        else:
            np.save(saveNpyFile,ims)
    
    if saveImsPng:
        vis.figSubplots(ims,titles=mets,clims=(minval,maxval),colorbar=True)
        if not disp:
            if saveImsPngFile is None:
                saveFig.save('./holdSave_ims_' + str(int(pctg*100)) + 'p_all_SP')
            else:
                saveFig.save(saveImsPngFile)
    
    if saveImDiffPng:
        imdiffs, clims = vis.imDiff(ims)
        diffMets = ['DC-Zeros','DC-Ones','DC-Random','Zeros-Ones','Zeros-Random','Ones-Random']
        vis.figSubplots(imdiffs,titles=diffMets,clims=clims,colorbar=True)
        if not disp:
            if saveImDiffPngFile is None:
                saveFig.save('./holdSave_im_diffs_' + str(int(pctg*100)) + 'p_all_SP')
            else:
                saveFig.save(saveImDiffPngFile)
    
    if disp:
        plt.show()
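A hypothetical invocation of runCSAlgorithm as defined above; the file path is the function's default and the parameter values are illustrative only (TV, XFM and xtol should have matching lengths, since they are stepped through together):

runCSAlgorithm(sliceChoice=150,
               pctg=0.25,
               TV=[0.01, 0.005],
               XFM=[0.01, 0.005],
               xtol=[1e-2, 1e-3],
               saveNpy=True,
               saveNpyFile='./cs_recon_25pct.npy')  # output path is illustrative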
Example #11
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

im_scan = abs(im).reshape(N)
im_dc = np.load(
    '/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/im_dc.npy'
)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.exp(1.j * np.angle(im))
data = np.zeros(N, complex)
for i in range(N[0]):
    k[i, :, :] = np.fft.fftshift(k[i, :, :])
    data[i, :, :] = k[i, :, :] * tf.fft2c(im[i, :, :], ph=ph_ones)

N_im = N
hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real, wavelet, mode)
N = np.hstack([N_im[0], hld.shape])

w_scan = np.zeros(N)
w_full = np.zeros(N)
w_dc = np.zeros(N)

for i in range(N[0]):
    w_scan[i, :, :] = tf.wt(im_scan.real[i, :, :], wavelet, mode, dims, dimOpt,
                            dimLenOpt)[0]
    w_full[i, :, :] = tf.wt(abs(im[i, :, :]), wavelet, mode, dims, dimOpt,
                            dimLenOpt)[0]
    w_dc[i, :, :] = tf.wt(im_dc[i, :, :], wavelet, mode, dims, dimOpt,
Example #12
def objectiveFunctionDataCons(x, N, ph, data, k):
    obj_data = k * (data - tf.fft2c(x, ph))
    return obj_data * obj_data.conj()  #L2 Norm
Example #13
def objectiveFunctionDataCons(x, N, ph, data, k, sz, strtag):
    #import pdb; pdb.set_trace()
    xdata = tf.fft2c(x, ph, sz=sz, axes=(-2, -1))
    # Currently this only iterates over the first axis. We should add a check at the start of these functions to ensure the axis order is ['other','spatial','spatial'] (via swapaxes), swapping back afterwards if needed.
    obj_data = k * (data - xdata)
    return obj_data * obj_data.conj()  #L2 Norm
for i in range(phIter):
    ph = tf.laplacianUnwrap(ph,N,[75,75])

ph = np.exp(1j*ph)





# Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[1,294,294], style='mult') 
# Set the sampling pattern -- checked and this gives the right percentage
k = samp.genSampling(pdf, 50, 2)[0].astype(int)

# Here is where we build the undersampled data
data = np.fft.fftshift(k) * tf.fft2c(im, ph=ph_ones)
# ph = phase_Calculation(im,is_kspace = False)
# data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
#filt = tf.fermifilt(N)
#data = data * filt

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data, ph=ph)
minval = np.min(abs(im))
maxval = np.max(abs(im))
# Primary first guess. What we're using for now. Density corrected
#im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
#for imdcs in ['zeros','ones','densCorr','imFull']:
for imdcs in ['densCorr','densCorr_Completed']:
    if imdcs == 'zeros':
        im_dc = np.zeros(data.shape)
Example #15
pdf = samp.genPDF(
    N, P, pctg, radius=0.1, cyl=[0]
)  # Currently not working properly for the cylindrical case -- can fix at home
# Set the sampling pattern -- checked and this gives the right percentage
k = samp.genSampling(pdf, 10, 60)[0].astype(int)

# Diffusion information that we need
if dirFile:
    dirs = np.loadtxt(dirFile)
    M = d.calc_Mid_Matrix(dirs, nmins=4)
else:
    dirs = None
    M = None

# Here is where we build the undersampled data
data = np.fft.ifftshift(k) * tf.fft2c(im, ph)
#ph = phase_Calculation(im,is_kspace = False)
#data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data, ph)

# Primary first guess. What we're using for now. Density corrected
im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph).flatten().copy()

# Optimization algorithm -- this is where everything culminates together
im_result = opt.minimize(optfun,
                         im_dc,
                         args=(N, TVWeight, XFMWeight, data, k, strtag,
                               dirWeight, dirs, M, nmins, scaling_factor, L,
                               ph),
Example #16
#ax.set_title('RMSE for Different Pctg and Starting Point Combinations')
#ax.set_xticks(ind)
#ax.set_xticklabels(('zeros','ones','densCorr','imFull'))
#ax.set_xlabel('Percent Sampled')
#ax.legend((rects1[0], rects2[0], rects3[0], rects4[0], rects5[0], rects6[0]),('20%', '25%', '33%', '40%', '50%'), title='im_dc',loc='upper right'); #loc='center left', bbox_to_anchor=(1, 0.5))


# im_dc and pctg Comparison
# TV = 0.005 -- 3
# XFM = 0.005 -- 3
rmse_lr = np.zeros(len(pctg))
for i in range(len(pctg)):
    lrLoc = int(np.ceil((N[0]-np.ceil(N[0]/np.sqrt(1/pctg[i])))/2))
    data_lr_us = np.fft.fftshift(tf.ifft2c(im,np.ones(im.shape)))[lrLoc:-lrLoc,lrLoc:-lrLoc]
    data_lr_rs = zpad(data_lr_us,im.shape)
    im_lr_rs = abs(tf.fft2c(np.fft.fftshift(data_lr_rs),np.ones(im.shape)))
    rmse_lr[i] = err.rmse(abs(im_lr_rs),im)

N = len(pctg)

ind = np.arange(N)+.5
width =0.15

fig, ax = plt.subplots()
rects0 = ax.bar(ind - 4*width, rmse_lr, width, color='b', alpha=0.1)
rects1 = ax.bar(ind - 3*width, rmse_data[:,3,3,0], width, color='b', alpha=.25)
rects2 = ax.bar(ind - 2*width, rmse_data[:,3,3,1], width, color='b', alpha=.5)
rects3 = ax.bar(ind - 1*width, rmse_data[:,3,3,2], width, color='b', alpha=.75)
rects4 = ax.bar(ind + 0*width, rmse_data[:,3,3,3], width, color='b', alpha=1)

ax.set_ylabel('RMSE')
    print('Partial Fourier sampling method used')
k = samp.genSampling(pdf, 50, 2)[0].astype(int)
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
im_scan = np.zeros(N,complex)
for i in range(N[0]):
    k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
    
    #im_lr = samp.loRes(im,pctg)


# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
delta[int(N[-2]/2),int(N[-1]/2)] = 1
N = np.array(im.shape) #image Size
tupleN = tuple(N)
pctg = 0.25 # undersampling factor
P = 5 # Variable density polynomial degree
#ph = tf.matlab_style_gauss2D(im,shape=(5,5));
ph=np.ones(im.shape,complex)


# Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
pdf = samp.genPDF(N,P,pctg,radius = 0.3,cyl=[0]) # Currently not working properly for the cylindrical case -- can fix at home
# Set the sampling pattern -- checked and this gives the right percentage
k = samp.genSampling(pdf,50,2)[0].astype(int)

# Here is where we build the undersampled data
data = np.fft.ifftshift(k)*tf.fft2c(im,ph=ph)
#ph = phase_Calculation(im,is_kspace = False)
#data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data,ph=ph)

# Primary first guess. What we're using for now. Density corrected
im_dc = tf.ifft2c(data/np.fft.ifftshift(pdf),ph=ph).real.flatten().copy()

# Optimization algorithm -- this is where everything culminates together
a=10.0
testargs = (N,TVWeight,XFMWeight,data,k,strtag,ph,dirWeight,dirs,M,nmins,wavelet,mode,a)


# Get things set to test alpha values
Example #19
#tupleN = tuple(N)

for pctg in [0.25, 0.33, 0.40, 0.50]:
    for TVWeight in [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05]:
        for XFMWeight in [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05]:
            P = 5  # Variable density polynomial degree
            ph = tf.matlab_style_gauss2D(im, shape=(5, 5))
            #ph = np.ones(im.shape, complex)

            # Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
            pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[0])
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)

            # Here is where we build the undersampled data
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph)
            # ph = phase_Calculation(im,is_kspace = False)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            im_scan = tf.ifft2c(data, ph=ph)
            minval = np.min(im)
            maxval = np.max(im)
            # Primary first guess. What we're using for now. Density corrected
            #im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
            for imdcs in ['zeros', 'ones', 'densCorr', 'imFull']:
                if imdcs == 'zeros':
                    im_dc = np.zeros(data.shape)
                elif imdcs == 'ones':
Example #20
k = samp.genSampling(pdf, 50, 2)[0].astype(int)
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
dataFull = np.zeros(N,complex)
im_scan = np.zeros(N,complex)
for i in range(N[0]):
    #k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = np.fft.fftshift(k[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataFull[i,:,:] = np.fft.fftshift(tf.fft2c(im[i,:,:], ph=ph_ones))

    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
    
    #im_lr = samp.loRes(im,pctg)


# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
Example #21
def runCSAlgorithm(
        fromfid=False,
        filename='/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/fullySampledBrain.npy',
        sliceChoice=150,
        strtag=['', 'spatial', 'spatial'],
        xtol=[1e-2, 1e-3, 5e-4, 5e-4],
        TV=[0.01, 0.005, 0.002, 0.001],
        XFM=[0.01, .005, 0.002, 0.001],
        dirWeight=0,
        pctg=0.25,
        radius=0.2,
        P=2,
        pft=False,
        ext=0.5,
        wavelet='db4',
        mode='per',
        method='CG',
        ItnLim=30,
        lineSearchItnLim=30,
        alpha_0=0.6,
        c=0.6,
        a=10.0,
        kern=np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                       [[0., 0., 0.], [0., -1., 0.], [0., 1., 0.]],
                       [[0., 0., 0.], [0., -1., 1.], [0., 0., 0.]]]),
        dirFile=None,
        nmins=None,
        dirs=None,
        M=None,
        dirInfo=[None] * 4,
        saveNpy=False,
        saveNpyFile=None,
        saveImsPng=False,
        saveImsPngFile=None,
        saveImDiffPng=False,
        saveImDiffPngFile=None,
        disp=False):
    ##import pdb; pdb.set_trace()
    if fromfid == True:
        inputdirectory = filename[0]
        petable = filename[1]
        fullImData = rff.getDataFromFID(petable, inputdirectory, 2)[0, :, :, :]
        fullImData = fullImData / np.max(abs(fullImData))
        im = fullImData[:, :, sliceChoice]
    else:
        im = np.load(filename)[sliceChoice, :, :]

    N = np.array(im.shape)  # image Size

    pdf = samp.genPDF(N[-2:],
                      P,
                      pctg,
                      radius=radius,
                      cyl=np.hstack([1, N[-2:]]),
                      style='mult',
                      pft=pft,
                      ext=ext)
    if pft:
        print('Partial Fourier sampling method used')
    k = samp.genSampling(pdf, 50, 2)[0].astype(int)
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif (len(N) == 3) and ('dir' not in strtag):
        k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N, complex)
    im_scan = np.zeros(N, complex)
    for i in range(N[0]):
        k[i, :, :] = np.fft.fftshift(k[i, :, :])
        data[i, :, :] = k[i, :, :] * tf.fft2c(im[i, :, :], ph=ph_ones)

        # IMAGE from the "scanner data"

        im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
        ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
        ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
        im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
        #im_lr = samp.loRes(im,pctg)

    # ------------------------------------------------------------------ #
    # A quick way to look at the PSF of the sampling pattern that we use #
    delta = np.zeros(N[-2:])
    delta[int(N[-2] / 2), int(N[-1] / 2)] = 1
    psf = tf.ifft2c(tf.fft2c(delta, ph_ones) * k, ph_ones)
    # ------------------------------------------------------------------ #

    ## ------------------------------------------------------------------ #
    ## -- Currently broken - Need to figure out what's happening here. -- #
    ## ------------------------------------------------------------------ #
    #if pft:
    #for i in xrange(N[0]):
    #dataHold = np.fft.fftshift(data[i,:,:])
    #kHold = np.fft.fftshift(k[i,:,:])
    #loc = 98
    #for ix in xrange(N[-2]):
    #for iy in xrange(loc,N[-1]):
    #dataHold[-ix,-iy] = dataHold[ix,iy].conj()
    #kHold[-ix,-iy] = kHold[ix,iy]
    ## ------------------------------------------------------------------ #

    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf == 0)
    pdfDiv[pdfZeros] = 1
    #im_scan_imag = im_scan.imag
    #im_scan = im_scan.real

    N_im = N.copy()
    hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real, wavelet, mode)
    N = np.hstack([N_im[0], hld.shape])

    w_scan = np.zeros(N)
    w_full = np.zeros(N)
    im_dc = np.zeros(N_im)
    w_dc = np.zeros(N)

    for i in range(N[0]):
        w_scan[i, :, :] = tf.wt(im_scan.real[i, :, :], wavelet, mode, dims,
                                dimOpt, dimLenOpt)[0]
        w_full[i, :, :] = tf.wt(abs(im[i, :, :]), wavelet, mode, dims, dimOpt,
                                dimLenOpt)[0]

        im_dc[i, :, :] = tf.ifft2c(data[i, :, :] / np.fft.ifftshift(pdfDiv),
                                   ph=ph_scan[i, :, :]).real.copy()
        w_dc[i, :, :] = tf.wt(im_dc[i, :, :], wavelet, mode, dims, dimOpt, dimLenOpt)[0]

    w_dc = w_dc.flatten()
    im_sp = im_dc.copy().reshape(N_im)
    minval = np.min(abs(im))
    maxval = np.max(abs(im))
    data = np.ascontiguousarray(data)

    imdcs = [
        im_dc,
        np.zeros(N_im),
        np.ones(N_im),
        np.random.randn(np.prod(N_im)).reshape(N_im)
    ]
    imdcs[-1] = imdcs[-1] - np.min(imdcs[-1])
    imdcs[-1] = imdcs[-1] / np.max(abs(imdcs[-1]))
    mets = [
        'Density Corrected', 'Zeros', '1/2'
        's', 'Gaussian Random Shift (0,1)'
    ]
    wdcs = []
    for i in range(len(imdcs)):
        wdcs.append(
            tf.wt(imdcs[i][0], wavelet, mode, dims, dimOpt,
                  dimLenOpt)[0].reshape(N))

    ims = []
    #print('Starting the CS Algorithm')
    for kk in range(len(wdcs)):
        w_dc = wdcs[kk]
        print(mets[kk])
        for i in range(len(TV)):
            args = (N, N_im, dims, dimOpt, dimLenOpt, TV[i], XFM[i], data, k,
                    strtag, ph_scan, kern, dirWeight, dirs, dirInfo, nmins,
                    wavelet, mode, a)
            w_result = opt.minimize(f,
                                    w_dc,
                                    args=args,
                                    method=method,
                                    jac=df,
                                    options={
                                        'maxiter': ItnLim,
                                        'lineSearchItnLim': lineSearchItnLim,
                                        'gtol': 0.01,
                                        'disp': 1,
                                        'alpha_0': alpha_0,
                                        'c': c,
                                        'xtol': xtol[i],
                                        'TVWeight': TV[i],
                                        'XFMWeight': XFM[i],
                                        'N': N
                                    })
            if np.any(np.isnan(w_result['x'])):
                print("Some nan's found. Dropping TV and XFM values")
            elif w_result['status'] != 0:
                print(
                    'TV and XFM values too high -- no solution found. Dropping...'
                )
            else:
                w_dc = w_result['x']

        w_res = w_dc.reshape(N)
        im_res = np.zeros(N_im)
        for i in range(N[0]):
            im_res[i, :, :] = tf.iwt(w_res[i, :, :], wavelet, mode, dims,
                                     dimOpt, dimLenOpt)
        ims.append(im_res)

    if saveNpy:
        if saveNpyFile is None:
            np.save('./holdSave_im_res_' + str(int(pctg * 100)) + 'p_all_SP',
                    ims)
        else:
            np.save(saveNpyFile, ims)

    if saveImsPng:
        vis.figSubplots(ims,
                        titles=mets,
                        clims=(minval, maxval),
                        colorbar=True)
        if not disp:
            if saveImsPngFile is None:
                saveFig.save('./holdSave_ims_' + str(int(pctg * 100)) +
                             'p_all_SP')
            else:
                saveFig.save(saveImsPngFile)

    if saveImDiffPng:
        imdiffs, clims = vis.imDiff(ims)
        diffMets = [
            'DC-Zeros', 'DC-Ones', 'DC-Random', 'Zeros-Ones', 'Zeros-Random',
            'Ones-Random'
        ]
        vis.figSubplots(imdiffs, titles=diffMets, clims=clims, colorbar=True)
        if not disp:
            if saveImDiffPngFile is None:
                saveFig.save('./holdSave_im_diffs_' + str(int(pctg * 100)) +
                             'p_all_SP')
            else:
                saveFig.save(saveImDiffPngFile)

    if disp:
        plt.show()
Example #22
    print('Partial Fourier sampling method used')
k = samp.genSampling(pdf, 50, 2)[0].astype(int)
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
im_scan = np.zeros(N,complex)
for i in range(N[0]):
    k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
    
    #im_lr = samp.loRes(im,pctg)


# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
delta[int(N[-2]/2),int(N[-1]/2)] = 1
def objectiveFunctionDataCons(x, N, ph, data, k, sz, strtag):
    #import pdb; pdb.set_trace()
    xdata = tf.fft2c(x,ph,sz=sz,axes=(-2,-1))
    # Currently this only iterates over the first axis. We should add a check at the start of these functions to ensure the axis order is ['other','spatial','spatial'] (via swapaxes), swapping back afterwards if needed.
    obj_data = k*(data - xdata)
    return obj_data*obj_data.conj() #L2 Norm
Example #24
# Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
while True:
    try:
        pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=np.hstack([1, N[-2:]]), style='mult')
        break
    except:
        radius = 0.5*radius
# Set the sampling pattern -- checked and this gives the right percentage

if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)

# Here is where we build the undersampled data
ph_ones = np.ones(im.shape, complex)
data = tf.fft2c(im, ph=ph_ones)
data_full = tf.fft2c(imf, ph=ph_ones)

# IMAGE from the "scanner data"
im_scan_wph = tf.ifft2c(data, ph=ph_ones)
ph_scan = np.exp(1j*tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5)))
im_scan = tf.ifft2c(data, ph=ph_scan)

ph_full = np.exp(1j*tf.matlab_style_gauss2D(imf,shape=(5,5)))
im_full = tf.ifft2c(data_full, ph=ph_full)
#im_scan = abs(tf.ifft2c(data,ph_ones))
#data = tf.fft2c(im_scan,ph_ones).reshape(data.size).reshape(N)
#ph_scan = ph_ones

minval = np.min(abs(im))
maxval = np.max(abs(im))
Example #25
def calcPhase(im, k, w1=10, w2=8, w3=4, eps = np.pi/18, sig = 0):
    '''
    This function iteratively estimates the phase following Tisdall and Atkins 2005 (https://www.cs.sfu.ca/~stella/papers/2005/spie.pdf).
    
    With X(p) being our spatial domain value at pixel p:
        - X(p) = image we have
        - s(p) = signal proper
        - n_r(p) = real noise -- Gaussian
        - n_i(p) = imaginary noise -- Gaussian
        
    Assume: X(p) = s(p) exp[i φ(p)] + n_r(p) + i n_i(p)
    
    We want to calculate φ^(p), an estimate of φ(p), and then multiply it in. If φ(p) == φ^(p), then:
    
    X(p) exp[-i φ^(p)] = s(p) + (n_r(p) + i n_i(p)) exp[-i φ^(p)]
        Because exp[-i φ^(p)] is just a rotation, rotating the noise simply gives different noise, so (n_r(p) + i n_i(p)) exp[-i φ^(p)] == n_r`(p) + i n_i`(p)
    
    So our new measurement is:
        X(p) exp[-i φ^(p)] = s(p) + (n_r`(p) + i n_i`(p)) 
        
    '''
    if sig==0:
        sig = np.var(im[:50,:50])
    
    ph_ones = np.ones(im.shape)
    data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph_ones)
    im_scan_ph = tf.ifft2c(data, ph=ph_ones)
    ph = tf.matlab_style_gauss2D(im_scan_ph,shape=(5,5))
    im_scan = tf.ifft2c(data, ph=ph)

    N = im.shape
    window1 = int(np.ceil(w1/2))
    window2 = int(np.ceil(w2/2))
    window3 = int(np.ceil(w3/2))

    
    im_wrap = np.pad(im_scan,window1,'wrap')

    #ph = np.zeros(im.shape,complex)
    #ph_new = np.zeros(im.shape,complex)
    ph_new = ph.copy()
    wgts = np.ones(im.size)    
        
    '''
    
    We then do this over three steps. Step 1 is:
    
        1. Apply the given phase correction, φ^, to the recorded image, I, to get our current best guess image, I`.
    
        2. Calculate the mean of the imaginary component of all the pixels in I` in a window of width w1 around p.
        
        3. If the mean imaginary component is greater than ||p||, the magnitude of p, set p’s new phase estimate, φ^`(p), to be π/2 to correct as much as possible.
        
        4. If the mean imaginary component is less than −||p||, set φ^`(p)=−π/2 to correct as much as possible.
        
        5. Set φ^`(p) so p’s phase is on (−π/2,π/2) and its imaginary component cancels the mean component of all the other pixels in the window.
    
    
    '''
    
    ph = ph_new.copy()
    
    for x in range(N[0]):
        for y in range(N[1]):
            mn = np.mean(im_wrap[x:x+1+w1,y:y+1+w1].imag)
            if mn > abs(im_scan[x,y]):
                ph_new[x,y] = +1j
            elif mn < -abs(im_scan[x,y]):
                ph_new[x,y] = -1j
            else:
                ph_new[x,y] = abs(ph_new[x,y].real) - mn*1j
                # The abs() is required here to ensure that the phase is on (-π/2,π/2)
    
    
    ''' 
    
    Step 2 requires us to look at the places where we shifted positives to negatives, and flip them back when necessary.
    
    This then follows three more substeps:
        
        1. Calculate the mean of the distances, wrapped onto the range [−π,π), from φ^(p) to each other phase estimate pixel in a window of width w2 centered on p.
        
        2. Calculate the mean of the distances, wrapped onto the range [−π,π), from φ^(p) + π to each other phase estimate pixel in a window of width w2 centered on p.
        
        3. If the second mean distance is smaller than the first, mark p as flipped.
        
    '''
    
    # need to map phases from [-pi,pi)
    #ph_wrap_angles_piShift = (np.angle(np.pad(ph_new,window2,'wrap')) + np.pi) % (2*np.pi)
    ph_wrap_angles = np.arctan2(ph_new.imag, ph_new.real)
    cnt = 0
    
    for x in range(N[0]):
        for y in range(N[1]):
            diffs = np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=0)) + \
                    np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=1)) 
            ph_wrap_hold = np.exp(1j*(ph_wrap_angles[x,y]+np.pi))
            ph_wrap_angles[x,y] = np.arctan2(ph_wrap_hold.imag,ph_wrap_hold.real)
            diffs_piShift = np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=0)) + \
                            np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=1)) 
            
            if diffs_piShift < diffs:
                #print('Smaller')
                cnt+=1
                ph_new[x,y] = np.exp(1j*ph_wrap_angles[x,y])
            
            ph_wrap_hold = np.exp(1j*(ph_wrap_angles[x,y]-np.pi))
            ph_wrap_angles[x,y] = np.arctan2(ph_wrap_hold.imag,ph_wrap_hold.real)
        
    ph_new = np.exp(1j*ph_wrap_angles)
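Step 2 of the procedure above relies on wrapping phase differences onto [-π, π). A small helper sketch of that wrapping, assumed here for illustration rather than taken from the repository:

import numpy as np

def wrap_to_pi(dphi):
    # Map any phase difference onto the interval [-pi, pi).
    return (dphi + np.pi) % (2 * np.pi) - np.pi

print(wrap_to_pi(3.5), wrap_to_pi(-3.5))  # both land back inside [-pi, pi)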
    
    
            
Example #26
    print('Partial Fourier sampling method used')
k = samp.genSampling(pdf, 50, 2)[0].astype(int)
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N, complex)
im_scan = np.zeros(N, complex)
for i in range(N[0]):
    k[i, :, :] = np.fft.fftshift(k[i, :, :])
    data[i, :, :] = k[i, :, :] * tf.fft2c(im[i, :, :], ph=ph_ones)

    # IMAGE from the "scanner data"

    im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
    ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
    ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
    im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])

    #im_lr = samp.loRes(im,pctg)

# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
delta[int(N[-2] / 2), int(N[-1] / 2)] = 1
psf = tf.ifft2c(tf.fft2c(delta, ph_ones) * k, ph_ones)
# Now we initialize to build up "what we would get from the
# scanner" -- as well as our phase corrections
#ph_scan = np.zeros(N, complex)
#data = np.zeros(N,complex)
#dataFull = np.zeros(N,complex)

# We need to make this as efficient and accurate as possible. The
# nice part is that, if the data is anatomical, we can use the RO
# direction as well.
# NOTE: Something we can do later is make this phase estimation
# include the RO direction and then do a split afterwards. This is
# post-processing, but pre-CS.
k = np.fft.fftshift(k, axes=(-2,-1))
     
ph_ones = np.ones(N, complex)
dataFull = tf.fft2c(im, ph=ph_ones,axes=(-2,-1))
data = k*dataFull
k = np.fft.fftshift(k, axes=(-2,-1))
#im_scan_wph = tf.ifft2c(data,ph=ph_ones)
#ph_scan = np.angle(gaussian_filter(im_scan_wph.real,0) +  1.j*gaussian_filter(im_scan_wph.imag,0))
#ph_scan = np.exp(1j*ph_scan)
#im_scan = tf.ifft2c(data,ph=ph_scan,sz=szFull)


# Now, we can use the PDF (for right now) to make our starting point
# NOTE: This won't be a viable method for data that we undersample
#       because we won't have a PDF -- or if we have uniformly undersampled
#       data, we need to come up with a method to have a good SP
pdfDiv = pdf.copy()
pdfZeros = np.where(pdf==0)
pdfDiv[pdfZeros] = 1
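Dividing the undersampled k-space by the sampling PDF, with zero entries clamped to 1 to avoid division by zero, is the simple density compensation used throughout these examples to build a starting guess. A minimal sketch of that step following the pattern of the other examples, assuming data, pdfDiv and ph_ones are available as above and tf.ifft2c is the repository's centered inverse FFT (the exact fftshift placement depends on how data and pdf were arranged):

im_dc = tf.ifft2c(data / np.fft.ifftshift(pdfDiv), ph=ph_ones).real  # density-corrected first guess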
Example #28
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif len(N) == 3:
        k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    # Here is where we build the undersampled data
    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N, complex)
    im_dc = np.zeros(N, complex)
    im_scan = np.zeros(N, complex)
    #print('Looping through data')
    for i in range(N[0]):
        #print(i)
        data[i, :, :] = np.fft.ifftshift(k[i, :, :]) * tf.fft2c(im[i, :, :],
                                                                ph=ph_ones)

        # IMAGE from the "scanner data"
        im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
        ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
        ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
        im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
        #im_lr = samp.loRes(im,pctg)

        pdfDiv = pdf
        pdfZeros = np.where(pdf == 0)
        pdfDiv[pdfZeros] = 1
        im_dc[i, :, :] = tf.ifft2c(data[i, :, :] / np.fft.ifftshift(pdfDiv),
                                   ph=ph_scan[i, :, :]).real.copy()

    im_dc = im_dc.flatten()
Example #29
    # Now we initialize to build up "what we would get from the
    # scanner" -- as well as our phase corrections
    #ph_scan = np.zeros(N, complex)
    #data = np.zeros(N,complex)
    #dataFull = np.zeros(N,complex)

    # We need to make this as efficient and accurate as possible. The
    # nice part is that, if the data is anatomical, we can use the RO
    # direction as well.
    # NOTE: Something we can do later is make this phase estimation
    # include the RO direction and then do a split afterwards. This is
    # post-processing, but pre-CS.
    #k = np.fft.fftshift(k, axes=(-2,-1))

    ph_ones = np.ones(N, complex)
    dataFull = np.fft.fftshift(tf.fft2c(im, ph=ph_ones, axes=(-2, -1)),
                               axes=(-2, -1))
    data = k * dataFull
    #k = np.fft.fftshift(k, axes=(-2,-1))
    #im_scan_wph = tf.ifft2c(data,ph=ph_ones)
    #ph_scan = np.angle(gaussian_filter(im_scan_wph.real,0) +  1.j*gaussian_filter(im_scan_wph.imag,0))
    #ph_scan = np.exp(1j*ph_scan)
    #im_scan = tf.ifft2c(data,ph=ph_scan,sz=szFull)

    # Now, we can use the PDF (for right now) to make our starting point
    # NOTE: This won't be a viable method for data that we undersample
    #       because we won't have a PDF -- or if we have uniformly undersampled
    #       data, we need to come up with a method to have a good SP
    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf == 0)
    pdfDiv[pdfZeros] = 1

ph_ones = np.ones(N[-2:], complex)
dataFull = np.zeros(N,complex)

ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
im_scan = np.zeros(N, complex)

ph_scanDir = np.zeros(N, complex)
dataDir = np.zeros(N,complex)
im_scanDir = np.zeros(N, complex)

print('Data Production')
for i in range(N[0]):
    data[i,:,:] = np.fft.fftshift(k[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataDir[i,:,:] = np.fft.fftshift(kDir[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataFull[i,:,:] = np.fft.fftshift(tf.fft2c(im[i,:,:], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    im_scan_wphDir = tf.ifft2c(dataDir[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scanDir[i,:,:] = tf.matlab_style_gauss2D(im_scan_wphDir,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    ph_scanDir[i,:,:] = np.exp(1j*ph_scanDir[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    im_scanDir[i,:,:] = tf.ifft2c(dataDir[i,:,:], ph=ph_scanDir[i,:,:])


print('Mix the Data')
dataDirComb = d.dirDataSharing(kDir,dataDir,dirs,N[-2:],maxCheck=5,bymax=1)
dataComb = d.dirDataSharing(k,data,dirs,N[-2:],maxCheck=5,bymax=1)
Example #31
    # scanner" -- as well as our phase corrections
    #ph_ones = np.ones(N[-2:], complex)
    #ph_scan = np.zeros(N, complex)
    #data = np.zeros(N,complex)
    #dataFull = np.zeros(N,complex)

    # We need to make this as efficient and accurate as possible. The
    # nice part is that, if the data is anatomical, we can use the RO
    # direction as well.
    # NOTE: Something we can do later is make this phase estimation
    # include the RO direction and then do a split afterwards. This is
    # post-processing, but pre-CS.
    k = np.fft.fftshift(k, axes=(-2, -1))

    ph_ones = np.ones(N)
    dataFull = tf.fft2c(im, ph=ph_ones, axes=(-2, -1))
    data = k * dataFull
    dataFull = np.fft.fftshift(dataFull, axes=(-2, -1))

    # Now, we can use the PDF (for right now) to make our starting point
    # NOTE: This won't be a viable method for data that we undersample
    #       because we won't have a PDF -- or if we have uniformly undersampled
    #       data, we need to come up with a method to have a good SP
    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf == 0)
    pdfDiv[pdfZeros] = 1

    k = np.fft.fftshift(k, axes=(-2, -1))
    # Here, we look at the number of "steps" we want to do and step
    # up from there. The "steps" are chosen based on the percentage that
    # we can sample and on the number of steps we can take.
Example #32
def create_scanner_k_space(im,
                           N,
                           P=2,
                           pctg=0.25,
                           dirData=False,
                           dirs=None,
                           radius=0.2,
                           cyl=[0],
                           style='mult',
                           pft=False,
                           ext=0.5):
    '''
    Read in the data, size, and the directions (if they exist) so that we can create a
    retrospectively sampled set of data for testing.
    '''

    # Create a pdf so that we can use it to make a starting point
    pdf = samp.genPDF(N[-2:],
                      P,
                      pctg,
                      radius=radius,
                      cyl=[1, N[-2], N[-1]],
                      style='mult',
                      pft=pft,
                      ext=0.5)

    # Generate the sampling scheme, depending on whether or not the data is directional
    if dirData:
        if dirs is None:
            raise ValueError(
                'Directional data was requested (dirData=True), so dirs must be provided'
            )
        k = d.dirPDFSamp([int(dirs.shape[0]), N[-2], N[-1]],
                         P=2,
                         pctg=pctg,
                         radius=radius,
                         dirs=dirs,
                         cyl=True,
                         taper=0.25)[0]
    else:
        k = samp.genSampling(pdf, 50, 2)[0].astype(int)

    # Since our functions are built to work in 3D datasets, here we
    # make sure that N and things are all in 3D
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif len(N) == 3:
        if k.ndim == 2:
            k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    k = np.fft.fftshift(k, axes=(-2, -1))
    # Convert the image data into k-space
    ph_ones = np.ones(N, complex)
    dataFull = tf.fft2c(im, ph=ph_ones, axes=(-2, -1))
    # Apply our sampling
    data = k * dataFull
    # Now we need to calculate the phase in order to deal with the undersampled image and the
    # non perfect cancellation of terms
    #filtdata = gaussian_filter(im_scan_wph.real,0,0) + 1j*gaussian_filter(im_scan_wph.imag,0,0)
    #ph_scan = np.exp(1j*np.angle(filtdata.conj()))
    im_scan_wph = tf.ifft2c(data, ph=ph_ones)
    ph_scan = np.angle(
        gaussian_filter(im_scan_wph.real, 0) +
        1.j * gaussian_filter(im_scan_wph.imag, 0))
    ph_scan = np.exp(1j * ph_scan)
    im_scan = tf.ifft2c(data, ph=ph_scan)

    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf < 1e-4)
    pdfDiv[pdfZeros] = 1
    datadc = data / pdfDiv

    return dataFull, data, datadc, pdf, k, im_scan, ph_scan
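A hypothetical call to create_scanner_k_space as defined above; im is assumed to be a 2D image that has already been loaded, and the parameter values are illustrative only:

import numpy as np
N = np.array(im.shape)
dataFull, data, datadc, pdf, k, im_scan, ph_scan = create_scanner_k_space(
    im, N, P=2, pctg=0.25, radius=0.2)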
#tupleN = tuple(N)

for pctg in [0.25, 0.33, 0.40, 0.50]:
    for TVWeight in [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05]:
        for XFMWeight in [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05]:
            P = 5  # Variable density polynomial degree
            ph = tf.matlab_style_gauss2D(im,shape=(5,5));
            #ph = np.ones(im.shape, complex)

            # Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
            pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[0]) 
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)

            # Here is where we build the undersampled data
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph)
            # ph = phase_Calculation(im,is_kspace = False)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            im_scan = tf.ifft2c(data, ph=ph)
            minval = np.min(im)
            maxval = np.max(im)
            # Primary first guess. What we're using for now. Density corrected
            #im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
            for imdcs in ['zeros','ones','densCorr','imFull']:
                if imdcs == 'zeros':
                    im_dc = np.zeros(data.shape)
                elif imdcs == 'ones':
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

im_scan = abs(im).reshape(N)
im_dc = np.load('/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/im_dc.npy')


ph_ones = np.ones(N[-2:], complex)
ph_scan = np.exp(1.j*np.angle(im))
data = np.zeros(N,complex)
for i in range(N[0]):
    k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

N_im = N
hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real,wavelet,mode)
N = np.hstack([N_im[0], hld.shape])

w_scan = np.zeros(N)
w_full = np.zeros(N)
w_dc = np.zeros(N)

for i in range(N[0]):
    w_scan[i,:,:] = tf.wt(im_scan.real[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)[0]
    w_full[i,:,:] = tf.wt(abs(im[i,:,:]),wavelet,mode,dims,dimOpt,dimLenOpt)[0]
    w_dc[i,:,:] = tf.wt(im_dc[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)[0]

w_dc = w_dc.flatten()
Example #35
    k[i, :, :] = samp.genSampling(pdf, 50, 2)[0].astype(int)

ph_ones = np.ones(N[-2:], complex)
dataFull = np.zeros(N, complex)

ph_scan = np.zeros(N, complex)
data = np.zeros(N, complex)
im_scan = np.zeros(N, complex)

ph_scanDir = np.zeros(N, complex)
dataDir = np.zeros(N, complex)
im_scanDir = np.zeros(N, complex)

print('Data Production')
for i in range(N[0]):
    data[i, :, :] = np.fft.fftshift(k[i, :, :]) * tf.fft2c(im[i, :, :],
                                                           ph=ph_ones)
    dataDir[i, :, :] = np.fft.fftshift(kDir[i, :, :]) * tf.fft2c(im[i, :, :],
                                                                 ph=ph_ones)
    dataFull[i, :, :] = np.fft.fftshift(tf.fft2c(im[i, :, :], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
    im_scan_wphDir = tf.ifft2c(dataDir[i, :, :], ph=ph_ones)
    ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
    ph_scanDir[i, :, :] = tf.matlab_style_gauss2D(im_scan_wphDir, shape=(5, 5))
    ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
    ph_scanDir[i, :, :] = np.exp(1j * ph_scanDir[i, :, :])
    im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
    im_scanDir[i, :, :] = tf.ifft2c(dataDir[i, :, :], ph=ph_scanDir[i, :, :])

print('Mix the Data')
dataDirComb = d.dirDataSharing(kDir,
                               dataDir,
def objectiveFunctionDataCons(x, N, ph, data, k):
    obj_data = k*(data - tf.fft2c(x,ph))
    return obj_data*obj_data.conj() #L2 Norm
im = np.load(filename)
im = im/np.max(abs(im))
N = im.shape
ph_scan = np.zeros(N,dtype=complex)
ph_ones = np.ones(N[-2:],dtype=complex)

    
# Generate the sampling or pull it from file
#k = samp.genSamplingDir(img_sz=[180,180], dirFile=dirFile, pctg=pctg, cyl=[1],radius=radius, nmins=nmins, engfile='/micehome/asalerno/Documents/pyDirectionCompSense/engFile30dir.npy', endSize=[256,256])
k = np.load('/home/asalerno/Documents/pyDirectionCompSense/directionData/30dirSampling_5mins.npy')

data = np.zeros(im.shape,dtype=complex)
im_scan = np.zeros(im.shape,dtype=complex)
im_dc = np.zeros(im.shape,dtype=complex)

minval = np.min(abs(im))
maxval = np.max(abs(im))

for i in range(len(dirs)):
    data[i,:,:] = np.fft.ifftshift(k[i,:,:]) * tf.fft2c(im[i,:,:], ph=ph_ones)
    
    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
data_dc = d.dir_dataSharing(k,data,dirs,[180,180],maxCheck=5,bymax=1)

for i in range(len(dirs)):
    im_dc[i,:,:] =  tf.ifft2c(data_dc[i,:,:], ph=ph_scan[i,:,:])