Example #1
def meas_phase(data):
    ph_ones = np.ones(data.shape, complex)
    im_scan_wph = tf.ifft2c(data, ph=ph_ones)
    ph_scan = np.angle(
        gaussian_filter(im_scan_wph.real, 0) +
        1.j * gaussian_filter(im_scan_wph.imag, 0))
    ph_scan = np.exp(1j * ph_scan)
    im_scan = tf.ifft2c(data, ph=ph_scan)
    return im_scan, ph_scan
Example #2
def loRes(im, pctg):
    N = im.shape
    ph_ones = np.ones(N)
    [x, y] = np.meshgrid(np.linspace(-1, 1, N[1]), np.linspace(-1, 1, N[0]))
    rsq = x**2 + y**2
    loResMaskLocs = np.where(rsq < pctg)
    loResMask = np.zeros(N)
    loResMask[loResMaskLocs] = 1
    loResMask = sp.ndimage.filters.gaussian_filter(loResMask, 3)
    data = np.fft.fftshift(loResMask) * tf.fft2c(im, ph=ph_ones)
    im_lr_wph = tf.ifft2c(data, ph=ph_ones)
    ph_lr = tf.matlab_style_gauss2D(im_lr_wph, shape=(5, 5))
    ph_lr = np.exp(1j * ph_lr)
    im_lr = tf.ifft2c(data, ph=ph_lr)
    return im_lr
Example #3
def loRes(im,pctg):
    N = im.shape
    ph_ones=np.ones(N)
    [x,y] = np.meshgrid(np.linspace(-1,1,N[1]),np.linspace(-1,1,N[0]))
    rsq = x**2 + y**2
    loResMaskLocs = np.where(rsq < pctg)
    loResMask = np.zeros(N)
    loResMask[loResMaskLocs] = 1
    loResMask = sp.ndimage.filters.gaussian_filter(loResMask,3)
    data = np.fft.fftshift(loResMask)*tf.fft2c(im, ph=ph_ones)
    im_lr_wph = tf.ifft2c(data,ph=ph_ones)
    ph_lr = tf.matlab_style_gauss2D(im_lr_wph,shape=(5,5))
    ph_lr = np.exp(1j*ph_lr)
    im_lr = tf.ifft2c(data, ph=ph_lr)
    return im_lr
Example #4
def gObj(x,N,ph,
         data_from_scanner,
         samp_mask):
    '''
    Compute the gradient of the data-consistency objective: it measures how the
    current estimate compares to the data from the scanner so that we can
    enforce consistency with the measured k-space.

    Inputs:
    [np.array] x - the current image estimate
    [np.array] N - shape of the image
    [np.array] ph - phase map used by the Fourier transforms
    [np.array] data_from_scanner - the original k-space data from the scanner
    [int/boolean] samp_mask - mask so we only compare the regions of k-space that were sampled

    Outputs:
    [np.array] grad - the gradient of the data-consistency term

    '''
    if len(N) == 2:
        N = np.hstack([1, N])

    #grad = np.zeros([x.shape])

    # Convert the current estimate into k-space data, subtract off the original
    # data from the scanner, and then convert the difference back into image
    # space.
    x0 = x.reshape(N)
    data_from_scanner.shape = N
    x_data = np.fft.fftshift(samp_mask)*tf.fft2c(x0,ph); # Issue, feeding in 3D data to a 2D fft alg...
    
    grad = -2*tf.ifft2c(data_from_scanner - x_data,ph).real; # -1* & ,real
    
    return grad
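For reference, the same data-consistency gradient can be sketched with plain numpy FFTs, leaving out this repo's tf.fft2c / tf.ifft2c helpers and the phase map ph. The names f_dc and grad_dc below are illustrative only; the finite-difference check at the end is just a sanity test of the formula grad = 2 * Re{F^-1[mask * (F x - d)]} for an orthonormal FFT.

import numpy as np

def f_dc(x, d, mask):
    # data-consistency objective for a real-valued image x
    r = mask * (np.fft.fft2(x, norm='ortho') - d)
    return np.sum(np.abs(r)**2)

def grad_dc(x, d, mask):
    # gradient of f_dc with respect to x (mask already applied inside r)
    r = mask * (np.fft.fft2(x, norm='ortho') - d)
    return 2 * np.fft.ifft2(r, norm='ortho').real

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 8))
d = np.fft.fft2(rng.standard_normal((8, 8)), norm='ortho')
mask = (rng.random((8, 8)) < 0.4).astype(float)

# finite-difference check of one entry of the gradient
eps = 1e-6
e = np.zeros_like(x)
e[3, 4] = eps
num = (f_dc(x + e, d, mask) - f_dc(x - e, d, mask)) / (2 * eps)
print(num, grad_dc(x, d, mask)[3, 4])   # the two numbers should agree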
Example #5
def phase_Calculation(data, is_kspace=0, is_fftshifted=0):

    # If we were handed k-space data, bring it back into image space first
    if is_kspace:
        data = tf.ifft2c(data)
        if is_fftshifted:
            data = np.fft.ifftshift(data)

    # Smooth the complex image and keep only its (low-frequency) phase
    filtdata = sp.ndimage.uniform_filter(data, size=5)
    return np.exp(1.j * np.angle(filtdata))
Example #6
def gDataCons(x, N, ph, data, k, kmask=None):
    '''
    Compute the gradient of the data-consistency objective: it measures how the
    current estimate compares to the data from the scanner so that we can
    enforce consistency with the measured k-space.

    Inputs:
    [np.array] x - the current image estimate
    [np.array] N - shape of the image
    [np.array] ph - phase map used by the Fourier transforms
    [np.array] data - the original k-space data from the scanner
    [np.array] k - sampling mask so we only compare the regions of k-space that were sampled
    [np.array] kmask - optional extra k-space mask passed through to the transforms

    Outputs:
    [np.array] grad - the gradient of the data-consistency term

    '''
    if len(N) == 2:
        N = np.hstack([1,N])
    #grad = np.zeros([x.shape])

    # Convert the current estimate into k-space data, subtract off the original
    # data from the scanner, and then convert the difference back into image
    # space.
    x0 = x.reshape(N)
    data.shape = N
    grad = np.zeros(N)
    
    if len(k.shape)==len(N):
        ph0 = ph.reshape(N)
        xdata = tf.fft2c(x0,ph0,axes=(-2,-1),kmask=kmask)
        grad = tf.ifft2c(2 * k * (xdata - data),ph0,axes=(-2,-1),kmask=kmask).real
    else:
        ph0 = ph.reshape(np.hstack([-1,N]))
        xdata = tf.fft2c(x0,ph0,axes=(-2,-1),kmask=kmask)
        grad = tf.ifft2c(2 * k * (xdata - data),ph0,axes=(-2,-1),kmask=kmask).real
        grad = np.sum(grad,axis=0)
    
    return grad
Example #7
    # k-space data-consistency layer: project the image to per-coil k-space,
    # apply a learned convolution, then blend with the measured k-space k0
    # wherever the sampling mask is 1 (softly, if a noise level is learned).
    def perform(self, x, k0, mask, sensitivity, coil_dim=1):
        k = T.fft2c(D.project_all_coils(x, sensitivity, coil_dim))
        k = k.view(k.size(0),
                   k.size(1) * k.size(2), k.size(3), k.size(4), k.size(5))
        k = self.conv(k)
        k = k.view(k.size(0), 12, 8, k.size(2), k.size(3), k.size(4))

        if self.noise_lvl is not None:  # noisy case
            v = torch.sigmoid(self.noise_lvl)  # Normalize to 0~1
            v = v.unsqueeze(0).unsqueeze(1).unsqueeze(3).unsqueeze(
                4).unsqueeze(5)

            k = (1 - mask) * k + mask * (v * k + (1 - v) * k0)
        else:  # noiseless case
            k = (1 - mask) * k + mask * k0
        return D.combine_all_coils(T.ifft2c(k), sensitivity, coil_dim)
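The masked update in the middle of perform is the usual soft data-consistency rule. A tiny numpy-only sketch of the same rule, outside PyTorch and with a purely illustrative helper name, could look like this:

import numpy as np

def soft_data_consistency(k_pred, k0, mask, v=None):
    # v is None  -> noiseless case: trust the measured samples completely
    # v in (0,1) -> noisy case: blend prediction and measurement at sampled points
    if v is None:
        return (1 - mask) * k_pred + mask * k0
    return (1 - mask) * k_pred + mask * (v * k_pred + (1 - v) * k0)

rng = np.random.default_rng(1)
k_pred = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
k0 = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
mask = (rng.random((4, 4)) < 0.5).astype(float)
out = soft_data_consistency(k_pred, k0, mask, v=0.1)
# at sampled locations the output is 0.1*k_pred + 0.9*k0, elsewhere it is k_pred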
Example #8
    def __getitem__(self, ind):

        if (self.Y_JLORAKS is None):  # Open in thread
            self.tables = tables.open_file(self.fname, 'r')
            self.Y_JLORAKS = self.tables.root.Y_JLORAKS
            self.Sens = self.tables.root.Sens
            self.X_kJLORAKS = self.tables.root.X_kJLORAKS

        Y_JLORAKS = np.float32(self.Y_JLORAKS[ind])
        Sens = np.float32(self.Sens[ind])
        X_kJLORAKS = np.float32(self.X_kJLORAKS[ind])
        """ Data Loading """
        mask = np.float32(np.abs(X_kJLORAKS[:, 0:1, 0:1]) > 0)
        """ Augmentation (Random Flipping (None, Left-Right, Up-Down), Scaling (0.9 - 1.1) """
        if self.augmentation:
            """ Random Flipping """
            if np.random.random() < 0.5:
                #                pdb.set_trace()
                Y_JLORAKS = self.AugmentFlip(Y_JLORAKS, 1)
                Sens = self.AugmentFlip(Sens, 2)
                X_kJLORAKS = self.AugmentFlip(X_kJLORAKS, 2)
                mask = self.AugmentFlip(mask, 2)

            if np.random.random() < 0.5:
                Y_JLORAKS = self.AugmentFlip(Y_JLORAKS, 2)
                Sens = self.AugmentFlip(Sens, 3)
                X_kJLORAKS = self.AugmentFlip(X_kJLORAKS, 3)
                mask = self.AugmentFlip(mask, 3)

            scale_f = np.random.uniform(0.9, 1.1)
            Y_JLORAKS = self.AugmentScale(Y_JLORAKS, scale_f)
            X_kJLORAKS = self.AugmentScale(X_kJLORAKS, scale_f)

        Y = torch.from_numpy(Y_JLORAKS)
        S = torch.from_numpy(Sens)
        X_k = torch.from_numpy(X_kJLORAKS)
        m = torch.from_numpy(mask)

        X = T.ifft2c(X_k)
        X = combine_all_coils(X, S, 0)
        return X, Y, S, X_k, m
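The augmentation block above randomly flips and rescales the arrays. As a rough, numpy-only illustration of the same idea on a single 2D complex image (the AugmentFlip / AugmentScale helpers belong to the dataset class and are not reproduced here):

import numpy as np

def augment(im, rng):
    if rng.random() < 0.5:
        im = im[:, ::-1]                    # left-right flip
    if rng.random() < 0.5:
        im = im[::-1, :]                    # up-down flip
    return im * rng.uniform(0.9, 1.1)       # random global intensity scaling

rng = np.random.default_rng(0)
im = rng.standard_normal((8, 8)) + 1j * rng.standard_normal((8, 8))
im_aug = augment(im, rng)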
Example #9
def gDataCons(x, N, ph, data_from_scanner, samp_mask):
    '''
    Compute the gradient of the data-consistency objective: it measures how the
    current estimate compares to the data from the scanner so that we can
    enforce consistency with the measured k-space.

    Inputs:
    [np.array] x - the current image estimate
    [np.array] N - shape of the image
    [np.array] ph - phase map used by the Fourier transforms
    [np.array] data_from_scanner - the original k-space data from the scanner
    [int/boolean] samp_mask - mask so we only compare the regions of k-space that were sampled

    Outputs:
    [np.array] grad - the gradient of the data-consistency term

    '''
    if len(N) == 2:
        N = np.hstack([1, N])
    #grad = np.zeros([x.shape])

    # Convert the current estimate into k-space data, subtract off the original
    # data from the scanner, and then convert the difference back into image
    # space.
    x0 = x.reshape(N)
    data_from_scanner.shape = N
    grad = np.zeros(N)
    ph0 = ph.reshape(N)
    #samp_mask = samp_mask.reshape(N)

    for kk in range(N[0]):
        x_data = tf.fft2c(x0[kk, :, :], ph0[kk, :, :])
        # Issue, feeding in 3D data to a 2D fft alg...

        grad[kk, :, :] = -2 * tf.ifft2c(
            samp_mask[kk, :, :] *
            (data_from_scanner[kk, :, :] - x_data), ph0[kk, :, :]).real
        # -1* & ,real
    #import pdb; pdb.set_trace()
    return grad
Example #10
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)
            if len(N) == 2:
                N = np.hstack([1, N])
                k = k.reshape(N)

            # Here is where we build the undersampled data
            ph_ones = np.ones(im.shape, complex)
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph_ones)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            #ph_ones = np.ones(im.shape, complex)
            im_scan_wph = tf.ifft2c(data, ph=ph_ones)
            ph_scan = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
            #ph_scan = tf.matlab_style_gauss2D(im,shape=(5,5))

            #for i in range(phIter):
                #ph_scan = tf.laplacianUnwrap(ph_scan,N,[75,75])

            ph_scan = np.exp(1j*ph_scan)
            im_scan = tf.ifft2c(data, ph=ph_scan)
            #im_scan = abs(tf.ifft2c(data,ph_ones))
            #data = tf.fft2c(im_scan,ph_ones).reshape(data.size).reshape(N)
            #ph_scan = ph_ones

            minval = np.min(abs(im))
            maxval = np.max(abs(im))
Example #11
def runCSAlgorithm(fromfid=False,
                   filename='/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/fullySampledBrain.npy',
                   sliceChoice=150,
                   strtag = ['','spatial', 'spatial'],
                   xtol = [1e-2, 1e-3, 5e-4, 5e-4],
                   TV = [0.01, 0.005, 0.002, 0.001],
                   XFM = [0.01,.005, 0.002, 0.001],
                   dirWeight=0,
                   pctg=0.25,
                   radius=0.2,
                   P=2,
                   pft=False,
                   ext=0.5,
                   wavelet='db4',
                   mode='per',
                   method='CG',
                   ItnLim=30,
                   lineSearchItnLim=30,
                   alpha_0=0.6,
                   c=0.6,
                   a=10.0,
                   kern = 
                   np.array([[[ 0.,  0.,  0.], 
                   [ 0.,  0.,  0.], 
                   [ 0.,  0.,  0.]],                
                  [[ 0.,  0.,  0.],
                  [ 0., -1.,  0.],
                  [ 0.,  1.,  0.]],
                  [[ 0.,  0.,  0.],
                  [ 0., -1.,  1.],
                  [ 0.,  0.,  0.]]]),
                   dirFile = None,
                   nmins = None,
                   dirs = None,
                   M = None,
                   dirInfo = [None]*4,
                   saveNpy=False,
                   saveNpyFile=None,
                   saveImsPng=False,
                   saveImsPngFile=None,
                   saveImDiffPng=False,
                   saveImDiffPngFile=None,
                   disp=False):
    ##import pdb; pdb.set_trace()
    if fromfid==True:
        inputdirectory=filename[0]
        petable=filename[1]
        fullImData = rff.getDataFromFID(petable,inputdirectory,2)[0,:,:,:]
        fullImData = fullImData/np.max(abs(fullImData))
        im = fullImData[:,:,sliceChoice]
    else:
        im = np.load(filename)[sliceChoice,:,:]
        
    N = np.array(im.shape)  # image Size

    pdf = samp.genPDF(N[-2:], P, pctg, radius=radius, cyl=np.hstack([1, N[-2:]]), style='mult', pft=pft, ext=ext)
    if pft:
        print('Partial Fourier sampling method used')
    k = samp.genSampling(pdf, 50, 2)[0].astype(int)
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif (len(N) == 3) and ('dir' not in strtag):
        k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N,complex)
    im_scan = np.zeros(N,complex)
    for i in range(N[0]):
        k[i,:,:] = np.fft.fftshift(k[i,:,:])
        data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

        # IMAGE from the "scanner data"
        im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
        ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
        ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
        im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
        #im_lr = samp.loRes(im,pctg)
    
    # ------------------------------------------------------------------ #
    # A quick way to look at the PSF of the sampling pattern that we use #
    delta = np.zeros(N[-2:])
    delta[int(N[-2]/2),int(N[-1]/2)] = 1
    psf = tf.ifft2c(tf.fft2c(delta,ph_ones)*k,ph_ones)
    # ------------------------------------------------------------------ #


    ## ------------------------------------------------------------------ #
    ## -- Currently broken - Need to figure out what's happening here. -- #
    ## ------------------------------------------------------------------ #
    #if pft:
        #for i in xrange(N[0]):
            #dataHold = np.fft.fftshift(data[i,:,:])
            #kHold = np.fft.fftshift(k[i,:,:])
            #loc = 98
            #for ix in xrange(N[-2]):
                #for iy in xrange(loc,N[-1]):
                    #dataHold[-ix,-iy] = dataHold[ix,iy].conj()
                    #kHold[-ix,-iy] = kHold[ix,iy]
    ## ------------------------------------------------------------------ #
    
    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf==0)
    pdfDiv[pdfZeros] = 1
    #im_scan_imag = im_scan.imag
    #im_scan = im_scan.real

    N_im = N.copy()
    hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real,wavelet,mode)
    N = np.hstack([N_im[0], hld.shape])

    w_scan = np.zeros(N)
    w_full = np.zeros(N)
    im_dc = np.zeros(N_im)
    w_dc = np.zeros(N)

    for i in xrange(N[0]):
        w_scan[i,:,:] = tf.wt(im_scan.real[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)[0]
        w_full[i,:,:] = tf.wt(abs(im[i,:,:]),wavelet,mode,dims,dimOpt,dimLenOpt)[0]

        im_dc[i,:,:] = tf.ifft2c(data[i,:,:] / np.fft.ifftshift(pdfDiv), ph=ph_scan[i,:,:]).real.copy()
        w_dc[i,:,:] = tf.wt(im_dc,wavelet,mode,dims,dimOpt,dimLenOpt)[0]

    w_dc = w_dc.flatten()
    im_sp = im_dc.copy().reshape(N_im)
    minval = np.min(abs(im))
    maxval = np.max(abs(im))
    data = np.ascontiguousarray(data)

    imdcs = [im_dc,np.zeros(N_im),np.ones(N_im),np.random.randn(np.prod(N_im)).reshape(N_im)]
    imdcs[-1] = imdcs[-1] - np.min(imdcs[-1])
    imdcs[-1] = imdcs[-1]/np.max(abs(imdcs[-1]))
    mets = ['Density Corrected','Zeros','1/2''s','Gaussian Random Shift (0,1)']
    wdcs = []
    for i in range(len(imdcs)):
        wdcs.append(tf.wt(imdcs[i][0],wavelet,mode,dims,dimOpt,dimLenOpt)[0].reshape(N))

    ims = []
    #print('Starting the CS Algorithm')
    for kk in range(len(wdcs)):
        w_dc = wdcs[kk]
        print(mets[kk])
        for i in range(len(TV)):
            args = (N, N_im, dims, dimOpt, dimLenOpt, TV[i], XFM[i], data, k, strtag, ph_scan, kern, dirWeight, dirs, dirInfo, nmins, wavelet, mode, a)
            w_result = opt.minimize(f, w_dc, args=args, method=method, jac=df, 
                                        options={'maxiter': ItnLim, 'lineSearchItnLim': lineSearchItnLim, 'gtol': 0.01, 'disp': 1, 'alpha_0': alpha_0, 'c': c, 'xtol': xtol[i], 'TVWeight': TV[i], 'XFMWeight': XFM[i], 'N': N})
            if np.any(np.isnan(w_result['x'])):
                print("Some nan's found. Dropping TV and XFM values")
            elif w_result['status'] != 0:
                print('TV and XFM values too high -- no solution found. Dropping...')
            else:
                w_dc = w_result['x']
                
        w_res = w_dc.reshape(N)
        im_res = np.zeros(N_im)
        for i in xrange(N[0]):
            im_res[i,:,:] = tf.iwt(w_res[i,:,:],wavelet,mode,dims,dimOpt,dimLenOpt)
        ims.append(im_res)
    
    if saveNpy:
        if saveNpyFile is None:
            np.save('./holdSave_im_res_' + str(int(pctg*100)) + 'p_all_SP',ims)
        else:
            np.save(saveNpyFile,ims)
    
    if saveImsPng:
        vis.figSubplots(ims,titles=mets,clims=(minval,maxval),colorbar=True)
        if not disp:
            if saveImsPngFile is None:
                saveFig.save('./holdSave_ims_' + str(int(pctg*100)) + 'p_all_SP')
            else:
                saveFig.save(saveImsPngFile)
    
    if saveImDiffPng:
        imdiffs, clims = vis.imDiff(ims)
        diffMets = ['DC-Zeros','DC-Ones','DC-Random','Zeros-Ones','Zeros-Random','Ones-Random']
        vis.figSubplots(imdiffs,titles=diffMets,clims=clims,colorbar=True)
        if not disp:
            if saveImDiffPngFile is None:
                saveFig.save('./holdSave_im_diffs_' + str(int(pctg*100)) + 'p_all_SP')
            else:
                saveFig.save(saveImDiffPngFile)
    
    if disp:
        plt.show()
Example #12
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
dataFull = np.zeros(N,complex)
im_scan = np.zeros(N, complex)


for i in range(N[0]):
    data[i,:,:] = np.fft.fftshift(k[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataFull[i,:,:] = np.fft.fftshift(tf.fft2c(im[i,:,:], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    #ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.angle(gaussian_filter(im_scan_wph.real,2) +  1.j*gaussian_filter(im_scan_wph.imag,2))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])


N_im = N
hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real,wavelet,mode)
N = np.hstack([N_im[0], hld.shape])

w_scan = np.zeros(N)
w_full = np.zeros(N)
im_dc = np.zeros(N_im)
w_dc = np.zeros(N)
Example #13
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
dataFull = np.zeros(N,complex)
im_scan = np.zeros(N,complex)
for i in range(N[0]):
    #k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = np.fft.fftshift(k[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataFull[i,:,:] = np.fft.fftshift(tf.fft2c(im[i,:,:], ph=ph_ones))

    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
    
    #im_lr = samp.loRes(im,pctg)


# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
delta[int(N[-2]/2),int(N[-1]/2)] = 1
psf = tf.ifft2c(tf.fft2c(delta,ph_ones)*k,ph_ones)
# ------------------------------------------------------------------ #
Example #14
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)
elif len(N) == 3:
    k = k.reshape(np.hstack([1,N[-2:]])).repeat(N[0],0)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
im_scan = np.zeros(N,complex)
for i in range(N[0]):
    k[i,:,:] = np.fft.fftshift(k[i,:,:])
    data[i,:,:] = k[i,:,:]*tf.fft2c(im[i,:,:], ph=ph_ones)

    # IMAGE from the "scanner data"
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    
    
    #im_lr = samp.loRes(im,pctg)


# ------------------------------------------------------------------ #
# A quick way to look at the PSF of the sampling pattern that we use #
delta = np.zeros(N[-2:])
delta[int(N[-2]/2),int(N[-1]/2)] = 1
psf = tf.ifft2c(tf.fft2c(delta,ph_ones)*k,ph_ones)
# ------------------------------------------------------------------ #
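The PSF check in the block above can also be written with plain numpy FFTs. The sketch below pushes a centred delta through "FFT -> sampling mask -> inverse FFT" with a toy random mask and omits the repo's phase handling:

import numpy as np

Ny, Nx = 64, 64
delta = np.zeros((Ny, Nx))
delta[Ny // 2, Nx // 2] = 1.0

rng = np.random.default_rng(0)
mask = (rng.random((Ny, Nx)) < 0.33).astype(float)   # toy random sampling mask
psf = np.fft.ifft2(np.fft.fft2(delta) * mask)
# abs(psf) shows how a single point spreads under this sampling pattern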
Example #15
#ph = tf.matlab_style_gauss2D(im,shape=(5,5));
ph=np.ones(im.shape,complex)


# Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
pdf = samp.genPDF(N,P,pctg,radius = 0.3,cyl=[0]) # Currently not working properly for the cylindrical case -- can fix at home
# Set the sampling pattern -- checked and this gives the right percentage
k = samp.genSampling(pdf,50,2)[0].astype(int)

# Here is where we build the undersampled data
data = np.fft.ifftshift(k)*tf.fft2c(im,ph=ph)
#ph = phase_Calculation(im,is_kspace = False)
#data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data,ph=ph)

# Primary first guess. What we're using for now. Density corrected
im_dc = tf.ifft2c(data/np.fft.ifftshift(pdf),ph=ph).real.flatten().copy()

# Optimization algorithm -- this is where everything culminates together
a=10.0
testargs = (N,TVWeight,XFMWeight,data,k,strtag,ph,dirWeight,dirs,M,nmins,wavelet,mode,a)


# Get things set to test alpha values
f = optfun
fprime = derivative_fun
x0 = np.asarray(im_dc).flatten()

func_calls, f1 = wrap_function(f, testargs)
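samp.genPDF and samp.genSampling are repository functions. As a rough, numpy-only sketch of the same pipeline (a toy variable-density PDF, a random mask drawn from it, undersampled k-space, and a density-corrected zero-filled first guess, with no phase correction):

import numpy as np

n = 64
y, x = np.meshgrid(np.linspace(-1, 1, n), np.linspace(-1, 1, n), indexing='ij')
r = np.sqrt(x**2 + y**2)
pdf = np.clip((1 - np.clip(r, 0, 1))**2, 0.02, 1.0)   # toy variable-density PDF
pdf[r < 0.2] = 1.0                                     # fully sample the centre

rng = np.random.default_rng(0)
k_mask = (rng.random((n, n)) < pdf).astype(float)      # random sampling pattern

im = rng.standard_normal((n, n))                       # stand-in for the image
data = np.fft.ifftshift(k_mask) * np.fft.fft2(im, norm='ortho')

# zero-filled and density-corrected first guesses
im_zf = np.fft.ifft2(data, norm='ortho').real
im_dc = np.fft.ifft2(data / np.fft.ifftshift(pdf), norm='ortho').real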
Example #16
            #ph = np.ones(im.shape, complex)

            # Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
            pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[0]) 
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)

            # Here is where we build the undersampled data
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph)
            # ph = phase_Calculation(im,is_kspace = False)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            im_scan = tf.ifft2c(data, ph=ph)
            minval = np.min(im)
            maxval = np.max(im)
            # Primary first guess. What we're using for now. Density corrected
            #im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
            for imdcs in ['zeros','ones','densCorr','imFull']:
                if imdcs == 'zeros':
                    im_dc = np.zeros(data.shape)
                elif imdcs == 'ones':
                    im_dc = np.ones(data.shape)
                elif imdcs == 'densCorr':
                    im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
                elif imdcs == 'imFull':
                    im_dc = im
                
                # Optimization algorithm -- this is where everything culminates together
Example #17
    for j in range(nSteps + 1):
        # we need to now step through and make sure that we
        # take care of all the proper step sizes
        NSub = np.array([N[0], N[1] - 2 * locSteps[j],
                         N[2] - 2 * locSteps[j]]).astype(int)
        ph_onesSub = np.ones(NSub, complex)
        ph_scanSub = np.zeros(NSub, complex)
        dataSub = np.zeros(NSub, complex)
        im_scanSub = np.zeros(NSub, complex)
        im_FullSub = np.zeros(NSub, complex)
        kSub = np.zeros(NSub)
        if locSteps[j] == 0:
            kSub = k.copy()
            dataSub = np.fft.fftshift(kSub * dataFull, axes=(-2, -1))
            im_FullSub = tf.ifft2c(np.fft.fftshift(dataFull, axes=(-2, -1)),
                                   ph=ph_onesSub,
                                   sz=szFull)
        else:
            kSub = k[:, locSteps[j]:-locSteps[j],
                     locSteps[j]:-locSteps[j]].copy()
            dataSub = np.fft.fftshift(kSub *
                                      dataFull[:, locSteps[j]:-locSteps[j],
                                               locSteps[j]:-locSteps[j]],
                                      axes=(-2, -1))
            im_FullSub = tf.ifft2c(np.fft.fftshift(
                dataFull[:, locSteps[j]:-locSteps[j],
                         locSteps[j]:-locSteps[j]],
                axes=(-2, -1)),
                                   ph=ph_onesSub,
                                   sz=szFull)
Example #18
if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)
    im = im.reshape(N)

ph_ones = np.ones(N[-2:], complex)
ph_scan = np.zeros(N, complex)
data = np.zeros(N, complex)
dataFull = np.zeros(N, complex)
im_scan = np.zeros(N, complex)

for i in range(N[0]):
    data[i, :, :] = np.fft.fftshift(k[i, :, :]) * tf.fft2c(im[i, :, :],
                                                           ph=ph_ones)
    dataFull[i, :, :] = np.fft.fftshift(tf.fft2c(im[i, :, :], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
    #ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scan[i, :, :] = np.angle(
        gaussian_filter(im_scan_wph.real, 2) +
        1.j * gaussian_filter(im_scan_wph.imag, 2))
    ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
    im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])

N_im = N
hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real, wavelet, mode)
N = np.hstack([N_im[0], hld.shape])

w_scan = np.zeros(N)
w_full = np.zeros(N)
im_dc = np.zeros(N_im)
w_dc = np.zeros(N)
Example #19
        k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    # Here is where we build the undersampled data
    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N, complex)
    im_dc = np.zeros(N, complex)
    im_scan = np.zeros(N, complex)
    #print('Looping through data')
    for i in range(N[0]):
        #print(i)
        data[i, :, :] = np.fft.ifftshift(k[i, :, :]) * tf.fft2c(im[i, :, :],
                                                                ph=ph_ones)

        # IMAGE from the "scanner data"
        im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
        ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
        ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
        im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
        #im_lr = samp.loRes(im,pctg)

        pdfDiv = pdf.copy()  # copy so that we don't modify pdf in place
        pdfZeros = np.where(pdf == 0)
        pdfDiv[pdfZeros] = 1
        im_dc[i, :, :] = tf.ifft2c(data[i, :, :] / np.fft.ifftshift(pdfDiv),
                                   ph=ph_scan[i, :, :]).real.copy()

    im_dc = im_dc.flatten()
    minval = np.min(abs(im))
    maxval = np.max(abs(im))
Example #20
            #ph = np.ones(im.shape, complex)

            # Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
            pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[0])
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)

            # Here is where we build the undersampled data
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph)
            # ph = phase_Calculation(im,is_kspace = False)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            im_scan = tf.ifft2c(data, ph=ph)
            minval = np.min(im)
            maxval = np.max(im)
            # Primary first guess. What we're using for now. Density corrected
            #im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
            for imdcs in ['zeros', 'ones', 'densCorr', 'imFull']:
                if imdcs == 'zeros':
                    im_dc = np.zeros(data.shape)
                elif imdcs == 'ones':
                    im_dc = np.ones(data.shape)
                elif imdcs == 'densCorr':
                    im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf),
                                      ph=ph).real.flatten().copy()
                elif imdcs == 'imFull':
                    im_dc = im
Example #21
ph = np.exp(1j * ph)

# Generate the PDF for the sampling case -- note that this type is only used in non-directionally biased cases.
pdf = samp.genPDF(N, P, pctg, radius=radius, cyl=[1, 294, 294], style='mult')
# Set the sampling pattern -- checked and this gives the right percentage
k = samp.genSampling(pdf, 50, 2)[0].astype(int)

# Here is where we build the undersampled data
data = np.fft.fftshift(k) * tf.fft2c(im, ph=ph_ones)
# ph = phase_Calculation(im,is_kspace = False)
# data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
#filt = tf.fermifilt(N)
#data = data * filt

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data, ph=ph)
minval = np.min(abs(im))
maxval = np.max(abs(im))
# Primary first guess. What we're using for now. Density corrected
#im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph=ph).real.flatten().copy()
#for imdcs in ['zeros','ones','densCorr','imFull']:
for imdcs in ['densCorr', 'densCorr_Completed']:
    if imdcs == 'zeros':
        im_dc = np.zeros(data.shape)
    elif imdcs == 'ones':
        im_dc = np.ones(data.shape)
    elif imdcs == 'densCorr':
        pdfDiv = pdf.copy()  # copy so that we don't modify pdf in place
        pdfZeros = np.where(pdf == 0)
        pdfDiv[pdfZeros] = 1
        im_dc = tf.ifft2c(data / np.fft.ifftshift(pdfDiv),
Example #22
# Diffusion information that we need
if dirFile:
    dirs = np.loadtxt(dirFile)
    M = d.calc_Mid_Matrix(dirs, nmins=4)
else:
    dirs = None
    M = None

# Here is where we build the undersampled data
data = np.fft.ifftshift(k) * tf.fft2c(im, ph)
#ph = phase_Calculation(im,is_kspace = False)
#data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());

# IMAGE from the "scanner data"
im_scan = tf.ifft2c(data, ph)

# Primary first guess. What we're using for now. Density corrected
im_dc = tf.ifft2c(data / np.fft.ifftshift(pdf), ph).flatten().copy()

# Optimization algorithm -- this is where everything culminates together
im_result = opt.minimize(optfun,
                         im_dc,
                         args=(N, TVWeight, XFMWeight, data, k, strtag,
                               dirWeight, dirs, M, nmins, scaling_factor, L,
                               ph),
                         method=method,
                         jac=derivative_fun,
                         options={
                             'maxiter': ItnLim,
                             'gtol': epsilon,
Example #23
def create_scanner_k_space(im,
                           N,
                           P=2,
                           pctg=0.25,
                           dirData=False,
                           dirs=None,
                           radius=0.2,
                           cyl=[0],
                           style='mult',
                           pft=False,
                           ext=0.5):
    '''
    Take the image data, its size, and the diffusion directions (if they exist) so that we can
    create a retrospectively undersampled set of k-space data for testing.
    '''

    # Create a pdf so that we can use it to make a starting point
    pdf = samp.genPDF(N[-2:],
                      P,
                      pctg,
                      radius=radius,
                      cyl=[1, N[-2], N[-1]],
                      style='mult',
                      pft=pft,
                      ext=0.5)

    # Generate the sampling scheme, depending on whether or not we have directional data
    if dirData:
        if dirs is None:
            raise ValueError(
                'If we have directional data, you need to feed this into the function'
            )
        k = d.dirPDFSamp([int(dirs.shape[0]), N[-2], N[-1]],
                         P=2,
                         pctg=pctg,
                         radius=radius,
                         dirs=dirs,
                         cyl=True,
                         taper=0.25)[0]
    else:
        k = samp.genSampling(pdf, 50, 2)[0].astype(int)

    # Since our functions are built to work in 3D datasets, here we
    # make sure that N and things are all in 3D
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif len(N) == 3:
        if k.ndim == 2:
            k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    k = np.fft.fftshift(k, axes=(-2, -1))
    # Convert the image data into k-space
    ph_ones = np.ones(N, complex)
    dataFull = tf.fft2c(im, ph=ph_ones, axes=(-2, -1))
    # Apply our sampling
    data = k * dataFull
    # Now we need to calculate the phase in order to deal with the undersampled
    # image and the imperfect cancellation of terms
    #filtdata = gaussian_filter(im_scan_wph.real,0,0) + 1j*gaussian_filter(im_scan_wph.imag,0,0)
    #ph_scan = np.exp(1j*np.angle(filtdata.conj()))
    im_scan_wph = tf.ifft2c(data, ph=ph_ones)
    ph_scan = np.angle(
        gaussian_filter(im_scan_wph.real, 0) +
        1.j * gaussian_filter(im_scan_wph.imag, 0))
    ph_scan = np.exp(1j * ph_scan)
    im_scan = tf.ifft2c(data, ph=ph_scan)

    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf < 1e-4)
    pdfDiv[pdfZeros] = 1
    datadc = data / pdfDiv

    return dataFull, data, datadc, pdf, k, im_scan, ph_scan
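A hedged usage sketch of create_scanner_k_space is below. It assumes this repository's modules (samp, d, tf and the gaussian_filter import) are available, and the input file name is purely hypothetical.

import numpy as np

im = np.load('someBrainSlice.npy')        # hypothetical fully sampled 2D test image
N = np.array(im.shape)
dataFull, data, datadc, pdf, k, im_scan, ph_scan = create_scanner_k_space(
    im, N, P=2, pctg=0.25, radius=0.2)
# dataFull : fully sampled k-space of im
# data     : retrospectively undersampled k-space (dataFull * sampling mask k)
# datadc   : density-corrected undersampled k-space (data / pdfDiv)
# im_scan  : phase-corrected reconstruction from the undersampled data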
Example #24
# Here we step through the different step sizes, as per the number of
# steps (n) that we chose at the beginning.
    for j in range(nSteps+1):
        # we need to now step through and make sure that we 
        # take care of all the proper step sizes
        NSub = np.array([N[0], N[1]-2*locSteps[j], N[2]-2*locSteps[j]]).astype(int)
        ph_onesSub = np.ones(NSub, complex)
        ph_scanSub = np.zeros(NSub, complex)
        dataSub = np.zeros(NSub,complex)
        im_scanSub = np.zeros(NSub,complex)
        im_FullSub = np.zeros(NSub,complex)
        kSub = np.zeros(NSub)
        if locSteps[j]==0:
            kSub = k.copy()
            dataSub = np.fft.fftshift(kSub*dataFull,axes=(-2,-1))
            im_FullSub = tf.ifft2c(np.fft.fftshift(dataFull,axes=(-2,-1)),ph=ph_onesSub,sz=szFull)
        else:
            kSub = k[:,locSteps[j]:-locSteps[j],locSteps[j]:-locSteps[j]].copy()
            dataSub = np.fft.fftshift(kSub*dataFull[:,locSteps[j]:-locSteps[j],locSteps[j]:-locSteps[j]],axes=(-2,-1))
            im_FullSub = tf.ifft2c(np.fft.fftshift(dataFull[:,locSteps[j]:-locSteps[j],locSteps[j]:-locSteps[j]],axes=(-2,-1)),ph=ph_onesSub,sz=szFull)
                
        im_scan_wphSub = tf.ifft2c(dataSub, ph=ph_onesSub, sz=szFull)
        ph_scanSub = np.angle(gaussian_filter(im_scan_wphSub.real,0) +  1.j*gaussian_filter(im_scan_wphSub.imag,0))
        #ph_scanSub[i,:,:] = tf.matlab_style_gauss2D(im_scan_wphSub,shape=(5,5))
        ph_scanSub = np.exp(1j*ph_scanSub)
        im_scanSub = tf.ifft2c(dataSub, ph=ph_scanSub, sz=szFull)
        
        if j == 0:
            kMasked = kSub.copy()
        else:
            kHld = np.zeros(NSub)
Example #25
        break
    except:
        radius = 0.5*radius
# Set the sampling pattern -- checked and this gives the right percentage

if len(N) == 2:
    N = np.hstack([1, N])
    k = k.reshape(N)

# Here is where we build the undersampled data
ph_ones = np.ones(im.shape, complex)
data = tf.fft2c(im, ph=ph_ones)
data_full = tf.fft2c(imf, ph=ph_ones)

# IMAGE from the "scanner data"
im_scan_wph = tf.ifft2c(data, ph=ph_ones)
ph_scan = np.exp(1j*tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5)))
im_scan = tf.ifft2c(data, ph=ph_scan)

ph_full = np.exp(1j*tf.matlab_style_gauss2D(imf,shape=(5,5)))
im_full = tf.ifft2c(data_full, ph=ph_full)
#im_scan = abs(tf.ifft2c(data,ph_ones))
#data = tf.fft2c(im_scan,ph_ones).reshape(data.size).reshape(N)
#ph_scan = ph_ones

minval = np.min(abs(im))
maxval = np.max(abs(im))

# Primary first guess. What we're using for now. Density corrected
pdfDiv = pdf.copy()
pdfZeros = np.where(pdf<0.01)
Example #26
ph_scan = np.zeros(N, complex)
data = np.zeros(N, complex)
im_scan = np.zeros(N, complex)

ph_scanDir = np.zeros(N, complex)
dataDir = np.zeros(N, complex)
im_scanDir = np.zeros(N, complex)

print('Data Production')
for i in range(N[0]):
    data[i, :, :] = np.fft.fftshift(k[i, :, :]) * tf.fft2c(im[i, :, :],
                                                           ph=ph_ones)
    dataDir[i, :, :] = np.fft.fftshift(kDir[i, :, :]) * tf.fft2c(im[i, :, :],
                                                                 ph=ph_ones)
    dataFull[i, :, :] = np.fft.fftshift(tf.fft2c(im[i, :, :], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
    im_scan_wphDir = tf.ifft2c(dataDir[i, :, :], ph=ph_ones)
    ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
    ph_scanDir[i, :, :] = tf.matlab_style_gauss2D(im_scan_wphDir, shape=(5, 5))
    ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
    ph_scanDir[i, :, :] = np.exp(1j * ph_scanDir[i, :, :])
    im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
    im_scanDir[i, :, :] = tf.ifft2c(dataDir[i, :, :], ph=ph_scanDir[i, :, :])

print('Mix the Data')
dataDirComb = d.dirDataSharing(kDir,
                               dataDir,
                               dirs,
                               N[-2:],
                               maxCheck=5,
                               bymax=1)
Example #27
def runCSAlgorithm(
        fromfid=False,
        filename='/home/asalerno/Documents/pyDirectionCompSense/brainData/P14/data/fullySampledBrain.npy',
        sliceChoice=150,
        strtag=['', 'spatial', 'spatial'],
        xtol=[1e-2, 1e-3, 5e-4, 5e-4],
        TV=[0.01, 0.005, 0.002, 0.001],
        XFM=[0.01, .005, 0.002, 0.001],
        dirWeight=0,
        pctg=0.25,
        radius=0.2,
        P=2,
        pft=False,
        ext=0.5,
        wavelet='db4',
        mode='per',
        method='CG',
        ItnLim=30,
        lineSearchItnLim=30,
        alpha_0=0.6,
        c=0.6,
        a=10.0,
        kern=np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                       [[0., 0., 0.], [0., -1., 0.], [0., 1., 0.]],
                       [[0., 0., 0.], [0., -1., 1.], [0., 0., 0.]]]),
        dirFile=None,
        nmins=None,
        dirs=None,
        M=None,
        dirInfo=[None] * 4,
        saveNpy=False,
        saveNpyFile=None,
        saveImsPng=False,
        saveImsPngFile=None,
        saveImDiffPng=False,
        saveImDiffPngFile=None,
        disp=False):
    ##import pdb; pdb.set_trace()
    if fromfid == True:
        inputdirectory = filename[0]
        petable = filename[1]
        fullImData = rff.getDataFromFID(petable, inputdirectory, 2)[0, :, :, :]
        fullImData = fullImData / np.max(abs(fullImData))
        im = fullImData[:, :, sliceChoice]
    else:
        im = np.load(filename)[sliceChoice, :, :]

    N = np.array(im.shape)  # image Size

    pdf = samp.genPDF(N[-2:],
                      P,
                      pctg,
                      radius=radius,
                      cyl=np.hstack([1, N[-2:]]),
                      style='mult',
                      pft=pft,
                      ext=ext)
    if pft:
        print('Partial Fourier sampling method used')
    k = samp.genSampling(pdf, 50, 2)[0].astype(int)
    if len(N) == 2:
        N = np.hstack([1, N])
        k = k.reshape(N)
        im = im.reshape(N)
    elif (len(N) == 3) and ('dir' not in strtag):
        k = k.reshape(np.hstack([1, N[-2:]])).repeat(N[0], 0)

    ph_ones = np.ones(N[-2:], complex)
    ph_scan = np.zeros(N, complex)
    data = np.zeros(N, complex)
    im_scan = np.zeros(N, complex)
    for i in range(N[0]):
        k[i, :, :] = np.fft.fftshift(k[i, :, :])
        data[i, :, :] = k[i, :, :] * tf.fft2c(im[i, :, :], ph=ph_ones)

        # IMAGE from the "scanner data"

        im_scan_wph = tf.ifft2c(data[i, :, :], ph=ph_ones)
        ph_scan[i, :, :] = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
        ph_scan[i, :, :] = np.exp(1j * ph_scan[i, :, :])
        im_scan[i, :, :] = tf.ifft2c(data[i, :, :], ph=ph_scan[i, :, :])
        #im_lr = samp.loRes(im,pctg)

    # ------------------------------------------------------------------ #
    # A quick way to look at the PSF of the sampling pattern that we use #
    delta = np.zeros(N[-2:])
    delta[int(N[-2] / 2), int(N[-1] / 2)] = 1
    psf = tf.ifft2c(tf.fft2c(delta, ph_ones) * k, ph_ones)
    # ------------------------------------------------------------------ #

    ## ------------------------------------------------------------------ #
    ## -- Currently broken - Need to figure out what's happening here. -- #
    ## ------------------------------------------------------------------ #
    #if pft:
    #for i in xrange(N[0]):
    #dataHold = np.fft.fftshift(data[i,:,:])
    #kHold = np.fft.fftshift(k[i,:,:])
    #loc = 98
    #for ix in xrange(N[-2]):
    #for iy in xrange(loc,N[-1]):
    #dataHold[-ix,-iy] = dataHold[ix,iy].conj()
    #kHold[-ix,-iy] = kHold[ix,iy]
    ## ------------------------------------------------------------------ #

    pdfDiv = pdf.copy()
    pdfZeros = np.where(pdf == 0)
    pdfDiv[pdfZeros] = 1
    #im_scan_imag = im_scan.imag
    #im_scan = im_scan.real

    N_im = N.copy()
    hld, dims, dimOpt, dimLenOpt = tf.wt(im_scan[0].real, wavelet, mode)
    N = np.hstack([N_im[0], hld.shape])

    w_scan = np.zeros(N)
    w_full = np.zeros(N)
    im_dc = np.zeros(N_im)
    w_dc = np.zeros(N)

    for i in xrange(N[0]):
        w_scan[i, :, :] = tf.wt(im_scan.real[i, :, :], wavelet, mode, dims,
                                dimOpt, dimLenOpt)[0]
        w_full[i, :, :] = tf.wt(abs(im[i, :, :]), wavelet, mode, dims, dimOpt,
                                dimLenOpt)[0]

        im_dc[i, :, :] = tf.ifft2c(data[i, :, :] / np.fft.ifftshift(pdfDiv),
                                   ph=ph_scan[i, :, :]).real.copy()
        w_dc[i, :, :] = tf.wt(im_dc, wavelet, mode, dims, dimOpt, dimLenOpt)[0]

    w_dc = w_dc.flatten()
    im_sp = im_dc.copy().reshape(N_im)
    minval = np.min(abs(im))
    maxval = np.max(abs(im))
    data = np.ascontiguousarray(data)

    imdcs = [
        im_dc,
        np.zeros(N_im),
        np.ones(N_im),
        np.random.randn(np.prod(N_im)).reshape(N_im)
    ]
    imdcs[-1] = imdcs[-1] - np.min(imdcs[-1])
    imdcs[-1] = imdcs[-1] / np.max(abs(imdcs[-1]))
    mets = [
        'Density Corrected', 'Zeros', '1/2'
        's', 'Gaussian Random Shift (0,1)'
    ]
    wdcs = []
    for i in range(len(imdcs)):
        wdcs.append(
            tf.wt(imdcs[i][0], wavelet, mode, dims, dimOpt,
                  dimLenOpt)[0].reshape(N))

    ims = []
    #print('Starting the CS Algorithm')
    for kk in range(len(wdcs)):
        w_dc = wdcs[kk]
        print(mets[kk])
        for i in range(len(TV)):
            args = (N, N_im, dims, dimOpt, dimLenOpt, TV[i], XFM[i], data, k,
                    strtag, ph_scan, kern, dirWeight, dirs, dirInfo, nmins,
                    wavelet, mode, a)
            w_result = opt.minimize(f,
                                    w_dc,
                                    args=args,
                                    method=method,
                                    jac=df,
                                    options={
                                        'maxiter': ItnLim,
                                        'lineSearchItnLim': lineSearchItnLim,
                                        'gtol': 0.01,
                                        'disp': 1,
                                        'alpha_0': alpha_0,
                                        'c': c,
                                        'xtol': xtol[i],
                                        'TVWeight': TV[i],
                                        'XFMWeight': XFM[i],
                                        'N': N
                                    })
            if np.any(np.isnan(w_result['x'])):
                print("Some nan's found. Dropping TV and XFM values")
            elif w_result['status'] != 0:
                print(
                    'TV and XFM values too high -- no solution found. Dropping...'
                )
            else:
                w_dc = w_result['x']

        w_res = w_dc.reshape(N)
        im_res = np.zeros(N_im)
        for i in xrange(N[0]):
            im_res[i, :, :] = tf.iwt(w_res[i, :, :], wavelet, mode, dims,
                                     dimOpt, dimLenOpt)
        ims.append(im_res)

    if saveNpy:
        if saveNpyFile is None:
            np.save('./holdSave_im_res_' + str(int(pctg * 100)) + 'p_all_SP',
                    ims)
        else:
            np.save(saveNpyFile, ims)

    if saveImsPng:
        vis.figSubplots(ims,
                        titles=mets,
                        clims=(minval, maxval),
                        colorbar=True)
        if not disp:
            if saveImsPngFile is None:
                saveFig.save('./holdSave_ims_' + str(int(pctg * 100)) +
                             'p_all_SP')
            else:
                saveFig.save(saveImsPngFile)

    if saveImDiffPng:
        imdiffs, clims = vis.imDiff(ims)
        diffMets = [
            'DC-Zeros', 'DC-Ones', 'DC-Random', 'Zeros-Ones', 'Zeros-Random',
            'Ones-Random'
        ]
        vis.figSubplots(imdiffs, titles=diffMets, clims=clims, colorbar=True)
        if not disp:
            if saveImDiffPngFile is None:
                saveFig.save('./holdSave_im_diffs_' + str(int(pctg * 100)) +
                             'p_all_SP')
            else:
                saveFig.save(saveImDiffPngFile)

    if disp:
        plt.show()
Example #28
dataFull = np.zeros(N,complex)

ph_scan = np.zeros(N, complex)
data = np.zeros(N,complex)
im_scan = np.zeros(N, complex)

ph_scanDir = np.zeros(N, complex)
dataDir = np.zeros(N,complex)
im_scanDir = np.zeros(N, complex)

print('Data Production')
for i in range(N[0]):
    data[i,:,:] = np.fft.fftshift(k[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataDir[i,:,:] = np.fft.fftshift(kDir[i,:,:])*tf.fft2c(im[i,:,:], ph=ph_ones)
    dataFull[i,:,:] = np.fft.fftshift(tf.fft2c(im[i,:,:], ph=ph_ones))
    im_scan_wph = tf.ifft2c(data[i,:,:], ph=ph_ones)
    im_scan_wphDir = tf.ifft2c(dataDir[i,:,:], ph=ph_ones)
    ph_scan[i,:,:] = tf.matlab_style_gauss2D(im_scan_wph,shape=(5,5))
    ph_scanDir[i,:,:] = tf.matlab_style_gauss2D(im_scan_wphDir,shape=(5,5))
    ph_scan[i,:,:] = np.exp(1j*ph_scan[i,:,:])
    ph_scanDir[i,:,:] = np.exp(1j*ph_scanDir[i,:,:])
    im_scan[i,:,:] = tf.ifft2c(data[i,:,:], ph=ph_scan[i,:,:])
    im_scanDir[i,:,:] = tf.ifft2c(dataDir[i,:,:], ph=ph_scanDir[i,:,:])


print('Mix the Data')
dataDirComb = d.dirDataSharing(kDir,dataDir,dirs,N[-2:],maxCheck=5,bymax=1)
dataComb = d.dirDataSharing(k,data,dirs,N[-2:],maxCheck=5,bymax=1)
kDirComb = d.dirDataSharing(kDir,kDir,dirs,N[-2:],maxCheck=5,bymax=1)
kComb = d.dirDataSharing(k,k,dirs,N[-2:],maxCheck=5,bymax=1)
Example #29
data_b1_full = data[:dirs.shape[0],:,:]
data_b1 = data_b1_full*np.fft.fftshift(k,axes=(-2,-1))
N = data_b1.shape

######################################################
# Remember that the b0 will ALWAYS BE FULLY SAMPLED

# Try to find the phase of the fully sampled b0s as well, so have a ph_ones
ph_ones = np.ones(N[-2:])

ph_b0 = np.ones(data_b0.shape, dtype='complex')
im_b0_wph = np.zeros(data_b0.shape, dtype='complex')
im_b0_scan = np.zeros(data_b0.shape, dtype='complex')

for i in range(data_b0.shape[0]):
    im_b0_wph[i,:,:] = tf.ifft2c(data_b0[i,:,:],ph=ph_ones)
    ph_b0[i,:,:] = np.exp(1j*tf.matlab_style_gauss2D(im_b0_wph[i,:,:],shape=(5,5)))
    im_b0_scan[i,:,:] = tf.ifft2c(data_b0[i,:,:],ph_b0[i,:,:])

im_b0_avg = np.mean(im_b0_scan,axis=(0))
minval = np.min(abs(im_b0_avg))
maxval = np.max(abs(im_b0_avg))

###############################################################################
# Now for both the undersampled and fully sampled cases of the actual diffusion-weighted (b1) data
ph_b1 = np.ones(data_b1.shape, dtype='complex')
im_b1_wph = np.zeros(data_b1.shape, dtype='complex')
im_b1_scan = np.zeros(data_b1.shape, dtype='complex')

ph_b1_full = np.ones(data_b1.shape, dtype='complex')
im_b1_wph_full = np.zeros(data_b1.shape, dtype='complex')
Example #30
            # Set the sampling pattern -- checked and this gives the right percentage
            k = samp.genSampling(pdf, 50, 2)[0].astype(int)
            if len(N) == 2:
                N = np.hstack([1, N])
                k = k.reshape(N)

            # Here is where we build the undersampled data
            ph_ones = np.ones(im.shape, complex)
            data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph_ones)
            # data = np.fft.ifftshift(np.fft.fftshift(data)*ph.conj());
            #filt = tf.fermifilt(N)
            #data = data * filt

            # IMAGE from the "scanner data"
            #ph_ones = np.ones(im.shape, complex)
            im_scan_wph = tf.ifft2c(data, ph=ph_ones)
            ph_scan = tf.matlab_style_gauss2D(im_scan_wph, shape=(5, 5))
            #ph_scan = tf.matlab_style_gauss2D(im,shape=(5,5))

            #for i in range(phIter):
            #ph_scan = tf.laplacianUnwrap(ph_scan,N,[75,75])

            ph_scan = np.exp(1j * ph_scan)
            im_scan = tf.ifft2c(data, ph=ph_scan)
            #im_scan = abs(tf.ifft2c(data,ph_ones))
            #data = tf.fft2c(im_scan,ph_ones).reshape(data.size).reshape(N)
            #ph_scan = ph_ones

            minval = np.min(abs(im))
            maxval = np.max(abs(im))
Example #31
data_b1_full = data[:dirs.shape[0], :, :]
data_b1 = data_b1_full * np.fft.fftshift(k, axes=(-2, -1))
N = data_b1.shape

######################################################
# Remember that the b0 will ALWAYS BE FULLY SAMPLED

# Try to find the phase of the fully sampled b0s as well, so have a ph_ones
ph_ones = np.ones(N[-2:])

ph_b0 = np.ones(data_b0.shape, dtype='complex')
im_b0_wph = np.zeros(data_b0.shape, dtype='complex')
im_b0_scan = np.zeros(data_b0.shape, dtype='complex')

for i in range(data_b0.shape[0]):
    im_b0_wph[i, :, :] = tf.ifft2c(data_b0[i, :, :], ph=ph_ones)
    ph_b0[i, :, :] = np.exp(
        1j * tf.matlab_style_gauss2D(im_b0_wph[i, :, :], shape=(5, 5)))
    im_b0_scan[i, :, :] = tf.ifft2c(data_b0[i, :, :], ph_b0[i, :, :])

im_b0_avg = np.mean(im_b0_scan, axis=(0))
minval = np.min(abs(im_b0_avg))
maxval = np.max(abs(im_b0_avg))

###############################################################################
# Now for both the undersampled and fully sampled cases of the actual diffusion-weighted (b1) data
ph_b1 = np.ones(data_b1.shape, dtype='complex')
im_b1_wph = np.zeros(data_b1.shape, dtype='complex')
im_b1_scan = np.zeros(data_b1.shape, dtype='complex')

ph_b1_full = np.ones(data_b1.shape, dtype='complex')
Example #32
def calcPhase(im, k, w1=10, w2=8, w3=4, eps = np.pi/18, sig = 0):
    '''
    This function iteratively estimates the phase, as per Tisdall and Atkins 2005 (https://www.cs.sfu.ca/~stella/papers/2005/spie.pdf)
    
    With X(p) being our spatial domain value at pixel p:
        - X(p) = image we have
        - s(p) = signal proper
        - n_r(p) = real noise -- Gaussian
        - n_i(p) = imaginary noise -- Gaussian
        
    Assume: X(p) = s(p) exp[i φ(p)] + n_r(p) + i n_i(p)
    
    We want to calculate φ^(p), which is an estimate of φ(p), and then multiply it in. If φ(p) == φ^(p), then:
    
    X(p) exp[-i φ^(p)] = s(p) + (n_r(p) + i n_i(p)) exp[-i φ^(p)]
        Because exp[-i φ^(p)] is just a rotation, rotating the noise simply gives different noise, so (n_r(p) + i n_i(p)) exp[-i φ^(p)] == n_r`(p) + i n_i`(p)
    
    So our new measurement is:
        X(p) exp[-i φ^(p)] = s(p) + (n_r`(p) + i n_i`(p))
        
    '''
    if sig==0:
        sig = np.var(im[:50,:50])
    
    ph_ones = np.ones(im.shape)
    data = np.fft.ifftshift(k) * tf.fft2c(im, ph=ph_ones)
    im_scan_ph = tf.ifft2c(data, ph=ph_ones)
    ph = tf.matlab_style_gauss2D(im_scan_ph,shape=(5,5))
    im_scan = tf.ifft2c(data, ph=ph)

    N = im.shape
    window1 = int(np.ceil(w1/2))
    window2 = int(np.ceil(w2/2))
    window3 = int(np.ceil(w3/2))

    
    im_wrap = np.pad(im_scan,window1,'wrap')

    #ph = np.zeros(im.shape,complex)
    #ph_new = np.zeros(im.shape,complex)
    ph_new = ph.copy()
    wgts = np.ones(im.size)    
        
    '''
    
    We then do this over three steps. Step 1 is:
    
        1. Apply the given phase correction, φ^ to the recorded image, I to get our current best guess image, I`.
    
        2. Calculate the mean of the imaginary component of all the pixels in I` in a window of width w1 around p.
        
        3. If the mean imaginary component is greater than ||p||, the magnitude of p, set p’s new phase estimate, φ^`(p), to be π/2 to correct as much as possible.
        
        4. If the mean imaginary component is less than −||p||, set φ^`(p)=−π/2 to correct as much as possible.
        
        5. Set φ^`(p) so p’s phase is on (−π/2,π/2) and its imaginary component cancels the mean component of all the other pixels in the window.
    
    
    '''
    
    ph = ph_new.copy()
    
    for x in range(N[0]):
        for y in range(N[1]):
            mn = np.mean(im_wrap[x:x+1+w1,y:y+1+w1].imag)
            if mn > abs(im_scan[x,y]):
                ph_new[x,y] = +1j
            elif mn < -abs(im_scan[x,y]):
                ph_new[x,y] = -1j
            else:
                ph_new[x,y] = abs(ph_new[x,y].real) - mn*1j
                # The abs() is required here to ensure that the phase is on (-π/2,π/2)
    
    
    ''' 
    
    Step 2 requires us to look at those times where we shifted positives to negatives, and try to flip it back when necessary.
    
    This then follows three more substeps:
        
        1. Calculate the mean of the distances, wrapped onto the range [−π,π), from φ^(p) to each other phase estimate pixel in a window of width w2 centered on p.
        
        2. Calculate the mean of the distances, wrapped onto the range [−π,π), from φ^(p) + π to each other phase estimate pixel in a window of width w2 centered on p.
        
        3. If the second mean distance is smaller than the first, mark p as flipped.
        
    '''
    
    # need to map phases from [-pi,pi)
    #ph_wrap_angles_piShift = (np.angle(np.pad(ph_new,window2,'wrap')) + np.pi) % (2*np.pi)
    ph_wrap_angles = np.arctan2(ph_new.imag, ph_new.real)
    cnt = 0
    
    for x in range(N[0]):
        for y in range(N[1]):
            diffs = np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=0)) + \
                    np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=1)) 
            ph_wrap_hold = np.exp(1j*(ph_wrap_angles[x,y]+np.pi))  # shift the phase estimate by pi
            ph_wrap_angles[x,y] = np.arctan2(ph_wrap_hold.imag,ph_wrap_hold.real)
            diffs_piShift = np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=0)) + \
                            np.sum(np.diff(ph_wrap_angles[x:x+1+w2,y:y+1+w2],axis=1)) 
            
            if diffs_piShift < diffs:
                #print('Smaller')
                cnt+=1
                ph_new[x,y] = np.exp(1j*ph_wrap_angles[x,y])
            
            ph_wrap_hold = np.exp(1j*(ph_wrap_angles[x,y]-np.pi))  # shift back by pi before moving on
            ph_wrap_angles[x,y] = np.arctan2(ph_wrap_hold.imag,ph_wrap_hold.real)
        
    ph_new = np.exp(1j*ph_wrap_angles)
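As a small, self-contained illustration of the model in the docstring (everything below is synthetic and purely illustrative): multiplying X(p) by exp[-i φ^(p)] with a good phase estimate leaves only noise in the imaginary part.

import numpy as np

rng = np.random.default_rng(0)
n = 64
s = rng.random((n, n))                                  # non-negative signal proper
phi = np.pi / 6 * np.ones((n, n))                       # smooth "true" phase
noise = 0.01 * (rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)))
X = s * np.exp(1j * phi) + noise                        # measured image

phi_hat = phi.copy()                                    # pretend the estimate is perfect
corrected = X * np.exp(-1j * phi_hat)
print(np.mean(np.abs(corrected.imag)), np.mean(np.abs(X.imag)))
# after correction the imaginary part sits at the noise level, not the signal level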
    
    
            
Example #33
#ax.set_ylabel('RMSE')
#ax.set_title('RMSE for Different Pctg and Starting Point Combinations')
#ax.set_xticks(ind)
#ax.set_xticklabels(('zeros','ones','densCorr','imFull'))
#ax.set_xlabel('Percent Sampled')
#ax.legend((rects1[0], rects2[0], rects3[0], rects4[0], rects5[0], rects6[0]),('20%', '25%', '33%', '40%', '50%'), title='im_dc',loc='upper right'); #loc='center left', bbox_to_anchor=(1, 0.5))


# im_dc and pctg Comparison
# TV = 0.005 -- 3
# XFM = 0.005 -- 3
rmse_lr = np.zeros(len(pctg))
for i in range(len(pctg)):
    lrLoc = int(np.ceil((N[0] - np.ceil(N[0]/np.sqrt(1/pctg[i])))/2))
    data_lr_us = np.fft.fftshift(tf.ifft2c(im, np.ones(im.shape)))[lrLoc:-lrLoc, lrLoc:-lrLoc]
    data_lr_rs = zpad(data_lr_us, im.shape)
    im_lr_rs = abs(tf.fft2c(np.fft.fftshift(data_lr_rs), np.ones(im.shape)))
    rmse_lr[i] = err.rmse(abs(im_lr_rs), im)

N = len(pctg)

ind = np.arange(N)+.5
width =0.15

fig, ax = plt.subplots()
rects0 = ax.bar(ind - 4*width, rmse_lr, width, color='b', alpha=0.1)
rects1 = ax.bar(ind - 3*width, rmse_data[:,3,3,0], width, color='b', alpha=.25)
rects2 = ax.bar(ind - 2*width, rmse_data[:,3,3,1], width, color='b', alpha=.5)
rects3 = ax.bar(ind - 1*width, rmse_data[:,3,3,2], width, color='b', alpha=.75)
rects4 = ax.bar(ind + 0*width, rmse_data[:,3,3,3], width, color='b', alpha=1)