import numpy as np
from scipy.ndimage import convolve, correlate

import transforms as tf  # project module providing TV, fft2c and xfm (assumed import name)


def objectiveFunctionTV(x, N, strtag, kern, dirWeight=0, dirs=None, nmins=0,
                        dirInfo=[None, None, None, None], a=10):
    '''Smoothed TV objective: (1/a)*log(cosh(a*TV(x))) is a differentiable
    surrogate for |TV(x)|.  For large a*x, where log(cosh) is effectively
    linear and cosh risks overflow, fall back to abs(TV(x)).'''
    if np.max(a * x) < 10:
        obj = (1.0 / a) * np.log(np.cosh(a * tf.TV(x, N, strtag, kern, dirWeight,
                                                   dirs, nmins, dirInfo)))
    else:
        obj = abs(tf.TV(x, N, strtag, kern, dirWeight, dirs, nmins, dirInfo))
    return obj

def objectiveFunctionTV(x, N, strtag, dirWeight=0, dirs=None, nmins=0,
                        dirInfo=[None, None, None, None], a=10):
    '''Kernel-free variant of the smoothed TV objective.
    NOTE: if both definitions live in the same module, this one shadows the
    kernel-based version above.'''
    return (1.0 / a) * np.log(np.cosh(a * tf.TV(x, N, strtag, dirWeight,
                                                dirs, nmins, dirInfo)))

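
# Illustrative sketch (not part of the original code): both objective variants
# above replace the non-smooth |t| with the surrogate (1/a)*log(cosh(a*t)),
# whose derivative is tanh(a*t).  The helper below, using NumPy only, shows
# that the surrogate approaches |t| to within log(2)/a as `a` grows, which is
# why a larger `a` gives a tighter but less smooth approximation.
def _demo_logcosh_surrogate():
    t = np.linspace(-2.0, 2.0, 401)
    for a_demo in (1.0, 10.0, 100.0):
        smooth_abs = (1.0 / a_demo) * np.log(np.cosh(a_demo * t))
        max_gap = np.max(np.abs(smooth_abs - np.abs(t)))
        print('a = %6.1f  max |smooth_abs - abs| = %.4f (bound log(2)/a = %.4f)'
              % (a_demo, max_gap, np.log(2) / a_demo))
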
def gTV(x, N, strtag, kern, dirWeight, dirs=None, nmins=0,
        dirInfo=[None, None, None], a=10):
    '''Gradient of the smoothed TV term.  Spatial directions use the chain
    rule d/dx (1/a)*log(cosh(a*TV)) = K^T tanh(a*TV), applied here by
    correlating with the flipped kernel; 'diff' (diffusion-direction) terms
    use the directional matrices stored in dirInfo.'''
    if nmins:
        Ahat = dirInfo[0]
        dI = dirInfo[1]
        inds = dirInfo[2]
    else:
        Ahat = None
        dI = None
        inds = None

    if len(x.shape) == 2:
        N = np.hstack([1, N])
    x0 = x.reshape(N)
    grad = np.zeros(np.hstack([N[0], len(strtag), N[1:]]))

    if len(kern.shape) == 3:
        Nkern = np.hstack([1, kern.shape[1:]])
    else:
        Nkern = kern.shape[1:]

    TV_data = tf.TV(x0, N, strtag, kern, dirWeight, dirs, nmins, dirInfo).real

    for i in range(len(strtag)):
        if strtag[i] == 'spatial':
            # Correlating with the flipped kernel applies the adjoint of the
            # TV operator to tanh(a*TV).
            kernHld = np.flipud(np.fliplr(kern[i])).reshape(Nkern)
            grad[:, i, :, :] = correlate(np.tanh(a * TV_data[i]), kernHld, mode='wrap')
        elif strtag[i] == 'diff':
            for nDir in range(N[0]):
                rows = np.where(dI[nDir] == 1)[0]
                dDir = np.zeros([len(rows) + 1, Ahat.shape[1], N[-2] * N[-1]], complex)
                Iq = x0[nDir, :]
                Ir = x0[inds[nDir], :]
                Irq = (Ir - Iq).reshape(nmins, -1)
                dDir[0] = -1 * np.tanh(a * np.dot(Ahat[nDir], Irq))
                for q in range(1, dDir.shape[0]):
                    diffHld = x0[nDir].flatten() - x0[rows[q - 1]].flatten()
                    dDir[q] = np.tanh(a * np.dot(Ahat[rows[q - 1]],
                                                 np.dot(dI[nDir, rows[q - 1], :].reshape(nmins, 1),
                                                        diffHld.reshape(1, -1))))
                grad[nDir, i, :, :] = np.sum(dDir, axis=(0, 1)).reshape(N[-2], N[-1])

    grad = np.sum(grad, axis=1)
    return grad

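
# Illustrative sketch (assuming an odd-sized kernel, which may differ from the
# project's actual `kern`): correlating with a flipped kernel equals convolving
# with the unflipped kernel under circular ('wrap') boundaries.  This is why
# the flipud/fliplr + correlate step above and the convolve-based gTV further
# below compute the same spatial gradient term.
def _demo_flip_correlate_equals_convolve():
    rng = np.random.RandomState(0)
    img = rng.randn(8, 8)
    kern2d = np.array([[0.0, 0.0, 0.0],
                       [0.0, 1.0, -1.0],
                       [0.0, 0.0, 0.0]])  # centered horizontal difference, odd size
    flipped = np.flipud(np.fliplr(kern2d))
    out_corr = correlate(img, flipped, mode='wrap')
    out_conv = convolve(img, kern2d, mode='wrap')
    assert np.allclose(out_corr, out_conv)
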
def gTV(x, N, strtag, dirWeight, dirs=None, nmins=0, dirInfo=None,
        p=1, l1smooth=1e-15, a=1.0):
    '''Kernel-free gradient of the smoothed TV term, built from circularly
    rolled differences and evaluated slice by slice.  The 'diff'
    (diffusion-direction) branch is not implemented in this variant.'''
    if dirInfo:
        M = dirInfo[0]
        dIM = dirInfo[1]
        Ause = dirInfo[2]
        inds = dirInfo[3]
    else:
        M = None
        dIM = None
        Ause = None
        inds = None

    if len(N) == 2:
        N = np.hstack([1, N])
    x0 = x.reshape(N)
    grad = np.zeros(np.hstack([N[0], len(strtag), N[1], N[2]]))

    for kk in range(N[0]):
        TV_data = tf.TV(x0[kk, :, :], N, strtag, dirWeight, dirs, nmins, M)
        for i in range(len(strtag)):
            if strtag[i] == 'spatial':
                # Gradient of sum((1/a)*log(cosh(a*TV))) along axis i:
                # -tanh(a*TV) plus the same term rolled forward by one pixel.
                TV_dataRoll = np.roll(TV_data[i, :, :], 1, axis=i)
                grad[kk, i, :, :] = -np.tanh(a * TV_data[i, :, :]) + np.tanh(a * TV_dataRoll)
            elif strtag[i] == 'diff':
                # Directional (diffusion) gradient not implemented here; see
                # the kernel-based gTV above.
                pass

    # NOTE (from original): this assumes slice-by-slice 2D processing; full 3D
    # support still needs to be implemented, and the iteration dimension should
    # be double-checked.
    grad = np.sum(grad, axis=0)
    return grad

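
# Illustrative sketch (assuming tf.TV returns the circular forward difference
# roll(x, -1, axis) - x; the project's sign convention may differ): a
# finite-difference check showing that -tanh(a*TV) + roll(tanh(a*TV), 1) is
# the exact gradient of sum((1/a)*log(cosh(a*TV))), the form used in the
# spatial branch above.
def _demo_roll_gradient_check(a=1.0, axis=0):
    rng = np.random.RandomState(1)
    x = rng.randn(6, 6)

    def tv(im):
        return np.roll(im, -1, axis=axis) - im          # circular forward difference

    def f(im):
        return np.sum((1.0 / a) * np.log(np.cosh(a * tv(im))))

    t = np.tanh(a * tv(x))
    grad_analytic = -t + np.roll(t, 1, axis=axis)       # same form as gTV above

    grad_numeric = np.zeros_like(x)
    eps = 1e-6
    for idx in np.ndindex(x.shape):
        xp = x.copy(); xp[idx] += eps
        xm = x.copy(); xm[idx] -= eps
        grad_numeric[idx] = (f(xp) - f(xm)) / (2 * eps)

    assert np.allclose(grad_analytic, grad_numeric, atol=1e-5)
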
def gTV(x, N, strtag, kern, dirWeight, dirs=None, nmins=0,
        dirInfo=[None, None, None, None], a=10):
    '''Kernel-based gradient of the smoothed TV term: the spatial part
    convolves tanh(a*TV) with the difference kernel (the adjoint of the TV
    operator); the 'diff' branch is not implemented in this variant.'''
    if nmins:
        M = dirInfo[0]
        dIM = dirInfo[1]
        Ause = dirInfo[2]
        inds = dirInfo[3]
    else:
        M = None
        dIM = None
        Ause = None
        inds = None

    if len(x.shape) == 2:
        N = np.hstack([1, N])
    x0 = x.reshape(N)
    grad = np.zeros(np.hstack([N[0], len(strtag), N[1:]]), dtype=float)

    if len(kern.shape) == 3:
        Nkern = np.hstack([1, kern.shape[1:]])
    else:
        Nkern = kern.shape[1:]

    TV_data = tf.TV(x0, N, strtag, kern, dirWeight, dirs, nmins, dirInfo)

    for i in range(len(strtag)):
        if strtag[i] == 'spatial':
            grad[:, i, :, :] = convolve(np.tanh(a * TV_data[i]),
                                        kern[i].reshape(Nkern), mode='wrap')
        elif strtag[i] == 'diff':
            # Directional (diffusion) gradient not handled here.
            pass

    grad = np.sum(grad, axis=1)
    return grad

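
# Illustrative sketch (assuming tf.TV applies `kern` by circular correlation;
# if it convolves instead, the roles of correlate and convolve below simply
# swap): a finite-difference check that convolve(tanh(a*TV), kern) is the
# exact gradient of sum((1/a)*log(cosh(a*TV))) for an odd-sized stand-in
# kernel, matching the spatial branch above.
def _demo_kernel_gradient_check(a=10.0):
    rng = np.random.RandomState(2)
    x = rng.randn(6, 6)
    kern2d = np.array([[0.0,  0.0, 0.0],
                       [0.0, -1.0, 1.0],
                       [0.0,  0.0, 0.0]])  # stand-in horizontal difference kernel

    def tv(im):
        return correlate(im, kern2d, mode='wrap')

    def f(im):
        return np.sum((1.0 / a) * np.log(np.cosh(a * tv(im))))

    # Chain rule: grad = K^T tanh(a*K*x); for circular correlation with an odd
    # kernel, the adjoint K^T is circular convolution with the same kernel.
    grad_analytic = convolve(np.tanh(a * tv(x)), kern2d, mode='wrap')

    grad_numeric = np.zeros_like(x)
    eps = 1e-6
    for idx in np.ndindex(x.shape):
        xp = x.copy(); xp[idx] += eps
        xm = x.copy(); xm[idx] -= eps
        grad_numeric[idx] = (f(xp) - f(xm)) / (2 * eps)

    assert np.allclose(grad_analytic, grad_numeric, atol=1e-5)
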
def optfun(x, N, lam1, lam2, data, k, strtag, ph, dirWeight=0, dirs=None,
           dirInfo=[None, None, None, None], nmins=0, wavelet='db4', mode="per", a=1.0):
    '''
    Objective function for the reconstruction: a data-consistency (L2) term on
    the masked k-space residual, plus the smoothed (log-cosh) TV penalty
    weighted by lam1 and the smoothed wavelet-sparsity (xfm) penalty weighted
    by lam2.  x is the image estimate being optimized; dirInfo[0] is M.
    '''
    tv = 0
    xfm = 0
    data.shape = N
    x.shape = N

    # Data consistency: masked difference between measured and predicted k-space.
    obj_data = np.fft.fftshift(k) * (data - tf.fft2c(x, ph))
    obj = np.sum(obj_data * obj_data.conj())  # squared L2 norm

    # TV cost (smoothed L1 norm of the TV transform).
    if lam1:
        tv = np.sum((1.0 / a) * np.log(np.cosh(a * tf.TV(x, N, strtag, dirWeight,
                                                         dirs, nmins, dirInfo))))

    # Wavelet (xfm) sparsity cost.
    if lam2:
        if len(N) > 2:
            xfm = 0
            for kk in range(N[0]):
                wvlt = tf.xfm(x[kk, :, :], wavelet=wavelet, mode=mode)
                xfm += np.sum((1.0 / a) * np.log(np.cosh(a * wvlt[0])))
                for i in range(1, len(wvlt)):
                    xfm += np.sum([np.sum((1.0 / a) * np.log(np.cosh(a * wvlt[i][j])))
                                   for j in range(3)])
        else:
            wvlt = tf.xfm(x, wavelet=wavelet, mode=mode)
            xfm = np.sum((1.0 / a) * np.log(np.cosh(a * wvlt[0])))
            for i in range(1, len(wvlt)):
                xfm += np.sum([np.sum((1.0 / a) * np.log(np.cosh(a * wvlt[i][j])))
                               for j in range(3)])

    # Not the most efficient way to do this, but the shapes need to be reset
    # because the optimizer works on flattened arrays.
    x.shape = (x.size,)
    data.shape = (data.size,)

    return abs(obj + lam1 * tv + lam2 * xfm)

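
# Illustrative sketch (using numpy's orthonormal FFT as a stand-in for
# tf.fft2c, which additionally applies the phase map `ph`, and a random binary
# mask as the sampling pattern `k`): the data-consistency term in optfun is
# the squared L2 norm of the masked k-space residual, which vanishes when the
# image estimate reproduces the measured data.
def _demo_data_consistency_term():
    rng = np.random.RandomState(3)
    img = rng.randn(8, 8)
    mask = (rng.rand(8, 8) > 0.5).astype(float)           # undersampling mask
    data = mask * np.fft.fft2(img, norm='ortho')          # measured k-space

    def data_consistency(x_est):
        resid = mask * (data - np.fft.fft2(x_est, norm='ortho'))
        return np.abs(np.sum(resid * resid.conj()))       # squared L2 norm

    assert np.isclose(data_consistency(img), 0.0)         # exact image -> zero cost
    return data_consistency(np.zeros_like(img))
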