def ATUV(yy):
    """Adjoint operator: back-project weighted multicoil k-space data onto
    each temporal basis image via the density-compensated adjoint NUFFT.

    Returns a (nbasis, 1, 2, nx, nx) CUDA tensor (real/imag in dim 2).
    NOTE(review): relies on module-level v1, dcf, ktrajT, smapT, adjnufft_ob.
    """
    yy1 = torch.cuda.FloatTensor(nbasis, 1, 2, nx, nx).fill_(0)
    for i in range(nbasis):
        # Weight the data by the i-th temporal basis function.
        tmp = v1[:, :, i] * yy
        tmp = torch.reshape(tmp, (1, nch, 2, NF * nintl * nx))
        # Density-compensated adjoint NUFFT with coil sensitivity maps.
        yy1[i] = adjnufft_ob(tmp * (dcf[i]).unsqueeze(0).cuda(),
                             ktrajT[i].unsqueeze(0).cuda(),
                             smap=smapT[i].unsqueeze(0).cuda())
    return yy1


# Solve the normal equations A^T A x = A^T b by conjugate gradient.
recT = torch.zeros_like(atb)
B = lambda x: ATUV(AUV(x))
sf.tic()
recT = sf.pt_cg(B, atb, recT, 16, 1e-5)
sf.toc()

# Normalize the reconstruction (and k-space) by the peak magnitude,
# computed over the real/imag axis (dim -3).
mx = torch.max((recT**2).sum(dim=-3).sqrt())
recT = recT / mx
kdataT1 = kdataT / mx

# Flatten per-frame spiral readouts for the next processing stage.
# NOTE(review): the hard-coded 3 here is `nch` in the parallel chunk later
# in this file — confirm nch == 3 for this dataset.
kdata_ch = torch.reshape(kdata_ch, (1, 3, 2, NF, nintl * nx))
ktrajt = torch.reshape(ktrajT, (nbasis, 2, NF, nintl * nx))
dcft = torch.reshape(dcf, (nbasis, 3, 2, NF, nintl * nx))
#z1=torch.cuda.FloatTensor(3,2*nbasis,nx,nx).fill_(0)
#z2=torch.cuda.FloatTensor(3,1,nbasis,nf1).fill_(0)
#for itt in range(3):
itt = 0
#L1=np.asarray(d2['L300'])
# z=torch.mm(D,Y) # #z=z.permute(3,0,1,2) # z=torch.reshape(z,(nbasis,nx,nx,2)) # u2=torch.reshape(u2,(nbasis,nx,nx,2)) # z=u2-z # return z #%% lam2 = 1e-2 cgIter = 10 cgTol = 1e-15 out_iter = 5 cgIter1 = 10 recT = torch.zeros_like(atbT) AtA1 = lambda x: AtAUV(x, csmT, maskT) #+lam2*reg_term1(x,VT,NF,lam2) recT = sf.pt_cg(AtA1, atbT, recT, 40, cgTol) #%% run method #indx=torch.randint(0,NF,(NF,)) #indx=0:NF #indx=torch.tensor(indx) vt = VT AtA = lambda x: AtAUV(x, csmT, maskT) + lam2 * reg_term(x, vt) #recT=torch.zeros_like(atbT) atbT1 = atbT sf.tic() for i in range(out_iter): atbT1 = atbT + lam2 * rhs(recT, vt) recT = sf.pt_cg(AtA, atbT1, recT, cgIter, cgTol) sf.toc() #%%
    # (tail of a regularized right-hand-side routine; its `def` lies outside
    # this chunk — indentation reconstructed, TODO confirm against original)
    reg = torch.mm(sT, x)
    reg = torch.reshape(reg, (nbasis, nx, nx, 2))
    atbv = atbv + reg
    # Free intermediates early to keep GPU memory down.
    del x, reg
    return atbv


#%%
# Plain CG solve of the normal equations with the frame-wise mask.
cgTol = 1e-15
cgIter1 = 15
maskT1 = torch.reshape(maskT1, (NF, nx, nx))
AtA1 = lambda x: AtAUV(x, csmT, maskT1)
recT = torch.zeros_like(atbT)
sf.tic()
recT = sf.pt_cg(AtA1, atbT.cuda(), recT.cuda(), cgIter1, cgTol)
sf.toc()
#del AtA1, maskT1, atbT
#%%
# Deep-image-prior style setup: a CNN generator G driven by a trainable
# latent code z (both the network weights and z are optimized).
G = gn2.generator().to(gpu)
#G.load_state_dict(torch.load('tempUfull.pt'))
z = torch.randn((1, 10, 16, 16), device=gpu, dtype=dtype)
z = Variable(z, requires_grad=True)
#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
# NOTE(review): this optimizer call is truncated in this chunk — the second
# parameter group continues beyond the visible source.
optimizer = torch.optim.AdamW([{
    'params': z,
    'lr': 1e-4
}, {
# CG-SENSE demo: simulate noisy k-space from the ground truth, reconstruct
# with Tikhonov-regularized CG, and report PSNR against the original.
cgTol=1e-15
A=lambda x: pt_A(x,csmT,maskT)      # forward SENSE operator
At=lambda x: pt_At(x,csmT,maskT)    # adjoint SENSE operator
bT=A(orgT)
# Add complex Gaussian noise at level sigma to the simulated k-space.
noiseT=torch.randn(bT.shape)*sigma
noiseT=noiseT.to(gpu)
bT=bT+noiseT
atbT=At(bT)
#%% run cg-sense
# Regularized normal-equations operator: A^T A + lam * I.
B=lambda x: At(A(x))+lam*x
sf.tic()
# NOTE(review): pt_cg is called with 4 args here but 5 (with an explicit
# initial guess) elsewhere in this file — presumably a different helper
# version; confirm the signature.
recT=sf.pt_cg(B,atbT,cgIter,cgTol)
sf.toc()
#%%
# Convert to magnitude images normalized to [0, 1] for metric computation.
fn=lambda x: sf.normalize01(np.abs(sf.r2c(x.cpu().numpy())))
normOrg=fn(orgT)
normAtb=fn(atbT)
normRec=fn(recT)
psnrAtb=sf.myPSNR(normOrg,normAtb)
#ssimAtb=sf.mySSIM(normOrg,normAtb)
psnrRec=sf.myPSNR(normOrg,normRec)
#ssimRec=sf.mySSIM(normOrg,normRec)
print (' ' + 'Noisy ' + 'Rec')
print (' {0:.2f} {1:.2f}'.format(psnrAtb.mean(),psnrRec.mean()))
#print (' {0:.2f} {1:.2f}'.format(ssimAtb.mean(),ssimRec.mean()))
# Build torch tensors from the numpy sensitivity maps, sampling mask, and
# temporal basis, and move them to the GPU.
csmT = torch.tensor(sf.c2r(csm))
maskT = torch.tensor(maskTst)
VT = torch.tensor(V)
#%% take them to gpu
csmT = csmT.to(gpu)
maskT = maskT.to(gpu, dtype=torch.float32)
VT = VT.to(gpu)
#%%
# A^T b and CG solve of the normal equations for the basis images.
z = ss.ATBV(kdataT, csmT, VT)
maskT = torch.reshape(maskT, (NF, nx, nx))
AtA1 = lambda x: ss.AtAUV(x, csmT, maskT, VT)
recT = torch.zeros_like(z)
sf.tic()
# BUG FIX: the tolerance was written `1 - 15` (i.e. the integer -14), a typo
# for the scientific-notation literal 1e-15 used by every other CG call in
# this file. A negative tolerance disables early stopping entirely.
recT = sf.pt_cg(AtA1, z, recT.cuda(), 15, 1e-15)
sf.toc()
#del recT
#z = torch.randn((1,60,512,512),device=gpu, dtype=dtype)
#z = Variable(z,requires_grad=False)
#z1 = Variable(z1,requires_grad=False)
# Initialize the generator's latent input as the CG reconstruction plus a
# small amount of Gaussian noise, then pack real/imag and basis dims into
# channels: (1, 2*nbasis, nx, nx).
z = recT + torch.normal(0, 0.005, (recT.shape[0], recT.shape[1],
                                   recT.shape[2], recT.shape[3])).cuda()
z = z.permute(3, 0, 1, 2)
z = torch.reshape(z, (1, 2 * nbasis, nx, nx)).to(gpu)
# Optimization loop over the generator (body continues beyond this chunk).
for it in range(500):
    u1 = G(z.cuda())
    u1 = torch.reshape(u1, (2, nbasis, nx, nx))
sbasis = sbasis.to(gpu) #%%% Estimating coil images and coil senstivity maps nuf_ob = KbNufft(im_size=im_size).to(dtype) nuf_ob = nuf_ob.to(gpu) adjnuf_ob = AdjKbNufft(im_size=im_size).to(dtype) adjnuf_ob = adjnuf_ob.to(gpu) coilimages = torch.zeros((1, nch, 2, nx, nx)) A = lambda x: nuf_ob(x, ktrajT) At = lambda x: adjnuf_ob(x, ktrajT) AtA = lambda x: At(A(x)) for i in range(nch): temp = adjnuf_ob(kdataT[:, i].unsqueeze(1), ktrajT[0].unsqueeze(0)) ini = torch.zeros_like(temp) coilimages[:, i] = sf.pt_cg(AtA, temp, ini, 50, 1e-15) X = coilimages.cpu().numpy() x = X[:, :, 0] + X[:, :, 1] * 1j x = np.transpose(x, (0, 2, 3, 1)) x_f = ifft(x, (0, 1, 2)) csmTrn = espirit(x_f, 6, 24, 0.1, 0.9925) csm = csmTrn[0, :, :, :, 0] csm = np.transpose(csm, (2, 0, 1)) smap = np.stack((np.real(csm), np.imag(csm)), axis=1) smap = np.tile(smap, (nbasis, 1, 1, 1, 1)) smapT = torch.tensor(smap).to(dtype) smapT = smapT.to(gpu) #%% generate MRI sense Nufft operator nufft_ob = MriSenseNufft(im_size=im_size, smap=smapT).to(dtype)
    # (tail of an elementwise basis-weighting helper; its `def` lies outside
    # this chunk — indentation reconstructed, TODO confirm)
    yy2 = sb.cuda() * x
    return yy2


def Sbasis2(sb, x):
    """Apply the basis-coefficient matrix sb^T to the flattened basis images.

    x is reshaped to (nbasis, 2*nx*nx), multiplied by sb.T on the GPU, and
    reshaped back to (nbasis, 1, 2, nx, nx).
    """
    x = torch.reshape(x, (nbasis, 1 * 2 * nx * nx))
    yy2 = sb.T.cuda() @ x
    yy2 = torch.reshape(yy2, (nbasis, 1, 2, nx, nx))
    return yy2


# CG solve of the normal equations (optionally basis-regularized, see the
# commented term), followed by peak-magnitude normalization.
recT = torch.zeros_like(atb)
#recT=atb
B = lambda x: ATUV(AUV(x))  #+0.0001*Sbasis2(torch.diag(sb),x)
sf.tic()
recT = sf.pt_cg(B, atb, recT, 4, 1e-5)
#recT=cf.tor_conjgrad(B,atb,recT,6,1e-6)
sf.toc()
mx = torch.max((recT**2).sum(dim=-3).sqrt())
recT = recT / mx
kdataT1 = kdataT / mx
#%%
# Flatten per-frame spiral readouts for the next processing stage.
kdata_ch = torch.reshape(kdata_ch, (1, nch, 2, NF, nintl * nx))
ktrajt = torch.reshape(ktrajT, (nbasis, 2, NF, nintl * nx))
dcft = torch.reshape(dcf, (nbasis, nch, 2, NF, nintl * nx))
#z1=torch.cuda.FloatTensor(3,2*nbasis,nx,nx).fill_(0)
#z2=torch.cuda.FloatTensor(3,1,nbasis,nf1).fill_(0)
#for itt in range(3):
tmp1 = torch.reshape(tmp1, (nbasis, nbasis, nx * nx * 2)) #tmp1=np.matmul(np.expand_dims(tmp,axis=0).T,np.expand_dims(V[:,i],axis=1).T) #tmp7=tmp1.sum(axis=0) #tmp1=tmp1.sum(axis=1) res = res + tmp1 #AtA1=lambda x: AtAUV(x,csmT,maskT)+lam2*reg_term1(x,VT,NF,lam2) #%% run method indx = torch.randint(0, NF, (10, )) #indx=0:NF #indx=torch.tensor(indx) epLoss = 0 vt = VT[:, indx] recT = torch.zeros_like(atbT) AtA = lambda x: AtAUV(x, csmT, csmConj, res) recT = sf.pt_cg(AtA, atbT, atbT, 80, cgTol) #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%555 #from torch.utils.tensorboard import SummaryWriter # #diter=iter(ldr) #ii,orgg=diter.next() #writer = SummaryWriter('runs2/') #ii=ii.to(gpu) #writer.add_graph(atbT1,input_to_model=None) #writer.close() AtA = lambda x: AtAUV(x, csmT, csmConj, res) + lam2 * reg_term(x, vt) for ep in range(epochs): #sf.tic()
AtA1 = lambda x: AtAUV(x, csmT, maskT) #+lam2*reg_term1(x,VT,NF,lam2) #%% run method #recT=atbT #sf.tic() #for i in range(out_iter): # indx=torch.randint(0,NF,(50,)) # vt=VT[:,indx] # AtA=lambda x: AtAUV(x,csmT,maskT)+lam2*reg_term(x,vt) # atbT1=atbT+lam2*rhs(recT,vt) # recT=sf.pt_cg(AtA,atbT1,cgIter,cgTol) #sf.toc() #%% recT = torch.zeros_like(atbT) sf.tic() recT = sf.pt_cg(AtA1, atbT, recT, cgIter1, cgTol) sf.toc() #%% rec = np.squeeze(recT.cpu().numpy()) rec = rec[:, :, :, 0] + 1j * rec[:, :, :, 1] #for i in range(30): # plt.imshow((np.squeeze(np.abs(xx[27,:,:]))),cmap='gray') # plt.show() # plt.pause(1) #xx=np.fft.fftshift(np.fft.fftshift(atbV,1),2) rec = np.reshape(rec, (nbasis, nx * nx)) rec1 = rec.T @ V rec1 = np.reshape(rec1, (nx, nx, NF)) rec1 = np.fft.fftshift(np.fft.fftshift(rec1, 0), 1) #plt.figure(1,[15,15]) for i in range(10):
    # (tail of a D-basis projection helper, D D^T u reshaped to basis images;
    # its `def` lies outside this chunk — indentation reconstructed,
    # TODO confirm)
    x = torch.mm(D.T, u)
    u2 = torch.mm(D, x)
    u2 = torch.reshape(u2, (nbasis, nx, nx, 2))
    return u2


#%% run cg-sense
# Hyper-parameters for the regularized alternating CG solve.
lam2 = 1e-1
cgIter = 3
cgTol = 1e-15
recT = torch.zeros_like(atbT)
vt = VT
AtA = lambda x: AtA_UV(x) + lam2 * reg_term(x, vt)
#recT=torch.zeros_like(atbT)
atbT1 = atbT
sf.tic()
# Two outer alternations: refresh the RHS from the current estimate, then a
# few warm-started inner CG iterations.
for i in range(2):
    atbT1 = atbT + lam2 * rhs(recT, vt)
    recT = sf.pt_cg(AtA, atbT1, recT, cgIter, cgTol)
sf.toc()
#%%
# Convert to complex numpy and display a range of frames of basis image 3.
rec = np.squeeze(recT.cpu().numpy())
rec = rec[:, 0] + 1j * rec[:, 1]
for i in range(100, 180):
    plt.imshow((np.squeeze(np.abs(rec[3, :, :, i]))), cmap='gray')
    plt.show()
    plt.pause(0.01)