        # move the sensitivity maps and trajectory to the CPU to free GPU memory
        smapT = smapT.cpu()
        ktrajT = ktrajT.to(cpu)
        # per-frame density-compensation weights, replicated across the 3 batch
        # copies and the 2 (real, imaginary) channels, then flattened per frame
        df = torch.reshape(dcf[0, 0, 0], (NF, nintl * nx))
        df = df.unsqueeze(1).unsqueeze(1)
        df = df.repeat(1, 3, 2, 1)
        df1 = torch.reshape(df, (NF, 3 * 2 * nintl * nx))

        sf.tic()
        for it in range(50):
            # fit the spatial generator so its output matches the reference images atb
            u1 = G(z.cuda())
            u1 = torch.reshape(u1, (nbasis, 2, nx, nx))

            loss = (u1 - atb).pow(2).sum()
            print(it, loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        sf.toc()
def myPSNR(org, recon):
    sqrError = abs(org - recon) ** 2
    N = torch.prod(torch.tensor(org.shape[-2:]))
    mse = sqrError.sum(dim=(-1, -2)) / N

    # peak value of the reference image; the small offsets guard against zeros
    mx = org.abs().amax(dim=(-1, -2)) + 1e-15
    psnr = 10 * torch.log10(mx ** 2 / (mse + 1e-15))

    return psnr
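# A minimal usage sketch (assuming org and recon are real tensors whose last
# two dimensions are the image axes, e.g. shape (T, 2, H, W)):
#   psnr_per_frame = myPSNR(org, recon)   # dB, one value per leading index
#   print(torch.mean(psnr_per_frame).item())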
        
#%%        
        G = SmallModel1().to(gpu)
        G.load_state_dict(torch.load('tempp_10000.pt'))
        optimizer = torch.optim.Adam([{'params': G.parameters(), 'lr': 1e-4}])
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-5)
        # latent for the spatial generator: the current low-rank reconstruction
        z = recT1
        z = torch.reshape(z, (1, 2 * nbasis, nx, nx)).to(gpu)
        pp = np.array([])  # PSNR history over pretraining

        sf.tic()
        for rn in range(40):
            u1 = G(z)
            u1 = torch.reshape(u1, (nbasis, 1, 2, nx, nx))

            # L1 penalty on the generator weights
            l1_reg = 0.
            for param in G.parameters():
                l1_reg += param.abs().sum()
            loss = (u1 - recT1).abs().pow(2).sum() + 1.0 * l1_reg

            if rn % 20 == 0:
                # show one reconstructed frame (frame 20) composed from the basis images
                plt.figure(rn)
                dd = (v2[0, 0, :, 20].unsqueeze(1).unsqueeze(1) * u1.squeeze(1)).sum(dim=0).detach().cpu().numpy()
                plt.imshow(np.abs(dd[0, 100:400, 100:400] + dd[1, 100:400, 100:400] * 1j), cmap='gray')
                plt.show()
                plt.pause(1)

            # PSNR of frames 10:50 against the reference reconstruction recT
            dd = (v2[0, 0, :, 10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu() * u1[:, :, :, 100:400, 100:400].detach().cpu()).sum(dim=0)
            org1 = (v2[0, 0, :, 10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu() * recT[:, :, :, 100:400, 100:400].detach().cpu()).sum(dim=0)
            psnr = torch.mean(myPSNR(org1, dd))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.detach())
            print(rn, loss.item(), psnr.item())
            pp = np.append(pp, psnr.item())
            
        sf.toc()
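        # Optional sanity check (not in the original): plot the PSNR values
        # accumulated in pp to verify that the pretraining converges.
        plt.figure()
        plt.plot(pp)
        plt.xlabel('iteration')
        plt.ylabel('PSNR (dB)')
        plt.show()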
        
        #%%
        # latent for the temporal generator: a constant 0.01 in both channels,
        # seeded with two rows of the temporal basis V
        z1 = 0.01 * torch.ones((1, 1, 2, nf1)).to(gpu)
        z1[0, 0, 1, :] = torch.tensor(V[22, 0:nf1])
        z1[0, 0, 0, :] = torch.tensor(V[27, 0:nf1])

        z1 = z1.requires_grad_(True)  # the latent is optimized jointly with GV
        V1 = torch.tensor(V[:, 0:nf1]).cuda()
        GV = SmallModel(16).to(gpu)
        optimizer = torch.optim.Adam([{'params': GV.parameters(), 'lr': 1e-4},
                                      {'params': z1, 'lr': 1e-6}])
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-7)
        pp=np.array([])
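        # Objective for this cell (a restatement of the loss below): fit GV(z1)
        # to the temporal basis V1 while keeping the latent z1 smooth in time,
        #   loss = ||GV(z1) - V1||_2^2 + 10 * Smoothness(z1),
        # where Smoothness is the squared first-difference penalty defined in a
        # later cell; run that cell first so the name is in scope.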
        for ep1 in range(25000):
            # L1 penalty on the generator weights (its weight is set to zero below)
            l1_reg = 0.
            for param in GV.parameters():
                l1_reg += param.abs().sum()
            v1 = GV(z1)
            loss = abs(v1[0, 0] - V1).pow(2).sum() + 0.000 * l1_reg + 10.0 * Smoothness(z1[0, 0, :, :])
            if ep1 % 10 == 0:
                print(ep1, loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.detach())

        
        del kdaT, smap, adjnufft_ob  # free memory before the joint fit
        #%%
        # NOTE: run this cell first; the fitting cells above and below call Smoothness.
        def Smoothness(zvec):
            # squared l2 norm of the temporal first difference, summed over rows
            zsmoothness = zvec[:, 1:] - zvec[:, :-1]
            zsmoothness = torch.sum(zsmoothness * zsmoothness, dim=1).squeeze()
            zsmoothness = torch.sum(zsmoothness, dim=0)
            return zsmoothness
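        # Restating the code above: for z with shape (B, T),
        #   Smoothness(z) = sum_b sum_t (z[b, t+1] - z[b, t])^2.
        # For example, z = [[0, 1, 3]] gives (1-0)^2 + (3-1)^2 = 5.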
        
        # latents for the joint fit: the low-rank reconstruction for G and the
        # temporal basis V for GV
        z = recT1
        z = torch.reshape(z, (1, 2 * nbasis, nx, nx))
        z1 = torch.reshape(torch.tensor(V[:, 0:nf1]), (1, 1, nbasis, nf1))
        z1 = z1.requires_grad_(True)  # z1 is in the optimizer below, so it needs a gradient
        #%%    
        torch.cuda.empty_cache()
        optimizer = torch.optim.Adam([{'params': G.parameters(), 'lr': 1e-5},
                                      {'params': GV.parameters(), 'lr': 1e-4},
                                      {'params': z1, 'lr': 1e-6}])
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-5)
        
        smapT = smapT.cpu()
        ktrajT = ktrajT.to(cpu)
        dcf = torch.reshape(dcf, (nbasis, 3, 2, NF, nintl * nx))
        # reference display: frame 26 composed from the stored basis images
        plt.figure(2)
        dd = (np.reshape(V[:, 26], (nbasis, 1, 1, 1)) * recT.detach().cpu().numpy()).sum(axis=0)
        plt.imshow(np.abs(dd[0, 100:400, 100:400] + dd[1, 100:400, 100:400] * 1j), cmap='gray')
        pp = np.array([])  # PSNR history over the joint fit
        plt.pause(1)
        plt.figure(1)
        sf.tic()
        for it in range(500):
            v1 = GV(z1.cuda()).unsqueeze(-1)
            u1 = G(z.cuda())
            u1 = torch.reshape(u1, (nbasis, 2, nx, nx))
            u1 = u1.unsqueeze(1)

            # L1 penalties on both generators' weights
            l1_regU = 0.
            for param in G.parameters():
                l1_regU += param.abs().sum()

            l1_regV = 0.
            for param in GV.parameters():
                l1_regV += param.abs().sum()

            # density-compensated k-space data consistency, plus the weight
            # penalties and the temporal-smoothness penalty on the latent z1
            err = abs(AUV1(u1, v1) - kdataT1[:, :, 0:nf1].cuda()).pow(2)
            loss = (dcf[0, :, :, 0:nf1] * err).sum() + 0.000005 * l1_regU + 0.00001 * l1_regV + 10.0 * Smoothness(z1[0, 0, :, :])
            if it % 10 == 0:
                # show one reconstructed frame (frame 20) from the current factors
                plt.figure(it)
                dd = (v1[0, 0, :, 20].unsqueeze(1).unsqueeze(1) * u1.squeeze(1)).sum(dim=0).detach().cpu().numpy()
                plt.imshow(np.abs(dd[0, 100:400, 100:400] + dd[1, 100:400, 100:400] * 1j), cmap='gray')
                plt.show()
                plt.pause(1)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.detach())

            # PSNR of frames 10:50 against the reference reconstruction recT
            dd = (v2[0, 0, :, 10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu() * u1[:, :, :, 100:400, 100:400].detach().cpu()).sum(dim=0)
            org1 = (v2[0, 0, :, 10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu() * recT[:, :, :, 100:400, 100:400].detach().cpu()).sum(dim=0)
            psnr = torch.mean(myPSNR(org1, dd))
            print(it, loss.item(), psnr.item())
            pp = np.append(pp, psnr.item())
        sf.toc()
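        # Optional (a sketch, not in the original): compose the full dynamic
        # series from the learned factors, one frame per temporal sample,
        # mirroring the per-frame display inside the loop above.
        with torch.no_grad():
            frames = []
            for f in range(nf1):
                fr = (v1[0, 0, :, f].unsqueeze(1).unsqueeze(1) * u1.squeeze(1)).sum(dim=0).cpu().numpy()
                frames.append(np.abs(fr[0] + 1j * fr[1]))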