Example #1
recT=recT.to(gpu)
#%%
def maskV(msk, Vv):
    # For each batch entry k, replicate the flattened sampling mask over the
    # basis dimension and scale row j by Vv[k, j] (via the diagonal matrix),
    # yielding basis-weighted sampling patterns of shape (bsz, nbasis, nx*nx).
    bsz = msk.size(0)
    res = torch.zeros((bsz, nbasis, nx * nx))
    res = res.to(gpu)
    for k in range(bsz):
        tmp3 = msk[k].repeat(nbasis, 1)  # *tmp2
        res[k] = torch.diag(Vv[k]) @ tmp3.to(torch.float32)
    return res
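
#%%
# A minimal sketch of calling maskV, assuming nbasis, nx and gpu are defined as
# in the surrounding script. The inputs are random stand-ins, not data from this
# pipeline: msk is treated as a batch of flattened binary sampling masks and Vv
# as the matching rows of the temporal basis.
_msk = torch.randint(0, 2, (4, nx * nx), device=gpu)
_Vv = torch.randn(4, nbasis, device=gpu)
_res = maskV(_msk, _Vv)   # -> (4, nbasis, nx*nx); row j of _res[k] is Vv[k, j] * msk[k]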

#%%
wtsFname1='wts-3U'+str(62+1)+'.pt'
#wtsFname1='wts-2U'+str(60+1)+'.pt'

G=UnetClass().to(gpu)
G.load_state_dict(torch.load(wtsFname1))

#atbT=atbT*W
z = atbT.permute(3, 0, 1, 2)
z = torch.reshape(z, (1, 2 * nbasis, 512, 512)).to(gpu)
#z = torch.randn((1,60,512,512),device=gpu, dtype=dtype)
z = Variable(z, requires_grad=True)  # Variable is legacy; z.requires_grad_(True) is the modern equivalent


wtsFname2='wts-3V'+str(62+1)+'.pt'
#wtsFname2='wts-2V'+str(60+1)+'.pt'

GV=SmallModel().to(gpu)
GV.load_state_dict(torch.load(wtsFname2))
#def for_nufft(nuff,x,csm,traj):
#def for_nufft(nufft_ob,x,csm,traj):
#    nch=csm.shape[1]
#    nbas=x.shape[0]
#    tmp1=torch.zeros((nch,nbas,2,NF*nx*nintl)).cuda()
#    for i in range(nch):
#        tmp=sf.complex_mult(x,csm[:,i].repeat(nbas,1,1,1,1),dim=2)
#        tmp=torch.reshape(tmp,(nbas,1,2,nx,nx))
#        #xx=ktrajT[0].unsqueeze(0)
#        tmp1[i]=nufft_ob(tmp,traj.repeat(nbas,1,1).cuda()).squeeze(1)
#    tmp1=tmp1.permute(1,0,2,3)
#    return tmp1

#%%
G = UnetClass().to(gpu)
G = SmallModel1().to(gpu)

GV = SmallModel().to(gpu)
#G.load_state_dict(torch.load('wts-10U2.pt'))
#GV.load_state_dict(torch.load('wts-10V2.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{'params': G.parameters(), 'lr': 1e-3}])

#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-3},{'params':GV.parameters(),'lr':1e-3}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6)
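
#%%
# A minimal sketch of how the AdamW + ReduceLROnPlateau pair above is typically
# stepped. The actual objective in this pipeline (data consistency through the
# forward model) is not part of this snippet, so `target` and the MSE loss below
# are hypothetical placeholders that only illustrate the optimizer/scheduler calls.
for ep in range(100):
    optimizer.zero_grad()
    uT = G(z)                                        # generator output driven by the fixed input z
    loss = torch.nn.functional.mse_loss(uT, target)  # placeholder loss; `target` is hypothetical
    loss.backward()
    optimizer.step()
    scheduler.step(loss.item())                      # ReduceLROnPlateau monitors this value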
Example #3
        # Per-coil forward step: weight the basis images by coil sensitivity i,
        # take the 2-D FFT, flatten, replicate over the NF frames, apply the
        # frame- and basis-dependent weights in `mask`, and sum over the basis
        # dimension to obtain that coil's k-space for every frame.
        tmp5 = torch.cuda.FloatTensor(self.nch, self.NF,
                                      self.nx * self.nx * 2).fill_(0)
        x = torch.reshape(x, (nbas, self.nx, self.nx, 2))
        for i in range(nch):
            tmp2 = sf.pt_fft2c(
                sf.pt_cpx_multipy(x, self.csmT[i].repeat(nbas, 1, 1, 1)))
            tmp2 = torch.reshape(tmp2, (nbas, self.NX))
            tmp2 = tmp2.repeat(self.NF, 1, 1) * mask   # frame- and basis-dependent weights
            tmp5[i] = tmp2.sum(axis=1)                 # sum over the basis dimension

        del tmp2, x
        return tmp5.cpu()


#%%
G = UnetClass().to(gpu)
GV = SmallModel().to(gpu)
G.load_state_dict(torch.load('wts-5U6.pt'))
GV.load_state_dict(torch.load('wts-5V6.pt'))
#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6)
#%% Loading trained network parameters

wts1 = dir2 + 'wts-20.pt'
wts2 = dir3 + 'wts-500.pt'

gnu = SmallModel1().to(gpu)
gnu.load_state_dict(torch.load(wts1))

gnv = SmallModel().to(gpu)
gnv.load_state_dict(torch.load(wts2))

wtsFname1 = 'wts-10U' + str(2) + '.pt'
#wtsFname1='wts-2U'+str(60+1)+'.pt'

G = UnetClass().to(gpu)
G.load_state_dict(torch.load(wtsFname1))

#atbT=atbT*W
z = recT.permute(3, 0, 1, 2)
z = torch.reshape(z, (1, 2 * nbasis, 512, 512)).to(gpu)
#z = torch.randn((1,60,512,512),device=gpu, dtype=dtype)
z = Variable(z, requires_grad=False)

wtsFname2 = 'wts-10V' + str(2) + '.pt'
#wtsFname2='wts-2V'+str(60+1)+'.pt'

GV = SmallModel().to(gpu)
GV.load_state_dict(torch.load(wtsFname2))

#z1 = torch.randn((1,30,900),device=gpu, dtype=dtype)
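
#%%
# A minimal inference sketch, assuming UnetClass returns a tensor with the same
# (1, 2*nbasis, 512, 512) layout as its input z. The reshape/permute below simply
# inverts the transform that built z from recT, recovering per-basis images of
# shape (nbasis, 512, 512, 2); this layout is an assumption, not shown above.
with torch.no_grad():
    uT = G(z)
    uT = torch.reshape(uT, (2, nbasis, 512, 512)).permute(1, 2, 3, 0)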
Example #5
        # Combine the flattened spatial basis images with the temporal basis Vv
        # ((nf, nbas) @ (nbas, nx*nx*2)) to form the nf frame images, then per
        # coil: weight by the sensitivity map, 2-D FFT, and apply the sampling
        # mask (repeated over the real/imag channel).
        tmp5 = torch.FloatTensor(self.nch, nf, nx, nx, 2).fill_(0)
        #tmp4=torch.FloatTensor(nf,nbas,nx*nx*2).fill_(0)
        x = torch.reshape(x, (nbas, nx * nx * 2))
        uv = Vv @ x
        uv = torch.reshape(uv, (nf, nx, nx, 2))
        for i in range(nch):
            tmp2 = sf.pt_fft2c(
                sf.pt_cpx_multipy(uv, csmT[i].repeat(nf, 1, 1, 1)))
            tmp5[i] = tmp2 * mask.repeat(1, 1, 1, 2)

        del tmp2, x
        return tmp5.cpu()


#%%
G = UnetClass().to(gpu)
GV = SmallModel().to(gpu)
#G.load_state_dict(torch.load('wts-10U2.pt'))
#GV.load_state_dict(torch.load('wts-10V2.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-3
}, {
    'params': GV.parameters(),
    'lr': 1e-3
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6)


#%%
G = UnetClass().to(gpu)
GV = SmallModel().to(gpu)
G.load_state_dict(torch.load('wts-5aU22.pt'))
GV.load_state_dict(torch.load('wts-5aV22.pt'))
#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6)
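
#%%
# A sketch of writing the fine-tuned weights back out with state_dict, mirroring
# how the checkpoints above are loaded; the file names here are placeholders.
torch.save(G.state_dict(), 'wts-G-finetuned.pt')
torch.save(GV.state_dict(), 'wts-GV-finetuned.pt')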