#    nch=csm.shape[1]
#    nbas=x.shape[0]
#    tmp1=torch.zeros((nch,nbas,2,NF*nx*nintl)).cuda()
#    for i in range(nch):
#        tmp=sf.complex_mult(x,csm[:,i].repeat(nbas,1,1,1,1),dim=2)
#        tmp=torch.reshape(tmp,(nbas,1,2,nx,nx))
#        #xx=ktrajT[0].unsqueeze(0)
#        tmp1[i]=nufft_ob(tmp,traj.repeat(nbas,1,1).cuda()).squeeze(1)
#    tmp1=tmp1.permute(1,0,2,3)
#    return tmp1
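# (For reference: the commented block above appears to be the tail of a
# coil-by-coil NUFFT forward operator -- each basis image is multiplied by
# the i-th coil sensitivity map and pushed through nufft_ob along the
# trajectory, yielding k-space data of shape (nbas, nch, 2, NF*nx*nintl).)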

#%%
#G = UnetClass().to(gpu)
G = SmallModel1().to(gpu)

GV = SmallModel().to(gpu)
#G.load_state_dict(torch.load('wts-10U2.pt'))
#GV.load_state_dict(torch.load('wts-10V2.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{'params': G.parameters(), 'lr': 1e-3}])

#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-3},{'params':GV.parameters(),'lr':1e-3}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6,
                              verbose=True,
                              min_lr=5e-5)
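
#%% A minimal sketch of the optimize-and-schedule pattern used throughout
# these examples (`loss_fn` and `batch` are hypothetical placeholders; only
# the AdamW + ReduceLROnPlateau wiring is taken from the snippets above):
#   for ep in range(num_epochs):
#       loss = loss_fn(G(batch))        # forward pass through the generator
#       optimizer.zero_grad()
#       loss.backward()                 # backprop
#       optimizer.step()                # AdamW update of G's parameters
#       scheduler.step(loss.detach())   # plateau detection drives the LR decay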
Example #2
wtsFname1='wts-2U'+str(60+1)+'.pt'

G=UnetClass().to(gpu)
G.load_state_dict(torch.load(wtsFname1))

#atbT=atbT*W
z=atbT.permute(3,0,1,2)
z=torch.reshape(z,(1,2*nbasis,512,512)).to(gpu)
#z = torch.randn((1,60,512,512),device=gpu, dtype=dtype)
z = Variable(z,requires_grad=True)


wtsFname2='wts-3V'+str(62+1)+'.pt'
#wtsFname2='wts-2V'+str(60+1)+'.pt'

GV=SmallModel().to(gpu)
GV.load_state_dict(torch.load(wtsFname2))

#z1 = torch.randn((1,30,900),device=gpu, dtype=dtype)
z1=torch.reshape(VT,(1,1,nbasis,NF))
z1 = Variable(z1,requires_grad=False)
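# z (optimized: requires_grad=True above) holds the spatial basis images as
# 2*nbasis real/imaginary channels; z1 holds the fixed temporal profiles VT
# (requires_grad=False), so only the spatial factor is updated here.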

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer1=torch.optim.AdamW([{'params':z,'lr':1e-3},{'params':G.parameters(),'lr':1e-3}])
scheduler = ReduceLROnPlateau(optimizer1, mode='min', factor=0.7, patience=6, verbose=True, min_lr=5e-5)

#z1=z
sf.tic()
for ep1 in range(5):
    for bat in range(1):
        u1=G(z)
Example #3
NF = 900  # number of frames (alternative value tried: 100)
nx = 512  # image matrix size
#N=400
N1 = 300
N2 = 50
nch = 4  # number of receiver coils
thres = 0.05
nbasis = 30  # number of temporal basis functions
lam = 0.01  # regularization weight
st = 0
batch_sz = 100
TF = 900  # total number of frames
#%%
G = UnetClass().to(gpu)
GV = SmallModel().to(gpu)
#G.load_state_dict(torch.load('wts-4U96.pt'))
#GV.load_state_dict(torch.load('wts-4V96.pt'))
#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6,
                              verbose=True)
Example #4

#%%
#G=UnetClass().to(gpu)
G = SmallModel1().to(gpu)

GV = SmallModel().to(gpu)
G.load_state_dict(torch.load('wtsN-U3.pt'))
GV.load_state_dict(torch.load('wtsN-V3.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-4}])

optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}, {
    'params': z,
}])
Example #5
        for i in range(nch):
            tmp2 = sf.pt_fft2c(
                sf.pt_cpx_multipy(x, self.csmT[i].repeat(nbas, 1, 1, 1)))
            tmp2 = torch.reshape(tmp2, (nbas, self.NX))
            tmp2 = tmp2.repeat(self.NF, 1, 1) * mask
            tmp5[i] = tmp2.sum(axis=1)

        del tmp2, x
        return tmp5.cpu()
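# In words: for each coil i, the loop above multiplies the basis images x by
# the i-th coil map, takes a 2-D FFT, replicates the result across the NF
# frames, applies the sampling mask (which presumably also carries the
# temporal weights), and sums over the basis dimension.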


#%%
#G=UnetClass().to(gpu)
G = SmallModel1().to(gpu)

GV = SmallModel().to(gpu)
#G.load_state_dict(torch.load('wts-10U1.pt'))
#GV.load_state_dict(torch.load('wts-10V5.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7)
Example #6
#%%
#        sf.tic()
#z1=torch.normal(0,0.01,(1,1,2,nf1)).cuda()
z1 = 0.01 * torch.ones((1, 1, 2, nf1)).to(gpu)
#z1[0,0,1]=-0.01

z1[0, 0, 1, :] = torch.tensor(V[22, 0:nf1])
z1[0, 0, 0, :] = torch.tensor(V[27, 0:nf1])

#vv=V[30:32]
#z1=torch.reshape(torch.tensor(V[29:31,0:nf1]),(1,1,2,nf1)).to(gpu)
z1 = Variable(z1, requires_grad=True)
V1 = torch.tensor(V[:, 0:nf1]).cuda()
#V1 = V1 + torch.normal(0, 0.01, (V.shape[0], V.shape[1])).cuda()
#V1=torch.reshape(V1,(1,1,nbasis,nf1))
GV = SmallModel(16).to(gpu)
#GV=generatorV(2).to(gpu)
optimizer = torch.optim.Adam([{
    'params': GV.parameters(),
    'lr': 1e-4
}, {
    'params': z1,
    'lr': 1e-4
}])
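# Deep-image-prior style setup: the latent code z1 is optimized jointly with
# the generator weights GV -- both appear as parameter groups in the Adam
# call above.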
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6,
                              verbose=True,
                              min_lr=1e-7)
pp = np.array([])
Example #7
def myPSNR(org,recon):
    sqrError=abs(org-recon)**2
    N=torch.prod(torch.tensor(org.shape[-2:]))
    mse=sqrError.sum(dim=(-1,-2))
    mse=mse/N

    # peak value per image, over the trailing two dimensions
    maxval=abs(org).amax(dim=(-1,-2)) + 1e-15
    psnr=10*torch.log10(maxval**2/(mse+1e-15))

    return psnr
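# Quick check with hypothetical tensors -- PSNR is computed per image over
# the trailing two dimensions, so a batch returns one value per image:
#   org = torch.rand(4, 256, 256)
#   recon = org + 0.01*torch.randn(4, 256, 256)
#   myPSNR(org, recon)  # tensor of shape (4,)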
        
#%%
#G=UnetClass().to(gpu)
G=SmallModel1().to(gpu)
G.load_state_dict(torch.load('tempp_10000.pt'))
optimizer=torch.optim.Adam([{'params':G.parameters(),'lr':1e-4}])
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-5)
#z1=recT+torch.normal(0,0.02,(recT1.shape[0],recT1.shape[1],recT1.shape[2],recT1.shape[3],recT1.shape[4])).cuda()
z=recT1
z=torch.reshape(z,(1,2*nbasis,nx,nx)).to(gpu)
#z1=torch.cuda.FloatTensor(3,2*nbasis,nx,nx).fill_(0)
#noi=[0.0003,0.05,0.01]
#noi=[0.003,0.0005,0.01]
#noi=[0.05,0.0005,0.01]
#noi=[0.05,0.01,0.1]

#for it in range(3):
#    z1[it]=z+torch.normal(0,noi[it],(z.shape[0],z.shape[1],z.shape[2],z.shape[3])).cuda()
#z=z.repeat(3,1,1,1)
pp=np.array([])

sf.tic()
for rn in range(40):
    #z=atb#+torch.normal(0,noi[it],(atb.shape[0],atb.shape[1],atb.shape[2],atb.shape[3])).cuda()
    #z=torch.reshape(z,(1,2*nbasis,nx,nx))
    u1=G(z)#+z.cuda()
    u1=torch.reshape(u1,(nbasis,1,2,nx,nx))
    l1_reg=0
    for param in G.parameters():
        l1_reg += param.abs().sum()
    loss=((abs(u1-recT1))).pow(2).sum()+1.0*l1_reg
    if rn%20==0:
        #print(rn,loss.item())
        #torch.cuda.empty_cache()
        plt.figure(rn)
        dd=(v2[0,0,:,20].unsqueeze(1).unsqueeze(1)*u1.squeeze(1)).sum(dim=0).detach().cpu().numpy()
        plt.imshow(np.abs((dd[0,100:400,100:400]+dd[1,100:400,100:400]*1j)),cmap='gray')
        plt.show()
        plt.pause(1)

    dd=(v2[0,0,:,10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu()*u1[:,:,:,100:400,100:400].detach().cpu()).sum(dim=0).detach().cpu()
    org1=(v2[0,0,:,10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu()*recT[:,:,:,100:400,100:400].detach().cpu()).sum(dim=0)
    psnr=torch.mean(myPSNR(org1,dd))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step(loss.detach())
    print(rn,loss.item(),psnr.item())
    pp=np.append(pp,psnr)

sf.toc()
#%%
#sf.tic()
#z1=torch.normal(0,0.01,(1,1,2,nf1)).cuda()
z1=0.01*torch.ones((1,1,2,nf1)).to(gpu)
z1[0,0,1,:]=torch.tensor(V[22,0:nf1])
z1[0,0,0,:]=torch.tensor(V[27,0:nf1])

#vv=V[30:32]
#z1=torch.reshape(torch.tensor(V[29:31,0:nf1]),(1,1,2,nf1)).to(gpu)
z1 = Variable(z1,requires_grad=True)
V1=torch.tensor(V[:,0:nf1]).cuda()#+torch.normal(0,0.01,(V.shape[0],V.shape[1])).cuda()
#V1=torch.reshape(V1,(1,1,nbasis,nf1))
GV=SmallModel(16).to(gpu)
#GV=generatorV(2).to(gpu)
optimizer=torch.optim.Adam([{'params':GV.parameters(),'lr':1e-4},{'params':z1,'lr':1e-6}])
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-7)
pp=np.array([])
for ep1 in range(25000):
    for bat in range(1):
        #z1=torch.reshape(V1,(1,1,nbasis,nf1))
        l1_reg=0.
        for param in GV.parameters():
            l1_reg += param.abs().sum()
        v1=GV(z1)
        #v1=v1.permute(1,0)
        # Smoothness is defined in the next cell; run that cell first.
        loss=abs(v1[0,0]-V1).pow(2).sum()+0.000*l1_reg+10.0*Smoothness(z1[0,0,:,:])
        if ep1%10==0:
            print(ep1,loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step(loss.detach())
        #pp=np.append(pp,loss.item())

del kdaT,smap,adjnufft_ob
#%%
def Smoothness(zvec):
    # sum of squared first differences along the time axis (penalizes
    # rapid frame-to-frame variation of the latent)
    zsmoothness = zvec[:,1:]-zvec[:,:-1]
    zsmoothness = torch.sum(zsmoothness*zsmoothness,axis=1).squeeze()
    zsmoothness = torch.sum(zsmoothness,axis=0)
    return(zsmoothness)
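# Quick check with a hypothetical 2 x 4 latent:
#   zvec = torch.tensor([[0., 1., 1., 2.], [0., 0., 0., 0.]])
#   Smoothness(zvec)  # squared first differences: (1+0+1) + 0 = 2.0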
        
z=recT1#+torch.normal(0,0.03,(atb.shape[0],atb.shape[1],atb.shape[2],atb.shape[3])).cuda()#.permute(1,0,2,3)
z=torch.reshape(z,(1,2*nbasis,nx,nx))
#z1=torch.cuda.FloatTensor(1,1,nbasis,nf1).fill_(0)
#z1=torch.normal(0,1,(1,1,nbasis,nf1))
z1=torch.reshape(torch.tensor(V[:,0:nf1]),(1,1,nbasis,nf1))
#z1 = Variable(z1,requires_grad=True)
#%%
torch.cuda.empty_cache()
#from dn_modelU2 import SmallModel1
#G=SmallModel1().to(gpu)
#GV=SmallModel().to(gpu)
#G.load_state_dict(torch.load('tempp_199.pt'))
##G.load_state_dict(torch.load('wtsN-U3.pt'))
##GV.load_state_dict(torch.load('wtsN-V3.pt'))
#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-4},{'params':GV.parameters(),'lr':1e-4},{'params':z,'lr':1e-4}])
#v1=(z1.cuda()).unsqueeze(-1)
#v1 = Variable(v1,requires_grad=True)
optimizer=torch.optim.Adam([{'params':G.parameters(),'lr':1e-5},{'params':GV.parameters(),'lr':1e-4},{'params':z1,'lr':1e-6}])
#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-4},{'params':GV.parameters(),'lr':1e-4}])
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-5)

#optimizer=torch.optim.SGD([{'params':v1,'lr':1e-4}])#,{'params':z,'lr':5e-3,'momentum':0.9}])

smapT=smapT.cpu()
ktrajT=ktrajT.to(cpu)
dcf=torch.reshape(dcf,(nbasis,3,2,NF,nintl*nx))
plt.figure(2)
dd=(np.reshape(V[:,26],(nbasis,1,1,1))*recT.detach().cpu().numpy()).sum(axis=0)
plt.imshow(np.abs((dd[0,100:400,100:400]+dd[1,100:400,100:400]*1j)),cmap='gray')
pp=np.array([])
plt.pause(1)
plt.figure(1)
sf.tic()
for it in range(500):
    #for bat in range(1):
    v1=GV(z1.cuda()).unsqueeze(-1)
    u1=G(z.cuda())#+z.cuda()
    u1=torch.reshape(u1,(nbasis,2,nx,nx))
    u1=u1.unsqueeze(1)

    l1_regU=0.
    for param in G.parameters():
        l1_regU += param.abs().sum()

    l1_regV=0.
    for param in GV.parameters():
        l1_regV += param.abs().sum()

    err=abs(AUV1(u1,v1)-kdataT1[:,:,0:nf1].cuda()).pow(2)
    loss=((dcf[0,:,:,0:nf1])*(err)).sum()+0.000005*l1_regU+0.00001*l1_regV+10.0*Smoothness(z1[0,0,:,:])
    #err=abs(yy1.squeeze(1)-atb).pow(2)
    #loss=(err).sum()#+10*l1_reg#+0.1*(z.cpu()-u2.cpu()).pow(2).sum()+0.1*(z1[0,0].cpu()-v2.cpu()).pow(2).sum()
    if it%10==0:
        plt.figure(it)
        dd=(v1[0,0,:,20].unsqueeze(1).unsqueeze(1)*u1.squeeze(1)).sum(dim=0).detach().cpu().numpy()
        plt.imshow(np.abs((dd[0,100:400,100:400]+dd[1,100:400,100:400]*1j)),cmap='gray')
        plt.show()
        plt.pause(1)
    #if it%10==0:
    #    torch.save(u1,'tmpU'+str(it)+'.pt')
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step(loss.detach())

    dd=(v2[0,0,:,10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu()*u1[:,:,:,100:400,100:400].detach().cpu()).sum(dim=0).detach().cpu()
    org1=(v2[0,0,:,10:50].unsqueeze(-1).unsqueeze(-1).detach().cpu()*recT[:,:,:,100:400,100:400].detach().cpu()).sum(dim=0)
    psnr=torch.mean(myPSNR(org1,dd))
    print(it,loss.item(),psnr.item())
    pp=np.append(pp,psnr)
sf.toc()
Example #9
n_select = 30
nbasis = 30
#%% Generate a meaningful filename to save the trained models for testing
print('*************************************************')
start_time = time.time()
saveDir = 'PTmodels/'
cwd = os.getcwd()
directory = saveDir + datetime.now().strftime("%d%b_%I%M%S%P_") + \
    str(epochs) + 'ep_' + '27oct'

if not os.path.exists(directory):
    os.makedirs(directory)
sessFileName = directory + '/modelVb'

#%% creating the training model
unet = SmallModel()
unet = unet.to(gpu)
optimizer = torch.optim.Adam(unet.parameters(), lr=1e-3)


def lossFun(pred, org):
    loss = torch.mean(torch.abs(pred - org))
    return loss
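
# A minimal sketch of one training step with the pieces above (`inp` and
# `org` are hypothetical placeholder batches; the data loader is not part
# of this snippet):
#   pred = unet(inp)             # forward pass through SmallModel
#   loss = lossFun(pred, org)    # mean absolute error
#   optimizer.zero_grad(); loss.backward(); optimizer.step()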


#%% training code
#print ('training started on', datetime.now().strftime("%d-%b-%Y at %I:%M %P"))
#start_time=time.time()
torch.save(unet.state_dict(), directory + '/wts-0.pt')
#writer = SummaryWriter(log_dir=directory+'/')
#writer.add_graph(unet,torch.randn(1,2,512,512,30).to(gpu))
# (Fragment spliced in from a helper elsewhere in the source; the enclosing
# signature below is hypothetical, added only so the snippet parses.)
def applyVmask(Vv, msk, res, bsz):  # hypothetical name and argument order
    res = res.to(gpu)
    for k in range(bsz):
        tmp3 = msk[k].repeat(nbasis, 1)  #*tmp2
        # scale the rows of tmp3 by the k-th temporal weights
        res[k] = torch.diag(Vv[k]) @ tmp3.to(torch.float32)
    return res
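# Note: torch.diag(Vv[k]) @ tmp3 just scales the rows of tmp3 by Vv[k], so
# (assuming the same shapes) the whole loop collapses to one broadcast:
#   res = Vv.unsqueeze(-1) * msk.unsqueeze(1).to(torch.float32)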


#%% Loading trained networks parameters

wts1 = dir2 + 'wts-20.pt'
wts2 = dir3 + 'wts-500.pt'

gnu = SmallModel1().to(gpu)
gnu.load_state_dict(torch.load(wts1))

gnv = SmallModel().to(gpu)
gnv.load_state_dict(torch.load(wts2))

wtsFname1 = 'wts-10U' + str(2) + '.pt'
#wtsFname1='wts-2U'+str(60+1)+'.pt'

G = UnetClass().to(gpu)
G.load_state_dict(torch.load(wtsFname1))

#atbT=atbT*W
z = recT.permute(3, 0, 1, 2)
z = torch.reshape(z, (1, 2 * nbasis, 512, 512)).to(gpu)
#z = torch.randn((1,60,512,512),device=gpu, dtype=dtype)
z = Variable(z, requires_grad=False)
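# As in the earlier examples, the nbasis complex basis images in recT are
# packed into 2*nbasis real channels; here z is held fixed
# (requires_grad=False), so only the network weights would be updated.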

wtsFname2 = 'wts-10V' + str(2) + '.pt'

# (Method fragment; the truncated allocation is reconstructed below -- the
# leading (nch, NF) dimensions are assumed from how tmp5 is filled.)
        tmp5 = torch.cuda.FloatTensor(nch, self.NF,
                                      self.nx * self.nx * 2).fill_(0)
        x = torch.reshape(x, (nbas, self.nx, self.nx, 2))
        for i in range(nch):
            tmp2 = sf.pt_fft2c(
                sf.pt_cpx_multipy(x, self.csmT[i].repeat(nbas, 1, 1, 1)))
            tmp2 = torch.reshape(tmp2, (nbas, self.NX))
            tmp2 = tmp2.repeat(self.NF, 1, 1) * mask
            tmp5[i] = tmp2.sum(axis=1)

        del tmp2, x
        return tmp5.cpu()


#%%
G = UnetClass().to(gpu)
GV = SmallModel().to(gpu)
G.load_state_dict(torch.load('wts-5aU22.pt'))
GV.load_state_dict(torch.load('wts-5aV22.pt'))
#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
optimizer = torch.optim.AdamW([{
    'params': G.parameters(),
    'lr': 1e-4
}, {
    'params': GV.parameters(),
    'lr': 1e-4
}])
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.7,
                              patience=6,
                              verbose=True)
    
#%%
#G=UnetClass().to(gpu)
G=SmallModel1().to(gpu)

GV=SmallModel().to(gpu)
#G.load_state_dict(torch.load('wtsUB-131.pt'))
#GV.load_state_dict(torch.load('wtsVB-400.pt'))
#GV.load_state_dict(torch.load('./PTmodels/27Oct_112451am_500ep_27oct/wts-500.pt'))

#optimizer=torch.optim.SGD([{'params':G.parameters(),'lr':5e-3,'momentum':0.9}])
#optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-4}])

optimizer=torch.optim.AdamW([{'params':G.parameters(),'lr':1e-4},{'params':GV.parameters(),'lr':1e-4}])
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.7, patience=6, verbose=True, min_lr=1e-5)

trnFiles=os.listdir('/Shared/lss_jcb/abdul/prashant_cardiac_data/Data/d2/')
sz=len(trnFiles)
#data2=np.zeros((sz,1,n_select,N,N)).astype(np.complex64)

rndm=random.sample(range(sz),sz)