                     nstep=1000,
                     step_size=3.,
                     nelec=wf.nelec,
                     ndim=wf.ndim,
                     domain={'min': -5, 'max': 5})

# optimizer
opt = optim.Adam(wf.parameters(), lr=0.01)

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)

pos, obs_dict = None, None

# set up the plotter
plt.ion()
plot2D = plotter2d(wf, domain, [25, 25], sol=ho2d_sol, kinetic=False)

# train the wavefunction
pos, obs_dict = net.train(100,
                          batchsize=250,
                          pos=pos,
                          obs_dict=obs_dict,
                          resample=100,
                          ntherm=-1,
                          loss='variance',
                          plot=plot2D)

plot_results(net, obs_dict, domain, [25, 25], ho2d_sol, e0=1.)
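# For reference, a minimal sketch of the analytic solution the plotter
# compares against. This is NOT necessarily the repo's ho2d_sol -- the
# signature (batched positions, last axis = (x, y)) is an assumption --
# but the physics is standard: the 2D harmonic-oscillator ground state
# (m = omega = hbar = 1) is psi0(x, y) = exp(-(x^2 + y^2)/2) / sqrt(pi)
# with energy E0 = 1, which is why plot_results receives e0=1.
import numpy as np

def ho2d_sol_sketch(pos):
    return np.exp(-0.5 * np.sum(pos**2, axis=-1)) / np.sqrt(np.pi)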
class TestRbfNetworkHarmonicOscillator1D(unittest.TestCase):

    def setUp(self):

        # wavefunction
        self.wf = RBF_HO1D(ndim=1, nelec=1, ncenter=5)

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=250,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={'min': -5, 'max': 5})

        # Hamiltonian Monte Carlo sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=250,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

    def test_single_point_metropolis_hasting_sampling(self):

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # optimize the weights of the FC layer,
        # do not optimize the positions of the centers
        self.net.wf.fc.weight.requires_grad = True
        self.net.wf.rbf.centers.requires_grad = False

        # randomize the weights
        nn.init.uniform_(self.wf.fc.weight, 0, 1)

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [0.5, 0], atol=1E-3)
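# The assertions above exploit the zero-variance property: for an exact
# eigenstate the local energy is constant, so its variance vanishes and
# the mean energy equals the eigenvalue (E0 = 0.5 for the 1D harmonic
# oscillator with m = omega = hbar = 1). To run this test case standalone,
# the usual unittest entry point can be appended:
if __name__ == '__main__':
    unittest.main()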
class TestHydrogen(unittest.TestCase):

    def setUp(self):

        # wavefunction
        self.wf = RBF_H(centers=torch.tensor([[0., 0., 0.]]),
                        sigma=torch.tensor([1.]))

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={'min': -5, 'max': 5})

        # Hamiltonian Monte Carlo sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.5

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # do not optimize the weights of the FC layer,
        # optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # modify the sigma
        self.net.wf.rbf.sigma.data[0] = 1.5

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-3)
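# For context: the hydrogen assertions target the exact atomic-units
# ground state, psi(r) = exp(-r) up to normalization, with E0 = -0.5
# hartree. A sketch of that reference (illustrative only, not part of
# the test suite; pos is assumed to be an (nwalkers, 3) array):
import numpy as np

def hydrogen_exact(pos):
    r = np.linalg.norm(pos, axis=-1)
    return np.exp(-r)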
e = net.wf.energy(pos)
s = net.wf.variance(pos)

print('Energy :', e)
print('Variance :', s)
sys.exit()

if 1:

    plot1D = plotter1d(wf, domain, ncenter, sol=h2plus_sol)

    # do not optimize the weights of fc
    net.wf.fc.weight.requires_grad = False

    # optimize the position of the centers
    # do not optimize the std of the gaussian
    net.wf.rbf.sigma.requires_grad = False
    net.wf.rbf.centers.requires_grad = True

    # train
    pos, obs_dict = net.train(250,
                              batchsize=500,
                              pos=None,
                              obs_dict=obs_dict,
                              resample=200,
                              ntherm=-1,
                              loss='energy',
                              plot=plot1D)

    plot_results(net, obs_dict, domain, ncenter)
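# The requires_grad toggles above implement staged optimization: freeze
# one parameter group, train another. A generic PyTorch sketch of the
# same pattern (attribute names follow the wavefunction used above):
for p in net.wf.parameters():
    p.requires_grad = False              # freeze everything ...
net.wf.rbf.centers.requires_grad = True  # ... then release one group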
          'zmin': -boundary, 'zmax': boundary}
ncenter = [11, 11, 11]

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
obs_dict = {'local_energy': [], 'atomic_distance': [], 'get_sigma': []}

if 1:
    pos = Variable(net.sample())
    pos.requires_grad = True
    e = net.wf.energy(pos)
    s = net.wf.variance(pos)

    print('Energy :', e)
    print('Variance :', s)

if 0:
    net.wf.layer_mo.weight.data = torch.eye(net.wf.nao).double()
    for param in net.wf.layer_ci.parameters():
        param.requires_grad = False

    pos = net.sample(ntherm=0)
    pos = pos.reshape(100, 100, 6)
    var_ = net.observalbe(net.wf.variance, pos)
    plt.plot(var_)
    plt.show()

    net.train(50, pos=pos, ntherm=-1)
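# Why pos.requires_grad = True before wf.energy(pos): the local energy
# needs the Laplacian of the wavefunction, obtained by differentiating
# through the network. A minimal, self-contained sketch of that
# mechanism (illustrative -- not the library's internal API):
import torch

def laplacian(psi, pos):
    # psi: (nwalkers,) wavefunction values at pos: (nwalkers, ndim)
    grad = torch.autograd.grad(psi, pos, torch.ones_like(psi),
                               create_graph=True)[0]
    lap = torch.zeros_like(psi)
    for i in range(pos.shape[1]):
        lap = lap + torch.autograd.grad(grad[:, i], pos,
                                        torch.ones_like(grad[:, i]),
                                        create_graph=True)[0][:, i]
    return lap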
pos = None
obs_dict = None

plt.ion()
fig = plt.figure()

for i in range(1):

    net.wf.fc.weight.requires_grad = True
    net.wf.rbf.centers.requires_grad = False

    pos, obs_dict = net.train(250,
                              batchsize=250,
                              pos=pos,
                              obs_dict=obs_dict,
                              resample=100,
                              ntherm=-1,
                              loss='variance',
                              sol=morse_sol,
                              fig=fig)

    # net.wf.fc.weight.requires_grad = False
    # net.wf.rbf.centers.requires_grad = True
    # pos, obs_dict = net.train(50,
    #                           batchsize=250,
    #                           pos=pos,
    #                           obs_dict=obs_dict,
    #                           resample=100,
    #                           ntherm=-1,
    #                           loss='variance',
class TestH2plus(unittest.TestCase):

    def setUp(self):

        # optimal parameters
        self.opt_r = 0.97      # the two H nuclei sit at z = -0.97 and z = +0.97
        self.opt_sigma = 1.20

        # wavefunction
        centers = torch.tensor([[0., 0., -self.opt_r],
                                [0., 0., self.opt_r]])
        sigma = torch.tensor([self.opt_sigma, self.opt_sigma])
        self.wf = RBF_H2p(centers=centers, sigma=sigma)

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=0.5,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={'min': -5, 'max': 5})

        # Hamiltonian Monte Carlo sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=200,
                                       step_size=0.1,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.001)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.597

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy :', e)
        print('Variance :', v)

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-1)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy :', e)
        print('Variance :', v)

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-1)

    def test_energy_curve(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # fix the sigma of the AOs
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s

        X = np.linspace(0.1, 2, 25)
        emin = 1E3
        for x in X:

            # move the atoms
            self.net.wf.rbf.centers.data[0, 2] = -x
            self.net.wf.rbf.centers.data[1, 2] = x

            pos = Variable(self.net.sample())
            pos.requires_grad = True
            e = self.net.wf.energy(pos)

            if e < emin:
                emin = e

        assert emin < -0.55

    def test_sigma_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms to the optimal separation
        x = 0.97
        self.net.wf.rbf.centers.data[0, 2] = -x
        self.net.wf.rbf.centers.data[1, 2] = x

        # set the initial sigma of the AOs
        s = 1.
        self.net.wf.rbf.sigma.data[:] = s

        # do not optimize the weights of the FC layer
        # or the positions of the centers,
        # optimize the sigma of the AOs
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # define the observables we want to record
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}

        # train
        pos, obs_dict = self.net.train(100,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # asserting against the exact ground state energy might be too strict
        assert e < -0.5
        assert v < 0.1

    def test_geo_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms away from the optimal separation
        x = 0.5
        self.net.wf.rbf.centers.data[0, 2] = -x
        self.net.wf.rbf.centers.data[1, 2] = x

        # fix the sigma of the AOs
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s

        # do not optimize the weights of the FC layer
        # or the sigma of the AOs,
        # optimize the positions of the centers
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = True
        self.net.wf.rbf.sigma.requires_grad = False

        # define the observables we want to record
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}

        # train
        pos, obs_dict = self.net.train(200,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='energy',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # asserting against the exact ground state energy might be too strict
        assert e < -0.5
        assert v < 0.1
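# After training, obs_dict holds the recorded observables. Assuming one
# entry per epoch in obs_dict['local_energy'] (an assumption about the
# recording convention), convergence toward E0 = -0.597 hartree can be
# inspected right after net.train returns, e.g.:
import numpy as np
import matplotlib.pyplot as plt

el = [np.mean(np.asarray(e)) for e in obs_dict['local_energy']]
plt.plot(el, label='mean local energy')
plt.axhline(-0.597, linestyle='--', label='H2+ ground state')
plt.xlabel('epoch')
plt.ylabel('energy (hartree)')
plt.legend()
plt.show()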