Example #1
    def setUp(self):

        # wavefunction
        self.wf = RBF_HO1D(ndim=1, nelec=1, ncenter=5)

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=250,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=250,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)
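Example #1 only builds the wavefunction, the two samplers, the optimizer and the DeepQMC network; Example #4 below shows the full test class around this setUp. As a quick orientation, a single-point evaluation with either sampler reduces to the following sketch, which reuses only the sample/energy/variance calls that appear in the later examples:

        # single point with the Metropolis-Hastings sampler
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # switch to the Hamiltonian Monte Carlo sampler and resample
        self.net.sampler = self.hmc_sampler
        pos = self.net.sample(ntherm=-1, with_tqdm=False)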
Example #2
    def setUp(self):

        # optimal parameters
        self.opt_r = 0.97  # the two H atoms sit at z = +0.97 and z = -0.97
        self.opt_sigma = 1.20

        # wavefunction
        centers = torch.tensor([[0., 0., -self.opt_r], [0., 0., self.opt_r]])
        sigma = torch.tensor([self.opt_sigma, self.opt_sigma])

        self.wf = RBF_H2p(centers=centers, sigma=sigma)

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=0.5,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={'min': -5, 'max': 5})

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=200,
                                       step_size=0.1,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.001)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.597
Example #3
    def setUp(self):

        # wavefunction
        self.wf = RBF_H(centers=torch.tensor([[0., 0., 0.]]),
                        sigma=torch.tensor([1.]))

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.5
Example #4
class TestRbfNetworkHarmonicOscillator1D(unittest.TestCase):
    def setUp(self):

        # wavefunction
        self.wf = RBF_HO1D(ndim=1, nelec=1, ncenter=5)

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=250,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=250,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

    def test_single_point_metropolis_hasting_sampling(self):

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        #switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_optimization(self):

        #switch to MH sampling
        self.net.sampler = self.mh_sampler

        # optimize the weight of the FC layer
        # do not optimize the pos of the centers
        self.net.wf.fc.weight.requires_grad = True
        self.net.wf.rbf.centers.requires_grad = False

        # randomize the weights
        nn.init.uniform_(self.wf.fc.weight, 0, 1)

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-3)
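A note on the asserted values: 0.5 Hartree matches the analytic ground-state energy of a 1D harmonic oscillator, E0 = ω/2 in atomic units, which suggests ω = 1 in this potential. For the exact eigenstate the local energy is constant, so its variance vanishes, which is why the single-point tests can afford the tight atol=1E-6 while the optimization test relaxes it to 1E-3.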
Example #5
#sampler
sampler = METROPOLIS(nwalkers=250,
                     nstep=1000,
                     step_size=3.,
                     nelec=wf.nelec,
                     ndim=wf.ndim,
                     domain={
                         'min': -5,
                         'max': 5
                     })

# optimizer
opt = optim.Adam(wf.parameters(), lr=0.01)

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
pos, obs_dict = None, None

# set up the plotter
plt.ion()
plot2D = plotter2d(wf, domain, [25, 25], sol=ho2d_sol, kinetic=False)

# train the wavefunction
pos, obs_dict = net.train(100,
                          batchsize=250,
                          pos=pos,
                          obs_dict=obs_dict,
                          resample=100,
                          ntherm=-1,
                          loss='variance',
                          plot=plot2D)
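The script above stops right after training. As a sketch of how the result could be checked afterwards, reusing only calls that already appear in the test examples (sample, energy, variance), one could append:

# resample with the trained wavefunction and print the observables
pos = net.sample(ntherm=-1, with_tqdm=False)
e = wf.energy(pos).detach().numpy()
v = wf.variance(pos).detach().numpy()
print('Energy   :', e)
print('Variance :', v)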
Example #6
class TestHydrogen(unittest.TestCase):
    def setUp(self):

        # wavefunction
        self.wf = RBF_H(centers=torch.tensor([[0., 0., 0.]]),
                        sigma=torch.tensor([1.]))

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.5

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        #switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_optimization(self):

        #switch to MH sampling
        self.net.sampler = self.mh_sampler

        # do not optimize the weights of the FC layer
        # optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # modify the sigma
        self.net.wf.rbf.sigma.data[0] = 1.5

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-3)
Example #7
    wf = H2(mol)

    #sampler
    sampler = METROPOLIS(nwalkers=1000,
                         nstep=1000,
                         step_size=0.5,
                         nelec=wf.nelec,
                         move='one',
                         ndim=wf.ndim,
                         domain={
                             'min': -5,
                             'max': 5
                         })

    # optimizer
    opt = optim.Adam(wf.parameters(), lr=0.005)  # <- good for geo opt
    #opt = optim.Adam(wf.parameters(),lr=0.01) # <- good for coeff opt
    #opt = optim.SGD(wf.parameters(),lr=0.1)

    # network
    net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)

    # single point
    #single_point(net,x=0.5,alpha=0.01)

    # energy curve wrt position
    pos_curve(net)

    # geo opt
    #geo_opt(net,x=0.4)
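The single_point, pos_curve and geo_opt helpers are defined elsewhere in this script and are not shown here. Purely as an illustration of what a single-point evaluation could look like with the sampling API used throughout these examples, a hypothetical helper might read as follows; the geometry update implied by x and the parameter alpha depend on the H2 wavefunction internals and are deliberately left out:

def single_point_sketch(net):
    # hypothetical helper: sample the current wavefunction and report the observables
    pos = net.sample(ntherm=-1, with_tqdm=False)
    e = net.wf.energy(pos).detach().numpy()
    v = net.wf.variance(pos).detach().numpy()
    print('Energy   :', e)
    print('Variance :', v)
    return e, v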
Example #8
                     domain={
                         'min': -5,
                         'max': 5
                     })

# optimizer
opt = optim.Adam(wf.parameters(), lr=0.05)
#opt = optim.SGD(wf.parameters(),lr=0.1)

# domain for the RBF Network
boundary = 5.
domain = {'xmin': -boundary, 'xmax': boundary}
ncenter = 51

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)

obs_dict = {'local_energy': [], 'atomic_distance': []}

if 0:
    plot_wf_1d(net,
               domain,
               ncenter,
               sol=h2plus_sol,
               hist=False,
               pot=False,
               grad=True)

if 0:

    X = np.linspace(0.1, 2, 25)
Example #9
#opt = optim.SGD(wf.parameters(),lr=0.1)

# domain for the RBF Network
boundary = 5.
domain = {
    'xmin': -boundary,
    'xmax': boundary,
    'ymin': -boundary,
    'ymax': boundary,
    'zmin': -boundary,
    'zmax': boundary
}
ncenter = [11, 11, 11]

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
obs_dict = {'local_energy': [], 'atomic_distance': [], 'get_sigma': []}

if 1:
    pos = Variable(net.sample())
    pos.requires_grad = True
    e = net.wf.energy(pos)
    s = net.wf.variance(pos)

    print('Energy   :', e)
    print('Variance :', s)

if 0:
    net.wf.layer_mo.weight.data = torch.eye(net.wf.nao).double()
    for param in net.wf.layer_ci.parameters():
        param.requires_grad = False
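A side note on the pos = Variable(net.sample()) pattern used here and in Example #11: torch.autograd.Variable has been deprecated since PyTorch 0.4 (it simply returns a Tensor), so on recent versions the same effect is obtained by marking the sampled tensor directly:

pos = net.sample()
pos.requires_grad_(True)
e = net.wf.energy(pos)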
Example #10
#sampler
sampler = METROPOLIS(nwalkers=250,
                     nstep=1000,
                     step_size=3.,
                     nelec=wf.nelec,
                     ndim=wf.ndim,
                     domain={
                         'min': -2,
                         'max': 5
                     })

# optimizer
opt = optim.Adam(wf.parameters(), lr=0.01, weight_decay=0.0)

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
pos = None
obs_dict = None

plt.ion()
fig = plt.figure()

for i in range(1):

    net.wf.fc.weight.requires_grad = True
    net.wf.rbf.centers.requires_grad = False

    pos, obs_dict = net.train(250,
                              batchsize=250,
                              pos=pos,
                              obs_dict=obs_dict,
Example #11
class TestH2plus(unittest.TestCase):

    def setUp(self):

        # optimal parameters
        self.opt_r = 0.97  # the two H atoms sit at z = +0.97 and z = -0.97
        self.opt_sigma = 1.20

        # wavefunction
        centers = torch.tensor([[0., 0., -self.opt_r], [0., 0., self.opt_r]])
        sigma = torch.tensor([self.opt_sigma, self.opt_sigma])

        self.wf = RBF_H2p(centers=centers, sigma=sigma)

        #sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=0.5,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={'min': -5, 'max': 5})

        #sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=200,
                                       step_size=0.1,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.001)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.597

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1,with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy   :', e)
        print('Variance :', v)

        assert np.allclose([e,v],[self.ground_state_energy,0],atol=1E-1)

    def test_single_point_hamiltonian_mc_sampling(self):

        #switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1,with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy   :', e)
        print('Variance :', v)

        assert np.allclose([e,v],[self.ground_state_energy,0],atol=1E-1)

    def test_energy_curve(self):

        #switch to MH sampling
        self.net.sampler = self.mh_sampler

        # fix the sigma of the AO
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s 

        X = np.linspace(0.1,2,25)
        emin = 1E3
        for x in X:

            # move the atoms
            self.net.wf.rbf.centers.data[0,2] = -x
            self.net.wf.rbf.centers.data[1,2] = x

            pos = Variable(self.net.sample())
            pos.requires_grad = True
            e = self.net.wf.energy(pos)

            if e < emin:
                emin = e

        assert emin < -0.55


    def test_sigma_optimization(self):

        #switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms
        x = 0.97
        self.net.wf.rbf.centers.data[0, 2] = -x
        self.net.wf.rbf.centers.data[1, 2] = x

        # start from a non-optimal sigma of the AO
        s = 1.
        self.net.wf.rbf.sigma.data[:] = s

        # do not optimize the weights of the FC layer
        # do not optimize the pos of the centers
        # optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # define the observables we want
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}

        # train
        pos, obs_dict = self.net.train(100,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # asserting against the exact ground state energy would be too strict
        assert e < -0.5
        assert v < 0.1


    def test_geo_optimization(self):

        #switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms
        x = 0.5
        self.net.wf.rbf.centers.data[0,2] = -x
        self.net.wf.rbf.centers.data[1,2] = x

        # fix the sigma of the AO
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s 

        # do not optimize the weight of the FC layer
        # optimize the pos of the centers
        # do not optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = True
        self.net.wf.rbf.sigma.requires_grad = False

        # define the observables we want
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}

        # train
        pos, obs_dict = self.net.train(200,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='energy',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # asserting against the exact ground state energy would be too strict
        assert e < -0.5
        assert v < 0.1
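To run the TestH2plus class, or any of the other test classes in this collection, directly, the standard unittest entry point applies, assuming the usual if-main guard at the bottom of the file:

if __name__ == '__main__':
    unittest.main()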
Example #12
#sampler
sampler = METROPOLIS(nwalkers=250,
                     nstep=1000,
                     step_size=3.,
                     nelec=wf.nelec,
                     ndim=wf.ndim,
                     domain={
                         'min': -5,
                         'max': 5
                     })

# optimizer
opt = optim.Adam(wf.parameters(), lr=0.005, weight_decay=0.0)

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
pos = None
obs_dict = None

plt.ion()
fig = plt.figure()

for i in range(1):

    net.wf.fc.weight.requires_grad = True
    net.wf.rbf.centers.requires_grad = False

    pos, obs_dict = net.train(1000,
                              batchsize=250,
                              pos=pos,
                              obs_dict=obs_dict,