Example #1 — unit tests for an RBF-network wavefunction of the 1D harmonic oscillator: single-point Metropolis-Hastings and Hamiltonian Monte Carlo sampling, and variance minimization of the FC-layer weights.
class TestRbfNetworkHarmonicOscillator1D(unittest.TestCase):
    def setUp(self):

        # wavefunction
        self.wf = RBF_HO1D(ndim=1, nelec=1, ncenter=5)

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=250,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        # Hamiltonian Monte Carlo sampler (L is presumably the number of leapfrog steps per proposal)
        self.hmc_sampler = HAMILTONIAN(nwalkers=250,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

    def test_single_point_metropolis_hasting_sampling(self):

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.
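        # with only the middle RBF active (presumably the center at x = 0), the
        # trial state is a single Gaussian, i.e. the exact ground state of the
        # 1D harmonic oscillator, so we expect E = 0.5 Ha and zero variance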

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # initialize the fc layer
        self.net.wf.fc.weight.data.fill_(0.)
        self.net.wf.fc.weight.data[0, 2] = 1.

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-6)

    def test_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # optimize the weight of the FC layer
        # do not optimize the pos of the centers
        self.net.wf.fc.weight.requires_grad = True
        self.net.wf.rbf.centers.requires_grad = False

        # randomize the weights
        nn.init.uniform_(self.wf.fc.weight, 0, 1)

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [0.5, 0], atol=1E-3)
Example #2 — the same single-point sampling tests for the hydrogen atom (RBF_H), plus optimization of the orbital width sigma towards the exact -0.5 Ha ground state.
class TestHydrogen(unittest.TestCase):
    def setUp(self):

        # wavefunction
        self.wf = RBF_H(centers=torch.tensor([[0., 0., 0.]]),
                        sigma=torch.tensor([1.]))

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000,
                                     nstep=1000,
                                     step_size=3.,
                                     nelec=self.wf.nelec,
                                     ndim=self.wf.ndim,
                                     domain={
                                         'min': -5,
                                         'max': 5
                                     })

        # Hamiltonian Monte Carlo sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000,
                                       nstep=1000,
                                       step_size=0.01,
                                       nelec=self.wf.nelec,
                                       ndim=self.wf.ndim,
                                       domain={
                                           'min': -5,
                                           'max': 5
                                       },
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.01)

        # network
        self.net = DeepQMC(wf=self.wf,
                           sampler=self.mh_sampler,
                           optimizer=self.opt)

        # exact ground-state energy of the hydrogen atom (in Hartree)
        self.ground_state_energy = -0.5

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-6)

    def test_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # do not optimize the weights of the FC layer
        # optimize the width (sigma) of the RBF
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # modify the sigma
        self.net.wf.rbf.sigma.data[0] = 1.5

        # train
        pos, obs_dict = self.net.train(250,
                                       batchsize=250,
                                       pos=None,
                                       obs_dict=None,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()
        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-3)
Example #3 — script fragment: single-point evaluation of the energy and variance of a sampled DeepQMC network, plus a disabled block that fixes the MO layer and evaluates the variance on reshaped walker batches.
domain = {
    'xmin': -boundary,
    'xmax': boundary,
    'ymin': -boundary,
    'ymax': boundary,
    'zmin': -boundary,
    'zmax': boundary
}
ncenter = [11, 11, 11]

# network
net = DeepQMC(wf=wf, sampler=sampler, optimizer=opt)
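# observables to record during training; presumably net.train(..., obs_dict=obs_dict)
# appends one entry per iteration to each of these lists (cf. Example #5)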
obs_dict = {'local_energy': [], 'atomic_distance': [], 'get_sigma': []}

if 1:  # toggle: single-point evaluation of the energy and variance
    pos = Variable(net.sample())
    pos.requires_grad = True
    e = net.wf.energy(pos)
    s = net.wf.variance(pos)

    print('Energy   :', e)
    print('Variance :', s)

if 0:  # disabled: fix the MO layer to the identity, freeze the CI layer and evaluate the variance over batches of walkers
    net.wf.layer_mo.weight.data = torch.eye(net.wf.nao).double()
    for param in net.wf.layer_ci.parameters():
        param.requires_grad = False

    pos = net.sample(ntherm=0)
    pos = pos.reshape(100, 100, 6)
    var_ = net.observable(net.wf.variance, pos)
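
In this fragment obs_dict is only declared and never filled, since net.train() is not called here; in Example #5 it is passed to net.train(..., obs_dict=obs_dict). A minimal sketch of how the recorded local energies could then be inspected, assuming each entry of obs_dict['local_energy'] is a tensor or array of per-walker values (an assumption, not part of the original code):

import numpy as np
import matplotlib.pyplot as plt

# mean local energy per training iteration (assumes obs_dict was filled by net.train)
mean_e = [float(np.mean(np.asarray(e))) for e in obs_dict['local_energy']]

plt.plot(mean_e)
plt.xlabel('training iteration')
plt.ylabel('mean local energy (Ha)')
plt.show()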
Example #4 — script fragment: the tail of a call comparing against the analytic H2+ solution (h2plus_sol), plus disabled blocks that scan the interatomic distance and plot the resulting energy and variance.
               domain,
               ncenter,
               sol=h2plus_sol,
               hist=False,
               pot=False,
               grad=True)

if 0:  # disabled: scan the interatomic distance and plot the energy and variance

    X = np.linspace(0.1, 2, 25)
    energy, var = [], []
    for x in X:

        net.wf.rbf.centers.data[0] = -x
        net.wf.rbf.centers.data[1] = x
        pos = Variable(net.sample())
        pos.requires_grad = True
        e = net.wf.energy(pos)
        s = net.wf.variance(pos)

        energy.append(e.detach().item())
        var.append(s.detach().item())

    plt.plot(X, energy)
    plt.show()
    plt.plot(X, var)
    plt.show()

if 0:
    pos = Variable(net.sample())
    pos.requires_grad = True
Example #5 — unit tests for the H2+ molecular ion (RBF_H2p): single-point Metropolis-Hastings and Hamiltonian Monte Carlo sampling, a dissociation curve, and optimization of the orbital width (sigma) and of the nuclear positions.
class TestH2plus(unittest.TestCase):

    def setUp(self):

        # optimal parameters
        self.opt_r = 0.97  # the two H nuclei sit at z = +0.97 and z = -0.97
        self.opt_sigma = 1.20

        # wavefunction
        centers = torch.tensor([[0., 0., -self.opt_r], [0., 0., self.opt_r]])
        sigma = torch.tensor([self.opt_sigma, self.opt_sigma])

        self.wf = RBF_H2p(centers=centers,
                          sigma=sigma)

        # Metropolis-Hastings sampler
        self.mh_sampler = METROPOLIS(nwalkers=1000, nstep=1000,
                                     step_size=0.5, nelec=self.wf.nelec,
                                     ndim=self.wf.ndim, domain={'min': -5, 'max': 5})

        # Hamiltonian Monte Carlo sampler
        self.hmc_sampler = HAMILTONIAN(nwalkers=1000, nstep=200,
                                       step_size=0.1, nelec=self.wf.nelec,
                                       ndim=self.wf.ndim, domain={'min': -5, 'max': 5},
                                       L=5)

        # optimizer
        self.opt = optim.Adam(self.wf.parameters(), lr=0.001)

        # network
        self.net = DeepQMC(wf=self.wf, sampler=self.mh_sampler, optimizer=self.opt)

        # ground state energy
        self.ground_state_energy = -0.597
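        # (for reference, the exact H2+ ground-state energy at the equilibrium
        # separation R ≈ 2.0 bohr is about -0.6026 Ha)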

    def test_single_point_metropolis_hasting_sampling(self):

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy   :', e)
        print('Variance :', v)

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-1)

    def test_single_point_hamiltonian_mc_sampling(self):

        # switch to HMC sampling
        self.net.sampler = self.hmc_sampler

        # sample and compute observables
        pos = self.net.sample(ntherm=-1, with_tqdm=False)
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        print('Energy   :', e)
        print('Variance :', v)

        assert np.allclose([e, v], [self.ground_state_energy, 0], atol=1E-1)

    def test_energy_curve(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # fix the sigma of the AO
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s

        X = np.linspace(0.1, 2, 25)
        emin = 1E3
        for x in X:

            # move the atoms
            self.net.wf.rbf.centers.data[0, 2] = -x
            self.net.wf.rbf.centers.data[1, 2] = x

            pos = Variable(self.net.sample())
            pos.requires_grad = True
            e = self.net.wf.energy(pos)

            if e < emin:
                emin = e

        assert emin < -0.55


    def test_sigma_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms
        x = 0.97
        self.net.wf.rbf.centers.data[0, 2] = -x
        self.net.wf.rbf.centers.data[1, 2] = x

        # set the starting sigma of the AO
        s = 1.
        self.net.wf.rbf.sigma.data[:] = s

        # do not optimize the weights of the FC layer
        # do not optimize the pos of the centers
        # optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = False
        self.net.wf.rbf.sigma.requires_grad = True

        # define the observables we want
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}
        # train
        pos, obs_dict = self.net.train(100,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='variance',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # it might be too much to assert against the exact ground-state energy
        assert (e < -0.5)
        assert (v < 0.1)


    def test_geo_optimization(self):

        # switch to MH sampling
        self.net.sampler = self.mh_sampler

        # move the atoms
        x = 0.5
        self.net.wf.rbf.centers.data[0, 2] = -x
        self.net.wf.rbf.centers.data[1, 2] = x

        # fix the sigma of the AO
        s = 1.20
        self.net.wf.rbf.sigma.data[:] = s

        # do not optimize the weight of the FC layer
        # optimize the pos of the centers
        # do not optimize the sigma of the AO
        self.net.wf.fc.weight.requires_grad = False
        self.net.wf.rbf.centers.requires_grad = True
        self.net.wf.rbf.sigma.requires_grad = False

        # define the observables we want
        obs_dict = {'local_energy': [],
                    'atomic_distance': [],
                    'get_sigma': []}
        # train
        pos, obs_dict = self.net.train(200,
                                       batchsize=1000,
                                       pos=None,
                                       obs_dict=obs_dict,
                                       resample=100,
                                       resample_from_last=True,
                                       resample_every=1,
                                       ntherm=-1,
                                       loss='energy',
                                       plot=None,
                                       save_model='best_model.pth')

        # load the best model
        best_model = torch.load('best_model.pth')
        self.net.wf.load_state_dict(best_model['model_state_dict'])
        self.net.wf.eval()

        # sample and compute observables
        pos = self.net.sample()
        e = self.wf.energy(pos).detach().numpy()
        v = self.wf.variance(pos).detach().numpy()

        # it might be too much to assert against the exact ground-state energy
        assert (e < -0.5)
        assert (v < 0.1)
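
These test classes have no runner attached in the listing; a minimal sketch of how they could be executed as a script, assuming the three classes live in (or are imported into) one module and the wavefunction, sampler and DeepQMC imports used above are available, is:

import unittest

if __name__ == '__main__':
    # collect the three test cases shown above into one suite and run it
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (TestRbfNetworkHarmonicOscillator1D, TestHydrogen, TestH2plus):
        suite.addTests(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(suite)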