def run_optimization_best_practice_2states(**kwargs):
    """
    First optimize the ground state, then optimize the excited states while keeping
    the ground-state parameters fixed.
    """
    mol, mf, mc = H2_casci()
    import copy

    # Detach output streams so the PySCF objects can be copied cleanly.
    mf.output = None
    mol.output = None
    mc.output = None
    mc.stdout = None
    mol.stdout = None

    # One CASCI copy per state, each carrying a single CI vector.
    nstates = 2
    mcs = [copy.copy(mc) for i in range(nstates)]
    for i in range(nstates):
        mcs[i].ci = mc.ci[i]

    wfs = []
    to_opts = []
    for i in range(nstates):
        wf, to_opt = pyq.generate_wf(
            mol, mf, mc=mcs[i], slater_kws=dict(optimize_determinants=True)
        )
        wfs.append(wf)
        to_opts.append(to_opt)

    # Optimize the ground state by itself first.
    configs = pyq.initial_guess(mol, 1000)
    pgrad1 = pyq.gradient_generator(mol, wfs[0], to_opt=to_opts[0])
    wfs[0], _ = pyq.line_minimization(
        wfs[0], configs, pgrad1, verbose=True, max_iterations=10
    )

    # Freeze the ground-state parameters for the multi-state optimization.
    for k in to_opts[0]:
        to_opts[0][k] = np.zeros_like(to_opts[0][k])
    to_opts[0]['wf1det_coeff'][0] = True  # Bug workaround for linear transform
    for to_opt in to_opts[1:]:
        to_opt['wf1det_coeff'] = np.ones_like(to_opt['wf1det_coeff'])

    transforms = [
        pyqmc.accumulators.LinearTransform(wf.parameters, to_opt)
        for wf, to_opt in zip(wfs, to_opts)
    ]

    # Excited states start from the optimized ground-state Jastrow ('wf2') parameters.
    for wf in wfs[1:]:
        for k in wf.parameters.keys():
            if 'wf2' in k:
                wf.parameters[k] = wfs[0].parameters[k].copy()

    _, configs = pyq.vmc(wfs[0], configs)
    energy = pyq.EnergyAccumulator(mol)
    return optimize(wfs, configs, energy, transforms, **kwargs)
def test_sampler(H2_casci):
    mol, mf, mc = H2_casci
    ci_energies = mc.e_tot

    # Two trial states in the CASCI basis: the ground state and an equal mix of
    # the two lowest CASCI states.
    mc1 = copy.copy(mc)
    mc2 = copy.copy(mc)
    mc1.ci = mc.ci[0]
    mc2.ci = (mc.ci[0] + mc.ci[1]) / np.sqrt(2)
    wf1, to_opt1 = pyq.generate_slater(mol, mf, mc=mc1, optimize_determinants=True)
    wf2, to_opt2 = pyq.generate_slater(mol, mf, mc=mc2, optimize_determinants=True)
    for to_opt in [to_opt1, to_opt2]:
        to_opt['det_coeff'] = np.ones_like(to_opt['det_coeff'], dtype=bool)
    transform1 = pyqmc.accumulators.LinearTransform(wf1.parameters, to_opt1)
    transform2 = pyqmc.accumulators.LinearTransform(wf2.parameters, to_opt2)

    configs = pyq.initial_guess(mol, 2000)
    _, configs = pyq.vmc(wf1, configs)
    energy = pyq.EnergyAccumulator(mol)
    data_weighted, data_unweighted, configs = sample_overlap_worker(
        [wf1, wf2], configs, energy, [transform1, transform2], nsteps=40, nblocks=20
    )
    avg, error = average(data_weighted, data_unweighted)
    print(avg, error)

    # Diagonal and off-diagonal energy matrix elements against the CASCI references.
    ref_energy1 = 0.5 * (ci_energies[0] + ci_energies[1])
    assert abs(avg['total'][1, 1] - ref_energy1) < 3 * error['total'][1, 1]
    ref_energy01 = ci_energies[0] / np.sqrt(2)
    assert abs(avg['total'][0, 1] - ref_energy01) < 3 * error['total'][0, 1]

    overlap_tolerance = 0.2  # magic number; be careful
    terms = collect_terms(avg, error)

    norm_ref = [np.sum(np.abs(m.ci) ** 2) for m in [mc1, mc2]]
    assert np.all(np.abs(norm_ref - terms['norm']) < overlap_tolerance)

    norm_derivative_ref = 2 * np.real(mc2.ci).flatten()
    print(terms[('dp_norm', 1)].shape, norm_derivative_ref.shape)
    assert np.all(np.abs(norm_derivative_ref - terms[('dp_norm', 1)]) < overlap_tolerance)

    overlap_ref = np.sum(mc1.ci * mc2.ci)
    print('overlap test', overlap_ref, terms['overlap'][0, 1])
    assert abs(overlap_ref - terms['overlap'][0, 1]) < overlap_tolerance

    overlap_derivative_ref = mc1.ci.flatten() - 0.5 * overlap_ref * norm_derivative_ref
    assert np.all(
        np.abs(overlap_derivative_ref - terms[('dp_overlap', 1)][:, 0, 1]) < overlap_tolerance
    )

    en_derivative = take_derivative_casci_energy(mc, mc2.ci)
    assert np.all(
        np.abs(terms[('dp_energy', 1)][:, 1, 1].reshape(mc2.ci.shape) - en_derivative)
        < overlap_tolerance
    )

    derivative = objective_function_derivative(
        terms, 1.0, norm_relative_penalty=1.0, offdiagonal_energy_penalty=0.1
    )
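# A minimal sketch, not part of the test above, of the CI-space algebra behind its
# reference values. It assumes orthonormal CASCI eigenvectors c0, c1 with energies
# e0, e1 (and numpy imported as np, as elsewhere in this module); the helper name
# is hypothetical.
def _reference_matrix_elements_sketch(c0, c1, e0, e1):
    """Expected matrix elements for wf1 = c0 and wf2 = (c0 + c1)/sqrt(2)."""
    c2 = (np.asarray(c0) + np.asarray(c1)) / np.sqrt(2)
    overlap_12 = np.sum(np.asarray(c0) * c2)  # = 1/sqrt(2) for orthonormal eigenvectors
    h_22 = 0.5 * (e0 + e1)  # <wf2|H|wf2>: cross terms vanish between eigenstates
    h_12 = e0 / np.sqrt(2)  # <wf1|H|wf2> = e0 * <c0|c2>, i.e. ref_energy01 above
    return overlap_12, h_12, h_22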
def test_correlated_sampling(H2_casci):
    mol, mf, mc = H2_casci
    ci_energies = mc.e_tot
    import copy

    mc1 = copy.copy(mc)
    mc2 = copy.copy(mc)
    mc1.ci = mc.ci[0]
    mc2.ci = mc.ci[1]
    wf1, to_opt1 = pyq.generate_slater(mol, mf, mc=mc1, optimize_determinants=True)
    wf2, to_opt2 = pyq.generate_slater(mol, mf, mc=mc2, optimize_determinants=True)
    for to_opt in [to_opt1, to_opt2]:
        to_opt['det_coeff'] = np.ones_like(to_opt['det_coeff'], dtype=bool)
    transform1 = pyqmc.accumulators.LinearTransform(wf1.parameters, to_opt1)
    transform2 = pyqmc.accumulators.LinearTransform(wf2.parameters, to_opt2)

    configs = pyq.initial_guess(mol, 1000)
    _, configs = pyq.vmc(wf1, configs)
    energy = pyq.EnergyAccumulator(mol)
    data_weighted, data_unweighted, configs = sample_overlap_worker(
        [wf1, wf2], configs, energy, [transform1, transform2], nsteps=10, nblocks=10
    )

    parameters1 = transform1.serialize_parameters(wf1.parameters)
    parameters2 = transform1.serialize_parameters(wf2.parameters)

    # Rotate between the two CASCI states and tabulate the expected energies and
    # overlaps for each pair of rotated parameter sets.
    sample_parameters = []
    energies_reference = []
    overlap_reference = []
    for theta in np.linspace(0, np.pi / 2, 4):
        a = np.cos(theta)
        b = np.sin(theta)
        sample_parameters.append(
            [a * parameters1 + b * parameters2, a * parameters1 - b * parameters2]
        )
        energies_reference.append([a**2 * ci_energies[0] + b**2 * ci_energies[1]] * 2)
        overlap_reference.append([[1.0, a**2 - b**2], [a**2 - b**2, 1.0]])
    energies_reference = np.asarray(energies_reference)
    overlap_reference = np.asarray(overlap_reference)

    correlated_results = correlated_sampling(
        [wf1, wf2], configs, energy, [transform1, transform2], sample_parameters
    )
    print(correlated_results)
    energy_sample = correlated_results['energy'] / correlated_results['overlap']
    print('energy reference', energies_reference)
    print('energy sample', energy_sample)
    assert np.all(
        np.abs(energy_sample.diagonal(axis1=1, axis2=2) - energies_reference) < 0.1
    )
    print('overlap sample', correlated_results['overlap'])
    print('overlap reference', overlap_reference)
    assert np.all(np.abs(correlated_results['overlap'] - overlap_reference) < 0.1)
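# A minimal sketch, not used by the test above, of why its references take that
# form: for orthonormal eigenstates with energies e0, e1, the normalized rotated
# states a*psi0 +/- b*psi1 (a = cos(theta), b = sin(theta)) have energy
# a**2*e0 + b**2*e1 and mutual overlap a**2 - b**2. The helper name is hypothetical.
def _rotated_state_references_sketch(e0, e1, theta):
    a, b = np.cos(theta), np.sin(theta)
    energy = a**2 * e0 + b**2 * e1  # <psi|H|psi> for psi = a*psi0 + b*psi1
    overlap = a**2 - b**2           # <a*psi0 + b*psi1 | a*psi0 - b*psi1>
    return energy, overlap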
def test_shci_wf_is_better(H2_ccecp_hci):
    mol, mf, cisolver = H2_ccecp_hci
    configs = pyq.initial_guess(mol, 1000)
    wf = Slater(mol, mf, cisolver, tol=0.0)
    data, configs = pyq.vmc(
        wf,
        configs,
        nblocks=40,
        verbose=True,
        accumulators={"energy": pyq.EnergyAccumulator(mol)},
    )
    en, err = avg(data["energytotal"][1:])
    nsigma = 4
    assert len(wf.parameters["det_coeff"]) == len(cisolver.ci)
    assert en - nsigma * err < mf.e_tot
    assert en + nsigma * err > cisolver.energy
def runtest(mol, mf, kind=0):
    kpt = mf.kpts[kind]
    dm = mf.make_rdm1()
    print("original dm shape", dm.shape)
    if len(dm.shape) == 4:
        dm = np.sum(dm, axis=0)
    dm = dm[kind]

    #####################################
    ## evaluate KE in PySCF
    #####################################
    ke_mat = mol.pbc_intor("int1e_kin", hermi=1, kpts=np.array(kpt))
    print("ke_mat", ke_mat.shape)
    print("dm", dm.shape)
    pyscfke = np.real(np.einsum("ij,ji->", ke_mat, dm))
    print("PySCF kinetic energy: {0}".format(pyscfke))

    #####################################
    ## evaluate KE integral with VMC
    #####################################
    wf = Slater(mol, mf)
    coords = pyq.initial_guess(mol, 1200, 0.7)
    warmup = 10
    start = time.time()
    df, coords = pyq.vmc(
        wf,
        coords,
        nsteps=100 + warmup,
        tstep=1,
        accumulators={"energy": pyq.EnergyAccumulator(mol)},
        verbose=False,
        hdf_file=str(uuid.uuid4()),
    )
    print("VMC time", time.time() - start)
    df = pd.DataFrame(df)
    dfke = pyq.avg_reblock(df["energyke"][warmup:], 10)
    vmcke, err = dfke.mean(), dfke.sem()
    print("VMC kinetic energy: {0} +- {1}".format(vmcke, err))

    assert np.abs(vmcke - pyscfke) < 5 * err, (
        "energy diff not within 5 sigma ({0:.6f}): energies \n{1} \n{2}".format(
            5 * err, vmcke, pyscfke
        )
    )
def test_dmc_restarts(H_pbc_sto3g_krks, nconf=10):
    """For PBCs, check to make sure there are no errors on restart."""
    mol, mf = H_pbc_sto3g_krks
    fname = "test_dmc_restart_" + str(uuid.uuid4())
    configs = pyq.initial_guess(mol, nconf)
    wf, _ = pyq.generate_wf(mol, mf, jastrow_kws=dict(na=0, nb=0))
    enacc = pyq.EnergyAccumulator(mol)
    pyq.rundmc(wf, configs, nsteps=20, hdf_file=fname, accumulators={"energy": enacc})
    pyq.rundmc(wf, configs, nsteps=20, hdf_file=fname, accumulators={"energy": enacc})
    os.remove(fname)
def test():
    """
    Ensure that DMC obtains the exact result for a hydrogen atom
    """
    from pyscf import gto, scf
    from pyqmc.dmc import limdrift
    import pandas as pd

    mol = gto.M(atom="H 0. 0. 0.", basis="sto-3g", unit="bohr", spin=1)
    mf = scf.UHF(mol).run()
    nconf = 1000
    configs = pyq.initial_guess(mol, nconf)
    wf, _ = pyq.generate_wf(mol, mf, jastrow_kws=dict(na=0, nb=0))
    enacc = pyq.EnergyAccumulator(mol)

    warmup = 200
    branchtime = 5
    dfdmc, configs_, weights_ = pyq.rundmc(
        wf,
        configs,
        nsteps=4000 + warmup * branchtime,
        branchtime=branchtime,
        accumulators={"energy": enacc},
        ekey=("energy", "total"),
        tstep=0.01,
        verbose=True,
    )

    dfdmc = pd.DataFrame(dfdmc)
    dfdmc.sort_values("step", inplace=True)
    dfprod = dfdmc[dfdmc.step >= warmup]

    rb_summary = reblock.reblock_summary(
        dfprod[["energytotal", "energyei"]], 20, weights=dfprod["weight"]
    )
    energy, err = [rb_summary[v]["energytotal"] for v in ("mean", "standard error")]
    assert np.abs(energy + 0.5) < 5 * err, "energy not within {0} of -0.5: energy {1}".format(
        5 * err, np.mean(energy)
    )
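# A minimal sketch, not pyqmc.reblock's implementation, of the weighted block
# statistics the DMC test above relies on: average the weighted energy trace in
# blocks and use the scatter of the block means for the error bar. The helper
# name and signature are hypothetical.
def _weighted_block_stats_sketch(values, weights, nblocks=20):
    values = np.asarray(values)
    weights = np.asarray(weights)
    n = (len(values) // nblocks) * nblocks  # drop the remainder so blocks are equal
    v = values[:n].reshape(nblocks, -1)
    w = weights[:n].reshape(nblocks, -1)
    block_means = np.sum(v * w, axis=1) / np.sum(w, axis=1)
    return np.mean(block_means), np.std(block_means, ddof=1) / np.sqrt(nblocks)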
def runtest(mol, mf, kind=0):
    kpt = mf.kpts[kind]
    twist = np.dot(kpt, mol.lattice_vectors().T / (2 * np.pi))
    wf0 = Slater(mol, mf)
    wft = Slater(mol, mf, twist=twist)

    #####################################
    ## compare values across boundary
    ## psi, KE, ecp
    #####################################
    nconfig = 50
    coords = pyq.initial_guess(mol, nconfig, 1)
    epos, wrap = enforce_pbc(coords.lvecs, coords.configs)
    coords = PeriodicConfigs(epos, coords.lvecs)

    shift_ = np.random.randint(10, size=coords.configs.shape) - 5
    phase = np.exp(2j * np.pi * np.einsum("ijk,k->ij", shift_, twist))

    shift = np.dot(shift_, mol.lattice_vectors())
    epos, wrap = enforce_pbc(coords.lvecs, epos + shift)
    newcoords = PeriodicConfigs(epos, coords.lvecs, wrap=wrap)

    assert np.linalg.norm(newcoords.configs - coords.configs) < 1e-12

    ph0, val0 = wf0.recompute(coords)
    pht, valt = wft.recompute(coords)
    enacc = pyq.EnergyAccumulator(mol, threshold=np.inf)
    np.random.seed(0)
    en0 = enacc(coords, wf0)
    np.random.seed(0)
    ent = enacc(coords, wft)

    e = 0
    rat0 = wf0.testvalue(e, newcoords.electron(e))
    assert np.linalg.norm(rat0 - 1) < 1e-9, rat0 - 1
    ratt = wft.testvalue(e, newcoords.electron(e))
    rattdiff = ratt - phase[:, e]
    print("phase", phase[:, e])
    assert np.linalg.norm(rattdiff) < 1e-9, [
        np.round(rattdiff, 10),
        np.amax(np.abs(rattdiff)),
    ]

    ph0new, val0new = wf0.recompute(newcoords)
    phtnew, valtnew = wft.recompute(newcoords)
    np.random.seed(0)
    en0new = enacc(newcoords, wf0)
    np.random.seed(0)
    entnew = enacc(newcoords, wft)

    assert np.linalg.norm(ph0 - ph0new) < 1e-11
    assert np.linalg.norm(pht * phase.prod(axis=1) - phtnew) < 1e-11, (
        pht * phase.prod(axis=1) - phtnew
    )
    assert np.linalg.norm(val0 - val0new) < 1e-11, np.linalg.norm(val0 - val0new)
    assert np.linalg.norm(valt - valtnew) < 1e-11, np.linalg.norm(valt - valtnew)

    for k in en0.keys():
        diff0 = en0[k] - en0new[k]
        difft = ent[k] - entnew[k]
        if k == "ecp":
            for l, diff in [("0", diff0), ("t", difft)]:
                mad = np.mean(np.abs(diff))
                if True:  # mad > 1e-12:
                    print("ecp%s diff" % l, mad, np.linalg.norm(diff))
                    assert mad < 1e-3, diff
        else:
            assert np.mean(np.abs(diff0)) < 1e-6, diff0
            assert np.mean(np.abs(difft)) < 1e-6, difft
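# A minimal sketch, not the Slater implementation, of the twist boundary condition
# exercised above: translating electrons by integer multiples of the lattice vectors
# multiplies the twisted wave function by exp(2*pi*i * n . twist), which is what the
# `phase` array in runtest encodes. The helper name is hypothetical.
def _translation_phase_sketch(n_lattice, twist):
    """n_lattice: integer lattice translations per electron, shape (nconf, nelec, 3);
    twist: k-point in units of reciprocal lattice vectors."""
    return np.exp(
        2j * np.pi * np.einsum("ijk,k->ij", np.asarray(n_lattice), np.asarray(twist))
    )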