def test_metropolis(nconfig=1000, ndim=3, nelec=2, nstep=100, tau=0.5):
    """Smoke-test the Metropolis sampler on a hydrogen-like system.

    Samples an exponential Slater wavefunction (alpha=1) against a Z=1
    Hamiltonian, then compares sampled kinetic, electron-nucleus, and total
    energies against the known reference values.
    """
    from slaterwf import ExponentSlaterWF
    from hamiltonian import Hamiltonian

    trial_wf = ExponentSlaterWF(alpha=1.0)
    hamiltonian = Hamiltonian(Z=1)

    # Start walkers from a standard-normal cloud and equilibrate.
    pos = np.random.randn(nelec, ndim, nconfig)
    pos, acceptance = metropolis_sample(pos, trial_wf, tau=tau, nstep=nstep)

    # Local energy pieces: kinetic from the wavefunction laplacian,
    # potential from the electron-nucleus interaction.
    kinetic = -0.5 * np.sum(trial_wf.laplacian(pos), axis=0)
    elec_nuc = hamiltonian.pot_en(pos)
    local_energy = kinetic + elec_nuc

    print("Cycle finished; acceptance = {acc:3.2f}.".format(acc=acceptance))
    labels = ['kinetic', 'Electron-nucleus', 'total']
    values = [kinetic, elec_nuc, local_energy]
    references = [1.0, -2.0, -1.0]
    for label, quantity, reference in zip(labels, values, references):
        print("{name:20s} = {avg:10.6f} +- {err:8.6f}; reference = {ref:5.2f}".
              format(name=label,
                     avg=np.mean(quantity),
                     err=np.std(quantity) / np.sqrt(nconfig),
                     ref=reference))
def check_wfs():
    """Run the singularity check for several trial wavefunctions on helium.

    Returns a DataFrame concatenating the per-wavefunction results of
    check_singularity(), with a 'wf' column labeling each block.

    Fix: the original contained a stray no-op expression statement
    (``resdf['electron-nucleus']``) whose result was discarded; it has been
    removed.  The repeated build-label-concat sequence is folded into a loop.
    """
    cases = [
        (ExponentSlaterWF(1.0), 'slater unopt.'),
        (ExponentSlaterWF(2.0), 'slater opt.'),
        (MultiplyWF(ExponentSlaterWF(2.0), JastrowWF(0.5)), 'slater-jastrow'),
    ]
    frames = []
    for wf, label in cases:
        newdf = check_singularity(wf=wf, ham=Hamiltonian(Z=2))
        newdf['wf'] = label
        frames.append(newdf)
    return pd.concat(frames)
def standard_config():
    """Return a NHF with standard configuration."""
    # Two Hamiltonian flow components over a 2-D standard-normal prior.
    hamiltonians = [Hamiltonian(2), Hamiltonian(2)]
    prior = tfp.python.distributions.MultivariateNormalDiag(
        loc=tf.zeros(2), scale_identity_multiplier=1)
    nhf = NHF(hamiltonians, GaussianEncoder(2), prior)
    nhf.set_optimizer()
    return nhf
def __init__(self, G, report_flag=False):
    """Diffeo: report_flag=False (quiet) or True (gives progress reports)."""
    Hamiltonian.__init__(self, G, report_flag)
    # Start from a single landmark.  Landmarks are indexed [i, j, k]:
    # i = landmark set index, j = dimension index, k = landmark index.
    n_landmarks = 1
    self.set_landmarks(np.zeros((2, self.d, n_landmarks)))
    # self.set_no_steps(5)  # no. time steps for discretising ODEs
def test_energy_ferro():
    """Check the energy of the ferromagnetic state."""
    size, coupling = 4, 1.0
    ham = Hamiltonian(Lattice(size, size), coupling)
    # All spins up: every nearest-neighbor pair is aligned.
    ferro_state = ones((size, size), dtype=bool)
    expected = 2 * coupling * size * size
    assert ham.energy(ferro_state) == expected, \
        "Energy of ferromagnetic state is incorrect."
def test_energy_antiferro():
    """Check the energy of the antiferromagnetic state.

    Fix: the tile repetition count was hard-coded as (5, 5), silently
    duplicating ``size = 10``; it is now derived as ``size // 2`` so the
    checkerboard construction stays correct if ``size`` is changed
    (``size`` must remain even).
    """
    size = 10
    coupling = 1.0
    lattice = Lattice(size, size)
    ham = Hamiltonian(lattice, coupling)
    # Checkerboard: tiling the 2x2 identity yields True exactly where
    # (row + col) is even, i.e. a Neel (antiferromagnetic) pattern.
    afm = tile(eye(2, dtype=bool), (size // 2, size // 2))
    assert ham.energy(afm) == -2 * coupling * size * size, \
        "Energy of antiferromagnetic state is incorrect."
def initializer(**kwargs):
    """Build and initialize a (sparse or dense) tight-binding Hamiltonian.

    Recognized kwargs: orbital_sets, VERBOSITY, xyz, nn_distance, sparse,
    sigma, num_eigs, primitive_cell.  Returns the initialized Hamiltonian,
    with periodic boundary conditions applied when a nonzero primitive
    cell is given.
    """
    set_tb_params(**kwargs)
    Atom.orbital_sets = kwargs.get('orbital_sets',
                                   {'Si': 'SiliconSP3D5S', 'H': 'HydrogenS'})
    sys.modules[__name__].VERBOSITY = kwargs.get('VERBOSITY', 1)

    xyz = kwargs.get('xyz', {})
    nn_distance = kwargs.get('nn_distance', 2.7)

    if kwargs.get('sparse', 0):
        # Sparse solver needs a shift (sigma) and eigenvalue count.
        h = HamiltonianSp(xyz=xyz, nn_distance=nn_distance,
                          sigma=kwargs.get('sigma', 1.1),
                          num_eigs=kwargs.get('num_eigs', 14))
    else:
        h = Hamiltonian(xyz=xyz, nn_distance=nn_distance)
    h.initialize()

    # Apply periodic boundary conditions only for a nonzero primitive cell.
    primitive_cell = kwargs.get('primitive_cell', [0, 0, 0])
    if np.sum(np.abs(np.array(primitive_cell))) > 0:
        h.set_periodic_bc(primitive_cell=primitive_cell)

    return h
def __init__(self, structure, **kwargs):
    """Set up the grid, Hamiltonian, and iterator for *structure*.

    NOTE(review): **kwargs is currently accepted but not forwarded --
    the option dicts below are empty placeholders; confirm intent.
    """
    self.structure = structure
    # Empty option dicts reserved for grid/Hamiltonian configuration.
    grid_options, ham_options = {}, {}
    self.grid = Grid(structure, 30, **grid_options)
    self.ham = Hamiltonian(self.structure, self.grid, **ham_options)
    self.it = Iterator(self.ham)
    self.solved = False
def main():
    """Compute and plot the fcc-Ge tight-binding band structure.

    Reads the system from a YAML input file, diagonalizes the Hamiltonian
    at each k-point along the path, and hands the bands to draw_band().

    Fix: ``print eigs_k[:, -1]`` was a Python-2 print *statement*, a syntax
    error under Python 3; converted to the function form, which behaves
    identically on both interpreters for a single argument.
    """
    # fcc_ge_input = sys_input.Sys_input('./input_Jancu_PRB_76_115202_2007.yaml')
    fcc_ge_input = sys_input.Sys_input('./input_PRB_57_1998_Jancu.yaml')
    fcc_ge_sys = fcc_ge_input.system
    k_all_path, kpts_len = fcc_ge_input.get_kpts()
    fcc_ge_ham = Hamiltonian(fcc_ge_sys)

    eigs_k = []
    for kpt in k_all_path:
        ham = fcc_ge_ham.get_ham(np.array(kpt))
        # eigvalsh: the Hamiltonian matrix is Hermitian.
        eigs_k.append(np.linalg.eigvalsh(ham))
    eigs_k = np.array(eigs_k).T

    print(eigs_k[:, -1])
    draw_band(kpts_len, eigs_k)
def __init__(self):
    """Initialize by delegating directly to the Hamiltonian base class."""
    # Explicit base-class call kept verbatim (rather than super()) so the
    # initialization path is byte-identical to the original.
    Hamiltonian.__init__(self)
#df={} #quantities=['kinetic','electron-nucleus','electron-electron'] #for i in quantities: # df[i]=[] #for i in ['alpha','beta','acceptance']: # df[i]=[] ke = [] vion = [] vele = [] potential = [] eloc = [] acc = [] virial = [] vele = [] venu = [] ham = Hamiltonian(Z=2) # Helium # Best Slater determinant. n = 51 ast = 1.5 aen = 2.5 bst = -0.5 ben = 1.5 alphas = np.linspace(ast, aen, n) betas = np.linspace(bst, ben, n) for alpha in alphas: ewf = ExponentSlaterWF(alpha=alpha) pos = np.random.randn(nelec, ndim, nconfig) pos, _ = metropolis_sample(pos=pos, wf=ewf, tau=tau, nstep=nstep) acc.append(_) ke.append(np.mean(-0.5 * np.sum(ewf.laplacian(pos), axis=0))) vele.append(np.mean(ham.pot_ee(pos)))
"acceptance", acc_ratio) df['step'].append(istep) df['elocal'].append(np.mean(eloc)) df['weight'].append(np.mean(weight)) df['elocalvar'].append(np.std(eloc)) df['weightvar'].append(np.std(weight)) df['eref'].append(eref) df['tau'].append(tau) weight.fill(wavg) return pd.DataFrame(df) ##################################### if __name__ == '__main__': from slaterwf import ExponentSlaterWF from wavefunction import MultiplyWF, JastrowWF from hamiltonian import Hamiltonian nconfig = 50000 dfs = [] for tau in [.01, .005, .0025]: dfs.append( simple_dmc(MultiplyWF(ExponentSlaterWF(2.0), JastrowWF(0.5)), Hamiltonian(), pos=np.random.randn(2, 3, nconfig), tau=tau, nstep=10000)) df = pd.concat(dfs) df.to_csv("dmc.csv", index=False)
def __init__(self, graph, j=1.0, h=1.0):
    """Hamiltonian on *graph* storing coupling parameter j and field h."""
    Hamiltonian.__init__(self, graph)
    # Keep the model parameters on the instance for later energy evaluation.
    self.j, self.h = j, h
def __init__(self, graph, jx=1.0, jy=1.0, jz=1.0):
    """Hamiltonian on *graph* with per-axis coupling parameters jx, jy, jz."""
    Hamiltonian.__init__(self, graph)
    # Store the anisotropic couplings for later use.
    self.jx, self.jy, self.jz = jx, jy, jz
# Demonstration sequence driving a 3-qubit register `q` (defined earlier in
# this script -- TODO confirm) through gates and measurements.
q.gate(H, target=0)
q.gate(H, target=1)
print(q.data.flatten())

# Overwrite the raw amplitude vector directly, then flip qubit 2.
# (Assumes q.data holds the 8 amplitudes of the 3-qubit state; verify the
# basis-state ordering against the register implementation.)
q.data = [0, 1, 0, 0, 0, 0, 0, 0]
q.gate(X, target=2)
print(q.data.flatten())

q.gate(H, target=0)
q.gate(H, target=1)
# X on qubit 2 controlled on qubits 0 AND 1 (Toffoli-style call).
q.gate(X, target=2, control=(0, 1))
# Mixed controls: regular control on qubit 1, zero-control on qubit 2.
q.gate(X, target=0, control=1, control_0=2)
q.gate(swap, target=(0, 2))
q.gate(rz(np.pi / 8), target=2, control_0=1)
print(q.data.flatten())
q.gate(iswap, target=(2, 1))
print(q.data.flatten())

# Projection of qubit 1.
res = q.projection(target=1)
print(res)

# Expectation value of the operator 2*XII + 1*IYI + 1*IIZ on a fresh state.
from hamiltonian import Hamiltonian
ham = Hamiltonian(3, coefs=[2, 1, 1], ops=["XII", "IYI", "IIZ"])
q.set_state("000")
q.gate(H, target=0)
q.gate(H, target=1)
q.gate(S, target=1)
print(q.get_state())
print(q.expect(ham))
from flow_example import FlowExample
import tensorflow_probability as tfp

# INPUT DATA
# Base distribution: 2-D multivariate normal with identity scale.
gaussian = tfp.python.distributions.MultivariateNormalDiag(
    loc=tf.zeros(2), scale_identity_multiplier=1)
distrib = FlowExample.from_tensorflow_distribution(gaussian)
x = distrib.sample(20)

# Cast to Dataset: the 20 samples are consumed in batches of 5.
data = tf.data.Dataset.from_tensor_slices(x)
data = data.batch(5)

# INIT MODEL
# Define hamiltonians, encoder, and prior for the NHF model.
h1_, h2_ = Hamiltonian(2), Hamiltonian(2)
encoder_ = GaussianEncoder(2)
prior_ = gaussian

# Init
nhf_ = NHF([h1_, h2_], encoder_, prior_)
nhf_.set_optimizer()

# TRAIN for 10 iterations (epochs or steps -- depends on NHF.train;
# TODO confirm).
nhf_.train(data, 10)

# SOME TEST: evaluate on a zero batch with differing sample counts.
print('Evaluating on zeros(4, 2) with 10 samples: \n %s' %
      nhf_.evaluate(tf.zeros((4, 2)), n_samples=10))
print('With 1 sample: \n %s' % nhf_.evaluate(tf.zeros((4, 2)), n_samples=1))
def main():
    """Locate a critical point of the Hamiltonian system, analyse its
    stability, and (optionally) search for a nearby periodic orbit (PO).
    """
    H = Hamiltonian()
    locator = Locator(H)
    integrator = Integrator(H)
    # NOTE(review): J is computed but unused in the visible code.
    J = get_metric_mat(2)
    locate_po = True
    # Initial guess for the critical-point search (phase-space point;
    # presumably (q1, q2, p1, p2) -- TODO confirm ordering).
    x = np.array([0.7, 1.0, 0.3, -0.3])

    # print ("Locate minimum by minimum search of Hamiltonian")
    # mini = locator.find_min(x,method='SLSQP',bounds=((0,2),(0,2),(-1,1),(-1,1)))
    # if mini.success:
    #     print("found minimum at : {}".format(mini.x))
    # else:
    #     print("couldn't find minimum : {}".format(mini.message))

    print ("Locate minimum by root finding of Hamiltonian gradient")
    mini = locator.find_saddle(x, root_method='hybr')
    if mini.success:
        print("found critical pt at : {}\n".format(mini.x))
        # Linear stability analysis at the critical point: eigenvalues,
        # eigenvectors, and linearized periods.
        eig, eigv, periods = analyse_equilibrium(H, 0.0, mini.x)
    else:
        print("couldn't find minimum : {}".format(mini.message))

    # just to get options for specific solver
    # show_options(solver='root', method='broyden1', disp=True)

    if locate_po and mini.success:
        print ("\n\n########################\nTrying to locate PO...")
        # Seed period: linearized period plus a 5% offset.
        period = periods[0] + periods[0]*0.05
        # Displace the initial condition off the critical point.
        dev = np.array([0.5, 0, 0.1, 0])
        xini = mini.x + dev  # 0.1*eigv[:,0]

        # variational_0 = np.array(np.identity(2*H.dof)).flatten()
        # xstart = np.concatenate((xini, variational_0))
        # print("xstart:\n{}".format(xstart))
        # traj = integrator.integrate_variational_plot(x=xstart, tstart=0., tend=period, npts=50)
        # # traj = integrator.integrate_plot(x=xini, tstart=0., tend=period, npts=50)
        # print("trajectory:\n{}".format(traj[-1]))
        # plot_traj(traj,H.dof,'traj.pdf')

        PO_sol = locator.locatePO(xini, period, root_method='broyden1',
                                  integration_method="dop853")
        if PO_sol.success:
            print("success : {}".format(PO_sol.success))
            print("PO initial conditions {}".format(PO_sol.x))
            # compute monodromy matrix: append a flattened identity as the
            # initial condition of the variational equations.
            variational_0 = np.array(np.identity(2*H.dof)).flatten()
            xstart = np.concatenate((PO_sol.x, variational_0))
            print("xstart:\n{}".format(xstart))
            traj = integrator.integrate_variational(x=xstart, tstart=0.,
                                                    tend=period, method="dop853")
            # last = traj[-1]
            # NOTE(review): this slices `traj` itself past index 2*H.dof;
            # if integrate_variational returns a time series, the intent may
            # have been traj[-1][2*H.dof:] (the commented `last` suggests so)
            # -- confirm against the integrator's return shape.
            monod = np.reshape(traj[2*H.dof:], (2*H.dof, 2*H.dof))
            print("monodromy matrix:\n{}".format(monod))
            eig, eigenvec = compute_eigenval_mat(monod)
            for i in range(eig.size):
                print("eigenvalue: {}".format(eig[i]))
            # PO = integrator.integrate_plot(PO_sol.x,0,period,'dop853',100)
            # print("PO:\n{}".format(PO))
            # plot_traj(traj,H.dof,'PO.pdf')
        else:
            print("Couldn't find PO : {}".format(PO_sol.message))
if __name__ == "__main__": nconfig = 10000 ndim = 3 nelec = 2 nstep = 100 tau = 0.2 # All the quantities we will keep track of df = {} quantities = ['kinetic', 'electron-nucleus', 'electron-electron'] for i in quantities: df[i] = [] for i in ['alpha', 'beta', 'acceptance']: df[i] = [] ham = Hamiltonian(Z=2) # Helium # Best Slater determinant. beta = 0.0 # For book keeping. for alpha in np.linspace(1.5, 2.5, 11): wf = ExponentSlaterWF(alpha=alpha) sample, acc = metropolis_sample(np.random.randn(nelec, ndim, nconfig), wf, tau=tau, nstep=nstep) ke = -0.5 * np.sum(wf.laplacian(sample), axis=0) vion = ham.pot_en(sample) vee = ham.pot_ee(sample) for i in range(nconfig): for nm, quant in zip(quantities, [ke, vion, vee]):
# Compare the plain (unbiased) and drift (biased) Metropolis samplers on the
# same wavefunction.  Assumes wf, ham, nelec, ndim, nconfig, tau, and nstep
# are defined earlier in this script -- TODO confirm.
sample_met, acc_met = metropolis_sample(np.random.randn(nelec, ndim, nconfig),
                                        wf, tau=tau, nstep=nstep)
sample_bia, acc_bia = metropolis_sample_biased(np.random.randn(nelec, ndim, nconfig),
                                               wf, tau=tau, nstep=nstep)

# Local energy pieces on each sampler's final configurations.
ke_met = -0.5*np.sum(wf.laplacian(sample_met), axis=0)
vion_met = ham.pot_en(sample_met)
vee_met = ham.pot_ee(sample_met)
ke_bia = -0.5*np.sum(wf.laplacian(sample_bia), axis=0)
vion_bia = ham.pot_en(sample_bia)
vee_bia = ham.pot_ee(sample_bia)

# Side-by-side summary table of acceptance and mean energy components.
res = pd.DataFrame({
    'method': ['unbiased', 'biased'],
    'acceptance': [acc_met, acc_bia],
    'kinetic': [ke_met.mean(), ke_bia.mean()],
    'electron-electron': [vee_met.mean(), vee_bia.mean()],
    'electron-nucleus': [vion_met.mean(), vion_bia.mean()],
})
print(res)

# Repeat the drift comparison for a Slater-Jastrow wavefunction on helium.
ham = Hamiltonian(Z=2)
wf = MultiplyWF(ExponentSlaterWF(2.0), JastrowWF(0.5))
compare_drift(wf, ham)
hterms.append({ 'coeff': k9a1, 'units': 'ev', 'modes': 3, 'elop': '0,0', 'ops': 'q' }) # Holstein copuling mode 4 el 0 hterms.append({ 'coeff': k9a2, 'units': 'ev', 'modes': 3, 'elop': '1,1', 'ops': 'q' }) # Holstein copuling mode 4 el 1 ham = Hamiltonian(nmodes, hterms) # # TODO update this # wf.overlap_matrices() # opspfs,opips = precompute_ops(ham.ops, wf) # # btime = time() # for i in range(int(1e3)): # for alpha in range(nel): # for beta in range(nel): # spfout = compute_meanfield_corr(alpha,beta,wf,ham.hcterms,opspfs,opips) # print(time()-btime) # # btime = time() # for i in range(int(1e4)): # for alpha in range(nel):
# Monte Carlo run parameters.
nconfig = 1000
ndim = 3
nelec = 2
nstep = 100
tau = 0.2

# All the quantities we will keep track of (the lists are to be populated
# by the scan loops below).
quantities = ['kinetic', 'electron-nucleus', 'electron-electron']
df = {key: [] for key in quantities + ['alpha', 'beta', 'acceptance']}

ham = Hamiltonian(Z=2)  # Helium

# Best Slater determinant.
beta = 0.0  # For book keeping.
for alpha in np.linspace(1.5, 2.5, 11):
    pass

# Best Slater-Jastrow.
for alpha in np.linspace(1.5, 2.5, 11):
    for beta in np.linspace(-0.5, 1.5, 11):
        pass

import pandas as pd
pd.DataFrame(df).to_csv("helium.csv", index=False)
def __str__(self):
    """Diffeo: prefix the base Hamiltonian description with a Diffeo tag."""
    return "--\nDiffeo: " + Hamiltonian.__str__(self)
# Two-state vibronic model parameters (values in eV unless noted).
# minv is an inverse-mass-like coefficient for the torsional kinetic
# term -- TODO confirm units/normalization.
minv = 1.43e-3
E0 = 0.0
E1 = 2.00
W0 = 2.3
W1 = 1.50
lamda = 0.19

hterms = []
# electronic energy shifts
hterms.append({'coeff': E0+0.5*W0, 'units': 'ev', 'elop': '0,0'})
hterms.append({'coeff': E1-0.5*W1, 'units': 'ev', 'elop': '1,1'})
## coupling mode harmonic oscillator potential
# (assumes wc and kappa1 are defined earlier in this script -- TODO confirm)
hterms.append({'coeff': wc, 'units': 'ev', 'modes': 0, 'ops': 'KE'})
hterms.append({'coeff': 0.5*wc, 'units': 'ev', 'modes': 0, 'ops': 'q^2'})
hterms.append({'coeff': kappa1, 'units': 'ev', 'modes': 0, 'elop': '1,1', 'ops': 'q'})
hterms.append({'coeff': lamda, 'units': 'ev', 'modes': 0, 'elop': 'sx', 'ops': 'q'})
# torsional mode plane wave potential
hterms.append({'coeff': -0.5*minv, 'units': 'ev', 'modes': 1, 'ops': 'KE'})
hterms.append({'coeff': -0.5*W0, 'units': 'ev', 'modes': 1, 'elop': '0,0', 'ops': 'cos'})
hterms.append({'coeff': 0.5*W1, 'units': 'ev', 'modes': 1, 'elop': '1,1', 'ops': 'cos'})

# Assemble the Hamiltonian over the primitive basis functions.
ham = Hamiltonian(nmodes, hterms, pbfs=pbfs)

# Time grid: 1 fs steps up to 4000 fs, converted to internal units
# (presumably units.convert_to('fs') returns the conversion factor --
# TODO confirm).
dt = 1.0
times = np.arange(0.0, 4000., dt)*units.convert_to('fs')

# Earlier output targets kept for reference:
#wf = vmfpropagate(times, ham, pbfs, wf, 'rhodopsin_diabatic_pops_test.txt')
#wf = vmfpropagate(times, ham, pbfs, wf, 'rhodopsin_diabatic_pops.txt')
#wf = vmfpropagate(times, ham, pbfs, wf, 'rhodopsin_diabatic_pops_small.txt')
wf = vmfpropagate(times, ham, pbfs, wf, 'rhodopsin_diabatic_pops_really_small.txt')