def row(project, uid):
    db = databases[project]
    if not hasattr(db, 'meta'):
        db.meta = ase.db.web.process_metadata(db)
    prefix = '{}/{}-{}-'.format(tmpdir, project, uid)
    key = db.meta.get('unique_key', 'id')
    try:
        uid = int(uid)
    except ValueError:
        pass
    row = db.get(**{key: uid})
    s = Summary(row, db.meta, SUBSCRIPT, prefix)
    atoms = Atoms(cell=row.cell, pbc=row.pbc)
    n1, n2, n3 = kptdensity2monkhorstpack(atoms, kptdensity=1.8, even=False)
    return render_template('summary.html',
                           project=project,
                           s=s,
                           uid=uid,
                           n1=n1, n2=n2, n3=n3,
                           home=home,
                           back=True,
                           ase_db_footer=ase_db_footer,
                           md=db.meta,
                           open_ase_gui=open_ase_gui)
def get_bulk(name, proto, id=None, method="gpaw"):
    # Get bulk properties
    if id is None:
        res = list(db.select(formula=name, prototype=proto))
        if len(res) == 0:
            return None
        r = res[0]
    else:
        r = db.get(id)
    try:
        if method.lower() == "gpaw":
            L = r.bulk_L
            eps_para = (r.bulk_eps_x + r.bulk_eps_y) / 2
            eps_perp = r.bulk_eps_z
            e = r.gap_hse
        elif method.lower() == "vasp":
            # VASP version
            L = r.bulk_L_vasp
            eps_para = (r.bulk_eps_x_vasp + r.bulk_eps_y_vasp) / 2
            eps_perp = r.bulk_eps_z_vasp
            if r.bulk_gap < 0:
                e = r.gap_hse
            else:
                e = r.bulk_gap
        else:
            return None
        if eps_para < 0 or eps_perp < 0:
            return None
    except Exception:
        return None
    return L, eps_para, eps_perp, e
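# Usage sketch for get_bulk() above (the formula/prototype values are
# hypothetical): the function returns None on any failure, so guard before
# unpacking the (L, eps_para, eps_perp, e) tuple.
result = get_bulk("MoS2", "H", method="gpaw")
if result is not None:
    L, eps_para, eps_perp, e = result
    print("L={}, eps_para={}, eps_perp={}, gap={}".format(L, eps_para,
                                                          eps_perp, e))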
def open_row(id): con_id = int(request.args["x"]) opened = connections[con_id].opened if id in opened: opened.remove(id) return "" opened.add(id) return render_template("more.html", dct=db.get(id), id=id, cid=con_id)
def db_read_row(dbname, id):
    import ase.db
    db = ase.db.connect(dbname)
    # Read all information for one row in the database
    row = db.get(id=id)
    # Create an Atoms object and glue the row data onto it for convenience
    structure = row.toatoms()
    structure.row = row
    return structure
def main(argv):
    relax_atoms = (argv[1] == "atoms")
    runID = int(argv[0])
    print("Running job: %d" % runID)
    db_name = db_name_atoms
    # db_name = "/home/ntnu/davidkl/Documents/GPAWTutorials/ceTest.db"
    db = ase.db.connect(db_name)
    new_run = not db.get(id=runID).key_value_pairs["started"]

    # Update the database
    db.update(runID, started=True, converged=False)
    atoms = db.get_atoms(id=runID)
    calc = EAM(potential="/home/davidkl/Documents/EAM/mg-al-set.eam.alloy")
    atoms.set_calculator(calc)
    init_energy = atoms.get_potential_energy()

    logfile = "CE_eam/ceEAM%d.log" % runID
    traj = "CE_eam/ceEAM%d.traj" % runID
    trajObj = Trajectory(traj, 'w', atoms)

    if relax_atoms:
        relaxer = BFGS(atoms, logfile=logfile)
        relaxer.attach(trajObj)
        relaxer.run(fmax=0.025)
        energy = atoms.get_potential_energy()
    else:
        res = minimize(target_function, x0=4.05, args=(atoms,))
        a = res["x"]
        atoms = set_cell_parameter(atoms, a)
        energy = atoms.get_potential_energy()
        print("Final energy: {}, final a_la: {}".format(energy, a))

    row = db.get(id=runID)
    del db[runID]
    kvp = row.key_value_pairs
    kvp["init_energy"] = init_energy
    runID = db.write(atoms, key_value_pairs=kvp)
    db.update(runID, converged=True)
    print("Energy: %.2E eV/atom" % (energy / len(atoms)))
    print("Initial energy: %.2E eV/atom" % (init_energy / len(atoms)))
def main(runID):
    db = ase.db.connect(db_name())
    atoms = db.get_atoms(id=runID)
    row = db.get(id=runID)
    n_kpt = row.n_kpt
    cutoff = row.cutoff
    calc = gp.GPAW(mode=gp.PW(cutoff),
                   xc="PBE",
                   kpts=(n_kpt, n_kpt, n_kpt),
                   nbands="120%")
    atoms.set_calculator(calc)
    energy = atoms.get_potential_energy()
    db.update(runID, trial_energy=energy)
def test_db(name):
    print(name)
    db = ase.db.connect(name, append=False)
    db.write(Atoms(), x=1, data={'a': 1})
    db.update(1, y=2, data={'b': 2})
    db.update(1, delete_keys=['x'])
    row = db.get(1)
    print(row.y, row.data)
    assert 'x' not in row
    db.update(1, atoms=Atoms('H'))
    row = db.get(1)
    print(row.y, row.data, row.numbers)
    assert (row.numbers == [1]).all()
    assert sorted(row.data) == ['a', 'b']
    db.write(Atoms(), id=1)
    row = db.get(1)
    assert len(row.data) == 0
    assert len(row.key_value_pairs) == 0
    assert len(row.numbers) == 0

    # N = 100
    N = 5
    for i in range(N):
        db.write(Atoms('H10'), i=i, data={'c': 3})

    t0 = time()
    for id in range(2, 2 + N):
        db.update(id, z=3)
    print(time() - t0)

    # This should be faster for large N:
    t0 = time()
    with db:
        for id in range(2, 2 + N):
            db.update(id, z=3)
    print(time() - t0)
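# Minimal driver for test_db() above, mirroring the standalone script later
# in this collection: the same checks run against both the JSON and SQLite
# back-ends, which ase.db.connect picks from the file extension.
for name in ['x.json', 'x.db']:
    test_db(name)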
def db_read_row(dbname, id):
    """Read a specific row from an ASE database.

    Keyword arguments:
    dbname -- database name.
    id -- id number of the row to read.
    """
    import ase.db
    db = ase.db.connect(dbname)
    # Read all information for one row in the database
    row = db.get(id=id)
    structure = row.toatoms()
    structure.row = row
    return structure
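# Usage sketch for db_read_row() (the file name and row id are hypothetical):
# the returned Atoms object carries the full database row on its .row
# attribute, so key-value pairs remain accessible alongside the structure.
structure = db_read_row('structures.db', 1)
print(structure.get_chemical_formula())
print(structure.row.key_value_pairs)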
def summary(id):
    db = database()
    if db is None:
        return ''
    if not hasattr(db, 'meta'):
        db.meta = ase.db.web.process_metadata(db)
    prfx = prefix() + str(id) + '-'
    row = db.get(id)
    s = Summary(row, db.meta, SUBSCRIPT, prfx, tmpdir)
    atoms = Atoms(cell=row.cell, pbc=row.pbc)
    n1, n2, n3 = kptdensity2monkhorstpack(atoms, kptdensity=1.8, even=False)
    return render_template('summary.html',
                           project=request.args.get('project', 'default'),
                           projects=projects,
                           s=s,
                           n1=n1, n2=n2, n3=n3,
                           home=home,
                           md=db.meta,
                           open_ase_gui=open_ase_gui)
def summary(id):
    db = database()
    if not hasattr(db, 'meta'):
        db.meta = ase.db.web.process_metadata(db)
    prfx = prefix() + str(id) + '-'
    row = db.get(id)
    s = Summary(row, db.meta, SUBSCRIPT, prfx, tmpdir)
    atoms = Atoms(cell=row.cell, pbc=row.pbc)
    n1, n2, n3 = kptdensity2monkhorstpack(atoms, kptdensity=1.8, even=False)
    return render_template('summary.html',
                           project=request.args.get('project', 'default'),
                           projects=projects,
                           s=s,
                           n1=n1, n2=n2, n3=n3,
                           home=home,
                           md=db.meta,
                           open_ase_gui=open_ase_gui)
def choose_adsorbate(adsorbate_database):
    '''
    Chooses an adsorbate from our database at random.

    Args:
        adsorbate_database  A string pointing to the ASE *.db object that
                            contains the adsorbates you want to consider.
    Returns:
        atoms         `ase.Atoms` object of the adsorbate
        smiles        SMILES-formatted representation of the adsorbate
        bond_indices  list of integers indicating the indices of the atoms
                      in the adsorbate that are meant to be bonded to the
                      surface
    '''
    db = ase.db.connect(adsorbate_database)
    ads_idx = random.choice(list(range(db.count())))
    row = db.get(ads_idx + 1)  # ase.db's don't 0-index

    atoms = row.toatoms()
    data = row.data
    smiles = data['SMILE']
    bond_indices = data['bond_idx']
    return atoms, smiles, bond_indices
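# Usage sketch for choose_adsorbate() (the database path is hypothetical):
# the returned bond indices identify which adsorbate atoms are meant to bind
# to the surface.
adsorbate, smiles, bond_indices = choose_adsorbate('adsorbates.db')
print('Chose {} binding through atom indices {}'.format(smiles, bond_indices))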
# thick.append(get_thick(mol))

alpha_x = numpy.array(alpha_x)
alpha_z = numpy.array(alpha_z)
eps_x_3D = numpy.array(eps_x_3D)
eps_z_3D = numpy.array(eps_z_3D)
Eg_HSE = numpy.array(Eg_HSE)
# thick = numpy.array(thick)

eps_x_gpaw = []
eps_z_gpaw = []
alpha_z_gpaw = []
Eg_gpaw = []
L_gpaw = []

for db_id in range(1, db.count() + 1):  # db index starts with 1
    mol = db.get(db_id)
    # if any(hasattr(mol, key) is False for key in ["alphax", "alphay",
    #                                               "alphaz", "bulk_L",
    #                                               "bulk_eps_x", "bulk_eps_y",
    #                                               "bulk_eps_z"]):
    #     continue
    # if mol.bulk_calculated is False:
    #     continue
    try:
        ax = (mol.alphax + mol.alphay) / 2
        az = mol.alphaz
        L, ex, ez, e = get_bulk(None, None, db_id, method="gpaw")
        ex_simu = 1 + 4 * pi * ax / L
        ez_simu = 1 / (1 - 4 * pi * az / L)
        # ez_simu = 4 * pi * az / L
        eps_x_gpaw.append((ex, ex_simu))
        eps_z_gpaw.append((ez, ez_simu))
    except Exception:
        # The snippet is truncated here in the source; skipping rows that
        # lack bulk data (or where get_bulk returns None) is an assumed
        # completion.
        continue
def main(argv):
    relaxCell = True
    system = "AlMg"
    runID = int(argv[0])
    print("Running job: %d" % runID)
    db_paths = [
        "/home/ntnu/davidkl/GPAWTutorial/CE/ce_hydrostatic.db",
        "ce_hydrostatic.db",
        "/home/davidkl/GPAWTutorial/CE/ce_hydrostatic.db"
    ]
    for path in db_paths:
        if os.path.isfile(path):
            db_name = path
            break
    # db_name = "/home/ntnu/davidkl/Documents/GPAWTutorials/ceTest.db"
    db = ase.db.connect(db_name)

    con = sq.connect(db_name)
    cur = con.cursor()
    cur.execute("SELECT value FROM text_key_values WHERE id=? AND key='name'",
                (runID,))
    name = cur.fetchone()[0]
    con.close()

    new_run = not db.get(id=runID).key_value_pairs["started"]

    # Update the database
    db.update(runID, started=True, converged=False)
    atoms = db.get_atoms(id=runID)

    if system == "AlMg" and not new_run:
        atoms = change_cell_composition_AlMg(atoms)

    convergence = {"density": 1E-4, "eigenstates": 4E-8}
    calc = gp.GPAW(mode=gp.PW(500), xc="PBE", kpts=(4, 4, 4), nbands="120%",
                   convergence=convergence)
    atoms.set_calculator(calc)

    logfile = "ceTest%d.log" % runID
    traj = "ceTest%d.traj" % runID
    trajObj = Trajectory(traj, 'w', atoms)
    storeBest = SaveToDB(db_name, runID, name)

    try:
        precon = Exp(mu=1.0, mu_c=1.0)
        if relaxCell:
            uf = UnitCellFilter(atoms, hydrostatic_strain=True)
            relaxer = PreconLBFGS(uf, logfile=logfile, use_armijo=True,
                                  precon=precon)
        else:
            # relaxer = PreconFIRE(atoms, logfile=logfile, precon=precon)
            relaxer = SciPyFminCG(atoms, logfile=logfile)
        relaxer.attach(trajObj)
        relaxer.attach(storeBest, interval=1, atoms=atoms)

        if relaxCell:
            relaxer.run(fmax=0.025, smax=0.003)
        else:
            relaxer.run(fmax=0.025)

        energy = atoms.get_potential_energy()
        db.update(storeBest.runID, converged=True)
        print("Energy: %.2E eV/atom" % (energy / len(atoms)))
        print("Preconditioner parameters")
        print("Mu:", precon.mu)
        print("Mu_c:", precon.mu_c)
    except Exception as exc:
        print(exc)
def main(argv): relax_mode = "cell" # both, cell, positions system = "AlMg" runID = int(argv[0]) print("Running job: %d" % (runID)) db_paths = [ "/home/ntnu/davidkl/GPAWTutorial/CE/almg_217.db", "almg_217.db", "/home/davidkl/GPAWTutorial/CE/almg_217.db" ] for path in db_paths: if (os.path.isfile(path)): db_name = path break #db_name = "test_db.db" db = ase.db.connect(db_name) con = sq.connect(db_name) cur = con.cursor() cur.execute("SELECT value FROM text_key_values WHERE id=? AND key='name'", (runID, )) name = cur.fetchone()[0] con.close() new_run = not db.get(id=runID).key_value_pairs["started"] # Update the databse db.update(runID, started=True, converged=False) atoms = db.get_atoms(id=runID) calc = gp.GPAW(mode=gp.PW(500), xc="PBE", kpts=(4, 4, 4), nbands="120%") #calc = gp.GPAW( mode=gp.PW(500), xc="PBE", kpts=(4,4,4), nbands=-10 ) atoms.set_calculator(calc) logfile = "almg_bcc%d.log" % (runID) traj = "almg_bcc%d.traj" % (runID) trajObj = Trajectory(traj, 'w', atoms) storeBest = SaveToDB(db_name, runID, name, mode=relax_mode) volume = atoms.get_volume() try: precon = Exp(mu=1.0, mu_c=1.0) fmax = 0.025 smax = 0.003 if (relax_mode == "both"): relaxer = PreconLBFGS(atoms, logfile=logfile, use_armijo=True, precon=precon, variable_cell=True) elif (relax_mode == "positions"): relaxer = SciPyFminCG(atoms, logfile=logfile) #relaxer = BFGS( atoms, logfile=logfile ) elif (relax_mode == "cell"): str_f = StrainFilter(atoms, mask=[1, 1, 1, 0, 0, 0]) relaxer = BFGS(str_f, logfile=logfile) fmax = smax * volume relaxer.attach(trajObj) relaxer.attach(storeBest, interval=1, atoms=atoms) if (relax_mode == "both"): relaxer.run(fmax=fmax, smax=smax) else: relaxer.run(fmax=fmax) energy = atoms.get_potential_energy() if (relax_mode == "positions"): db.update(storeBest.runID, converged_force=True) elif (relax_mode == "cell"): db.update(storeBest.runID, converged_stress=True) else: db.update(storeBest.runID, converged_stress=True, converged_force=True) row = db.get(id=storeBest.runID) conv_force = row.get("converged_force", default=0) conv_stress = row.get("converged_stress", default=0) if ((conv_force == 1) and (conv_stress == 1)): db.update(storeBest.runID, converged=True) except Exception as exc: print(exc)
def main(argv): relax_mode = "both" # both, cell, positions system = "AlMg" runID = int(argv[0]) nkpt = int(argv[1]) single_point = False if (len(argv) >= 3): single_point = (int(argv[2]) == 1) print("Running job: %d" % (runID)) db_paths = [ "/home/ntnu/davidkl/GPAWTutorial/CE/almg_fcc_vac.db", "almg_fcc_vac.db", "/home/davidkl/GPAWTutorial/CE/almg_fcc_vac.db" ] for path in db_paths: if (os.path.isfile(path)): db_name = path break #db_name = "almgsi_test_db.db" db = ase.db.connect(db_name) name = db.get(id=runID).key_value_pairs["name"] new_run = not db.get(id=runID).key_value_pairs["started"] # Update the databse db.update(runID, started=True, converged=False) db.update(runID, nkpt=nkpt) atoms = db.get_atoms(id=runID) atoms = delete_vacancies(atoms) if (len(atoms) == 1): nbands = -10 else: nbands = "120%" kpts = (nkpt, nkpt, nkpt) try: restart_name = SaveRestartFiles.restart_name(name) atoms, calc = gp.restart(restart_name) except: calc = gp.GPAW(mode=gp.PW(500), xc="PBE", kpts=kpts, nbands=nbands) atoms.set_calculator(calc) if (single_point): calc = gp.GPAW(mode=gp.PW(500), xc="PBE", kpts=kpts, nbands=nbands) atoms.set_calculator(calc) logfile = "almg_fcc_vac{}.log".format(name) traj = "almg_bcc{}.traj".format(name) db.update(runID, trajfile=traj) trajObj = Trajectory(traj, 'w', atoms) #storeBest = SaveToDB(db_name,runID,name,mode=relax_mode) save_calc = SaveRestartFiles(calc, name) update_db_info = UpdateDBInfo(db_name, runID, atoms) volume = atoms.get_volume() try: precon = Exp(mu=1.0, mu_c=1.0) fmax = 0.025 smax = 0.003 if (relax_mode == "both"): relaxer = PreconLBFGS(atoms, logfile=logfile, use_armijo=True, variable_cell=True) elif (relax_mode == "positions"): #relaxer = SciPyFminCG( atoms, logfile=logfile ) relaxer = BFGS(atoms, logfile=logfile) elif (relax_mode == "cell"): str_f = StrainFilter(atoms, mask=[1, 1, 1, 0, 0, 0]) relaxer = BFGS(str_f, logfile=logfile) fmax = smax * volume relaxer.attach(trajObj) #relaxer.attach( storeBest, interval=1, atoms=atoms ) relaxer.attach(save_calc, interval=1) relaxer.attach(update_db_info, interval=1) if (not single_point): if (relax_mode == "both"): relaxer.run(fmax=fmax, smax=smax) else: relaxer.run(fmax=fmax) energy = atoms.get_potential_energy() orig_atoms = db.get_atoms(runID) single_p_calc = SinglePointCalculator(orig_atoms, energy=energy) orig_atoms.set_calculator(single_p_calc) kvp = db.get(name=name).key_value_pairs del db[runID] newID = db.write(orig_atoms, key_value_pairs=kvp) if (relax_mode == "positions"): db.update(newID, converged_force=True) elif (relax_mode == "cell"): db.update(newID, converged_stress=True) else: db.update(newID, converged_stress=True, converged_force=True) db.update(newID, single_point=single_point) db.update(newID, restart_file=SaveRestartFiles.restart_name(name)) row = db.get(id=newID) conv_force = row.get("converged_force", default=0) conv_stress = row.get("converged_stress", default=0) if ((conv_force == 1) and (conv_stress == 1) and (nkpt == 4)): db.update(newID, converged=True) except Exception as exc: print(exc)
# creates: H-MoS2_band_structure.png
from math import floor, ceil

import matplotlib.pyplot as plt

import ase.db

name = 'MoS2'
phase = 'H'
txtname = phase + '-' + name.replace('2', '$_2$')

# Connect to database
db = ase.db.connect('c2dm.db')

# Get the db row
row = db.get(name=name, phase=phase, xc='LDA')

ef = row.data['efermi']            # Fermi level
x_k = row.data['xbs_k']            # LDA band structure coordinates
e_kn = row.data['ebs_kn']          # LDA band structure energies
xqp_k = row.data['xqpbs_k']        # GW band structure coordinates
eqp_kn = row.data['qpbs_kn']       # GW band structure energies
x_K = row.data['xbs_K']            # Coordinates of high symmetry points
labels_K = row.data['bslabels_K']  # Names of high symmetry points

# Use the Gamma symbol instead of 'Gamma'
for K, label in enumerate(labels_K):
    if label == 'Gamma':
        labels_K[K] = r'$\Gamma$'

ppi = 100
figw = 600  # Width in pixels
def get_atomsrow_by_id(unique_id):
    db = get_ase_db()
    row = db.get('unique_id={}'.format(unique_id))
    return row
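# Equivalent keyword form (a sketch, assuming the same get_ase_db() handle;
# the helper name is hypothetical): ase.db accepts selection criteria as
# keyword arguments instead of a query string.
def get_atomsrow_by_id_kw(unique_id):
    db = get_ase_db()
    return db.get(unique_id=unique_id)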
def summary(id):
    s = Summary(db.get(id), SUBSCRIPT)
    return render_template('summary.html', s=s, home=home)
from time import time

import ase.db
from ase import Atoms

for name in ['x.json', 'x.db']:
    print(name)
    db = ase.db.connect(name, append=False)
    db.write(Atoms(), x=1, data={'a': 1})
    db.update(1, y=2, data={'b': 2})
    db.update(1, delete_keys=['x'])
    row = db.get(1)
    print(row.y, row.data)
    assert 'x' not in row
    db.update(1, atoms=Atoms('H'))
    row = db.get(1)
    print(row.y, row.data, row.numbers)
    assert (row.numbers == [1]).all()
    assert sorted(row.data) == ['a', 'b']
    db.write(Atoms(), id=1)
    row = db.get(1)
    assert len(row.data) == 0
    assert len(row.key_value_pairs) == 0
    assert len(row.numbers) == 0

    # N = 100
    N = 5
    for i in range(N):
        db.write(Atoms('H10'), i=i, data={'c': 3})

    t0 = time()
    # The script is truncated here in the source; the timing loop below is
    # restored from the function version of this test earlier in this
    # collection.
    for id in range(2, 2 + N):
        db.update(id, z=3)
    print(time() - t0)
def transfer(self, filename_sqlite, start_id=1, write_ase=True,
             write_publication=True, write_reaction=True,
             write_reaction_system=True, block_size=1000,
             start_block=0):
    self.stdout.write('Starting transfer\n')
    con = self.connection or self._connect()
    self._initialize(con)
    self.stdout.write('Finished initialization\n')
    cur = con.cursor()
    self.stdout.write('Got a cursor\n')
    set_schema = 'SET search_path = {0};'.format(self.schema)
    cur.execute(set_schema)

    import os
    self.stdout.write('Imported os\n')
    import ase.db
    self.stdout.write('Imported ase.db\n')

    self.stdout.write('Building server_name\n')
    server_name = "postgres://{0}:{1}@{2}:5432/catalysishub".format(
        self.user, self.password, self.server)
    self.stdout.write('Connecting to {server_name}\n'.format(**locals()))

    nkvp = 0
    nrows = 0
    if write_ase:
        db = ase.db.connect(filename_sqlite)
        n_structures = db.count()
        n_blocks = int(n_structures / block_size) + 1
        for block_id in range(start_block, n_blocks):
            b0 = block_id * block_size + 1
            b1 = (block_id + 1) * block_size + 1
            self.stdout.write(str(block_id) + ' from ' + str(b0) +
                              ' to ' + str(b1) + '\n')
            if block_id + 1 == n_blocks:
                b1 = n_structures + 1
            # rows = [db._get_row(i) for i in range(b0, b1)]
            # db2 = ase.db.connect(server_name, type='postgresql')
            # for lala in [0]:
            with ase.db.connect(server_name, type='postgresql') as db2:
                for i in range(b0, b1):
                    self.stdout.write(' .' + str(i))
                    self.stdout.flush()
                    row = db.get(i)
                    kvp = row.get('key_value_pairs', {})
                    nkvp -= len(kvp)
                    # kvp.update(add_key_value_pairs)
                    nkvp += len(kvp)
                    db2.write(row, data=row.get('data'), **kvp)
                    nrows += 1
            self.stdout.write('\n')
            self.stdout.write('Finished Block {0}:\n'.format(block_id))
        self.stdout.write(
            ' Completed transfer of {0} atomic structures.\n'.format(nrows))

    from cathub.cathubsqlite import CathubSQLite
    db = CathubSQLite(filename_sqlite)
    con_lite = db._connect()
    cur_lite = con_lite.cursor()

    # Write publications
    Npub = 0
    Npubstruc = 0
    if write_publication:
        try:
            npub = db.get_last_pub_id(cur_lite)
        except Exception:
            npub = 1
        for id_lite in range(1, npub + 1):
            Npub += 1
            row = db.read(id=id_lite, table='publication')
            if len(row) == 0:
                continue
            values = row[0]
            pid, pub_id = self.write_publication(values)

        # Publication-structure connections
        cur_lite.execute("""SELECT * from publication_system;""")
        rows = cur_lite.fetchall()
        for row in rows:
            Npubstruc += 1
            values = row[:]
            key_str, value_str = get_key_value_str(
                values, table='publication_system')
            set_schema = 'SET search_path = {0};'.format(self.schema)
            cur.execute(set_schema)
            print("[SET SCHEMA] {set_schema}".format(**locals()))
            insert_command = ('INSERT INTO publication_system ({0}) '
                              'VALUES ({1}) ON CONFLICT DO NOTHING;'
                              .format(key_str, value_str))
            cur.execute(insert_command)
            # self.write(values, table='publication_system')
        con.commit()

    Ncat = 0
    Ncatstruc = 0
    if write_reaction:
        n = db.get_last_id(cur_lite)
        select_ase = """SELECT * from reaction_system where id={};"""
        for id_lite in range(start_id, n + 1):
            row = db.read(id_lite)
            if len(row) == 0:
                continue
            values = row[0]
            id = self.check(values[13], values[1], values[6], values[7],
                            values[8], strict=True)
            update_rs = False
            if id is not None:
                id = self.update(id, values)
                self.stdout.write(
                    'Updated reaction db with row id = {}\n'.format(id))
                update_rs = True
            else:
                Ncat += 1
                id = self.write(values)
                self.stdout.write(
                    'Written to reaction db row id = {0}\n'.format(id))

            cur_lite.execute(select_ase.format(id_lite))
            rows = cur_lite.fetchall()
            if write_reaction_system:
                if update_rs:
                    cur.execute('Delete from reaction_system where '
                                'reaction_id={0}'.format(id))
                for row in rows:
                    Ncatstruc += 1
                    values = list(row)
                    if len(values) == 3:
                        values.insert(1, None)
                    values[3] = id
                    key_str, value_str = get_key_value_str(
                        values, table='reaction_system')
                    set_schema = 'SET search_path = {0};'.format(self.schema)
                    cur.execute(set_schema)
                    print("[SET SCHEMA] {set_schema}".format(**locals()))
                    insert_command = ('INSERT INTO reaction_system ({0}) '
                                      'VALUES ({1}) ON CONFLICT DO NOTHING;'
                                      .format(key_str, value_str))
                    print("[INSERT COMMAND] {insert_command}".format(
                        **locals()))
                    cur.execute(insert_command)
            con.commit()  # Commit reaction_system for each row

    for statement in tsvector_update:
        cur.execute(statement)

    if self.connection is None:
        con.commit()
        con.close()

    self.stdout.write('Inserted into:\n')
    self.stdout.write('  systems: {0}\n'.format(nrows))
    self.stdout.write('  publication: {0}\n'.format(Npub))
    self.stdout.write('  publication_system: {0}\n'.format(Npubstruc))
    self.stdout.write('  reaction: {0}\n'.format(Ncat))
    self.stdout.write('  reaction_system: {0}\n'.format(Ncatstruc))
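# Hypothetical usage sketch for transfer(): the method belongs to a class
# that provides self.user, self.password, self.server, self.schema and the
# write/update/check helpers (e.g. cathub's PostgreSQL backend). The names
# below are assumptions, not part of the code above.
# backend = CathubPostgreSQL(user='catvisitor', password=..., server='localhost')
# backend.transfer('reactions.db', block_size=1000)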