def calc_E_vs_V(bulk, dV=0.025, n_steps=(-10,10), tol=1e-2, method='lbfgs'): # hack tol to deal with Te C2/m
    """Compute an energy-vs-volume curve around the relaxed bulk volume.

    Starting from `bulk`, the cell is scaled isotropically in steps of
    `dV` (given as a fraction of the initial volume V0), from n_steps[0]
    (compression, negative) to n_steps[1] (expansion, positive).  At each
    volume the structure is relaxed at constant volume.

    Parameters:
        bulk: ASE Atoms object, assumed already relaxed at V0.
        dV: volume step as a fraction of the initial volume.
        n_steps: (negative, positive) numbers of volume steps.
        tol: relaxation force tolerance (loosened by default as per the
             inline hack comment).
        method: minimizer name passed to relax_config.

    Returns:
        list of (volume/atom, energy/atom, stress-6-vector) tuples,
        ordered from smallest to largest volume.
    """
    import model
    V0 = bulk.get_volume()
    # convert fractional step to an absolute volume step
    dV *= V0
    E_vs_V=[]
    scaled_bulk = bulk.copy()
    # compression branch: i = 0, -1, ..., n_steps[0]; results prepended so
    # the final list is ordered by increasing volume
    for i in range(0, n_steps[0]-1, -1):
        V_cur = scaled_bulk.get_volume()
        # isotropic rescale of the *current* (possibly relaxed) cell to target volume V0 + i*dV
        scaled_bulk.set_cell(scaled_bulk.get_cell()*((V0+i*dV)/V_cur)**(1.0/3.0), scale_atoms=True)
        ase.io.write(sys.stdout, scaled_bulk, format='extxyz')
        print("trying to relax i",i)
        try:
            if hasattr(model, "fix_cell_dependence"):
                model.fix_cell_dependence(scaled_bulk)
            ase.io.write(run_root+"-E_vs_V_%03d-unrelaxed.xyz" % i, scaled_bulk, format='extxyz')
            # constant-volume relaxation of positions and cell shape
            scaled_bulk = relax_config(scaled_bulk, relax_pos=True, relax_cell=True, tol=tol,
                                       max_steps=200, save_traj=True, constant_volume=True,
                                       method=method, refine_symmetry_tol=1.0e-1, keep_symmetry=True,
                                       config_label="E_vs_V_%03d" % i, from_base_model=True,
                                       save_config=True)
        except Exception as e:
            print("WARNING: failed config in calc_E_vs_V", str(e))
            # NOTE(review): the expansion loop below `break`s on failure, but this
            # branch aborts the whole run; the trailing marker comment suggests a
            # `break` may have been intended here — confirm before changing.
            sys.exit(1) #### NB break
        ase.io.write(sys.stdout, scaled_bulk, format='extxyz')
        E_vs_V.insert(0, (scaled_bulk.get_volume()/len(scaled_bulk),
                          scaled_bulk.get_potential_energy()/len(bulk),
                          list(scaled_bulk.get_stress())) )
    scaled_bulk = bulk.copy()
    # expansion branch: i = 1 ... n_steps[1]; results appended in order
    for i in range(1,n_steps[1]+1):
        V_cur = scaled_bulk.get_volume()
        scaled_bulk.set_cell(scaled_bulk.get_cell()*((V0+i*dV)/V_cur)**(1.0/3.0), scale_atoms=True)
        ase.io.write(sys.stdout, scaled_bulk, format='extxyz')
        print("trying to relax i",i)
        try:
            if hasattr(model, "fix_cell_dependence"):
                model.fix_cell_dependence(scaled_bulk)
            ase.io.write(run_root+"-E_vs_V_%02d-unrelaxed.xyz" % i, scaled_bulk, format='extxyz')
            scaled_bulk = relax_config(scaled_bulk, relax_pos=True, relax_cell=True, tol=tol,
                                       max_steps=200, save_traj=True, constant_volume=True,
                                       method=method, refine_symmetry_tol=1.0e-1, keep_symmetry=True,
                                       config_label="E_vs_V_%02d" % i, from_base_model=True,
                                       save_config=True)
        except Exception as e:
            # on failure stop expanding but keep the points gathered so far
            print("failed", str(e))
            break
        ase.io.write(sys.stdout, scaled_bulk, format='extxyz')
        E_vs_V.append( (scaled_bulk.get_volume()/len(scaled_bulk),
                        scaled_bulk.get_potential_energy()/len(bulk),
                        list(scaled_bulk.get_stress())) )
    if hasattr(model, "fix_cell_dependence"):
        # reset any cell-specific model setup and reattach the generic calculator
        model.fix_cell_dependence()
        scaled_bulk.calc = model.calculator
    return E_vs_V
def dumbbell_interstitial_energy(bulk):
    """Compute the formation energy of a Si dumbbell interstitial.

    A Si atom is inserted near an existing lattice atom (index 149 is
    displaced to form the dumbbell), positions are relaxed at fixed cell,
    and the formation energy is E_defect - E_bulk * (N+1)/N.
    Relies on module globals `tol`, `model` and `bulk_energy`.
    """
    n_atoms = bulk.get_number_of_atoms()
    defected = bulk.copy()
    defected.set_calculator(bulk.get_calculator())
    # add an atom to introduce an interstitial
    defected.append(Atom('Si', (-0.5, 0.5, 5.44 / 2.0 + 1.0)))
    # displace atom 149 so the pair forms a dumbbell around the lattice site
    pos = defected.get_positions()
    pos[149] += [-1.0, 1.0, -0.5]
    defected.set_positions(pos)
    ase.io.write(sys.stdout, defected, format='extxyz')

    # relax atom positions, holding cell fixed
    defected = relax_config(defected, relax_pos=True, relax_cell=False, tol=tol,
                            traj_file="model-" + model.name + "-test-interstitial-dumbbell.opt.xyz")
    #defected = relax_atoms(defected, tol=tol, traj_file="model-"+model.name+"-test-interstitial-dumbbell.opt.xyz")
    ase.io.write(sys.stdout, defected, format='extxyz')

    # formation energy: defect cell minus bulk energy scaled to N+1 atoms
    e_defect = defected.get_potential_energy()
    print('bulk cell energy', bulk_energy)
    print('interstitial cell energy', e_defect)
    e_form = e_defect - bulk_energy * ((n_atoms + 1.0) / n_atoms)
    print('interstitial formation energy', e_form)
    return e_form
def do_symmetric_surface(test_dir, in_plane_supercell=(1, 1), pert_pos=0.0):
    """Compute the formation energy of a symmetric surface slab.

    Reads `surface.xyz` from `test_dir`, replicates it in plane, rescales
    to the relaxed bulk lattice, relaxes positions at fixed cell, and
    returns the surface energy per unit area (two surfaces per slab).

    Parameters:
        test_dir: directory containing surface.xyz.
        in_plane_supercell: pair of in-plane replication factors.
            (Default changed from a mutable list to a tuple; callers
            passing lists are unaffected.)
        pert_pos: if > 0, rattle positions by this amplitude before relaxing.

    Returns:
        dict with bulk_struct_test, Ef (energy per area), dmu (per-species
        stoichiometry imbalance, or None for single-species), filename.
    """
    # BUG FIX: was `assert len(supercell) == 2` — `supercell` is undefined
    # here and would raise NameError; the parameter is `in_plane_supercell`.
    assert len(in_plane_supercell) == 2

    surf = ase.io.read(test_dir+"/surface.xyz", format="extxyz")
    surf *= list(in_plane_supercell) + [1]
    if pert_pos > 0.0:
        surf.rattle(pert_pos)

    bulk = rescale_to_relaxed_bulk(surf)
    bulk_Zs = bulk.get_atomic_numbers()
    evaluate(bulk)
    bulk_cell = bulk.get_cell()
    bulk_E = bulk.get_potential_energy()
    # best-effort reset of model state; not all models provide it
    try:
        model.reset_config()
    except AttributeError:
        pass

    print("got relaxed bulk cell ", bulk_cell)
    print("got rescaled surf cell ", surf.get_cell())

    # relax surface system (positions only; cell fixed to keep the vacuum gap)
    tol = 1.0e-2
    surf = relax_config(surf, relax_pos=True, relax_cell=False, tol=tol, save_traj=True,
                        config_label="surface", from_base_model=True, save_config=True,
                        try_restart=True)
    ase.io.write(os.path.join("..",run_root+"-relaxed.xyz"), surf, format='extxyz')

    # check stoichiometry and number of bulk cell energies to subtract
    surf_Zs = surf.get_atomic_numbers()
    Z0 = bulk_Zs[0]
    n_bulk_cells = float(sum(surf_Zs == Z0))/float(sum(bulk_Zs == Z0))
    if len(set(bulk_Zs)) == 1:
        n_dmu = None
    else:
        n_dmu = {}
        for Z in set(bulk_Zs):
            # make sure types are JSON compatible
            n_dmu[int(Z)] = float(n_bulk_cells*sum(bulk_Zs == Z) - sum(surf_Zs == Z))

    # surface energy per unit area; factor 2 for the two slab faces
    area = np.linalg.norm(np.cross(surf.get_cell()[0,:],surf.get_cell()[1,:]))
    print("got surface cell potential energy", surf.get_potential_energy())
    print("got bulk potential energy",bulk_E*n_bulk_cells)
    print("got area",area)
    return { "bulk_struct_test" : surf.info["bulk_struct_test"],
             "Ef" : (surf.get_potential_energy() - bulk_E*n_bulk_cells)/(2.0*area),
             "dmu" : n_dmu,
             'filename' : run_root+"-relaxed.xyz" }
def do_symmetric_surface(bulk, surf, calculator):
    """Compute the formation energy of a symmetric surface slab.

    Variant taking pre-built bulk and surface Atoms plus a calculator
    (instead of reading from a test directory).  Relaxes the surface
    positions at fixed cell and returns the energy per unit area.

    Parameters:
        bulk: relaxed bulk primitive/reference cell.
        surf: surface slab, assumed already rescaled to the bulk lattice.
        calculator: ASE calculator used for both systems.

    Returns:
        dict with Ef (surface energy per area, two faces per slab) and
        dmu (per-species stoichiometry imbalance, None if single-species).
    """
    #surf = ase.io.read(test_dir+"/surface.xyz", format="extxyz")
    #bulk = rescale_to_relaxed_bulk(surf)
    bulk_Zs = bulk.get_atomic_numbers()
    bulk.set_calculator(calculator)
    #evaluate(bulk)
    bulk_cell = bulk.get_cell()
    bulk_E = bulk.get_potential_energy()

    print("got relaxed bulk cell ", bulk_cell)
    print("got rescaled surf cell ", surf.get_cell())

    # relax surface system (positions only; cell fixed to keep the vacuum gap)
    tol = 1.0e-2
    surf = relax_config(surf, calculator, relax_pos=True, relax_cell=False, tol=tol,
                        traj_file=None, config_label="surface", from_base_model=True,
                        save_config=True)
    #ase.io.write(os.path.join("..","relaxed.xyz"), surf, format='extxyz')

    # check stoichiometry and number of bulk cell energies to subtract
    surf_Zs = surf.get_atomic_numbers()
    Z0 = bulk_Zs[0]
    n_bulk_cells = float(sum(surf_Zs == Z0)) / float(sum(bulk_Zs == Z0))
    if len(set(bulk_Zs)) == 1:
        n_dmu = None
    else:
        n_dmu = {}
        for Z in set(bulk_Zs):
            # cast to plain int/float so the values are JSON compatible,
            # consistent with the test_dir-based variant of this function
            n_dmu[int(Z)] = float(n_bulk_cells * sum(bulk_Zs == Z) - sum(surf_Zs == Z))

    # surface energy per unit area; factor 2 for the two slab faces
    area = np.linalg.norm(
        np.cross(surf.get_cell()[0, :], surf.get_cell()[1, :]))
    print("got surface cell potential energy", surf.get_potential_energy())
    print("got bulk potential energy", bulk_E * n_bulk_cells)
    print("got area", area)
    # FIX: n_dmu was computed but never returned; include it for consistency
    # with the other do_symmetric_surface variant (backward-compatible key add)
    return {
        "Ef": (surf.get_potential_energy() - bulk_E * n_bulk_cells) / (2.0 * area),
        "dmu": n_dmu
    }
def do_one_vacancy(bulk_supercell, bulk_supercell_pe, vac_i, relax_radial=0.0,
                   relax_symm_break=0.0, nn_cutoff=0.0, tol=1.0e-2):
    """Compute unrelaxed and relaxed formation energies of one vacancy.

    Parameters:
        bulk_supercell: reference bulk supercell (with calculator context
            provided by module-level `evaluate`/`relax_config`).
        bulk_supercell_pe: potential energy of the bulk supercell.
        vac_i: index of the atom to remove.
        relax_radial: radial displacement factor applied to neighbors to
            seed the relaxation.
        relax_symm_break: increment added to the radial factor per neighbor,
            to break symmetry.
        nn_cutoff: neighbor cutoff used when perturbing neighbors.
        tol: relaxation force tolerance.

    Returns:
        (label, unrelaxed_filename, Ef0, relaxed_filename, Ef, vac_Z, vac_pos)
        where Ef0/Ef are unrelaxed/relaxed formation energies and vac_pos is
        the Cartesian position of the removed atom.
    """
    # do unrelaxed (without perturbations)
    vac = bulk_supercell.copy()
    del vac[vac_i]
    label = "ind_%d_Z_%d" % (vac_i, bulk_supercell.get_atomic_numbers()[vac_i])
    unrelaxed_filename=run_root+"-%s-unrelaxed.xyz" % label
    ase.io.write(os.path.join("..",unrelaxed_filename), vac, format='extxyz')
    evaluate(vac)
    unrelaxed_vac_pe = vac.get_potential_energy()

    # recreate with perturbations for relaxation
    vac = bulk_supercell.copy()
    if relax_radial != 0.0 or relax_symm_break != 0.0:
        nl = NeighborList([nn_cutoff/2.0]*len(bulk_supercell), self_interaction=False, bothways=True)
        nl.update(bulk_supercell)
        indices, offsets = nl.get_neighbors(vac_i)
        offset_factor = relax_radial
        for i, offset in zip(indices, offsets):
            # push each neighbor radially away from/toward the vacancy site,
            # with a per-neighbor increment to break symmetry
            ri = vac.positions[vac_i] - (vac.positions[i] + np.dot(offset, vac.get_cell()))
            vac.positions[i] += offset_factor*ri
            offset_factor += relax_symm_break

    # BUG FIX: the vacancy-site position must be recorded BEFORE the atom is
    # deleted; previously vac.positions[vac_i] was read after `del`, which
    # returns the position of a *different* atom (the one shifted into vac_i).
    vac_pos = vac.positions[vac_i].copy()
    del vac[vac_i]

    vac = relax_config(vac, relax_pos=True, relax_cell=False, tol=tol, save_traj=True,
                       config_label=label, from_base_model=True, save_config=True,
                       try_restart=True)
    relaxed_filename=run_root+"-%s-relaxed.xyz" % label
    ase.io.write(os.path.join("..",relaxed_filename), vac, format='extxyz')

    # already has calculator from relax_configs
    vac_pe = vac.get_potential_energy()
    # for a single-species bulk, scale the bulk energy to (N-1)/N atoms;
    # for multi-species the stoichiometry imbalance is handled via dmu upstream
    if len(set(bulk_supercell.get_atomic_numbers())) == 1:
        Ebulk = float(len(vac))/float(len(bulk_supercell)) * bulk_supercell_pe
    else:
        Ebulk = bulk_supercell_pe
    Ef0 = unrelaxed_vac_pe - Ebulk
    Ef = vac_pe - Ebulk
    print("got vacancy",label,"cell energy",vac_pe,"n_atoms",len(vac))
    print("got bulk energy", Ebulk," (scaled to (N-1)/N if single component)")
    return ( label, unrelaxed_filename, Ef0, relaxed_filename, Ef,
             int(bulk_supercell.get_atomic_numbers()[vac_i]), vac_pos )
def do_one_antisite_pair(bulk_supercell, bulk_supercell_pe, i1, i2, tol=1.0e-2):
    """Compute unrelaxed and relaxed energies of one antisite (swap) pair.

    Swaps the species of atoms i1 and i2 (which must differ), evaluates the
    unrelaxed defect energy, relaxes positions at fixed cell, and returns
    formation energies relative to the bulk supercell energy.

    Returns:
        (label, unrelaxed_filename, Ef0, relaxed_filename, Ef, Z1, Z2)
    """
    assert bulk_supercell.numbers[i1] != bulk_supercell.numbers[i2]

    # build the defect by exchanging the two atomic numbers (no perturbation)
    antisite = bulk_supercell.copy()
    Z1, Z2 = antisite.numbers[[i1, i2]]
    antisite.numbers[i1], antisite.numbers[i2] = Z2, Z1

    label = "ind_%d_Z_%d_ind_%d_Z_%d" % (i1, Z1, i2, Z2)
    unrelaxed_filename = run_root + "-%s-unrelaxed.xyz" % label
    ase.io.write(os.path.join("..", unrelaxed_filename), antisite, format='extxyz')

    # unrelaxed defect energy
    evaluate(antisite)
    e_unrelaxed = antisite.get_potential_energy()
    Ef0 = e_unrelaxed - bulk_supercell_pe

    # relax positions only; cell stays fixed at the bulk lattice
    antisite = relax_config(antisite, relax_pos=True, relax_cell=False, tol=tol,
                            save_traj=True, config_label=label, from_base_model=True,
                            save_config=True, try_restart=True)
    relaxed_filename = run_root + "-%s-relaxed.xyz" % label
    ase.io.write(os.path.join("..", relaxed_filename), antisite, format='extxyz')

    # calculator is already attached by relax_config
    e_relaxed = antisite.get_potential_energy()
    Ef = e_relaxed - bulk_supercell_pe

    print("got antisite", label, "cell energy", e_relaxed, "n_atoms", len(antisite))
    print("got bulk energy", bulk_supercell_pe)
    return (label, unrelaxed_filename, Ef0, relaxed_filename, Ef, Z1, Z2)
def surface_energy(bulk, z_offset):
    """Compute the relaxed (110) surface formation energy per unit area.

    Shifts the cell contents by `z_offset` so the cut passes through the
    shuffle plane, opens a 10 A vacuum gap along z, seeds the relaxation
    with a small deterministic random perturbation, relaxes, and returns
    0.5*(E_open - E_bulk)/area.  Mutates `bulk` in place; relies on module
    globals `fmax` and `run_root`.
    """
    n_atoms = bulk.get_number_of_atoms()

    # shift so cut is through shuffle plane
    bulk.positions[:, 2] += z_offset
    bulk.wrap()

    # relax atom positions, holding cell fixed
    # vac = relax_atoms(vac, fmax=fmax)

    # reference energy of the intact (wrapped) cell
    ebulk = bulk.get_potential_energy()
    print('bulk cell energy', ebulk)

    # open a 10 A vacuum gap along z to create the two surfaces
    z_len = np.abs(bulk.cell[2, 2])
    bulk.cell[2, 2] *= (z_len + 10.0) / z_len

    # deterministic small perturbation to break symmetry before relaxing
    np.random.seed(75)
    bulk.positions += (np.random.rand((n_atoms * 3)) * 0.1).reshape([n_atoms, 3])

    bulk = relax_config(bulk, relax_pos=True, relax_cell=True, tol=fmax,
                        traj_file=run_root + "-surface-energy-110-relaxed.opt.xyz")
    eexp = bulk.get_potential_energy()
    ase.io.write(sys.stdout, bulk, format='extxyz')
    print('expanded cell energy', eexp)

    # two surfaces per slab, hence the factor 0.5
    in_plane_area = np.linalg.norm(np.cross(bulk.cell[0, :], bulk.cell[1, :]))
    e_form = 0.5 * (eexp - ebulk) / in_plane_area
    print('relaxed 110 surface formation energy', e_form)
    return e_form
def do_farthest_inequiv_pairs(test_dir, tol=1.0e-2):
    """Compute antisite-pair energies for all inequivalent, maximally
    separated unlike-species pairs in a bulk supercell.

    Reads bulk_supercell.xyz from `test_dir`, rescales it to the relaxed
    bulk lattice, optionally expands to an arbitrary supercell given in
    info['arb_supercell'], then uses spglib symmetry data to enumerate one
    representative (i1, i2) pair per inequivalent site combination, taking
    the i2 farthest from i1 (minimum-image distances).

    Returns:
        dict with bulk_struct_test, bulk_E_per_atom, and a "defects" dict
        keyed by label with Ef0/Ef, filenames, atom indices, and species.
    """
    print("doing do_farthest_antisite_pairs")
    bulk_supercell = ase.io.read(os.path.join(test_dir, "bulk_supercell.xyz"),
                                 format="extxyz")
    print("got bulk_supercell ", len(bulk_supercell))
    bulk = rescale_to_relaxed_bulk(bulk_supercell)
    # relax bulk supercell positions in case it's only approximate (as it must be for different models), but stick
    # to relaxed bulk's lattice constants as set by rescale_to_relaxed_bulk
    bulk_supercell = relax_config(bulk_supercell, relax_pos=True, relax_cell=False,
                                  tol=tol, save_traj=True, config_label="rescaled_bulk",
                                  from_base_model=True, save_config=True)
    ase.io.write(os.path.join("..", run_root + "-rescaled-bulk.xyz"),
                 bulk_supercell, format='extxyz')

    print("got bulk primitive cell ", bulk.get_cell())
    print("got rescaled bulk_supercell cell ", bulk_supercell.get_cell())

    # optionally expand to a larger, possibly non-diagonal supercell
    if 'arb_supercell' in bulk_supercell.info:
        print("making bulk supercell from",
              bulk_supercell.info['arb_supercell'].reshape((3, 3)))
        bulk_supersupercell = ase.build.make_supercell(
            bulk_supercell,
            bulk_supercell.info['arb_supercell'].reshape((3, 3)))
        print("got supersupercell with ", len(bulk_supersupercell),
              "atoms, cell\n", bulk_supersupercell.get_cell())
        bulk_supersupercell.info.update(bulk_supercell.info)
        bulk_supercell = bulk_supersupercell

    # group atoms into symmetry-equivalence classes (representative index, Z)
    sym_data = spglib.get_symmetry_dataset(bulk_supercell, symprec=0.01)
    equiv_at = set([
        tuple(iZ) for iZ in zip(sym_data["equivalent_atoms"],
                                bulk_supercell.numbers)
    ])
    # print("equiv_at", equiv_at)

    # one pair per inequivalent (site1, site2) combination with Z1 > Z2,
    # choosing the i2 instance farthest (minimum image) from i1
    antisite_list = []
    for i1, Z1 in equiv_at:
        for i2_proto, Z2 in equiv_at:
            if Z1 <= Z2:
                continue
            # print("check i", i1, i2_proto, "Z", Z1, Z2)
            i2s = np.where(sym_data["equivalent_atoms"] == i2_proto)[0]
            i2_dists = bulk_supercell.get_distances(i1, i2s, mic=True)
            farthest_ind = np.argmax(i2_dists)
            i2 = i2s[farthest_ind]
            antisite_list.append((i1, i2))
    # print("antisite_list", antisite_list)

    evaluate(bulk_supercell)
    bulk_supercell_pe = bulk_supercell.get_potential_energy()
    properties = {
        "bulk_struct_test": bulk_supercell.info["bulk_struct_test"],
        "bulk_E_per_atom": bulk_supercell_pe / len(bulk_supercell),
        "defects": {}
    }
    for i1, i2 in antisite_list:
        (label, unrelaxed_filename, Ef0, relaxed_filename, Ef, Z1,
         Z2) = do_one_antisite_pair(bulk_supercell, bulk_supercell_pe, i1, i2, tol)
        # cast indices/species to plain ints so properties are JSON compatible
        properties["defects"][label] = {
            'Ef0': Ef0,
            'Ef': Ef,
            'unrelaxed_filename': unrelaxed_filename,
            'relaxed_filename': relaxed_filename,
            'atom_inds': (int(i1), int(i2)),
            'Zs': (int(Z1), int(Z2))
        }
    print("returning properties", properties)
    return properties
# set up cell and bulk modulus:
# prefer the lattice constant / bulk modulus saved by a previous
# bulk_diamond test run; otherwise relax from scratch and fit elastic
# constants with matscipy.
try:
    with open("../model-{}-test-bulk_diamond-properties.json".format(
            model.name)) as f:
        j = json.load(f)
    a0 = j["diamond_a0"]
    bulk_at = bulk("Si", "diamond", a0)
    bulk_modulus = j["diamond_bulk_modulus"] / GPA
# FIX: was a bare `except:`, which silently swallowed every error
# (including NameError/KeyboardInterrupt).  Catch only the expected
# failures: missing/unreadable file (OSError), missing keys (KeyError),
# malformed JSON (json.JSONDecodeError is a ValueError subclass).
except (OSError, KeyError, ValueError):
    # fallback: relax a guess cell and fit cubic elastic constants
    bulk_at = bulk("Si", "diamond", 5.43)
    bulk_at.set_calculator(model.calculator)
    bulk_at = relax_config(bulk_at,
                           relax_pos=True,
                           relax_cell=True,
                           tol=1.0e-4,
                           traj_file=None)
    # conventional cubic lattice constant from the primitive cell edge
    a0 = bulk_at.get_cell_lengths_and_angles()[0] * np.sqrt(2.0)
    bulk_at = bulk("Si", "diamond", a0)

    from matscipy import elasticity
    from ase.optimize import BFGS
    bulk_at.set_calculator(model.calculator)
    opt = BFGS
    b = elasticity.fit_elastic_constants(bulk_at,
                                         symmetry='cubic',
                                         optimizer=opt)
    # elastic_moduli returns (E, nu, G, B, K); index 3 is the bulk modulus
    bulk_modulus = elasticity.elastic_moduli(b[0])[3]
a0 = 5.44 # initial guess at lattice constant, cell will be relaxed below tol = 1e-3 # maximum force following relaxtion [eV/A] N = 3 # number of unit cells in each direction if not hasattr(model, 'bulk_reference_216'): # set up the a bulk = Diamond(symbol='Si', latticeconstant=a0) # specify that we will use model.calculator to compute forces, energies and stresses bulk.set_calculator(model.calculator) # use one of the routines from utilities module to relax the initial # unit cell and atomic positions bulk = relax_config(bulk, relax_pos=True, relax_cell=True, tol=tol, traj_file=None) bulk *= (N, N, N) bulk_energy = bulk.get_potential_energy() else: bulk = model.bulk_reference_216 bulk_energy = bulk.get_potential_energy() def dumbbell_interstitial_energy(bulk): Nat = bulk.get_number_of_atoms() int_struct = bulk.copy() int_struct.set_calculator(bulk.get_calculator()) # add an atom to introduce an interstitial
def do_all_interstitials(test_dir, nn_cutoff=0.0, tol=1.0e-2):
    """Compute formation energies for all requested interstitial defects.

    Reads bulk_supercell.xyz from `test_dir`, rescales it to the relaxed
    bulk lattice, determines candidate interstitial positions from
    info["interstitials"] (a Cartesian 3-vector, "mean i j ...", or
    "inequivalent" void search), and runs do_one_interstitial for each
    requested species (info["Zs"]) at each position.

    Parameters:
        test_dir: directory containing bulk_supercell.xyz.
        nn_cutoff: neighbor cutoff passed to do_one_interstitial.
        tol: relaxation force tolerance.

    Returns:
        dict with bulk_struct_test, bulk_E_per_atom and a "defects" dict
        keyed by label.
    """
    print("doing do_all_interstitials")
    bulk_supercell = ase.io.read(os.path.join(test_dir, "bulk_supercell.xyz"),
                                 format="extxyz")
    print("got bulk_supercell ", len(bulk_supercell))
    bulk = rescale_to_relaxed_bulk(bulk_supercell)
    # relax bulk supercell positions in case it's only approximate (as it must be for different models), but stick
    # to relaxed bulk's lattice constants as set by rescale_to_relaxed_bulk
    bulk_supercell = relax_config(bulk_supercell, relax_pos=True, relax_cell=False,
                                  tol=tol, save_traj=True, config_label="relaxed_bulk",
                                  from_base_model=True, save_config=True)
    ase.io.write(os.path.join("..", run_root + "-rescaled-bulk.xyz"),
                 bulk_supercell, format='extxyz')

    print("got bulk primitive cell ", bulk.get_cell())
    print("got rescaled bulk_supercell cell ", bulk_supercell.get_cell())

    try:
        # Cartesian 3-vector
        interstitial_pos_l = [
            np.array([float(x) for x in bulk_supercell.info["interstitials"]])
        ]
        # BUG FIX: the check previously tested len(interstitial_pos_l) (the
        # wrapping list, always 1), so the Cartesian path ALWAYS raised and
        # fell through to string parsing, which then crashes on non-string
        # input.  Test the vector itself.
        if len(interstitial_pos_l[0]) != 3:
            raise ValueError("not a 3-vector")
    # FIX: narrowed from bare `except:` — only conversion failures mean
    # "not a Cartesian vector, parse as a string directive instead"
    except (ValueError, TypeError):
        interstitial_pos_type = bulk_supercell.info["interstitials"].split()[0]
        if interstitial_pos_type == "mean":
            # position is the mean of the listed atoms' positions
            neighbor_indices = [
                int(i) for i in bulk_supercell.info["interstitials"].split()[1:]
            ]
            if len(neighbor_indices) < 2:
                raise ValueError(
                    "interstitial position type mean, but {} < 2 indices".
                    format(len(neighbor_indices)))
            interstitial_pos_l = [
                np.sum(bulk_supercell.get_positions()[neighbor_indices],
                       axis=0) / float(len(neighbor_indices))
            ]
        elif interstitial_pos_type == "inequivalent":
            # find all inequivalent voids; optionally expand the cell first
            if 'arb_supercell' in bulk_supercell.info:
                print("making bulk supercell from",
                      bulk_supercell.info['arb_supercell'].reshape((3, 3)))
                bulk_supersupercell = ase.build.make_supercell(
                    bulk_supercell,
                    bulk_supercell.info['arb_supercell'].reshape((3, 3)))
                print("got supersupercell with ", len(bulk_supersupercell),
                      "atoms, cell\n", bulk_supersupercell.get_cell())
            voids = find_voids(bulk_supercell)
            interstitial_pos_l = [(v[1], v[2], v[3]) for v in voids]
            # NOTE(review): bulk_supersupercell is only defined when
            # 'arb_supercell' is present; "inequivalent" without it would
            # raise NameError here — confirm intended usage.
            bulk_supersupercell.info.update(bulk_supercell.info)
            bulk_supercell = bulk_supersupercell
        else:
            raise ValueError("Unknown interstitial position type in '" +
                             bulk_supercell.info["interstitials"] + "'")

    evaluate(bulk_supercell)
    bulk_supercell_pe = bulk_supercell.get_potential_energy()
    properties = {
        "bulk_struct_test": bulk_supercell.info["bulk_struct_test"],
        "bulk_E_per_atom": bulk_supercell_pe / len(bulk_supercell),
        "defects": {}
    }

    # accept a single Z or a list of Zs
    Z_list = ([bulk_supercell.info["Zs"]] if isinstance(
        bulk_supercell.info["Zs"],
        (int, np.integer)) else bulk_supercell.info["Zs"])

    for interstitial_Z in Z_list:
        # optional per-species relaxation-seeding parameters
        try:
            relax_radial = bulk_supercell.info['relax_radial_{}'.format(
                interstitial_Z)]
        except KeyError:
            relax_radial = 0.0
        try:
            relax_symm_break = bulk_supercell.info[
                'relax_symm_break_{}'.format(interstitial_Z)]
        except KeyError:
            relax_symm_break = 0.0

        for interst_i, interst_pos in enumerate(interstitial_pos_l):
            label = f'Z_{interstitial_Z}_pos_{interst_i}'
            (unrelaxed_filename, Ef0, relaxed_filename, Ef,
             interstitial_i) = do_one_interstitial(
                 bulk_supercell, bulk_supercell_pe, interstitial_Z,
                 interst_pos, label, relax_radial, relax_symm_break,
                 nn_cutoff, tol)
            properties["defects"][label] = {
                'Ef0': Ef0,
                'Ef': Ef,
                'unrelaxed_filename': unrelaxed_filename,
                'relaxed_filename': relaxed_filename,
                'atom_ind': int(interstitial_i),
                'Z': int(interstitial_Z),
                'pos_index': interst_i
            }
            # multi-species systems need a chemical-potential correction term
            if len(set(bulk_supercell.get_atomic_numbers())) != 1:
                properties["defects"][label]['dmu'] = [-1, int(interstitial_Z)]

    return properties
def do_one_interstitial(bulk_supercell,
                        bulk_supercell_pe,
                        interstitial_Z,
                        interstitial_pos,
                        label,
                        relax_radial=0.0,
                        relax_symm_break=0.0,
                        nn_cutoff=0.0,
                        tol=1.0e-2):
    """Compute unrelaxed and relaxed formation energies of one interstitial.

    Appends an atom of species `interstitial_Z` at `interstitial_pos`,
    optionally perturbs its neighbors radially (seeding relaxation and
    breaking symmetry), optionally applies a FixedPlane constraint from
    info["interstitial_constraint"], evaluates, then relaxes positions at
    fixed cell.

    Returns:
        (unrelaxed_filename, Ef0, relaxed_filename, Ef, interstitial_i);
        relaxed_filename and Ef are None if the relaxation failed.
    """
    interst = bulk_supercell.copy()
    interst += Atoms(numbers=[interstitial_Z], positions=[interstitial_pos])
    # the new atom is appended, so it is always the last index
    interstitial_i = len(interst) - 1

    if relax_radial != 0.0 or relax_symm_break != 0.0:
        nl = NeighborList([nn_cutoff / 2.0] * len(bulk_supercell),
                          self_interaction=False,
                          bothways=True)
        nl.update(bulk_supercell)
        indices, offsets = nl.get_neighbors(interstitial_i)
        offset_factor = relax_radial
        for i, offset in zip(indices, offsets):
            # push each neighbor radially, incrementing the factor per
            # neighbor to break symmetry
            ri = interst.positions[interstitial_i] - (
                interst.positions[i] + np.dot(offset, interst.get_cell()))
            interst.positions[i] += offset_factor * ri
            offset_factor += relax_symm_break

    # optional constraint: keep the interstitial in a plane defined either
    # by three atoms ("plane atoms i j k") or an explicit normal
    # ("plane vector nx ny nz")
    if "interstitial_constraint" in bulk_supercell.info:
        (constr_type, constr_subtype
         ) = bulk_supercell.info["interstitial_constraint"].split()[0:2]

        if constr_type == "plane":
            if constr_subtype == "atoms":
                indices = [
                    int(i) for i in
                    bulk_supercell.info["interstitial_constraint"].split()[2:]
                ]
                if len(indices) != 3:
                    raise ValueError(
                        "number of indices not 3 for plane atoms '{}'".format(
                            bulk_supercell.info["interstitial_constraint"]))
                p = interst.get_positions()
                # normal of the plane through the three listed atoms
                constr_normal = np.cross(p[indices[0]] - p[indices[1]],
                                         p[indices[0]] - p[indices[2]])
            elif constr_subtype == "vector":
                # NOTE(review): np.array of split strings yields a string
                # dtype array — presumably FixedPlane converts it; confirm.
                constr_normal = np.array(
                    bulk_supercell.info["interstitial_constraint"].split()[2:])
            else:
                raise ValueError(
                    "unknown interstitial constraint subtype for plane '{}'".
                    format(bulk_supercell.info["interstitial_constraint"]))

            print("setting constraint FixedPlane with normal", constr_normal)
            interst.set_constraint(FixedPlane(interstitial_i, constr_normal))
        else:
            raise ValueError(
                "unknown interstitial constraint type '{}'".format(
                    bulk_supercell.info["interstitial_constraint"]))

    evaluate(interst)
    unrelaxed_interstitial_pe = interst.get_potential_energy()

    # single-species bulk: scale bulk energy to (N+1)/N atoms; otherwise the
    # stoichiometry imbalance is handled via dmu by the caller
    if len(set(bulk_supercell.get_atomic_numbers())) == 1:
        Ebulk = float(len(interst)) / float(
            len(bulk_supercell)) * bulk_supercell_pe
    else:
        Ebulk = bulk_supercell_pe

    Ef0 = unrelaxed_interstitial_pe - Ebulk
    unrelaxed_filename = run_root + "-%s-unrelaxed.xyz" % label
    ase.io.write(os.path.join("..", unrelaxed_filename),
                 interst,
                 format='extxyz')
    print("got unrelaxed interstitial {} cell energy".format(label),
          unrelaxed_interstitial_pe)

    # best-effort relaxation: on any failure report None for the relaxed
    # results rather than aborting the whole defect scan
    try:
        interst = relax_config(interst,
                               relax_pos=True,
                               relax_cell=False,
                               tol=tol,
                               save_traj=True,
                               config_label=label,
                               from_base_model=True,
                               save_config=True,
                               try_restart=True)

        relaxed_filename = run_root + "-%s-relaxed.xyz" % label
        ase.io.write(os.path.join("..", relaxed_filename),
                     interst,
                     format='extxyz')

        interstitial_pe = interst.get_potential_energy()
        Ef = interstitial_pe - Ebulk

        print("got relaxed interstitial {} cell energy".format(label),
              interstitial_pe)
    except:
        relaxed_filename = None
        Ef = None

    print("got bulk energy", Ebulk)
    return (unrelaxed_filename, Ef0, relaxed_filename, Ef, interstitial_i)
import os.path, ase.io
from utilities import relax_config, run_root
import random, sys, model

# directory holding this test's input files
test_dir = os.path.abspath(os.path.dirname(__file__))

bulk = ase.io.read(os.path.join(test_dir, "bulk.xyz"), format="extxyz")

tol = 1.0e-3
# fully relax the reference bulk (positions and cell), preserving symmetry
relaxed_bulk = relax_config(bulk,
                            relax_pos=True,
                            relax_cell=True,
                            tol=tol,
                            traj_file=None,
                            config_label='bulk',
                            from_base_model=True,
                            save_config=True,
                            keep_symmetry=True)
relaxed_bulk_pe = relaxed_bulk.get_potential_energy() / len(relaxed_bulk)
ase.io.write(sys.stdout, relaxed_bulk, format="extxyz")

tol = 2.0e-3  # hack to work around convergence issues

antisites = relaxed_bulk * (2, 2, 2)
random.seed(10)
unrelaxed_energies = []
relaxed_energies = []
# sample 20 random unlike-species pairs to swap
# NOTE(review): the loop body continues beyond this chunk boundary
for config_i in range(20):
    Z = antisites.get_atomic_numbers()
    (i1, i2) = random.sample(range(len(antisites)), 2)
    # resample until the two picked atoms are of different species
    while Z[i1] == Z[i2]:
        (i1, i2) = random.sample(range(len(antisites)), 2)
import os.path, ase.io
from utilities import relax_config, run_root
import random, sys, model

# directory holding this test's input files
test_dir = os.path.abspath(os.path.dirname(__file__))

bulk = ase.io.read(os.path.join(test_dir, "bulk.xyz"), format="extxyz")

tol = 1.0e-3
# fully relax the reference bulk (positions and cell), preserving symmetry
relaxed_bulk = relax_config(bulk,
                            relax_pos=True,
                            relax_cell=True,
                            tol=tol,
                            traj_file=None,
                            config_label='bulk',
                            from_base_model=True,
                            save_config=True,
                            keep_symmetry=True)
relaxed_bulk_pe = relaxed_bulk.get_potential_energy() / len(relaxed_bulk)
ase.io.write(sys.stdout, relaxed_bulk, format="extxyz")

tol = 1.0e-2  # hack to work around convergence issues

antisites = relaxed_bulk * (2, 2, 2)
random.seed(10)
unrelaxed_energies = []
relaxed_energies = []
# sample 20 random unlike-species pairs to swap
# NOTE(review): the loop body continues beyond this chunk boundary
for config_i in range(20):
    Z = antisites.get_atomic_numbers()
    (i1, i2) = random.sample(range(len(antisites)), 2)
    # resample until the two picked atoms are of different species
    while Z[i1] == Z[i2]:
        (i1, i2) = random.sample(range(len(antisites)), 2)
def do_lattice(test_dir,
               lattice_type,
               dV=0.025,
               n_steps=(-10, 10),
               tol=1.0e-2,
               method='lbfgs',
               applied_P=0.0):
    """Relax a bulk lattice and compute its E(V) curve and elastic constants.

    Reads bulk.xyz from `test_dir`, relaxes positions and cell (iterating
    if the model has cell-dependent setup, until the cell change per
    iteration is small), computes an energy-volume curve via calc_E_vs_V,
    and fits elastic constants with matscipy for the given `lattice_type`
    ('cubic', 'orthorhombic', 'tetragonal', 'hexagonal', or 'trigonal').

    Returns:
        dict with 'E_vs_V' and the fitted elastic constants in GPa
        (plus a bulk modulus 'B' where a formula is available).
    """
    bulk = ase.io.read(test_dir + "/bulk.xyz", format="extxyz")

    results_dict = {}

    print("relax bulk")
    # relax the initial unit cell and atomic positions.  If the model has
    # cell-dependent setup (fix_cell_dependence), repeat until the relaxed
    # cell differs from the starting cell by < 5% in lattice-vector space.
    (orig_cell, new_cell) = (None, None)
    while new_cell is None or np.max(
            np.abs(np.dot(np.linalg.inv(new_cell), orig_cell) -
                   np.eye(3))) > 0.05:
        if hasattr(model, "fix_cell_dependence"):
            model.fix_cell_dependence(bulk)
        orig_cell = bulk.get_cell()
        bulk = relax_config(bulk,
                            relax_pos=True,
                            relax_cell=True,
                            tol=tol,
                            traj_file="lattice_bulk_traj.xyz",
                            method=method,
                            refine_symmetry_tol=1.0e-2,
                            keep_symmetry=True,
                            config_label="bulk",
                            from_base_model=True,
                            save_config=True,
                            applied_P=applied_P)
        new_cell = bulk.get_cell()
        if hasattr(model, "fix_cell_dependence"):
            model.fix_cell_dependence()
        else:
            # no cell-dependent setup: a single relaxation is sufficient
            break

    print("final relaxed bulk")
    ase.io.write(sys.stdout, bulk, format='extxyz')
    ase.io.write(os.path.join("..", "relaxed.xyz"), bulk, format='extxyz')

    print("calculating E vs. V")
    E_vs_V = calc_E_vs_V(bulk, dV=dV, n_steps=n_steps, tol=tol)
    results_dict.update({'E_vs_V': E_vs_V})

    print("calculating elastic constants")
    if hasattr(model, "fix_cell_dependence"):
        model.fix_cell_dependence(bulk)

    # optimizer factory passed to matscipy's elastic-constant fit
    opt = lambda atoms, **kwargs: PreconLBFGS(atoms, **kwargs)
    if lattice_type == 'cubic':
        elastic_consts = matscipy.elasticity.fit_elastic_constants(
            bulk, symmetry='cubic', optimizer=opt, logfile=sys.stdout)
        c11 = elastic_consts[0][0, 0] / GPa
        c12 = elastic_consts[0][0, 1] / GPa
        c44 = elastic_consts[0][3, 3] / GPa
        results_dict.update({
            'c11': c11,
            'c12': c12,
            'c44': c44,
            # Voigt bulk modulus for cubic symmetry
            'B': (c11 + 2.0 * c12) / 3.0
        })
    elif lattice_type == 'orthorhombic':
        elastic_consts = matscipy.elasticity.fit_elastic_constants(
            bulk, optimizer=opt, logfile=sys.stdout)
        c11 = elastic_consts[0][0, 0] / GPa
        c22 = elastic_consts[0][1, 1] / GPa
        c33 = elastic_consts[0][2, 2] / GPa
        c12 = elastic_consts[0][0, 1] / GPa
        c13 = elastic_consts[0][0, 2] / GPa
        c23 = elastic_consts[0][1, 2] / GPa
        c44 = elastic_consts[0][3, 3] / GPa
        c55 = elastic_consts[0][4, 4] / GPa
        c66 = elastic_consts[0][5, 5] / GPa
        results_dict.update({
            'c11': c11,
            'c22': c22,
            'c33': c33,
            'c12': c12,
            'c13': c13,
            'c23': c23,
            'c44': c44,
            'c55': c55,
            'c66': c66
        })
    elif lattice_type == 'tetragonal':
        elastic_consts = matscipy.elasticity.fit_elastic_constants(
            bulk,
            symmetry='tetragonal_high',
            optimizer=opt,
            logfile=sys.stdout)
        c11 = elastic_consts[0][0, 0] / GPa
        c33 = elastic_consts[0][2, 2] / GPa
        c12 = elastic_consts[0][0, 1] / GPa
        c13 = elastic_consts[0][0, 2] / GPa
        c44 = elastic_consts[0][3, 3] / GPa
        c66 = elastic_consts[0][5, 5] / GPa
        results_dict.update({
            'c11': c11,
            'c33': c33,
            'c12': c12,
            'c13': c13,
            'c44': c44,
            'c66': c66,
            'B': VRH_B(c11, c33, c12, c13, c44, c66)
        })
    elif lattice_type == 'hexagonal':
        # Need to check if hexagonal structures are truly trigonal_high
        # symmetry=triginal_high not hexagonal until matscipy is debugged
        elastic_consts = matscipy.elasticity.fit_elastic_constants(
            bulk,
            symmetry='trigonal_high',
            optimizer=opt,
            logfile=sys.stdout)
        c11 = elastic_consts[0][0, 0] / GPa
        c33 = elastic_consts[0][2, 2] / GPa
        c12 = elastic_consts[0][0, 1] / GPa
        c13 = elastic_consts[0][0, 2] / GPa
        c44 = elastic_consts[0][3, 3] / GPa
        c14 = elastic_consts[0][0, 3] / GPa
        c15 = elastic_consts[0][0, 4] / GPa
        c25 = elastic_consts[0][1, 4] / GPa
        c66 = elastic_consts[0][5, 5] / GPa
        results_dict.update({
            'c11': c11,
            'c33': c33,
            'c12': c12,
            'c13': c13,
            'c44': c44,
            'c14': c14,
            'c15': c15,
            'c25': c25,
            'c66': c66,
            'B': HTT_B(c11, c33, c12, c13)
        })
    elif lattice_type == 'trigonal':
        elastic_consts = matscipy.elasticity.fit_elastic_constants(
            bulk,
            symmetry='trigonal_high',
            optimizer=opt,
            logfile=sys.stdout)
        c11 = elastic_consts[0][0, 0] / GPa
        c33 = elastic_consts[0][2, 2] / GPa
        c12 = elastic_consts[0][0, 1] / GPa
        c13 = elastic_consts[0][0, 2] / GPa
        c44 = elastic_consts[0][3, 3] / GPa
        c14 = elastic_consts[0][0, 3] / GPa
        c15 = elastic_consts[0][0, 4] / GPa
        c25 = elastic_consts[0][1, 4] / GPa
        c66 = elastic_consts[0][5, 5] / GPa
        results_dict.update({
            'c11': c11,
            'c33': c33,
            'c12': c12,
            'c13': c13,
            'c44': c44,
            'c14': c14,
            'c15': c15,
            'c25': c25,
            'c66': c66,
            'B': HTT_B(c11, c33, c12, c13)
        })

    if hasattr(model, "fix_cell_dependence"):
        model.fix_cell_dependence()

    return results_dict
def do_interstitial(test_dir, nn_cutoff=0.0, tol=1.0e-2):
    """Compute interstitial formation energies for a single position.

    Older single-position variant: reads bulk_supercell.xyz, rescales it
    to the relaxed bulk lattice, determines one interstitial position from
    info["interstitial_position"] (either a Cartesian 3-vector or
    "mean i j ..."), and computes a defect entry per requested species Z.

    NOTE(review): the call to do_one_interstitial below passes no `label`
    and unpacks a 6-tuple, which does not match the do_one_interstitial
    signature defined elsewhere in this file (takes `label`, returns a
    5-tuple) — this appears to be paired with an older version of that
    function; confirm which pair is current.

    Returns:
        dict with bulk_struct_test, bulk_E_per_atom and a "defects" dict.
    """
    print("doing do_interstitial")
    bulk_supercell = ase.io.read(os.path.join(test_dir, "bulk_supercell.xyz"),
                                 format="extxyz")
    print("got bulk_supercell ", len(bulk_supercell))
    bulk = rescale_to_relaxed_bulk(bulk_supercell)
    # relax bulk supercell positions in case it's only approximate (as it must be for different models), but stick
    # to relaxed bulk's lattice constants as set by rescale_to_relaxed_bulk
    bulk_supercell = relax_config(bulk_supercell,
                                  relax_pos=True,
                                  relax_cell=False,
                                  tol=tol,
                                  traj_file=None,
                                  config_label="relaxed_bulk",
                                  from_base_model=True,
                                  save_config=True)
    evaluate(bulk_supercell)
    bulk_supercell_pe = bulk_supercell.get_potential_energy()
    ase.io.write(os.path.join("..", run_root + "-rescaled-bulk.xyz"),
                 bulk_supercell,
                 format='extxyz')

    print("got bulk primitive cell ", bulk.get_cell())
    print("got rescaled bulk_supercell cell ", bulk_supercell.get_cell())

    properties = {
        "bulk_struct_test": bulk_supercell.info["bulk_struct_test"],
        "bulk_E_per_atom": bulk_supercell_pe / len(bulk_supercell),
        "defects": {}
    }

    try:
        # Cartesian 3-vector
        interstitial_pos = np.array(
            [float(x) for x in bulk_supercell.info["interstitial_position"]])
        if len(interstitial_pos) != 3:
            raise ValueError("not a 3-vector")
    except:
        # otherwise a string directive: "mean i j ..." averages the listed
        # atoms' positions
        interstitial_pos_type = bulk_supercell.info[
            "interstitial_position"].split()[0]
        if interstitial_pos_type == "mean":
            neighbor_indices = [
                int(i) for i in
                bulk_supercell.info["interstitial_position"].split()[1:]
            ]
            if len(neighbor_indices) < 2:
                raise ValueError(
                    "interstitial position type mean, but {} < 2 indices".
                    format(len(neighbor_indices)))
            interstitial_pos = np.sum(
                bulk_supercell.get_positions()[neighbor_indices],
                axis=0) / float(len(neighbor_indices))
        else:
            raise ValueError("Unknown interstitial position type in '" +
                             bulk_supercell.info["interstitial_position"] +
                             "'")

    # accept a single Z or a list of Zs
    if isinstance(bulk_supercell.info["Zs"], list):
        Z_list = bulk_supercell.info["Zs"]
    else:
        Z_list = [bulk_supercell.info["Zs"]]

    for interstitial_Z in Z_list:
        # optional per-species relaxation-seeding parameters
        try:
            relax_radial = bulk_supercell.info['relax_radial_{}'.format(
                interstitial_Z)]
        except:
            relax_radial = 0.0
        try:
            relax_symm_break = bulk_supercell.info[
                'relax_symm_break_{}'.format(interstitial_Z)]
        except:
            relax_symm_break = 0.0

        (label, unrelaxed_filename, Ef0, relaxed_filename, Ef,
         interstitial_i) = do_one_interstitial(
             bulk_supercell, bulk_supercell_pe, interstitial_Z,
             interstitial_pos, relax_radial, relax_symm_break, nn_cutoff,
             tol)

        properties["defects"][label] = {
            'Ef0': Ef0,
            'Ef': Ef,
            'unrelaxed_filename': unrelaxed_filename,
            'relaxed_filename': relaxed_filename,
            'atom_ind': int(interstitial_i),
            'Z': int(interstitial_Z)
        }
        # multi-species systems need a chemical-potential correction term
        if len(set(bulk_supercell.get_atomic_numbers())) != 1:
            properties["defects"][label]['dmu'] = [-1, interstitial_Z]

    return properties
# the current model import model a0 = 5.44 # initial guess at lattice constant, cell will be relaxed below fmax = 0.01 # maximum force following relaxtion [eV/A] # set up the a bulk = Diamond(symbol='Si', latticeconstant=a0) # specify that we will use model.calculator to compute forces, energies and stresses bulk.set_calculator(model.calculator) # use one of the routines from utilities module to relax the initial # unit cell and atomic positions bulk = relax_config(bulk, relax_pos=True, relax_cell=False, tol=fmax, traj_file=None) # set up supercell bulk *= (5, 1, 1) def surface_energy(bulk, opening): Nat = bulk.get_number_of_atoms() # relax atom positions, holding cell fixed # vac = relax_atoms(vac, fmax=fmax) # compute surface formation energy as difference of bulk and expanded cell ebulk = bulk.get_potential_energy() print('bulk cell energy', ebulk) bulk.cell[0,:] += [opening,0.0,0.0]
def do_all_vacancies(test_dir, nn_cutoff=0.0, tol=1.0e-2):
    """Compute vacancy formation energies for every requested vacancy site.

    Reads ``bulk_supercell.xyz`` from `test_dir`, rescales it to the relaxed
    bulk lattice, relaxes positions at fixed cell, then removes one atom at a
    time (via do_one_vacancy) and collects unrelaxed/relaxed formation
    energies.

    Parameters
    ----------
    test_dir : str
        Directory containing ``bulk_supercell.xyz``.
    nn_cutoff : float
        Nearest-neighbour cutoff passed through to do_one_vacancy.
    tol : float
        Force tolerance for the relaxations.

    Returns
    -------
    dict with keys "bulk_struct_test", "bulk_E_per_atom" and "defects"
    (one entry per vacancy, keyed by label).
    """
    print("doing do_all_vacancies")
    bulk_supercell = ase.io.read(os.path.join(test_dir, "bulk_supercell.xyz"), format="extxyz")
    print("got bulk_supercell ", len(bulk_supercell))

    bulk = rescale_to_relaxed_bulk(bulk_supercell)
    # relax bulk supercell positions in case it's only approximate (as it must be for different
    # models), but stick to relaxed bulk's lattice constants as set by rescale_to_relaxed_bulk
    bulk_supercell = relax_config(bulk_supercell, relax_pos=True, relax_cell=False, tol=tol,
                                  save_traj=True, config_label="rescaled_bulk",
                                  from_base_model=True, save_config=True)
    ase.io.write(os.path.join("..", run_root+"-rescaled-bulk.xyz"), bulk_supercell, format='extxyz')

    print("got bulk primitive cell ", bulk.get_cell())
    print("got rescaled bulk_supercell cell ", bulk_supercell.get_cell())

    if bulk_supercell.info['vacancies'] == "inequivalent":
        # one vacancy per symmetry-inequivalent site, found via spglib
        sym_data = spglib.get_symmetry_dataset(bulk_supercell, symprec=0.01)
        prim_vacancy_list = np.unique(sym_data["equivalent_atoms"])
        print("orig cell vacancy_list", prim_vacancy_list)
        if 'arb_supercell' in bulk_supercell.info:
            # an arbitrary (possibly non-diagonal) supercell matrix was requested:
            # build it, then map each inequivalent site into the bigger cell
            print("making bulk supercell from", bulk_supercell.info['arb_supercell'].reshape((3,3)) )
            bulk_supersupercell = ase.build.make_supercell(bulk_supercell,
                                      bulk_supercell.info['arb_supercell'].reshape((3,3)) )
            print("got supersupercell with ", len(bulk_supersupercell), "atoms, cell\n",
                  bulk_supersupercell.get_cell())
            vacancy_list = []
            for i in prim_vacancy_list:
                # minimum-image displacement from original site i to every atom of the big cell
                p = bulk_supercell.get_positions()[i]
                dv = bulk_supersupercell.get_positions() - p
                dv_scaled = np.dot(dv, bulk_supersupercell.get_reciprocal_cell().T)
                dv -= np.dot(np.round(dv_scaled), bulk_supersupercell.get_cell())
                i_closest = np.argmin(np.linalg.norm(dv, axis=1))
                print("found closest in new cell", i_closest, "distance in orig cell lattice coords",
                      np.dot((bulk_supersupercell.get_positions()[i_closest]-p),
                             bulk_supercell.get_reciprocal_cell().T))
                vacancy_list.append(i_closest)
            bulk_supersupercell.info.update(bulk_supercell.info)
            bulk_supercell = bulk_supersupercell
        else:
            vacancy_list = prim_vacancy_list
        print("final vacancy_list", vacancy_list)
    else:
        # explicit list (or a single scalar index) of vacancy sites
        try:
            vacancy_list = [ int(i) for i in bulk_supercell.info['vacancies'] ]
        except (TypeError, ValueError):
            # scalar (non-iterable) value: a single vacancy index
            vacancy_list = [ int(bulk_supercell.info['vacancies']) ]

    evaluate(bulk_supercell)
    bulk_supercell_pe = bulk_supercell.get_potential_energy()
    properties = { "bulk_struct_test" : bulk_supercell.info["bulk_struct_test"],
                   "bulk_E_per_atom" : bulk_supercell_pe / len(bulk_supercell),
                   "defects" : {} }

    for vac_i in vacancy_list:
        # optional per-site relaxation controls from the input file's info fields;
        # default to 0.0 when absent (was a bare try/except, now an explicit .get)
        relax_radial = bulk_supercell.info.get('relax_radial_{}'.format(vac_i), 0.0)
        relax_symm_break = bulk_supercell.info.get('relax_symm_break_{}'.format(vac_i), 0.0)
        (label, unrelaxed_filename, Ef0, relaxed_filename, Ef, vac_Z, vac_pos) = \
            do_one_vacancy(bulk_supercell, bulk_supercell_pe, vac_i,
                           relax_radial, relax_symm_break, nn_cutoff, tol)

        properties["defects"][label] = { 'Ef0' : Ef0, 'Ef' : Ef,
                                         'unrelaxed_filename' : unrelaxed_filename,
                                         'relaxed_filename' : relaxed_filename,
                                         'atom_ind' : int(vac_i), 'Z' : int(vac_Z),
                                         'vac_pos' : vac_pos.tolist() }
        # multi-species system: record the chemical-potential bookkeeping (+1 atom of vac_Z)
        if len(set(bulk_supercell.get_atomic_numbers())) > 1:
            properties["defects"][label]["dmu"] = [1, vac_Z]

    print("returning properties", properties)
    return properties
def do_Bain_path(bulk_struct_test_fcc, bcc_fcc_steps=15, extra_deformation=0.3):
    """Scan the Bain path from fcc (c/a = 1) to bcc (c/a = 1/sqrt(2)),
    extended by `extra_deformation` beyond each end, relaxing hydrostatically
    at each fixed c/a.  Returns {'Bain_path_E': [(c_a, E/atom, stress), ...]}.
    """
    # uniform c/a spacing over the fcc->bcc segment
    dca = (1.0 - 1.0/np.sqrt(2.0))/(bcc_fcc_steps-1)
    total_steps = bcc_fcc_steps

    # extra steps above fcc (c/a stretched by up to 1+extra_deformation)
    anchor = 1.0
    extra = int(np.ceil( (anchor*(1.0+extra_deformation)-anchor) / dca ))
    total_steps += extra
    ca_hi = 1.0 + dca*extra

    # extra steps below bcc (c/a compressed by up to 1/(1+extra_deformation))
    anchor = 1.0/np.sqrt(2)
    extra = int(np.ceil( (anchor - anchor/(1.0+extra_deformation)) / dca ))
    total_steps += extra
    ca_lo = 1.0/np.sqrt(2) - dca*extra

    properties = {}

    fcc_bulk = get_relaxed_bulk(bulk_struct_test_fcc)
    lattice, scaled_positions, numbers = spglib.standardize_cell(fcc_bulk)
    ref_atoms = ase.atoms.Atoms(cell=lattice, scaled_positions=scaled_positions,
                                numbers=numbers, pbc=[True]*3)

    # sanity-check the standardized cell: orthogonal vectors, a || x, a || y
    # with equal lengths, and c || z — the scaling below relies on this layout
    assert np.dot(ref_atoms.cell[0], ref_atoms.cell[1]) == 0
    assert np.dot(ref_atoms.cell[0], ref_atoms.cell[2]) == 0
    assert np.dot(ref_atoms.cell[1], ref_atoms.cell[2]) == 0
    assert np.all(ref_atoms.cell[0][1:3] == 0)
    assert np.all(ref_atoms.cell[1][[0,2]] == 0)
    assert np.all(ref_atoms.cell[0][0] == ref_atoms.cell[1][1])
    assert np.all(ref_atoms.cell[2][0:2] == 0)

    atoms = ref_atoms.copy()
    energies = []
    for ca in np.linspace(ca_hi, ca_lo, total_steps):
        # volume-conserving scale factors: fa*fa*fc = 1 and (fc*c)/(fa*a) = ca,
        # hence fa = (c/(ca*a))**(1/3) and fc = ca*fa*a/c
        a = np.linalg.norm(atoms.cell[0])
        c = np.linalg.norm(atoms.cell[2])
        fa = (c/(ca*a))**(1/3)
        fc = ca * fa * a/c
        atoms.set_cell(np.diag((fa, fa, fc)) @ atoms.cell, scale_atoms=True)
        atoms.info['c_a'] = ca
        atoms = relax_config(atoms, relax_pos=False, relax_cell=True,
                             hydrostatic_strain=True, refine_symmetry_tol=1.0e-1,
                             keep_symmetry=True, config_label=f'Bain_path_{ca:.3}',
                             save_traj=True, from_base_model=True, save_config=True)
        energies.append((ca, atoms.get_potential_energy()/len(atoms),
                         atoms.get_stress().tolist()))

    properties['Bain_path_E'] = energies
    return properties