def create_vertex(self, params):

    if params is None:
        raise Exception("Tried to create a vertex with params = None!")

    # Label directories with stoichiometry
    base_vert_dir = self.dir + "/" + params["stoichiometry_string"]

    # Loop over vertices with the same stoichiometry
    version = 1
    while True:

        vertex_dir = base_vert_dir + "_v{0}".format(version)

        # We've found a free vertex name
        if not os.path.isdir(vertex_dir):
            break

        # Vertex name exists, increment version
        version += 1

        # If these vertices are the same (as far as
        # the network is concerned), return the one that
        # already exists
        vertex = alch_vertex(vertex_dir)
        if self.parameters_comp(params, vertex.params):
            log("Vertex {0} already exists".format(vertex.name), "alchemy.log")
            return vertex

    # Initialize the vertex directory
    os.system("mkdir " + vertex_dir)
    new_vertex = alch_vertex(vertex_dir)
    new_vertex.params = params
    log("Created vertex {0}".format(new_vertex.name), "alchemy.log")
    return new_vertex
def randomly_mutate(structure, mutations, check_valid):

    # Nice header for the mutation
    line = "{:^64}".format("Mutating " + structure["stoichiometry_string"])
    div = "".join(["-" for c in line])
    log("\n" + div, "alchemy.log")
    log(line, "alchemy.log")
    log(div, "alchemy.log")

    # Mutate the structure until a valid
    # mutation is found
    while True:

        mutation = mutations[random.randrange(len(mutations))](structure)

        # Mutations return None if there was no sensible
        # way to apply them
        if mutation is None:
            log("    Mutation returned None, trying again...", "alchemy.log")
            continue

        # Check validity of the mutation
        if not check_valid(mutation):
            log("    Mutation invalid, trying again...", "alchemy.log")
            continue

        # Mutation valid, return it
        return mutation
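# Illustrative usage sketch for randomly_mutate (not part of the original
# module; the input file name is hypothetical). The mutation functions passed
# in are the ones defined elsewhere in this module.
def _example_randomly_mutate():
    structure = parameters("example.in")  # hypothetical input file
    muts = [substitute_random_atom, remove_random_atom,
            shuffle_atoms, duplicate_random_atom, substitute_random_species]
    # Accept any mutation that keeps the cell to at most 20 atoms
    return randomly_mutate(structure, muts,
                           check_valid=lambda s: len(s["atoms"]) <= 20)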
def tc_calculation_complete(dirname):

    elph_in = dirname + "/elph.in"
    if not os.path.isfile(elph_in):
        return False

    # Parse the number of sigma values
    # from the .in file
    n_sig = None
    with open(elph_in) as f:
        for line in f:
            if "el_ph_nsigma" in line:
                n_sig = int(line.split("=")[-1].replace(",", ""))

    if n_sig is None:
        log("Could not parse el_ph_nsigma from " + elph_in)
        return False

    # If any of the n_sig a2F files is missing,
    # the calculation has not completed
    for i in range(1, n_sig + 1):
        if not os.path.isfile(dirname + "/a2F.dos{0}".format(i)):
            return False

    return True
def a2f_float(self, word):

    f = float(word)
    if math.isnan(f):
        msg = "NaN appeared in " + self.filename + "!"
        log(msg)
        print(msg)
        f = 0.0

    return f
def run_test_lah10_fm3m():

    # Create and move to a directory to
    # run the test from
    old_path = os.getcwd()
    os.system("mkdir lah10_fm3m_test")
    os.chdir("lah10_fm3m_test")

    try:
        # Relax the structure
        rlx = relax(lah10_fm3m)
        log(rlx.run())

        # Calculate the density of states
        ds = dos(lah10_fm3m)
        log(ds.run())

        # Calculate the projected density of states
        pdos = proj_dos(lah10_fm3m)
        log(pdos.run())

        # Calculate phonons on a grid
        ph = phonon_grid(lah10_fm3m)
        log(ph.run())

    except Exception as e:
        # Log the failure rather than swallowing it silently
        log("lah10_fm3m test failed:\n" + str(e))

    # Restore old directory
    os.chdir(old_path)
def substitute_random_atom(structure, restrict_to=None):

    # Choose the atom to be replaced
    atoms = structure["atoms"]
    i_replace = random.randrange(len(atoms))
    to_replace = atoms[i_replace][0]
    sub = propose_substitution(to_replace, restrict_to=restrict_to)

    fs = "Replacing atom {0} ({1}) with {2} in {3}"
    fs = fs.format(i_replace, to_replace, sub, structure["stoichiometry_string"])
    log(fs, "alchemy.log")

    # Make the replacement
    structure["atoms"][i_replace][0] = sub
    return structure
def remove_random_atom(structure):

    # Don't remove the last atom
    if len(structure["atoms"]) == 1:
        return None

    # Remove a random atom from the structure
    i_rem = random.randrange(len(structure["atoms"]))
    atom_removed = structure["atoms"][i_rem]

    fs = "Removing atom {0} in {1}\n    Removed {2} @ {3:8.6f} {4:8.6f} {5:8.6f}"
    fs = fs.format(i_rem, structure["stoichiometry_string"],
                   atom_removed[0], *atom_removed[1])
    log(fs, "alchemy.log")

    del structure["atoms"][i_rem]
    return structure
def shuffle_atoms(structure):

    # No point shuffling a single atom
    if len(structure["atoms"]) == 1:
        return None

    # Shuffle the atoms into each others' locations
    atoms = structure["atoms"]
    new_pos = [a[1] for a in atoms]
    random.shuffle(new_pos)
    for i in range(len(new_pos)):
        atoms[i][1] = new_pos[i]

    log("Shuffled atoms in {0}".format(structure["stoichiometry_string"]),
        "alchemy.log")
    return structure
def substitute_random_species(structure, restrict_to=None):

    # Choose the species to be replaced
    to_replace = structure["species"]
    to_replace = to_replace[random.randrange(len(to_replace))][0]

    # Choose the species to replace it with
    sub = propose_substitution(to_replace, restrict_to=restrict_to)

    fs = "Replacing {0} with {1} in {2}"
    fs = fs.format(to_replace, sub, structure["stoichiometry_string"])
    log(fs, "alchemy.log")

    # Make the replacement
    for i, a in enumerate(structure["atoms"]):
        if a[0] == to_replace:
            structure["atoms"][i][0] = sub

    return structure
def parse(self, filename):

    # Get the directory of this calculation
    dos_dir = os.path.dirname(filename)
    dos_file = dos_dir + "/pwscf.dos"

    # Throw an error if the .dos file doesn't exist
    if not os.path.isfile(dos_file):
        msg = "Could not find the DOS file {0}"
        msg = msg.format(dos_file)
        log(msg)
        raise RuntimeError(msg)

    # Parse the .dos file
    energies = []
    densities = []
    first_line = True
    with open(dos_file) as f:
        for line in f:

            # Parse the Fermi energy from the first line
            if first_line:
                fermi_energy = float(line.split()[-2])
                fermi_energy *= EV_TO_RY
                first_line = False
                continue

            # Parse the energy and density
            e, d = [float(w) for w in line.split()[0:2]]
            energies.append(e * EV_TO_RY)
            densities.append(d)

    # Store the results
    self["DOS energies"] = energies
    self["DOS (energy)"] = densities
    self["fermi energy"] = fermi_energy

    # Linearly interpolate to get the DOS at
    # the fermi level
    for i in range(len(energies)):
        if energies[i] > fermi_energy:
            r = (energies[i] - fermi_energy) / (energies[i] - energies[i - 1])
            self["DOS (E_F)"] = densities[i] * (1 - r) + densities[i - 1] * r
            break
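# Self-contained numeric check of the Fermi-level interpolation above
# (illustrative, not part of the parser). With energies [0.0, 1.0], densities
# [2.0, 4.0] and a Fermi energy of 0.25, the weight is
# r = (1.0 - 0.25) / (1.0 - 0.0) = 0.75, giving
# DOS(E_F) = 4.0 * (1 - 0.75) + 2.0 * 0.75 = 2.5, the expected value one
# quarter of the way along the interval.
def _example_fermi_interpolation():
    energies, densities = [0.0, 1.0], [2.0, 4.0]
    fermi_energy = 0.25
    for i in range(len(energies)):
        if energies[i] > fermi_energy:
            r = (energies[i] - fermi_energy) / (energies[i] - energies[i - 1])
            return densities[i] * (1 - r) + densities[i - 1] * r  # -> 2.5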
def __init__(self,
             # The directory representing this network; vertices will be
             # represented by subdirectories within base_dir.
             #   If None, a new directory of the form network_i will be created.
             #   If base_dir exists, we will load the network in base_dir.
             #   Otherwise a new network will be created with name base_dir.
             base_dir=None,
             # Function used to compare two parameter sets of the same
             # stoichiometry; returns True if they should be considered
             # as the same vertex.
             parameters_comp=lambda s1, s2: True  # Default: same stoichiometry => same vertex
             ):

    # Generate new network name
    if base_dir is None:
        i = 1
        while True:
            if not os.path.isdir("network_{0}".format(i)):
                base_dir = "network_{0}".format(i)
                break
            i += 1

    # Ensure the network directory exists
    created = False
    if not os.path.isdir(base_dir):
        os.system("mkdir " + base_dir)
        created = True

    # Use the absolute path from here on
    base_dir = os.path.abspath(base_dir)
    self.dir = base_dir
    self.name = base_dir.split("/")[-1]

    # Record the parameters comparison function
    self.parameters_comp = parameters_comp

    if created:
        log("Created network " + self.dir, "alchemy.log")
    else:
        fs = "Loaded network {0} with {1} vertices"
        fs = fs.format(self.dir, len(self.verticies))
        log(fs, "alchemy.log")
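# Illustrative construction of a network (not part of the original module).
# Assumes the enclosing class is named "network" (the class name is not shown
# here) and a hypothetical input file. Two parameter sets with the same
# stoichiometry are treated as the same vertex only if their lattices
# (nearly) coincide.
def _example_network_usage():
    same_vertex = lambda s1, s2: np.allclose(s1["lattice"], s2["lattice"])
    net = network(base_dir="example_network", parameters_comp=same_vertex)
    return net.create_vertex(parameters("example.in"))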
def choose_mutation(self, mutations, objective=None):

    # Get scores for mutations that already appear in the network
    scores = self.get_mutation_scores(objective=objective)
    max_score = max(scores[m] for m in scores) if len(scores) > 0 else 1.0

    # Give untested mutations a high score (so they will be tested soon)
    for m in mutations:
        if m not in scores:
            scores[m] = max_score

    # Rescale scores to [-1, 1] by dividing by the
    # maximum absolute score (note scores of 0 remain unchanged).
    # This is done to decrease score sensitivity to the order
    # of magnitude of the objective function.
    max_abs = max(abs(scores[m]) for m in scores)
    if max_abs > 10e-5:
        scores = {m: scores[m] / max_abs for m in scores}

    # Map to [0, 1]
    scores = {m: (1.0 + scores[m]) / 2.0 for m in scores}

    # Generate probabilities so that the maximum possible
    # ratio between most and least probable is 10 : 1
    probs = {m: 10.0 ** scores[m] for m in scores}
    tot = sum(probs[m] for m in probs)
    probs = {m: probs[m] / tot for m in probs}

    # Report the probabilities
    max_l = max(len(m) for m in probs)
    fs = "    {0:" + str(max_l) + "} {1}"
    log("Mutation probabilities:", "alchemy.log")
    for m in probs:
        log(fs.format(m, probs[m]), "alchemy.log")

    # Choose according to that probability
    rnd = random.random()
    tot = 0.0
    for m in probs:
        tot += probs[m]
        if tot > rnd:
            return m

    raise Exception("Should not be able to get here!")
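# Standalone illustration of the score -> probability mapping above (not part
# of the module). Rescaling to [0, 1] and taking probabilities proportional to
# 10**score caps the ratio between the most and least probable mutation at 10:1.
def _example_mutation_probabilities():
    scores = {"remove_random_atom": -2.0, "shuffle_atoms": 1.0}
    max_abs = max(abs(s) for s in scores.values())
    scores = {m: s / max_abs for m, s in scores.items()}      # now in [-1, 1]
    scores = {m: (1.0 + s) / 2.0 for m, s in scores.items()}  # now in [0, 1]
    probs = {m: 10.0 ** s for m, s in scores.items()}
    tot = sum(probs.values())
    # -> {"remove_random_atom": ~0.15, "shuffle_atoms": ~0.85}
    return {m: p / tot for m, p in probs.items()}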
def tc_from_a2f_eliashberg_recursive(root_dir):

    # Run over all a2F.dos files and calculate Tc
    for f in listfiles(root_dir):
        if "a2F.dos" not in f:
            continue

        log("Calculating Tc for " + f, "tc.log")
        try:
            tc_res = tc_from_a2f_file_eliashberg(f)
            log("Success", "tc.log")
            for mu in tc_res:
                log("    Mu = {0}, Tc = {1}".format(mu, tc_res[mu]), "tc.log")
        except Exception as e:
            log("Failed with exception:\n" + str(e), "tc.log")
def duplicate_random_atom(structure):

    # Copy a random atom
    atoms = structure["atoms"]
    i_dupe = random.randrange(len(atoms))
    new_atom = copy.deepcopy(atoms[i_dupe])

    fs = "Duplicating atom {0} in {1}".format(
        i_dupe, structure["stoichiometry_string"])
    fs += "\n    Duplicated {0} @ {1:8.6f} {2:8.6f} {3:8.6f}".format(
        new_atom[0], *new_atom[1])

    # Displace it by a gaussian
    for j in range(3):
        new_atom[1][j] += random.gauss(0, 0.1)
    atoms.append(new_atom)

    fs += "\n    New atom   {0} @ {1:8.6f} {2:8.6f} {3:8.6f}".format(
        new_atom[0], *new_atom[1])
    log(fs, "alchemy.log")

    return structure
def expand_to_minimize(self,
                       objective,  # Objective to minimize
                       mutations,  # List of allowed mutations to vertex parameters
                       # Function used to determine if a mutated structure is valid
                       is_valid=lambda s: True):

    if len(mutations) == 0:
        raise Exception("No mutations specified in expand_to_minimize")

    log("Choosing a mutation to apply (minimizing {0})".format(objective.__name__),
        "alchemy.log")
    mut_name = self.choose_mutation([m.__name__ for m in mutations],
                                    objective=objective)

    mut = None
    for m in mutations:
        if m.__name__ == mut_name:
            mut = m
            break
    if mut is None:
        mut = random.choice(mutations)

    log("Selected mutation {0}".format(mut.__name__), "alchemy.log")

    log("Choosing vertex to expand (minimizing {0})...".format(objective.__name__),
        "alchemy.log")
    vert = self.vertex_chooser(objective)
    log("Vertex chosen: " + vert.name, "alchemy.log")

    self.expand_vertex(vert, mut, is_valid)
def tc_from_gap_function(filename, plot=False):

    from scipy.optimize import curve_fit
    import numpy as np

    # Parse the superconducting gap vs temperature
    ts = []
    gs = []
    with open(filename) as f:
        for l in f:
            vals = [float(w) for w in l.split()]
            if len(vals) != 3:
                continue
            ts.append(vals[0])
            gs.append(vals[1] / vals[2])

    # (Optional) interpolate g(t) data to a finer grid
    # ts_interp = np.linspace(min(ts), max(ts), len(ts)*5)
    # gs_interp = [np.interp(t, ts, gs) for t in ts_interp]
    # ts = ts_interp
    # gs = gs_interp

    # Guess Tc as the first temperature (scanning downwards in T)
    # at which the gap rises above 5% of its maximum
    tc_guess = 0
    for i in range(len(ts) - 1, -1, -1):
        tc_guess = ts[i]
        if gs[i] > 0.05 * max(gs):
            break

    # BCS-like model for the gap vs temperature; the gap
    # closes at t = tc and saturates at gmax for t << tc
    def gap_model(t, tc, gmax):
        t = np.array([min(ti, tc) for ti in t])
        return gmax * np.tanh(1.74 * np.sqrt(tc / t - 1))

    try:
        p0 = [tc_guess, max(gs) * 2]
        par, cov = curve_fit(gap_model, ts, gs, p0)

        if plot:
            import matplotlib.pyplot as plt
            plt.plot(ts, gs, marker="+", label="data")
            ts_interp = np.linspace(min(ts), max(ts), 200)
            plt.plot(ts_interp, gap_model(ts_interp, *par), label="fit")
            plt.axvline(par[0], color="black", linestyle=":",
                        label="Tc = " + str(par[0]))
            plt.legend()
            plt.show()

    # Note: this branch only triggers if warnings are
    # raised as errors (e.g. via warnings.filterwarnings)
    except Warning as warn:
        log("Gap function fit failed with warning:", "tc.log")
        log(str(warn), "tc.log")
        par = [0, p0[1]]
        cov = float("inf")

    except Exception as err:
        log("Gap function fit failed with error:", "tc.log")
        log(str(err), "tc.log")
        raise err

    if np.isfinite(cov).all():
        tc = par[0]
        err = cov[0][0] ** 0.5
    else:
        log("Tc covariance is infinite!", "tc.log")
        tc = tc_guess
        err = float("inf")

    return tc, err
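# Synthetic-data sketch of the fit above (illustrative only, not part of the
# module). The model gmax * tanh(1.74 * sqrt(Tc/T - 1)) closes at T = Tc, so
# curve_fit should recover a known Tc from data generated by the model itself.
def _example_gap_fit():
    import numpy as np
    from scipy.optimize import curve_fit

    def gap_model(t, tc, gmax):
        t = np.array([min(ti, tc) for ti in t])
        return gmax * np.tanh(1.74 * np.sqrt(tc / t - 1))

    ts = np.linspace(1.0, 40.0, 40)
    gs = gap_model(ts, 25.0, 1.0)  # data with a known Tc of 25
    par, cov = curve_fit(gap_model, ts, gs, p0=[20.0, 2.0])
    return par[0]  # -> ~25.0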
def run(self, filename=None, path="./", required=True):

    if filename is None:
        filename = self.default_filename()

    log("Starting {0} {1} calculation ({2}) at {3}:".format(
        "required" if required else "optional",
        self.exe(), filename, path))

    # Remove trailing /'s from path
    path = path.strip()
    path = path.rstrip("/")

    inf = path + "/" + filename + ".in"
    outf = path + "/" + filename + ".out"

    # Test to see if the calculation is already complete
    if is_complete(outf):
        # Log that we are skipping this complete calculation
        msg = "Calculation \"{0}\" is complete, skipping..."
        log(msg.format(outf))
        return self.parse_output(outf)

    # Calculation not complete:
    # create input file, run calculation
    recover = os.path.isfile(outf)
    log("Generating input file...")
    with open(inf, "w") as f:
        f.write(self.gen_input_file(recover=recover))

    log("Setting up command to run...")

    # Get number of processes
    np = self.in_params["cores_per_node"] * self.in_params["nodes"]
    ppn = self.in_params["cores_per_node"]

    # Setup parallelism scheme
    if self.image_parallelization_allowed():
        # Use k-point/image parallelism
        pools = self.in_params["pools"]
        images = self.in_params["images"]
        qe_flags = "-nk {0} -ni {1}".format(pools, images)
    else:
        # Just use k-point parallelism
        qe_flags = "-nk {0}".format(np)

    # Apply overridden q-e location
    bin = self.in_params["path_override"]
    if len(bin) > 0:
        bin += "/"

    # Get the thing that we will run mpi with
    mpirun = self.in_params["mpirun"]

    try:
        # Check if mpirun accepts the -ppn flag
        subprocess.check_output([mpirun, "-ppn", "1", "ls"])
        cmd = "cd {0}; {1} -ppn {2} -np {3} {4} {5} -i {6} > {7}"
        cmd = cmd.format(path, mpirun, ppn, np, bin + self.exe(),
                         qe_flags, inf, outf)
    except Exception:
        # Doesn't accept the -ppn flag
        cmd = "cd {0}; {1} -np {2} {3} {4} -i {5} > {6}"
        cmd = cmd.format(path, mpirun, np, bin + self.exe(),
                         qe_flags, inf, outf)

    log("Running:\n" + cmd)

    # Start tracking thread
    tracking = cpu_tracking_thread(filename=filename + ".cpu")
    tracking.start()

    try:
        # Run calculation, log stdout/stderr
        stdout = subprocess.check_output([cmd], stderr=subprocess.STDOUT,
                                         shell=True)
        log(stdout, filename="qet.stdout")
    except subprocess.CalledProcessError as e:
        # Log subprocess error
        log(e)

    # Stop the cpu-tracking thread whether or not the run succeeded
    tracking.stop()

    # Check for success
    if not is_complete(outf):
        if required:
            msg = "Calculation {0} did not complete, stopping!"
            msg = msg.format(outf)
            log(msg)
            raise RuntimeError(msg)
        else:
            msg = "Calculation {0} did not complete, but isn't required, continuing..."
            log(msg.format(outf))
            return None

    # Parse the output
    return self.parse_output(outf)
def gen_param(self, key):

    # Count the atoms
    if key == "nat":
        return len(self["atoms"])

    # Count the atom types
    if key == "ntyp":
        return len(self["species"])

    # Get a list of the species
    # with masses and pseudo names
    if key == "species":
        spec = []
        for a in self["atoms"]:
            if a[0] in spec:
                continue
            spec.append(a[0])
        for i, s in enumerate(spec):
            spec[i] = [s, elements[s]["mass number"], s + ".UPF"]
        return spec

    # Lattice vector lengths
    if key == "a":
        return np.linalg.norm(self["lattice"][0])
    if key == "b":
        return np.linalg.norm(self["lattice"][1])
    if key == "c":
        return np.linalg.norm(self["lattice"][2])

    # Cell angles in degrees (the dot products are normalized
    # by the vector lengths before taking the arccos)
    if key == "alpha":
        lat = self["lattice"]
        ret = np.dot(lat[1], lat[2])
        ret /= np.linalg.norm(lat[1]) * np.linalg.norm(lat[2])
        return np.arccos(ret) * 180 / np.pi
    if key == "beta":
        lat = self["lattice"]
        ret = np.dot(lat[0], lat[2])
        ret /= np.linalg.norm(lat[0]) * np.linalg.norm(lat[2])
        return np.arccos(ret) * 180 / np.pi
    if key == "gamma":
        lat = self["lattice"]
        ret = np.dot(lat[0], lat[1])
        ret /= np.linalg.norm(lat[0]) * np.linalg.norm(lat[1])
        return np.arccos(ret) * 180 / np.pi

    # Work out the space group
    if key == "space_group_name":
        self.eval_symmetry()
        return self["space_group_name"]

    # Work out the number of symmetry operations
    if key == "sym_ops":
        self.eval_symmetry()
        return self["sym_ops"]

    # Work out a good BZ path
    if key == "bz_path" or key == "high_symmetry_bz_points":

        try:
            import seekpath
        except ImportError:
            log("Could not import seekpath!")
            raise ImportError("Could not import SeeKpath!")

        # Convert the structure into a form that SeeKpath can digest
        frac_coords = []
        atom_numbers = []
        unique_names = []
        for a in self["atoms"]:
            if a[0] not in unique_names:
                unique_names.append(a[0])
        for a in self["atoms"]:
            frac_coords.append(a[1])
            atom_numbers.append(unique_names.index(a[0]))

        # Call SeeKpath to get the BZ path
        structure = (self["lattice"], frac_coords, atom_numbers)
        path = seekpath.get_path(structure,
                                 with_time_reversal=True,
                                 symprec=0.001,
                                 angle_tolerance=0.5,
                                 threshold=0)

        # Work out how many points we have along each segment of the BZ path
        pc = path["point_coords"]
        segs = [[np.array(pc[p[0]]), np.array(pc[p[1]])] for p in path["path"]]
        seg_names = [[p[0], p[1]] for p in path["path"]]
        seg_lengths = [np.linalg.norm(s[1] - s[0]) for s in segs]
        tot_length = sum(seg_lengths)
        seg_counts = [max(int(self["bz_path_points"] * l / tot_length), 2)
                      for l in seg_lengths]

        kpoints = []           # Will contain the k-points in the path
        high_symm_points = {}  # Will contain the names of high-symmetry points along the path
        for i, c in enumerate(seg_counts):
            pts = np.linspace(0.0, 1.0, c)
            pts = [segs[i][0] + (segs[i][1] - segs[i][0]) * p for p in pts]
            high_symm_points[len(kpoints)] = seg_names[i][0]
            kpoints.extend(pts)

            if i + 1 < len(seg_names) and seg_names[i][1] == seg_names[i + 1][0]:
                kpoints.pop()  # Remove repeated high symmetry point
            else:
                high_symm_points[len(kpoints) - 1] = seg_names[i][1]

        self["high_symmetry_bz_points"] = high_symm_points
        self["bz_path"] = kpoints

        if key == "bz_path":
            return kpoints
        else:
            return high_symm_points

    # Find the pseudo_dir that contains
    # all of the needed pseudopotentials
    if key == "pseudo_dir":

        # Work out which pseudo_dirs contain
        # which pseudopotentials
        found_in = {}
        for s, m, p in self["species"]:
            found_in[p] = []
            for pd in self["pseudo_dirs"]:
                if not os.path.isdir(pd):
                    continue
                if p in os.listdir(pd):
                    found_in[p].append(pd)

        # See if any one pseudo_dir contains
        # all of the needed pseudos
        for pd in self["pseudo_dirs"]:
            has_all = True
            for p in found_in:
                if pd not in found_in[p]:
                    has_all = False
                    break
            # This pseudo_dir contains all the
            # needed pseudos, go ahead and use it
            if has_all:
                return pd

        # See if we can combine pseudos from
        # multiple directories
        for p in found_in:
            if len(found_in[p]) == 0:
                err = "Could not find the pseudopotential "
                err += p + " in any of:"
                for pd in self["pseudo_dirs"]:
                    err += "\n" + pd
                raise ParamNotFound(err)

        # We have found all the pseudos; collect them into the
        # working directory, recording where each one came from
        with open("pseudopotential_origin", "w") as pof:
            for p in found_in:
                # Copy the first found pseudo to the working directory
                os.system("cp " + found_in[p][0] + "/" + p + " .")
                pof.write(p + " from " + found_in[p][0] + "\n")

        return "./"

    # Get a dictionary of the form {atom name : count}
    if key == "atom_counts":
        atom_counts = defaultdict(lambda: 0)
        for a in self["atoms"]:
            atom_counts[a[0]] += 1
        return atom_counts

    # Return the stoichiometry
    # of the given cell as a string
    if key == "stoichiometry_string":
        atom_counts = self["atom_counts"]
        ss = ""
        for a in atom_counts:
            ss += a + "_{0}_".format(atom_counts[a])
        ss = ss[0:-1]  # Trim the trailing underscore
        return ss

    # Get an estimate for the volume of the
    # cell by approximating each atom as
    # a covalent-radius sphere
    if key == "covalent_volume":
        vol = 0.0
        for a in self["atoms"]:
            vol += elements[a[0]]["covalent radius"] ** 3.0
        return np.pi * vol * 4.0 / 3.0

    # Default to ecutrho = 10*ecutwfc
    if key == "ecutrho":
        return 10 * self["ecutwfc"]

    # Generate the qpoint grid
    if key == "qpoint_grid":
        # Generate qpoint grid from spacing/min_q_grid_size
        rlat = np.linalg.inv(self["lattice"]).T
        qps = float(self["qpoint_spacing"])
        b2q = lambda b: int(np.linalg.norm(b) / qps)
        grid = [max(self["min_q_grid_size"], b2q(b)) for b in rlat]
        if self["force_cube_grids"]:
            grid = [max(grid) for g in grid]
        return grid

    # Get individual components of qpoint grid
    if key == "nq1":
        return self["qpoint_grid"][0]
    if key == "nq2":
        return self["qpoint_grid"][1]
    if key == "nq3":
        return self["qpoint_grid"][2]

    # Get individual components of the interpolated qpoint grid
    if key == "ph_interp_nq1":
        return self["qpoint_grid"][0] * self["ph_interp_amt"]
    if key == "ph_interp_nq2":
        return self["qpoint_grid"][1] * self["ph_interp_amt"]
    if key == "ph_interp_nq3":
        return self["qpoint_grid"][2] * self["ph_interp_amt"]

    # Phonon interpolation output files from prefix
    if key == "ph_interp_dos_file":
        return self["ph_interp_prefix"] + ".dos"
    if key == "ph_interp_freq_file":
        return self["ph_interp_prefix"] + ".freq"
    if key == "ph_interp_modes_file":
        return self["ph_interp_prefix"] + ".modes"
    if key == "ph_interp_eig_file":
        return self["ph_interp_prefix"] + ".eig"

    # Generate the kpoint grid
    if key == "kpoint_grid":

        if "kpoint_spacing" in self.par:
            # Generate kpoint grid from spacing
            rlat = np.linalg.inv(self["lattice"]).T
            kps = float(self["kpoint_spacing"])
            b2k = lambda b: int(np.linalg.norm(b) / kps)
            return [b2k(b) for b in rlat]

        elif "kpts_per_qpt" in self.par:
            # Generate kpoint grid from qpoint grid
            kpq = self["kpts_per_qpt"]
            qpg = self["qpoint_grid"]
            return [kpq * q for q in qpg]

        else:
            msg = "Could not generate k-point grid from parameter set."
            raise ParamNotFound(msg)

    # Default to k-point parallelism
    if key == "pools":
        return self["cores_per_node"] * self["nodes"]
    if key == "images":
        return 1

    # Get the default k-point grids for calculating Tc
    if key == "tc_kpqs":
        return [self["kpts_per_qpt"] - 1, self["kpts_per_qpt"]]

    # Default q-e bin/ path = environment path
    if key == "path_override":
        return ""

    # If total_walltime is > 0, this will be the time returned by
    # time.time() when we will run out of walltime. Otherwise it is -1.
    if key == "end_time":
        if self["total_walltime"] < 0:
            return -1
        return parameters.first_init_time + self["total_walltime"]

    # Returns the maximum seconds we let a calculation run for
    # from this moment in time. Equal to the end_time minus
    # the current time minus the tidy_up_time.
    if key == "max_seconds":
        if self["end_time"] < 0:
            return 10e7  # No end time => essentially infinite time
        return max(self["end_time"] - time.time() - self["tidy_up_time"], 0)

    # This wasn't one of the generatable keys; treat this as a
    # KeyError, so that the QE default value is used (if there is one)
    msg = "Key \"{0}\" could not be generated in parameters object."
    raise KeyError(msg.format(key))
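# Quick standalone check of the cell-angle formula used above (illustrative,
# not part of the module): for a cubic cell every angle should come out at
# 90 degrees.
def _example_cell_angles():
    lat = np.eye(3) * 4.0
    cos_alpha = np.dot(lat[1], lat[2]) / (np.linalg.norm(lat[1]) * np.linalg.norm(lat[2]))
    return np.arccos(cos_alpha) * 180 / np.pi  # -> 90.0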
#!/home/mjh261/bin/anaconda3/bin/python
from qet.params import parameters
from qet.calculations import calculate_tc
from qet.logs import log
import os
import sys

# Make, and move to, the calculation directory
f = sys.argv[1]
dname = f.replace(".in", "")
os.system("mkdir " + dname)
os.chdir(dname)

# Start the calculation
log("Starting TC calculation in " + dname)
p = parameters()
p.load("../" + f)
calculate_tc(p)
def __init__(self, filename=None):

    # Record the first initialization time
    if parameters.first_init_time is None:
        parameters.first_init_time = time.time()
        log("First init time set to {0}".format(parameters.first_init_time))

    # Set the default parameters;
    # any unspecified parameters will be left
    # unspecified in input files and therefore
    # result in QE default values being used
    self.par = {}

    self["outdir"] = "./"              # outdir = working dir
    self["ibrav"] = 0                  # no bravais-lattice index
    self["ecutwfc"] = 60               # plane-wave cutoff (Ry)
    self["occupations"] = "smearing"   # treat as metallic
    self["degauss"] = 0.02             # metal smearing width (Ry)
    self["qpoint_spacing"] = 0.15      # qpoint spacing (2pi A^-1)
    self["min_q_grid_size"] = 2        # minimum q-points to a side for a q-point grid
    self["force_cube_grids"] = False   # True if grids must be of the form NxNxN
    self["kpts_per_qpt"] = 10          # ratio of kpt to qpt grid
    self["ldisp"] = True               # use a grid of q-points
    self["reduce_io"] = True           # reduce io to a strict minimum
    self["fildvscf"] = "dvscf"         # potential variation file
    self["electron_phonon"] = "interpolated"  # electron-phonon method
    self["el_ph_sigma"] = 0.0025       # smearing spacing
    self["el_ph_nsigma"] = 50          # smearing points
    self["fildyn"] = "matdyn"          # dynamical matrix prefix
    self["flfrc"] = "force_constants"  # force constants filename
    self["zasr"] = "simple"            # acoustic sum rule to apply
    self["ph_interp_amt"] = 8          # phonon interpolation grid size (as multiple of qpoint_grid)
    self["ndos"] = 200                 # number of energy steps to use when interpolating DOS
    self["ph_interp_prefix"] = "ph_interp"  # prefix for files produced by phonon interpolation
    self["pseudo_dirs"] = []           # directories to search for pseudopotentials
    self["bz_path_points"] = 100       # approximate number of points along a BZ path
    self["total_walltime"] = 129600    # allocated walltime in seconds (negative => treat as infinite)
    self["tidy_up_time"] = 1800        # time reserved for tidying up at the end of total_walltime
    self["mpirun"] = "mpirun"          # the mpi runner that we want to use

    # By default, assume cores_per_node is
    # equal to the number of cores where the
    # script is running and nodes = 1
    self["nodes"] = 1
    try:
        import multiprocessing
        self["cores_per_node"] = multiprocessing.cpu_count()
    except ImportError:
        self["cores_per_node"] = 1

    # Add HOME/pseudopotentials and PSEUDO_DIR to the
    # pseudopotential directories to search, if they exist
    if "HOME" in os.environ:
        pd = os.environ["HOME"] + "/pseudopotentials"
        if pd not in self["pseudo_dirs"]:
            self["pseudo_dirs"].append(pd)
    if "PSEUDO_DIR" in os.environ:
        pd = os.environ["PSEUDO_DIR"]
        if pd not in self["pseudo_dirs"]:
            self["pseudo_dirs"].append(pd)

    if filename is not None:
        self.load(filename)

    # Output timing information
    log("Initialized parameters object at time {0}".format(time.time()))
    log("    first_init_time : {0}".format(parameters.first_init_time))
    log("    total_walltime  : {0}".format(self["total_walltime"]))
    log("    end_time        : {0}".format(self["end_time"]))
    log("    max_seconds     : {0}".format(self["max_seconds"]))
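# Illustrative usage of the parameters object (the file name is hypothetical):
# defaults are set in __init__ above, and can be overridden by item assignment
# or by loading an input file.
def _example_parameters_usage():
    p = parameters()               # defaults only
    p["ecutwfc"] = 80              # raise the plane-wave cutoff (Ry)
    p["kpts_per_qpt"] = 12         # denser k-point grid per q-point
    q = parameters("example.in")   # hypothetical: defaults, then file values
    return p, q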
def expand_vertex(self, vertex, param_mutation, is_valid):

    if not os.path.isdir(vertex.dir):
        raise Exception("Tried to expand a vertex that wasn't in the network!")

    # Nice formatting
    expand_text = "{:^64}".format("Expanding vertex " + vertex.name)
    underline = "".join("-" for c in expand_text)
    log("\n" + expand_text, "alchemy.log")
    log(underline, "alchemy.log")

    # Apply mutation, return None on failure
    mutation = param_mutation(vertex.params)
    if mutation is None:
        log("Mutation {0} returned None".format(param_mutation.__name__),
            "alchemy.log")
        log(underline, "alchemy.log")
        return None

    # Check the structure produced is valid
    if not is_valid(mutation):
        log("Mutation {0} produced an invalid structure".format(param_mutation.__name__),
            "alchemy.log")
        log(underline, "alchemy.log")
        return None

    # Adjust the volume of the new structure in an
    # attempt to accommodate the mutation
    cov_volume_factor = mutation["covalent_volume"] / vertex.params["covalent_volume"]
    lat_volume_factor = np.linalg.det(mutation["lattice"]) / np.linalg.det(vertex.params["lattice"])
    volume_boost = cov_volume_factor / lat_volume_factor
    mutation["lattice"] *= volume_boost ** (1.0 / 3.0)

    # Create new vertex
    new_vertex = self.create_vertex(mutation)
    new_vertex.add_parent(vertex, param_mutation.__name__)
    log(underline, "alchemy.log")
    return new_vertex
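# Numeric sanity check of the volume adjustment above (illustrative, not part
# of the module). Scaling each lattice vector by volume_boost**(1/3)
# multiplies the cell volume by exactly volume_boost.
def _example_volume_boost():
    lattice = np.eye(3) * 3.0  # cubic cell, volume 27
    volume_boost = 2.0
    lattice = lattice * volume_boost ** (1.0 / 3.0)
    return np.linalg.det(lattice)  # -> ~54.0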
def eval_objective(structure, objective, structure_compare):

    name = structure["stoichiometry_string"]
    version = 1

    log("Evaluating objective for {0}".format(name), "alchemy.log")

    # Find previous versions
    for d in os.listdir("."):
        if not os.path.isdir(d):
            continue
        if not d.startswith(name):
            continue
        version += 1

        # Parse the previously evaluated structure to see if it's
        # the same (according to structure_compare), in which case
        # we need not re-do the optimization
        lattice = []
        atoms = []
        with open(d + "/objective.log") as f:
            for line in f:
                splt = line.split()
                if len(splt) == 0:
                    continue
                if splt[0] == "lattice":
                    lattice.append([float(w) for w in splt[1:]])
                    continue
                if splt[0] == "atom":
                    atoms.append([splt[1], [float(w) for w in splt[2:]]])
                    continue
                if splt[0] == "objective":
                    obj = float(splt[1])
                    continue

        if structure_compare(lattice, atoms,
                             structure["lattice"], structure["atoms"]):
            # Structure is the same as one previously evaluated
            log("    From equivalent structure in " + d, "alchemy.log")
            log("    Objective = {0}".format(obj), "alchemy.log")
            return obj

    # Count previous objective evaluations
    # so we know which evaluation this is
    objective_number = 1
    for d in os.listdir("."):
        if not os.path.isdir(d):
            continue
        if not os.path.isfile(d + "/objective.log"):
            continue
        objective_number += 1

    # Create the directory to evaluate this
    # objective in
    obj_dir = "{0}_v{1}".format(name, version)
    log("    From new directory " + obj_dir, "alchemy.log")
    os.system("mkdir " + obj_dir)
    os.chdir(obj_dir)

    # Try to calculate the objective;
    # set it to inf on failure
    try:
        # Evaluate and record the objective
        obj = objective(structure)
    except Exception as e:
        # Flag failed calculation, but don't stop
        log("    Objective evaluation failed with the below error\n    {0}".format(e),
            "alchemy.log")
        obj = float("inf")

    # Write the results of the objective evaluation
    with open("objective.log", "w") as f:

        # Write the objective/evaluation number to file
        f.write("n {0}\n".format(objective_number))
        f.write("objective {0}\n".format(obj))

        # Note the structure, in case we arrive at the
        # same structure later
        for l in structure["lattice"]:
            f.write("lattice {0} {1} {2}\n".format(*l))
        for a in structure["atoms"]:
            f.write("atom {0} {1} {2} {3}\n".format(a[0], *a[1]))

    # Move back to the previous directory
    os.chdir("..")

    log("    Objective = {0}".format(obj), "alchemy.log")
    return obj
def objective(self, objective):

    # Return a stored objective value if we have one
    stored = self.get_evaluated_objectives()
    if objective.__name__ in stored:
        return stored[objective.__name__]

    # Attempt to evaluate the objective.
    #
    # If evaluation fails (i.e. the underlying objective
    # could not be evaluated), the objective is set to inf.
    #
    # If evaluation is already underway on another process (i.e. we cannot
    # acquire objective_lock within some timeout), return inf this time,
    # but do not store inf for future retrieval.
    try:
        with self.lock("objective_lock").acquire(timeout=1.0):

            # Set the working directory to the vertex directory
            old_wd = os.getcwd()
            os.chdir(self.dir)

            try:
                # Attempt to evaluate the objective
                log("Evaluating objective in {0}".format(self.name), "alchemy.log")
                obj = objective(self.params)

                # If the objective has length 2, we assume it has the form
                # [objective value, new structure]. If it has length 1 assume
                # it is of the form [objective value], but issue a warning,
                # as the function should probably not have returned a list if
                # there was only one thing to return. All other lengths are errors.
                if hasattr(obj, "__len__"):
                    if len(obj) > 2:
                        raise Exception("Objective returned too many results!")
                    elif len(obj) == 0:
                        raise Exception("Objective returned too few results!")
                    elif len(obj) == 1:
                        fs = "objective assumed to be of the form [objective value] in {0}"
                        log(fs.format(self.name), "alchemy.warnings")
                        obj = obj[0]
                    else:
                        fs = "Objective returned [objective value, new structure] in {0}"
                        log(fs.format(self.name), "alchemy.log")
                        self.params = obj[1]
                        obj = obj[0]

                fs = "evaluated objective in {2}\n    {0} = {1}"
                log(fs.format(objective.__name__, obj, self.name), "alchemy.log")

            except Exception as e:
                # Failed to evaluate the objective, set it to inf
                fs = "failed to evaluate the objective\n    {0} in {1}"
                log(fs.format(objective.__name__, self.name), "alchemy.log")
                log("error: {0}".format(e), "alchemy.log")
                obj = float("inf")

            # Write the values of all the objectives to file
            with self.lock("obj_file_lock"):
                stored = self.get_evaluated_objectives()
                stored[objective.__name__] = obj
                with open("objectives", "w") as f:
                    for o in stored:
                        f.write("{0}:{1}\n".format(o, stored[o]))

            # Restore working directory
            os.chdir(old_wd)
            return obj

    except Timeout:
        fs = "Evaluation already underway in {0}".format(self.name)
        log(fs, "alchemy.log")
        return float("inf")
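# The lock() helper used above is not shown in this module; the following is a
# minimal sketch of what it is assumed to provide, inferred from the usage
# above (a filelock-style acquire(timeout=...) context manager and the
# Timeout exception, which filelock also provides).
def _sketch_vertex_lock(vertex_dir, name):
    from filelock import FileLock  # Timeout comes from filelock too
    # One lock file per named lock, kept alongside the vertex data
    return FileLock(vertex_dir + "/" + name + ".lock")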
def calculate_tc(parameters, primary_only=False, skip_elph=False,
                 phonons_only=False, phonons_from_elph=False, tidy_after=True):

    log("Tc calculation using parameters:")
    log(str(parameters))

    # Get the k-point grid sizes needed to determine
    # the most sensible double-delta smearing parameter
    kpq = parameters["tc_kpqs"]
    kpqs = {}
    for k in kpq:
        kpqs["kpq_{0}".format(k)] = k

    # Save working directory
    base_wd = os.getcwd()

    # Run an electron-phonon coupling calculation
    # for each of the kpoint grid sizes
    for dirname in kpqs:
        try:
            if tc_calculation_complete(dirname):
                log("Calculation " + base_wd + "/" + dirname +
                    " complete, skipping...")
                continue

            # Go into a directory for this kpoint grid
            os.system("mkdir " + dirname)
            os.chdir(dirname)
            log("Switched to directory " + dirname)

            # Setup the kpoint grid for this directory
            parameters["kpts_per_qpt"] = kpqs[dirname]

            # Relax the structure (we don't need the
            # la2F flag for the relaxation)
            parameters["la2F"] = False
            res = relax(parameters).run()
            parameters["atoms"] = res["relaxed atoms"]
            parameters["lattice"] = res["relaxed lattice"]

            # Calculate the projected density of states/bandstructure
            try:
                proj_dos(parameters).run(required=False)
                bands(parameters).run(required=False)
                extract_bands(parameters).run(required=False)
            except Exception as ignored_ex:
                log("Encountered an exception when carrying out a non-essential task.")
                log(str(ignored_ex))

            if not skip_elph:

                # We're going to need the Eliashberg function from now on
                # (unless we're just doing the phonons)
                parameters["la2F"] = not phonons_only

                # Run the succession of necessary calculations
                scf(parameters).run()
                if phonons_only:
                    phonon_grid(parameters).run()
                else:
                    electron_phonon_grid(parameters).run()

            # We're recovering just the phonon information
            # from an electron-phonon calculation
            if phonons_from_elph:
                parameters["la2F"] = False
                q2r(parameters).run()
                interpolate_phonon(parameters).run()

            # El-Ph complete, tidy up the huge files created
            if tidy_after:
                tidy_tc_calculations()

        except Exception as e:
            # Return to the base directory before re-raising
            os.chdir(base_wd)
            raise e

        # Go back to the base directory
        os.chdir(base_wd)
def optimize(
        start_structure,  # Structure to begin from
        objective,        # Objective function to minimize; called as objective(structure)
        mutations,        # Allowed mutations of the structure (see alchemy.mutations)
        check_valid=lambda s: True,  # Check if a given structure is allowable
        max_iter=100,     # Maximum optimization steps
        # Function that determines if two structures of the same stoichiometry
        # are similar enough to not need recalculating; takes
        # (lattice_1, atoms_1, lattice_2, atoms_2)
        structure_compare=lambda l1, a1, l2, a2: False):

    # Check arguments are sensible
    if not isinstance(start_structure, parameters):
        raise TypeError("The input structure should be a 'parameters' object!")

    optimize_start_time = datetime.now()

    # Create the optimization directory
    opt_dir = "alchemical_optimization"
    n = 0
    while os.path.isdir(opt_dir):
        n += 1
        opt_dir = "alchemical_optimization_{0}".format(n)
    os.system("mkdir " + opt_dir)

    # Set the opt_dir as our working directory
    os.chdir(opt_dir)
    log("Starting optimization...", "alchemy.log")

    # Initialize the structure and the
    # value of the objective
    structure = start_structure
    last_obj = eval_objective(structure, objective, structure_compare)

    # Initialize the path
    path = [{"structure": structure, "objective": last_obj, "proposal": False}]

    # Run optimization iterations
    iteration = 1
    while True:

        # Generate a new structure
        new_structure = randomly_mutate(structure, mutations, check_valid)
        new_obj = eval_objective(new_structure, objective, structure_compare)

        # Add it as a "proposal" point along the path
        path.append({
            "structure": new_structure,
            "objective": new_obj,
            "proposal": True
        })

        log("Old objective value: {0}".format(last_obj), "alchemy.log")
        log("New objective value: {0}".format(new_obj), "alchemy.log")

        if new_obj < last_obj:
            # Accept the new structure
            structure = new_structure
            last_obj = new_obj
            log("Mutation accepted", "alchemy.log")
        else:
            log("Mutation rejected", "alchemy.log")

        # Record the accepted (or reverted)
        # structure/objective
        path.append({
            "structure": structure,
            "objective": last_obj,
            "proposal": False
        })

        iteration += 1
        if iteration > max_iter:
            log("\nMax iter = {0} hit, stopping.".format(max_iter), "alchemy.log")
            break

    # Output time taken
    optimize_end_time = datetime.now()
    fs = "Optimization complete, total time {0}"
    fs = fs.format(optimize_end_time - optimize_start_time)
    log(fs, "alchemy.log")

    return path
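# End-to-end usage sketch for optimize (illustrative; the input file, the
# stand-in objective and the iteration budget are all hypothetical). A real
# objective would run calculations on the structure and return a float.
def _example_optimize():
    start = parameters("example.in")           # hypothetical input file
    toy_objective = lambda s: len(s["atoms"])  # stand-in: favour small cells
    path = optimize(start, toy_objective,
                    mutations=[substitute_random_atom, remove_random_atom,
                               shuffle_atoms, duplicate_random_atom],
                    max_iter=10)
    return path[-1]["structure"], path[-1]["objective"]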