def point_energy_match(file_a, energy_a, file_b, energy_b):
    """Return True iff two configurations match in energy and geometry.

    file_a/file_b: .con files (paths or file objects) accepted by io.loadcon.
    energy_a/energy_b: their energies, compared against config.comp_eps_e.
    """
    # Cheap energy filter first: skip the expensive structural comparison
    # when the energies already differ by more than the tolerance.
    if abs(energy_a - energy_b) > config.comp_eps_e:
        return False
    a = io.loadcon(file_a)
    b = io.loadcon(file_b)
    # Fix: the original fell through and implicitly returned None on a
    # geometry mismatch; always return an explicit bool.
    return bool(match(a, b, config.comp_eps_r, config.comp_neighbor_cutoff, False))
def finish_minimization(self, result):
    """Record the outcome of the two minimizations of a process search.

    result: job-result dict for the second minimization (min2); the first
    minimization's result is reloaded from self.finished_min1_name.
    Decides which minimum is the reactant, stores energies and the
    forward/reverse barriers in self.data, or sets a failure code
    (9 = minimization hit max iterations, 6 = saddle not connected).
    """
    result1 = self.load_result(self.finished_min1_name)
    result2 = result
    atoms1 = io.loadcon(result1['min.con'])
    atoms2 = io.loadcon(result2['min.con'])
    results_dat1 = io.parse_results(result1['results.dat'])
    results_dat2 = io.parse_results(result2['results.dat'])
    self.data['force_calls_minimization'] += results_dat1['total_force_calls']
    self.data['force_calls_minimization'] += results_dat2['total_force_calls']

    # True when a minimized geometry matches this search's reactant.
    is_reactant = lambda a: atoms.match(a, self.reactant, config.comp_eps_r,
                                        config.comp_neighbor_cutoff, False)

    # Fix: reuse the already-parsed results.dat data instead of parsing
    # each results file a second time just for the termination reason.
    tc1 = results_dat1['termination_reason']
    tc2 = results_dat2['termination_reason']
    termination_reason1 = self.job_termination_reasons['minimization'][tc1]
    termination_reason2 = self.job_termination_reasons['minimization'][tc2]

    # Keys zeroed whenever the search is abandoned.
    energy_keys = ('potential_energy_saddle', 'potential_energy_reactant',
                   'potential_energy_product', 'barrier_reactant_to_product',
                   'barrier_product_to_reactant')

    if termination_reason1 == 'max_iterations' or termination_reason2 == 'max_iterations':
        # At least one minimization failed to converge.
        self.data['termination_reason'] = 9
        for key in energy_keys:
            self.data[key] = 0.0
        return

    # Check the connectivity of the process: exactly one minimum must match
    # the reactant, otherwise the saddle does not connect reactant to a new
    # product.  Hoisted so each (expensive) match runs only once.
    atoms1_is_reactant = is_reactant(atoms1)
    atoms2_is_reactant = is_reactant(atoms2)
    if (not atoms1_is_reactant and not atoms2_is_reactant) or \
       (atoms1_is_reactant and atoms2_is_reactant):
        # Not connected
        self.data['termination_reason'] = 6
        for key in energy_keys:
            self.data[key] = 0.0
        return
    elif atoms1_is_reactant:
        reactant_results_dat = results_dat1
        product_results_dat = results_dat2
        self.finished_reactant_name = self.finished_min1_name
        self.finished_product_name = self.finished_min2_name
    elif atoms2_is_reactant:
        reactant_results_dat = results_dat2
        product_results_dat = results_dat1
        self.finished_reactant_name = self.finished_min2_name
        self.finished_product_name = self.finished_min1_name

    self.data['potential_energy_reactant'] = reactant_results_dat['potential_energy']
    self.data['potential_energy_product'] = product_results_dat['potential_energy']
    self.data['barrier_reactant_to_product'] = self.data['potential_energy_saddle'] - \
        self.data['potential_energy_reactant']
    self.data['barrier_product_to_reactant'] = self.data['potential_energy_saddle'] - \
        self.data['potential_energy_product']
def add_state(self, result_files, result_info):
    """Register a minimized structure; return True if it was a new state.

    result_files: dict of job output files (must contain 'min.con' as an
    in-memory file object); result_info: parsed results.dat dict with
    'minimum_energy'.  A repeat (energy within comp_eps_e and matching
    geometry) only increments the existing state's 'repeats' counter.
    """
    energy = result_info['minimum_energy']

    # States whose energy is close enough to possibly be the same minimum.
    energetically_close = []
    for row in self.energy_table:
        if abs(energy - row['energy']) < config.comp_eps_e:
            energetically_close.append(row['state'])

    added = True
    if len(energetically_close) != 0:
        a1 = io.loadcon(result_files['min.con'])
        for state_number in energetically_close:
            state_con_path = os.path.join(config.path_states,
                                          str(state_number), 'minimum.con')
            a2 = io.loadcon(state_con_path)
            if atoms.match(a1, a2, config.comp_eps_r,
                           config.comp_neighbor_cutoff, True):
                logger.info("Found a repeat of state %i", state_number)
                added = False
                for row in self.energy_table.rows:
                    if row['state'] == state_number:
                        row['repeats'] += 1
                        self.energy_table.write()
                        break

    if added:
        state_number = len(self.energy_table)
        row = {'state': state_number, 'energy': energy, 'repeats': 0}
        self.energy_table.add_row(row)
        # Keep the table sorted from highest to lowest energy.
        self.energy_table.rows.sort(key=lambda r: -r['energy'])
        self.energy_table.write()
        state_path = os.path.join(config.path_states, str(state_number))
        os.mkdir(state_path)
        # The job calls the file min.con; store it under the canonical name.
        result_files['minimum.con'] = result_files.pop('min.con')
        for fn, fh in result_files.iteritems():
            # Skip entries that are not in-memory file objects.
            if not hasattr(fh, 'getvalue'):
                continue
            p = os.path.join(state_path, fn)
            # Fix: close the output file promptly instead of leaking it.
            with open(p, 'w') as f:
                f.write(fh.getvalue())
    return added
def add_state(self, result_files, result_info):
    """Register a minimized structure; return True if it was a new state.

    result_files: dict of job output files (must contain 'min.con' as an
    in-memory file object); result_info: parsed results.dat dict with
    'minimum_energy'.  A repeat (energy within comp_eps_e and matching
    geometry) only increments the existing state's 'repeats' counter.
    """
    energy = result_info['minimum_energy']

    # States whose energy is close enough to possibly be the same minimum.
    energetically_close = []
    for row in self.energy_table:
        if abs(energy - row['energy']) < config.comp_eps_e:
            energetically_close.append(row['state'])

    added = True
    if len(energetically_close) != 0:
        a1 = io.loadcon(result_files['min.con'])
        for state_number in energetically_close:
            state_con_path = os.path.join(config.path_states,
                                          str(state_number), 'minimum.con')
            a2 = io.loadcon(state_con_path)
            if atoms.match(a1, a2, config.comp_eps_r,
                           config.comp_neighbor_cutoff, True):
                logger.info("Found a repeat of state %i", state_number)
                added = False
                for row in self.energy_table.rows:
                    if row['state'] == state_number:
                        row['repeats'] += 1
                        self.energy_table.write()
                        break

    if added:
        state_number = len(self.energy_table)
        row = {'state': state_number, 'energy': energy, 'repeats': 0}
        self.energy_table.add_row(row)
        # Keep the table sorted from highest to lowest energy.
        self.energy_table.rows.sort(key=lambda r: -r['energy'])
        self.energy_table.write()
        state_path = os.path.join(config.path_states, str(state_number))
        os.mkdir(state_path)
        # The job calls the file min.con; store it under the canonical name.
        result_files['minimum.con'] = result_files.pop('min.con')
        for fn, fh in result_files.iteritems():
            # Skip entries that are not in-memory file objects.
            if not hasattr(fh, 'getvalue'):
                continue
            p = os.path.join(state_path, fn)
            # Fix: close the output file promptly instead of leaking it.
            with open(p, 'w') as f:
                f.write(fh.getvalue())
    return added
def start_minimization(self, which_min):
    """Build the job dict for one of the two minimizations off the saddle.

    which_min: "min1" or "min2"; min2 is displaced along the negated mode.
    Returns a dict mapping job filenames to in-memory file objects.
    """
    job = {}
    saddle_path = os.path.join(config.path_incomplete, self.finished_saddle_name)

    # Fix: use context managers so the files are closed even on error.
    with open(os.path.join(saddle_path, "mode.dat")) as mode_file:
        mode = io.load_mode(mode_file)
    with open(os.path.join(saddle_path, "saddle.con")) as reactant_file:
        reactant = io.loadcon(reactant_file)

    # The two minimizations start on opposite sides of the saddle.
    if which_min == "min2":
        mode = -mode
    reactant.r += config.process_search_minimization_offset * mode

    reactIO = StringIO.StringIO()
    io.savecon(reactIO, reactant)
    job['pos.con'] = reactIO

    ini_changes = [('Main', 'job', 'minimization')]
    job['config.ini'] = io.modify_config(config.config_path, ini_changes)
    return job
def get_saddle(self):
    """Return the saddle geometry for the finished saddle search, or None."""
    # Guard clause: no finished saddle search means nothing to load.
    if not self.finished_saddle_name:
        return None
    saddle_result = self.load_result(self.finished_saddle_name)
    return io.loadcon(saddle_result['saddle.con'])
def start_minimization(self, which_min):
    """Build the job dict for one of the two minimizations off the saddle.

    which_min: "min1" or "min2"; min2 is displaced along the negated mode.
    Returns a dict mapping job filenames to in-memory file objects.
    """
    job = {}
    saddle_path = os.path.join(config.path_incomplete, self.finished_saddle_name)

    # Fix: use context managers so the files are closed even on error.
    with open(os.path.join(saddle_path, "mode.dat")) as mode_file:
        mode = io.load_mode(mode_file)
    with open(os.path.join(saddle_path, "saddle.con")) as reactant_file:
        reactant = io.loadcon(reactant_file)

    # The two minimizations start on opposite sides of the saddle.
    if which_min == "min2":
        mode = -mode
    reactant.r += config.process_search_minimization_offset * mode

    reactIO = StringIO.StringIO()
    io.savecon(reactIO, reactant)
    job['pos.con'] = reactIO

    ini_changes = [('Main', 'job', 'minimization')]
    job['config.ini'] = io.modify_config(config.config_path, ini_changes)
    return job
def find_repeat(self, saddle_file, barrier):
    """Return the id of a known process with a matching saddle, else None.

    saddle_file: saddle .con file (path or file object);
    barrier: its barrier, pre-filtered against each stored barrier
    using config.comp_eps_e before the geometry comparison.
    """
    self.load_process_table()
    p1 = io.loadcon(saddle_file)
    # Fix: iterate items directly instead of keys()+lookup, and avoid
    # shadowing the builtin `id`.
    for proc_id, proc in self.procs.items():
        # Cheap energy filter before the expensive geometry comparison.
        if abs(barrier - proc['barrier']) > config.comp_eps_e:
            continue
        # Cache loaded saddles: each may be compared against many times.
        if proc_id in self.con_cache:
            p2 = self.con_cache[proc_id]
        else:
            p2 = io.loadcon(self.proc_saddle_path(proc_id))
            self.con_cache[proc_id] = p2
        if atoms.match(p1, p2, config.comp_eps_r,
                       config.comp_neighbor_cutoff, False):
            return proc_id
    return None
def add_process(self, result):
    """ Adds a process to this state; returns its id, or None on bad input. """
    state.State.add_process(self, result)

    resultdata = result["results"]  # The information from the result.dat file

    # We may not already have the energy for this State.  If not, it should
    # be in the result data.
    if self.get_energy() is None:
        self.set_energy(resultdata["potential_energy_reactant"])

    # Check that the reactant and product files parse before storing them.
    try:
        if 'reactant' not in result:
            io.loadcon(result['reactant.con'])
        if 'product' not in result:
            io.loadcon(result['product.con'])
    except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        logger.exception("Reactant or product has incorrect format")
        return None

    # The id of this process is the number of processes.
    id = self.get_num_procs()

    # Move the relevant files into the procdata directory, closing each
    # handle promptly instead of leaking it.
    with open(self.proc_reactant_path(id), 'w') as f:
        f.writelines(result['reactant.con'].getvalue())
    with open(self.proc_product_path(id), 'w') as f:
        f.writelines(result['product.con'].getvalue())
    with open(self.proc_results_path(id), 'w') as f:
        f.writelines(result['results.dat'].getvalue())

    # Append this barrier to the process table (in memory and on disk).
    self.append_process_table(id=id,
                              product=-1,
                              product_energy=resultdata["potential_energy_product"],
                              time=resultdata["transition_time_s"])

    # This was a unique process, so return the id.
    return id
def make_suggestion():
    """Return a (displacement, mode) saddle suggestion from the kdb scratch
    directory, or (None, None) when kdb is unavailable or no match is ready.
    Consumed match files are deleted from the scratch directory."""
    try:
        import eon.kdb as kdb
    except ImportError:  # fix: catch only a failed import, not every error
        logger.error('Python module kdb not found, kdb will not be used.')
        return None, None
    # Hoist the repeated path join.
    kdbmatches = os.path.join(config.kdb_scratch_path, "kdbmatches")
    if os.path.isdir(kdbmatches):
        dones = glob.glob(os.path.join(kdbmatches, ".done_*"))
        if len(dones) > 0:
            number = dones[0].split("_")[1]
            saddle_path = os.path.join(kdbmatches, "SADDLE_%s" % number)
            try:
                displacement = io.loadcon(saddle_path)
            except (FloatingPointError, ValueError):
                # Not a .con file; fall back to POSCAR format.
                displacement = io.loadposcar(saddle_path)
            mode_path = os.path.join(kdbmatches, "MODE_%s" % number)
            with open(mode_path, 'r') as mode_file:
                mode = [[float(i) for i in l.strip().split()]
                        for l in mode_file.readlines()]
            # Consume the match so it is not suggested again.
            os.remove(os.path.join(kdbmatches, ".done_%s" % number))
            os.remove(saddle_path)
            os.remove(mode_path)
            return displacement, mode
    return None, None
def make_suggestion():
    """Return a (displacement, mode) saddle suggestion from the kdb scratch
    directory, or (None, None) when kdb is unavailable or no match is ready.
    Consumed match files are deleted from the scratch directory."""
    try:
        import eon.kdb as kdb
    except ImportError:  # fix: catch only a failed import, not every error
        logger.error('Python module kdb not found, kdb will not be used.')
        return None, None
    # Hoist the repeated path join.
    kdbmatches = os.path.join(config.kdb_scratch_path, "kdbmatches")
    if os.path.isdir(kdbmatches):
        dones = glob.glob(os.path.join(kdbmatches, ".done_*"))
        if len(dones) > 0:
            number = dones[0].split("_")[1]
            saddle_path = os.path.join(kdbmatches, "SADDLE_%s" % number)
            try:
                displacement = io.loadcon(saddle_path)
            except (FloatingPointError, ValueError):
                # Not a .con file; fall back to POSCAR format.
                displacement = io.loadposcar(saddle_path)
            mode_path = os.path.join(kdbmatches, "MODE_%s" % number)
            with open(mode_path, 'r') as mode_file:
                mode = [[float(i) for i in l.strip().split()]
                        for l in mode_file.readlines()]
            # Consume the match so it is not suggested again.
            os.remove(os.path.join(kdbmatches, ".done_%s" % number))
            os.remove(saddle_path)
            os.remove(mode_path)
            return displacement, mode
    return None, None
def register_results(comm, current_state, states):
    """Collect finished parallel-replica results and register them.

    comm: communicator whose get_results() yields result dicts;
    current_state: the state the simulation currently occupies;
    states: the StateList used to look states up by number.
    Returns (num_registered, transition, speedup), where transition is
    {'process_id', 'time'} for the first transition seen out of
    current_state, or None.
    """
    logger.info("Registering results")
    # Start each round from an empty jobs-in directory.
    if os.path.isdir(config.path_jobs_in):
        shutil.rmtree(config.path_jobs_in)
    os.makedirs(config.path_jobs_in)

    # Function used by communicator to determine whether to discard a result
    def keep_result(name):
        return True

    transition = None
    num_registered = 0
    speedup = 0
    number_state = []   # number_state[i]: end-state label assigned to transition result i
    numres = 0          # count of transition results processed so far
    for result in comm.get_results(config.path_jobs_in, keep_result):
        # The result dictionary contains the following key-value pairs:
        # reactant.con - an array of strings containing the reactant
        # product.con - an array of strings containing the product
        # results.dat - an array of strings containing the results
        # id - StateNumber_WUID
        #
        # The reactant, product, and mode are passed as lines of the files because
        # the information contained in them is not needed for registering results
        state_num = int(result['name'].split("_")[0])
        id = int(result['name'].split("_")[1]) + result['number']
        state = states.get_state(state_num)
        # read in the results
        result['results'] = io.parse_results(result['results.dat'])
        speedup += result['results']['speedup']
        if result['results']['transition_found'] == 1:
            result['results']['transition_time_s'] += state.get_time()
            a = result['results']['potential_energy_product']
            # Read the running end-state bookkeeping table from disk
            # ("a+" also creates it on first use).
            f = open ("states/0/end_state_table","a+")
            lines = f.readlines()
            f.close()
            proc = []
            number_state.append(0)
            count = 0
            state_match = 0
            flag = 0   # becomes 1 once this product matches a known end state
            # Compare this product against every previously registered
            # product to decide whether it reaches a known end state.
            product = io.loadcon (result['product.con'])
            for i in range(0, numres):
                product2 = io.loadcon ("states/0/procdata/product_%i.con" % i )
                if atoms.match(product, product2,config.comp_eps_r,config.comp_neighbor_cutoff,True):
                    if flag == 0:
                        state_match = number_state[i]
                        number_state[numres] = state_match
                        flag = 1
                    break
            count = 0
            time_to_state = 0
            time_check = 0   # largest accumulated time seen in the table
            # Rebuild the table rows, updating the matched row (or, when no
            # product matched, choosing a fresh state number past the last).
            for line in lines[1:]:
                l = line.split()
                proc.append({'state': l[0], 'views': l[1], 'rate': l[2], 'time': l[3]})
                if float(l[3]) > time_check:
                    time_check = float(l[3])
                if flag == 0:
                    number_state[numres] = int(l[0])+1
                else:
                    if state_match == int(l[0]):
                        # Known end state: bump its view count and refresh
                        # its accumulated time and mean rate.
                        proc[count]['views'] = str(int(l[1]) + 1)
                        time_to_state = float(l[3]) + result['results']['transition_time_s']
                        proc[count]['time'] = str(time_to_state)
                        proc[count]['rate'] = str(1/(time_to_state/float(proc[count]['views'])))
                count += 1
            if flag == 0:
                # First visit to this end state: append a fresh row.
                proc.append({'state': number_state[numres], 'views': 1, 'rate': 1/(float(time_check+result['results']['transition_time_s'])) , 'time': time_check + result['results']['transition_time_s']})
            # Rewrite the whole end-state table.
            g = open ("states/0/end_state_table","w")
            g.write('state views rate time \n')
            for j in range(0,len(proc)):
                g.write(str(proc[j]['state']))
                g.write(" ")
                g.write(str(proc[j]['views']))
                g.write(" ")
                g.write(str(proc[j]['rate']))
                g.write(" ")
                g.write(str(proc[j]['time']))
                g.write("\n")
            g.close()
            numres += 1
            time = result['results']['transition_time_s']
            process_id = state.add_process(result)
            logger.info("Found transition with time %.3e", time)
            # Remember only the first transition out of the current state.
            if not transition and current_state.number==state.number:
                transition = {'process_id':process_id, 'time':time}
            state.zero_time()
        else:
            state.inc_time(result['results']['simulation_time_s'])
        num_registered += 1
    logger.info("Processed %i (result) searches", num_registered)
    if num_registered >=1:
        logger.info("Average speedup is %f", speedup/num_registered)
    return num_registered, transition, speedup
# Emit a gnuplot script plotting a histogram of all interatomic pair
# distances in the given .con file.
import sys
import os
import numpy
import pathfix
import fileio as io, atoms

def usage():
    # Print usage and exit with a nonzero status.
    print "usage: %s poscarfile" % (os.path.basename(sys.argv[0]))
    sys.exit(1)

if len(sys.argv) < 2:
    usage()

try:
    p = io.loadcon(sys.argv[1])
except IOError, (errno, strerrno):
    # Python 2 tuple-unpacking except clause.
    print "%s: %s" % (sys.argv[1], strerrno)
    usage()

# gnuplot preamble, followed by inline '-' data.
print "set boxwidth 0.10"
print "set xlabel 'Distance (Angstrom)'"
print "set ylabel 'Number of Atoms'"
print "set key off"
print "plot [0:8] '-' with boxes"

# Bin each unique pair distance to one decimal place.
# NOTE(review): this chunk appears truncated here -- the code that fills
# `hist` and prints the bins is not visible in this view.
hist = {}
for i in range(0, len(p)):
    for j in range(i+1, len(p)):
        d = numpy.linalg.norm(p.r[i]-p.r[j])
        d = round(d, 1)
def generate_corresponding_states(self):
    """ Generate the list of reactants expected as part of the new superbasin.
        Then, use the StateList object to create the actual states. """
    # First, create the expected states.
    # Only start/continue this process if there hasn't already been a stop flag.
    if not self.in_progress:
        return
    # Only generate states from states in the superbasin which are *not* the previous state.
    indices_to_gen_from = [i for i in xrange(len(self.sb_states))
                           if self.sb_states[i][0].number != self.previous_state.number]
    current_reactant = self.current_state.get_reactant()
    previous_reactant = self.previous_state.get_reactant()
    # First generate a list of atoms which have moved enough to be considered "in the hole"
    num_atoms = len(current_reactant)
    diff = atoms.per_atom_norm(current_reactant.r - previous_reactant.r, current_reactant.box)
    moved = []
    for i in range(num_atoms):
        if diff[i] > self.move_distance:
            moved.append(i)
    # Now, generate what we hope will look like the new states.
    # There should be as many as are in the sb state list less the original, premade one.
    state_possibilities = []
    for k in indices_to_gen_from:
        sb_state = self.sb_states[k][0]
        sb_state_reactant = sb_state.get_reactant()
        # Take the previous reactant as a base.
        new_state_reactant = sb_state_reactant.copy()
        # Try to take all the atoms that moved in the trigger process and move them
        # in each of the sb_states.  If any of them cannot be moved as they did in
        # the trigger process (i.e. because they're not in the same place as in
        # "previous_state"), then one of the superbasin atoms must have moved
        # during the triggering process.  Thus, scrap this whole super-basin
        # recycling effort!
        # (If any of the following do not get changed to a 1, toss the sb recycling.)
        all_clear = [0]*len(moved)
        # Move the atoms that moved in the process that initiated this recycling.
        for i in moved:
            # If the any of the atoms has the same basic location and the same name,
            # it will be considered the same atom, and will be moved as it did in the triggering process.
            for j in range(num_atoms):
                if (numpy.linalg.norm(sb_state_reactant.r[j] - previous_reactant.r[i]) < self.move_distance
                        and sb_state_reactant.names[j] == previous_reactant.names[i]):
                    # Move it to where it moved in the trigger process
                    new_state_reactant.r[j] = current_reactant.r[i]
                    all_clear[moved.index(i)] = 1
                    break
        # If we were able to move all of them successfully
        if 0 not in all_clear:
            state_possibilities.append(new_state_reactant)
        # Otherwise, this should no longer be "in progress",
        # and there's no use going on (sniffle).
        else:
            self.in_progress = False
            return
    # Next, having created what we expect the corresponding states in
    # the superbasin to look like, create the actual states.
    # First, find the process that got us from the previous state to the current state.
    self.previous_state.load_process_table()
    ref_pid = self.get_process_id(self.previous_state.procs, self.current_state.number)
    ref_rate = self.previous_state.procs[ref_pid]["rate"]
    ref_barrier = self.previous_state.procs[ref_pid]["barrier"]
    # Then, if we find a similar process from the other sb_states, which leads to a
    # state that is similar to one of our generated possibilities, it's a keeper.
    for i in indices_to_gen_from:
        sb_state = self.sb_states[i][0]
        sb_state.load_process_table()
        state_path = sb_state.path
        product_con = None
        for process_id in sb_state.procs.keys():
            # If the process "looks" similar -- it has a rate less than an order of
            # magnitude different, and a barrier less than 0.2 eV different.
            if (max(sb_state.procs[process_id]["rate"] / ref_rate,
                    ref_rate / sb_state.procs[process_id]["rate"]) < 10
                    and abs(sb_state.procs[process_id]["barrier"] - ref_barrier) < 0.2):
                # Manually load the product.con for the process id and see if
                # it's similar to the "state_possibility"
                product_path = os.path.join(state_path, "procdata", "product_%d.con" % process_id)
                fi = open(product_path, "r")
                product_con = io.loadcon(fi)
                fi.close()
                if atoms.identical(product_con, state_possibilities[indices_to_gen_from.index(i)], self.move_distance):
                    break
                else:
                    product_con = None
        if product_con is None:
            # No superbasin process resembled the reference one; stop recycling.
            self.in_progress = False
            return
        # Now that we know which process from the reference state
        # goes to the desired state, make that state.
        self.sb_states[i][1] = self.states.get_product_state(sb_state.number, process_id)
        self.sb_state_nums[i][1] = self.sb_states[i][1].number
def get_process_product(self, id):
    """Load and return the product geometry stored for process *id*."""
    product_path = self.proc_product_path(id)
    return io.loadcon(product_path)
def finish_minimization(self, result):
    """Record the outcome of the two minimizations of a process search.

    result: job-result dict for the second minimization (min2); the first
    minimization's result is reloaded from self.finished_min1_name.
    Decides which minimum is the reactant, stores energies and the
    forward/reverse barriers in self.data, or sets a failure code
    (9 = minimization hit max iterations, 6 = saddle not connected).
    """
    result1 = self.load_result(self.finished_min1_name)
    result2 = result
    atoms1 = io.loadcon(result1['min.con'])
    atoms2 = io.loadcon(result2['min.con'])
    results_dat1 = io.parse_results(result1['results.dat'])
    results_dat2 = io.parse_results(result2['results.dat'])
    self.data['force_calls_minimization'] += results_dat1['total_force_calls']
    self.data['force_calls_minimization'] += results_dat2['total_force_calls']

    # True when a minimized geometry matches this search's reactant.
    is_reactant = lambda a: atoms.match(a, self.reactant, config.comp_eps_r,
                                        config.comp_neighbor_cutoff, False)

    # Fix: reuse the already-parsed results.dat data instead of parsing
    # each results file a second time just for the termination reason.
    tc1 = results_dat1['termination_reason']
    tc2 = results_dat2['termination_reason']
    termination_reason1 = self.job_termination_reasons['minimization'][tc1]
    termination_reason2 = self.job_termination_reasons['minimization'][tc2]

    # Keys zeroed whenever the search is abandoned.
    energy_keys = ('potential_energy_saddle', 'potential_energy_reactant',
                   'potential_energy_product', 'barrier_reactant_to_product',
                   'barrier_product_to_reactant')

    if termination_reason1 == 'max_iterations' or termination_reason2 == 'max_iterations':
        # At least one minimization failed to converge.
        self.data['termination_reason'] = 9
        for key in energy_keys:
            self.data[key] = 0.0
        return

    # Check the connectivity of the process: exactly one minimum must match
    # the reactant, otherwise the saddle does not connect reactant to a new
    # product.  Hoisted so each (expensive) match runs only once.
    atoms1_is_reactant = is_reactant(atoms1)
    atoms2_is_reactant = is_reactant(atoms2)
    if (not atoms1_is_reactant and not atoms2_is_reactant) or \
       (atoms1_is_reactant and atoms2_is_reactant):
        # Not connected
        self.data['termination_reason'] = 6
        for key in energy_keys:
            self.data[key] = 0.0
        return
    elif atoms1_is_reactant:
        reactant_results_dat = results_dat1
        product_results_dat = results_dat2
        self.finished_reactant_name = self.finished_min1_name
        self.finished_product_name = self.finished_min2_name
    elif atoms2_is_reactant:
        reactant_results_dat = results_dat2
        product_results_dat = results_dat1
        self.finished_reactant_name = self.finished_min2_name
        self.finished_product_name = self.finished_min1_name

    self.data['potential_energy_reactant'] = reactant_results_dat['potential_energy']
    self.data['potential_energy_product'] = product_results_dat['potential_energy']
    self.data['barrier_reactant_to_product'] = self.data['potential_energy_saddle'] - \
        self.data['potential_energy_reactant']
    self.data['barrier_product_to_reactant'] = self.data['potential_energy_saddle'] - \
        self.data['potential_energy_product']
# Equations and notations are from: http://mathworld.wolfram.com/EulerAngles.html . @staticmethod def rotate_water(hydrogen1, hydrogen2, oxygen, psi, theta, phi, hydrogen_mass = 1.0, oxygen_mass = 16.0): G = (hydrogen_mass*(hydrogen1 + hydrogen2) + oxygen_mass*oxygen)/(hydrogen_mass*2.0 + oxygen_mass) rot = numpy.array([ [cos(theta)*cos(phi), cos(theta)*sin(phi), -sin(theta)], [sin(psi)*sin(theta)*cos(phi)-cos(psi)*sin(phi), sin(psi)*sin(theta)*sin(phi)+cos(psi)*cos(phi), cos(theta)*sin(psi)], [cos(psi)*sin(theta)*cos(phi)+sin(psi)*sin(phi), cos(psi)*sin(theta)*sin(phi)-sin(psi)*cos(phi), cos(theta)*cos(psi)] ]) rh1 = numpy.tensordot(rot, (hydrogen1-G), 1) + G rh2 = numpy.tensordot(rot, (hydrogen2-G), 1) + G ro = numpy.tensordot(rot, (oxygen-G), 1) + G return rh1, rh2, ro if __name__ == '__main__': import sys import time if len(sys.argv) < 3: print "%s: reactant.con outpath" % sys.argv[0] sys.exit(1) reactant = io.loadcon(sys.argv[1]) d = Random(reactant, 0.05, 5.0) #d = Undercoordinated(reactant, 11, 0.05, 5.0) t0 = time.time() ntimes = 1000 for i in xrange(ntimes): d.make_displacement(sys.argv[2]) dt = time.time()-t0 print "%.2f displacements per second" % (float(ntimes)/dt)
#!/usr/bin/env python import sys import pathfix import fileio as io import atoms import config config.comp_eps_r=float(sys.argv[3]) p1 = io.loadcon(sys.argv[1]) p2 = io.loadcon(sys.argv[2]) print atoms.identical(p1,p2)
#!/usr/bin/env python import sys import pathfix import atoms import fileio as io p1 = io.loadcon(sys.argv[1]) for file2 in sys.argv[2:]: p2 = io.loadcon(file2) distances = atoms.per_atom_norm(p1.r - p2.r, p1.box) max_i = 0 max_d = 0.0 for i in range(len(distances)): if distances[i] > max_d: max_d = distances[i] max_i = i print "%s: max distance: %f atom index: %i" % (file2, max_d, max_i)
def generate_corresponding_states(self):
    """ Generate the list of reactants expected as part of the new superbasin.
        Then, use the StateList object to create the actual states. """
    # First, create the expected states.
    # Only start/continue this process if there hasn't already been a stop flag.
    if not self.in_progress:
        return
    # Only generate states from states in the superbasin which are *not* the previous state.
    indices_to_gen_from = [
        i for i in xrange(len(self.sb_states))
        if self.sb_states[i][0].number != self.previous_state.number
    ]
    current_reactant = self.current_state.get_reactant()
    previous_reactant = self.previous_state.get_reactant()
    # First generate a list of atoms which have moved enough to be considered "in the hole"
    num_atoms = len(current_reactant)
    diff = atoms.per_atom_norm(current_reactant.r - previous_reactant.r,
                               current_reactant.box)
    moved = []
    for i in range(num_atoms):
        if diff[i] > self.move_distance:
            moved.append(i)
    # Now, generate what we hope will look like the new states.
    # There should be as many as are in the sb state list less the original, premade one.
    state_possibilities = []
    for k in indices_to_gen_from:
        sb_state = self.sb_states[k][0]
        sb_state_reactant = sb_state.get_reactant()
        # Take the previous reactant as a base.
        new_state_reactant = sb_state_reactant.copy()
        # Try to take all the atoms that moved in the trigger process and move them
        # in each of the sb_states.  If any of them cannot be moved as they did in
        # the trigger process (i.e. because they're not in the same place as in
        # "previous_state"), then one of the superbasin atoms must have moved
        # during the triggering process.  Thus, scrap this whole super-basin
        # recycling effort!
        # (If any of the following do not get changed to a 1, toss the sb recycling.)
        all_clear = [0] * len(moved)
        # Move the atoms that moved in the process that initiated this recycling.
        for i in moved:
            # If the any of the atoms has the same basic location and the same name,
            # it will be considered the same atom, and will be moved as it did in the triggering process.
            for j in range(num_atoms):
                if (numpy.linalg.norm(sb_state_reactant.r[j] - previous_reactant.r[i]) < self.move_distance
                        and sb_state_reactant.names[j] == previous_reactant.names[i]):
                    # Move it to where it moved in the trigger process
                    new_state_reactant.r[j] = current_reactant.r[i]
                    all_clear[moved.index(i)] = 1
                    break
        # If we were able to move all of them successfully
        if 0 not in all_clear:
            state_possibilities.append(new_state_reactant)
        # Otherwise, this should no longer be "in progress",
        # and there's no use going on (sniffle).
        else:
            self.in_progress = False
            return
    # Next, having created what we expect the corresponding states in
    # the superbasin to look like, create the actual states.
    # First, find the process that got us from the previous state to the current state.
    self.previous_state.load_process_table()
    ref_pid = self.get_process_id(self.previous_state.procs, self.current_state.number)
    ref_rate = self.previous_state.procs[ref_pid]["rate"]
    ref_barrier = self.previous_state.procs[ref_pid]["barrier"]
    # Then, if we find a similar process from the other sb_states, which leads to a
    # state that is similar to one of our generated possibilities, it's a keeper.
    for i in indices_to_gen_from:
        sb_state = self.sb_states[i][0]
        sb_state.load_process_table()
        state_path = sb_state.path
        product_con = None
        for process_id in sb_state.procs.keys():
            # If the process "looks" similar -- it has a rate less than an order of
            # magnitude different, and a barrier less than 0.2 eV different.
            if (max(sb_state.procs[process_id]["rate"] / ref_rate,
                    ref_rate / sb_state.procs[process_id]["rate"]) < 10
                    and abs(sb_state.procs[process_id]["barrier"] - ref_barrier) < 0.2):
                # Manually load the product.con for the process id and see if
                # it's similar to the "state_possibility"
                product_path = os.path.join(state_path, "procdata",
                                            "product_%d.con" % process_id)
                fi = open(product_path, "r")
                product_con = io.loadcon(fi)
                fi.close()
                if atoms.identical(
                        product_con,
                        state_possibilities[indices_to_gen_from.index(i)],
                        self.move_distance):
                    break
                else:
                    product_con = None
        if product_con is None:
            # No superbasin process resembled the reference one; stop recycling.
            self.in_progress = False
            return
        # Now that we know which process from the reference state
        # goes to the desired state, make that state.
        self.sb_states[i][1] = self.states.get_product_state(
            sb_state.number, process_id)
        self.sb_state_nums[i][1] = self.sb_states[i][1].number
#!/usr/bin/env python import sys import pathfix import atoms import fileio as io p1 = io.loadcon(sys.argv[1]) p2 = io.loadcon(sys.argv[2]) ret = atoms.get_mappings(p1, p2, float(sys.argv[3]), float(sys.argv[4])) if ret: print sys.argv[1], sys.argv[2]
def get_reactant(self):
    """ Loads the reactant.con into a point and returns it. """
    reactant_con = self.reactant_path
    return io.loadcon(reactant_con)
def add_process(self, result, superbasin=None):
    """ Adds a process to this State; returns its id, or None when the
    result is rejected (barrier too high, repeat, or malformed files). """
    state.State.add_process(self, result)
    self.set_good_saddle_count(self.get_good_saddle_count() + 1)

    resultdata = result["results"]  # The information from the result.dat file
    if 'simulation_time' in resultdata:
        self.increment_time(resultdata['simulation_time'],
                            resultdata['md_temperature'])

    # We may not already have the energy for this State.  If not, it should
    # be placed in the result data; it defines the state's reference energy.
    if self.get_energy() is None:
        self.set_energy(resultdata["potential_energy_reactant"])
    reactant_energy = self.get_energy()

    # Calculate the forward barrier for this process, and abort if the
    # energy is too far above the thermally accessible window.
    oldlowest = self.get_lowest_barrier()
    barrier = resultdata["potential_energy_saddle"] - reactant_energy
    lowest = self.update_lowest_barrier(barrier)
    ediff = ((barrier - lowest) -
             (self.statelist.kT *
              (self.statelist.thermal_window + self.statelist.max_thermal_window)))
    if ediff > 0.0:
        self.append_search_result(result, "barrier > max_thermal_window", superbasin)
        return None

    # Determine whether a process with a similar saddle already exists.
    id = self.find_repeat(result["saddle.con"], barrier)
    if id is not None:  # fix: identity comparison with None instead of !=
        self.append_search_result(result, "repeat-%d" % id, superbasin)
        self.procs[id]['repeats'] += 1
        self.save_process_table()
        if result['type'] == "random" or result['type'] == "dynamics":
            self.inc_proc_random_count(id)
            # Do not increase repeats if we are currently in a
            # superbasin and the process does not lead out of it;
            # or if the process barrier is outside the thermal window.
            if id in self.get_relevant_procids(superbasin):
                self.inc_repeats()
        if 'simulation_time' in resultdata:
            current_time = self.get_time()
            logger.debug("event %3i found at time %f fs" % (id, current_time))
        return None

    # This appears to be a unique process.
    # Check if the mode, reactant, saddle, and product are legit.
    try:
        if 'mode' not in result:
            io.load_mode(result['mode.dat'])
        if 'reactant' not in result:
            io.loadcon(result['reactant.con'])
        if 'saddle' not in result:
            io.loadcon(result['saddle.con'])
        if 'product' not in result:
            io.loadcon(result['product.con'])
    except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        logger.exception("Mode, reactant, saddle, or product has incorrect format")
        return None

    # Reset the repeat count.
    self.reset_repeats()

    # Respond to finding a new lowest barrier.
    self.set_unique_saddle_count(self.get_unique_saddle_count() + 1)
    if barrier == lowest and barrier < oldlowest - self.statelist.epsilon_e:
        logger.info("Found new lowest barrier %f for state %i (type: %s)",
                    lowest, self.number, result['type'])
    logger.info("Found new barrier %f for state %i (type: %s)",
                barrier, self.number, result['type'])

    # Update the search result table.
    self.append_search_result(result, "good-%d" % self.get_num_procs(), superbasin)

    # The id of this process is the number of processes.
    id = self.get_num_procs()
    if 'simulation_time' in resultdata:
        current_time = self.get_time()
        logger.debug("new event %3i found at time %f fs" % (id, current_time))

    # Move the relevant files into the procdata directory, closing each
    # handle promptly instead of leaking it.
    with open(self.proc_reactant_path(id), 'w') as f:
        f.writelines(result['reactant.con'].getvalue())
    with open(self.proc_mode_path(id), 'w') as f:
        f.writelines(result['mode.dat'].getvalue())
    with open(self.proc_product_path(id), 'w') as f:
        f.writelines(result['product.con'].getvalue())
    with open(self.proc_saddle_path(id), 'w') as f:
        f.writelines(result['saddle.con'].getvalue())
    with open(self.proc_results_path(id), 'w') as f:
        f.writelines(result['results.dat'].getvalue())

    # Append this barrier to the process table (in memory and on disk).
    self.append_process_table(
        id=id,
        saddle_energy=resultdata["potential_energy_saddle"],
        prefactor=resultdata["prefactor_reactant_to_product"],
        product=-1,
        product_energy=resultdata["potential_energy_product"],
        product_prefactor=resultdata["prefactor_product_to_reactant"],
        barrier=barrier,
        rate=resultdata["prefactor_reactant_to_product"] *
        math.exp(-barrier / self.statelist.kT),
        repeats=0)

    # If this is a random search type, add this proc to the random proc dict.
    if result['type'] == "random" or result['type'] == "dynamics":
        self.inc_proc_random_count(id)

    # This was a unique process, so return the id.
    return id
def get_process_saddle(self, id):
    """Load and return the saddle configuration stored for process *id*."""
    saddle_file = self.proc_saddle_path(id)
    return io.loadcon(saddle_file)
import pathfix import fileio as io import atoms stpath = os.path.join('states', sys.argv[1]) lines = open(os.path.join(stpath, 'info'), 'r').readlines() reacenergy = 0.0 for line in lines: if "reactant energy" in line: reacenergy = float(line.split()[-1]) reac = io.loadcon(os.path.join(stpath, 'reactant.con')) print "%12s %12s %12s" % ('pid', 'max dist', 'e diff') lines = open(os.path.join(stpath, 'processtable'), 'r').readlines() for line in lines[1:]: split = line.strip().split() pid = split[0] pu = float(split[4]) pr = io.loadcon(os.path.join(stpath, 'procdata', 'product_%s.con' % pid)) maxd = 0.0 for i in range(len(reac)): d = numpy.linalg.norm(atoms.pbc(reac.r[i] - pr.r[i], reac.box)) maxd = max(maxd, d) print "%12s %12.6f %12.6f" % (pid, maxd, pu - reacenergy)
def get_process_reactant(self, id):
    """Load and return the reactant configuration stored for process *id*."""
    reactant_file = self.proc_reactant_path(id)
    return io.loadcon(reactant_file)
#!/usr/bin/env python import pathfix import atoms import fileio as io import sys try: p = io.loadcon(sys.argv[1]) except: print "\nusage: yacs input.con" sys.exit() codes = atoms.cnar(p, 3.2, False) newcodes = [[] for i in range(len(p))] for i in range(len(p)): for code in codes[i]: newcodes[i].append(code.split(',') + [codes[i][code]]) for i in range(len(p)): newcodes[i].sort(key=lambda x: x[3], reverse=True) print '%6d' % i, for code in newcodes[i]: print '%3d%10s' % (code[3], '(%s,%s,%s)' % (code[0], code[1], code[2])), print
def register_results(comm, current_state, states):
    """Collect finished search results, register found transitions, and
    maintain the end-state bookkeeping table.

    Returns a tuple (num_registered, transition, speedup) where *transition*
    is a {'process_id', 'time'} dict for the first transition found out of
    *current_state*, or None.
    """
    logger.info("Registering results")
    # Clear out the incoming-jobs directory before harvesting new results.
    if os.path.isdir(config.path_jobs_in):
        shutil.rmtree(config.path_jobs_in)
    os.makedirs(config.path_jobs_in)

    # Function used by communicator to determine whether to discard a result
    def keep_result(name):
        return True
    transition = None
    num_registered = 0
    speedup = 0
    number_state = []  # per-result end-state number assigned below
    numres = 0         # count of transition results processed so far
    for result in comm.get_results(config.path_jobs_in, keep_result):
        # The result dictionary contains the following key-value pairs:
        # reactant.con - an array of strings containing the reactant
        # product.con - an array of strings containing the product
        # results.dat - an array of strings containing the results
        # id - StateNumber_WUID
        #
        # The reactant, product, and mode are passed as lines of the files
        # because the information contained in them is not needed for
        # registering results.
        state_num = int(result['name'].split("_")[0])
        id = int(result['name'].split("_")[1]) + result['number']
        state = states.get_state(state_num)
        # read in the results
        result['results'] = io.parse_results(result['results.dat'])
        speedup += result['results']['speedup']
        if result['results']['transition_found'] == 1:
            # Convert the transition time to absolute time for this state.
            result['results']['transition_time_s'] += state.get_time()
            a = result['results']['potential_energy_product']  # NOTE(review): unused
            # Read the current end-state table ("a+" creates it if missing).
            f = open("states/0/end_state_table", "a+")
            lines = f.readlines()
            f.close()
            proc = []
            number_state.append(0)
            count = 0
            state_match = 0
            flag = 0  # 1 once this product matched a previously seen product
            product = io.loadcon(result['product.con'])
            # Compare against all products registered earlier in this call to
            # see whether this transition reaches an already-known end state.
            for i in range(0, numres):
                product2 = io.loadcon("states/0/procdata/product_%i.con" % i)
                if atoms.match(product, product2, config.comp_eps_r,
                               config.comp_neighbor_cutoff, True):
                    if flag == 0:
                        state_match = number_state[i]
                        number_state[numres] = state_match
                        flag = 1
                        break
            count = 0
            time_to_state = 0
            time_check = 0  # largest 'time' value seen in the table
            for line in lines[1:]:
                l = line.split()
                proc.append({'state': l[0], 'views': l[1],
                             'rate': l[2], 'time': l[3]})
                if float(l[3]) > time_check:
                    time_check = float(l[3])
                if flag == 0:
                    # New end state: number it one past the last table entry.
                    number_state[numres] = int(l[0]) + 1
                else:
                    # Known end state: bump its view count and update its
                    # accumulated time and rate (views / total time).
                    if state_match == int(l[0]):
                        proc[count]['views'] = str(int(l[1]) + 1)
                        time_to_state = float(l[3]) + \
                            result['results']['transition_time_s']
                        proc[count]['time'] = str(time_to_state)
                        proc[count]['rate'] = str(
                            1 / (time_to_state / float(proc[count]['views'])))
                count += 1
            if flag == 0:
                # First sighting of this end state: append a fresh row.
                proc.append({
                    'state': number_state[numres],
                    'views': 1,
                    'rate': 1 / (float(time_check +
                                       result['results']['transition_time_s'])),
                    'time': time_check +
                            result['results']['transition_time_s']})
            # Rewrite the whole table with the updated rows.
            g = open("states/0/end_state_table", "w")
            g.write('state views rate time \n')
            for j in range(0, len(proc)):
                g.write(str(proc[j]['state']))
                g.write(" ")
                g.write(str(proc[j]['views']))
                g.write(" ")
                g.write(str(proc[j]['rate']))
                g.write(" ")
                g.write(str(proc[j]['time']))
                g.write("\n")
            g.close()
            numres += 1
            time = result['results']['transition_time_s']
            process_id = state.add_process(result)
            logger.info("Found transition with time %.3e", time)
            # Remember only the first transition leaving the current state.
            if not transition and current_state.number == state.number:
                transition = {'process_id': process_id, 'time': time}
            state.zero_time()
        else:
            # No transition: just accumulate the simulated time on the state.
            state.inc_time(result['results']['simulation_time_s'])
        num_registered += 1
    logger.info("Processed %i (result) searches", num_registered)
    if num_registered >= 1:
        logger.info("Average speedup is %f", speedup / num_registered)
    return num_registered, transition, speedup
def kmc_step(current_state, states, time, kT, superbasining, steps=0):
    """Run kinetic Monte Carlo steps from *current_state* while confidence
    is high enough (and the step cap is not exceeded).

    Returns (current_state, previous_state, time, steps).  Supports three
    modes of picking the next process: superbasin stepping (config.sb_on),
    asKMC rate tables (config.askmc_on), and following a previously
    recorded target trajectory (config.debug_target_trajectory).
    """
    t1 = unix_time.time()
    previous_state = current_state
    # If the Chatterjee & Voter superbasin acceleration method is being used
    if config.askmc_on:
        pass_rec_path = None
        asKMC = askmc.ASKMC(kT, states, config.askmc_confidence,
                            config.askmc_alpha, config.askmc_gamma,
                            config.askmc_barrier_test_on,
                            config.askmc_connections_test_on,
                            config.sb_recycling_on, config.path_root,
                            config.akmc_thermal_window,
                            recycle_path=pass_rec_path)
    # The system might be in a superbasin
    if config.sb_on:
        sb = superbasining.get_containing_superbasin(current_state)
    else:
        sb = None
    # Keep stepping while the (super)state's confidence is sufficient and the
    # step cap has not been hit (a cap of 0 means unlimited).
    while (((not sb and
             current_state.get_confidence() >= config.akmc_confidence) or
            (sb and sb.get_confidence() >= config.akmc_confidence)) and
           (steps < config.akmc_max_kmc_steps or
            config.akmc_max_kmc_steps == 0)):
        # Do a KMC step.
        steps += 1
        if config.sb_on and sb:
            # Let the superbasin pick the exit process and states.
            mean_time, current_state, next_state, sb_proc_id_out, sb_id = \
                sb.step(current_state, states.get_product_state)
        else:
            if config.askmc_on:
                rate_table = asKMC.get_ratetable(current_state)
            else:
                rate_table = current_state.get_ratetable()
            if len(rate_table) == 0:
                logger.error("No processes in rate table, but confidence "
                             "has been reached")
            ratesum = sum((row[1] for row in rate_table), 0.0)
            u = numpy.random.random_sample()
            p = 0.0
            # Next state process id, will throw exception if remains
            # unchanged (1.1 is not a valid list index).
            nsid = 1.1
            # If we are following another trajectory:
            if config.debug_target_trajectory != "False":
                # Get the Dynamics objects.
                owndynamics = io.Dynamics(
                    os.path.join(config.path_results, "dynamics.txt")).get()
                targetdynamics = io.Dynamics(
                    os.path.join(config.debug_target_trajectory,
                                 "dynamics.txt")).get()
                # Get the current_step.
                try:
                    current_step = len(owndynamics)
                except:
                    current_step = 0
                # Get the target step process id.
                if current_step > 0:
                    stateid = targetdynamics[current_step]['reactant']
                else:
                    stateid = 0
                try:
                    procid = targetdynamics[current_step]['process']
                except:
                    print "Can no longer follow target trajectory"
                    sys.exit(1)
                # Load the con file for that process saddle.
                targetSaddleCon = io.loadcon(
                    os.path.join(config.debug_target_trajectory, "states",
                                 str(stateid), "procdata",
                                 "saddle_%d.con" % procid))
                targetProductCon = io.loadcon(
                    os.path.join(config.debug_target_trajectory, "states",
                                 str(stateid), "procdata",
                                 "product_%d.con" % procid))
                ibox = numpy.linalg.inv(targetSaddleCon.box)
                # See if we have this process: a process matches when every
                # per-atom distance to the target saddle AND product is
                # within comp_eps_r (the for/else triggers on no break).
                for i in xrange(len(rate_table)):
                    p1 = current_state.get_process_saddle(rate_table[i][0])
                    for dist in atoms.per_atom_norm_gen(
                            p1.free_r() - targetSaddleCon.free_r(),
                            targetSaddleCon.box, ibox):
                        if dist > config.comp_eps_r:
                            break
                    else:
                        p1 = current_state.get_process_product(
                            rate_table[i][0])
                        for dist in atoms.per_atom_norm_gen(
                                p1.free_r() - targetProductCon.free_r(),
                                targetProductCon.box, ibox):
                            if dist > config.comp_eps_r:
                                break
                        else:
                            nsid = i
                            break
                else:
                    print "Can no longer follow target trajectory"
                    sys.exit(1)
            # We are not following another trajectory:
            else:
                # Standard KMC selection: walk the cumulative rate
                # distribution until it passes the uniform sample u.
                for i, row in enumerate(rate_table):
                    p += row[1] / ratesum
                    if p > u:
                        nsid = i
                        break
                else:
                    logger.warning("Warning: Failed to select rate; p = "
                                   + str(p))
                    break
            next_state = states.get_product_state(current_state.number,
                                                  rate_table[nsid][0])
            mean_time = 1.0 / ratesum
        # NOTE(review): under Python 2 this print renders as a tuple.
        print("Meantime for Step " + str(steps) + ": ", mean_time)
        # Accounting for time
        if config.debug_use_mean_time:
            step_time = mean_time
        else:
            # numpy.random.random_sample() uses [0,1)
            # which could produce issues with math.log()
            step_time = -mean_time * math.log(
                1 - numpy.random.random_sample())
        time += step_time
        # Pass transition information to extension schemes
        if config.askmc_on:
            asKMC.register_transition(current_state, next_state)
        if config.sb_on:
            superbasining.register_transition(current_state, next_state)
        # -1 marks a superbasin step (no single process id applies).
        if config.sb_on and sb:
            proc_id_out = -1
        else:
            proc_id_out = rate_table[nsid][0]
        # Write data to disk
        dynamics = io.Dynamics(os.path.join(config.path_results,
                                            "dynamics.txt"))
        if proc_id_out != -1:
            proc = current_state.get_process(proc_id_out)
            dynamics.append(current_state.number, proc_id_out,
                            next_state.number, step_time, time,
                            proc['barrier'], proc['rate'],
                            current_state.get_energy())
            logger.info("KMC step from state %i through process %i "
                        "to state %i ", current_state.number,
                        rate_table[nsid][0], next_state.number)
        else:
            # XXX The proc_out_id was -1, which means there's a bug or this
            # was a superbasin step.
            dynamics.append_sb(current_state.number, sb_proc_id_out,
                               next_state.number, step_time, time, sb_id,
                               1.0 / mean_time, current_state.get_energy())
            logger.info("SB step from state %i through process %i "
                        "to state %i ", current_state.number,
                        sb_proc_id_out, next_state.number)
        # criterion used to stop the job: currently an energy limit is used
        # as criterion
        if current_state.get_energy() > config.debug_stop_criterion:
            sys.exit()
        previous_state = current_state
        current_state = next_state
        # The system might be in a superbasin
        if config.sb_on:
            sb = superbasining.get_containing_superbasin(current_state)
        else:
            sb = None
    if config.sb_on:
        superbasining.write_data()
    if not sb:
        logger.info("Currently in state %i with confidence %.6f",
                    current_state.number, current_state.get_confidence())
    else:
        logger.info("Currently in state %i (superbasin %i) with "
                    "confidence %.6f", current_state.number, sb.id,
                    sb.get_confidence())
    t2 = unix_time.time()
    logger.debug("KMC finished in " + str(t2 - t1) + " seconds")
    logger.debug("%.2f KMC steps per second", float(steps) / (t2 - t1))
    return current_state, previous_state, time, steps