def fuzzy_variable_translate(variable, poss_variables, verbose_output,
                             throw_error=True, min_tol=0.3):
    """
    Uses a fuzzy finder to correct spelling mistakes in variables.

    Inputs:
        * variable <str>             => The variable to be checked for typos.
        * poss_variables <list<str>> => The possible (correct) names of the variables.
        * verbose_output <bool>      => Whether to output lots of info or not.
        * throw_error <bool> (default True) => Whether to throw an error for no hits.
        * min_tol <float> (default 0.3)     => The tolerance for classifying a 'hit'.

    Outputs:
        Returns a list of bools telling which elements of the input list
        poss_variables are possible correct strings.
    """
    if type(variable) != str:
        return False

    # Similarity ratio between the input and every possible variable name
    a = [dfl.SequenceMatcher(None, variable.lower(), i.lower()).ratio()
         for i in poss_variables]

    if all(i < min_tol for i in a) and throw_error:
        EXC.ERROR("I don't know what variable '%s' means. \nValid Options are:%s"
                  % (variable, '\n\t*' + '\n\t*'.join(poss_variables)))
    if all(i < min_tol for i in a):
        return [False] * len(poss_variables)

    # Mark only the best match(es) as True
    temp_array = [i == max(a) for i in a]
    if all(i < 0.95 for i in a) and verbose_output:
        EXC.WARN("Assuming '%s' means you want me to use %s"
                 % (variable, np.array(poss_variables)[temp_array][0]))
    return temp_array

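# A short illustration (the option names below are hypothetical): the returned
# boolean mask lines up with poss_variables, so the matched name can be
# recovered with numpy fancy indexing, as fuzzy_variable_helper below does.
#
#     >>> mask = fuzzy_variable_translate("densty", ["density", "real-phase", "phase"],
#     ...                                 verbose_output=False)
#     >>> np.array(["density", "real-phase", "phase"])[mask][0]
#     'density'
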
def fuzzy_variable_helper(variable, poss_var, just_1_var=True, tol=0.3,
                          verbose_out=True, throw_error=True):
    """
    A function to make fuzzy_variable_translate slightly easier to use.

    Inputs:
        * variable <str>       => The variable to be checked for typos.
        * poss_var <list<str>> => The possible (correct) names of the variables.
        * just_1_var <bool> (default True) => Only allow 1 variable or not.
        * tol <float> (default 0.3)        => The tolerance for classifying a 'hit'.
        * verbose_out <bool>   => Whether to output lots of info or not.
        * throw_error <bool> (default True) => Whether to throw an error for no hits.

    Outputs:
        Returns the single matching string, or (if just_1_var is False and
        several names match) a numpy array of the candidate strings.
    """
    poss_var_inds = fuzzy_variable_translate(variable, poss_var, verbose_out,
                                             throw_error, tol)
    poss_vars = np.array(poss_var)
    poss_vars = poss_vars[poss_var_inds]

    if len(poss_vars) < 1:
        EXC.ERROR("I don't know what '%s' is supposed to mean." % variable)
    elif len(poss_vars) == 1:
        return poss_vars[0]
    elif len(poss_vars) > 1:
        if just_1_var:
            EXC.ERROR("I don't know what you mean by: '%s'.\n" % variable
                      + "You could mean:\n\t* %s" % '\n\t* '.join(poss_var))
        else:
            return poss_vars

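# A minimal usage sketch (the option list is hypothetical): a misspelt setting
# is mapped to the closest valid name, or an error is raised via EXC.ERROR when
# just_1_var is True and several names match.
#
#     >>> fuzzy_variable_helper("denisty", ["density", "real-phase", "phase"])
#     'density'
#
# With just_1_var=False a numpy array of all plausible matches is returned
# instead of a single string.
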
def _vmd_visualise(self, step):
    """
    Visualises the data. This fills in the variables in the vmd template,
    writes the script and runs it in vmd.
    """
    start_vmd_time = time.time()

    for i in self.all_settings['tcl']['cube_files'].split(' '):
        if not io.path_leads_somewhere(i.strip()):
            msg = "Sorry I couldn't find the following cube file:"
            msg += "\n%s" % i.strip()
            EXC.ERROR(msg)

    self.all_settings['tcl']['pic_filename'][self.PID] = self.tga_filepath
    io.vmd_variable_writer(self.all_settings, self.PID)

    # Check the vmd script file exists
    tmp = os.path.isfile(self.all_settings['vmd_script'][self.PID])
    if not tmp:
        msg = "Sorry I can't find the vmd script! "
        msg += "It hasn't been created (or created in the wrong place)."
        EXC.ERROR(msg)

    cond = 'tga' not in self.all_settings['files_to_keep']
    cond *= not self.all_settings['calibrate']
    if cond:
        self.all_settings['delete_these'].append(self.tga_filepath)

    io.VMD_visualise(self.all_settings, self.PID)

    end_time = time.time() - start_vmd_time
    self.all_settings['times']['VMD Visualisation'][step] += end_time

def _create_bra_ket_conj(self, inds):
    if len(inds) != 2:
        if "," in inds:
            EXC.WARN("The number of indices for the U matrix, %s, is %i it should be 2!"
                     "\n\nDid you forget to separate indices by a comma?"
                     % (self.txt, len(inds)))
        else:
            EXC.WARN("The number of indices for the U matrix, %s, is %i it should be 2!"
                     "\nDid you forget to put them in curly braces e.g. _{l,n}?"
                     % (self.txt, len(inds)))

    bra = BRA(r"{\psi_{%s}[R,t]}" % (inds[1]))
    ket = KET(r"{\phi_{%s}[R,t]}" % (inds[0]))
    return bra, ket

def _simplify_1_delta_with_indices(self, delta_inds, delta_I):
    change = False
    print("Before\n", self.latex(), "\n")

    # Won't currently work if other parent math objects are inside.
    # This is because it won't recursively change things lower down.
    if any(not i.child for i in self.objs):
        EXC.WARN("Sorry I currently can't cancel the kronecker delta in the sum %s."
                 "\nThis is something that needs coding in." % self.latex())
        return

    # Find the relevant math objects
    #print(delta_inds, [j.inds for j in self.objs])
    # Holds all objects that have relevant indices
    relevant_objs = [obj for i, obj in enumerate(self.objs)
                     if delta_inds[0] in obj.inds and i != delta_I and obj.child]

    # Change any occurrence of the first delta index to the second delta index
    new_inds = self.inds[:]
    for i, ind in enumerate(new_inds):
        if delta_inds[0] == ind:
            if delta_inds[1] in new_inds:
                new_inds.remove(ind)
            else:
                new_inds[i] = delta_inds[1]

    for obj in relevant_objs:
        change = True
        if delta_inds[0] in obj.inds:
            obj.inds = [i.replace(delta_inds[0], delta_inds[1]) for i in obj.inds]

    return new_inds, change

def _simplify_U(self):
    LaTeX = self.latex()
    for level in self.struct:
        for i, (child, parent) in enumerate(self.struct[level]):
            if child.__name__ == "SUM":
                can_simp, indices, U_places = child._can_simplify_U()
                if can_simp:
                    U_places = sorted(U_places)
                    count = 0
                    for X in U_places:
                        if len(X) != 2:
                            EXC.WARN("Something went wrong with the SUM._can_simplify_U() "
                                     "function. It is telling me I can simplify %s"
                                     % child.latex())
                            return False
                        sortX = sorted(X)
                        delta_inds = [child.objs[u - count].inds[0] for u in sortX]
                        new_delta = DELTA("{_{%s}}" % ",".join(delta_inds))
                        for x in sortX:
                            child._remove_obj(x - count)
                            count += 1
                        child._insert_obj(len(child.objs) + 1, new_delta, "\\delta", "")

                    LaTeX = self.latex()
                    self.steps_taken += (
                        "Simplifying the U terms using the relationship "
                        "$\\sum\\limits_{x}U^{*}_{bx}U_{ax} = \\delta_{ab}$:\n\n%s%s%s\n\n"
                        % (self.begin_eq, LaTeX, self.end_eq))

                    for ind in indices:
                        child.inds = [j for j in child.inds if j != ind]

                    child._simplify_deltas()
                    latex_str = self.latex()
                    if latex_str != LaTeX:
                        self.steps_taken += ("Let the Kronecker deltas work their magic: %s%s%s"
                                             % (self.begin_eq, latex_str, self.end_eq))

                    #self._remove_empty_sums()
                    self.struct, self.struct_names = self._create_struct(
                        self.objs, self.obj_typ, parent="root", level=0,
                        struct={}, levels={0: 0}, names={})
                    self.paths = self._create_paths(self.objs, parent_i=0, level=0, paths={})

def _findActiveAtoms(self, molID):
    """
    Find which atoms are active according to the AOM_COEFF.include file.
    These are atoms on a molecule.

    Inputs:
        * molID => The molecule to find active atoms for
    """
    # Find active coordinates (from active atom index)
    atMask = [i for i in self.all_settings['active_atom_index'][molID]]
    self.active_coords = self.all_settings['coords'][self.posStepInd][atMask]
    self.active_coords = [self.active_coords[:, k] for k in range(3)]
    self.active_coords = np.array(self.active_coords)

    # Error Checking
    if len(self.active_coords) <= 0:
        # Check if any molecules are past the number of molecules being
        # visualised
        max_plot_mol = self.all_settings['num_mols_active']
        max_act_mol = max(self.active_step_mols)
        if max_act_mol > max_plot_mol:
            msg = "The charge is no longer contained by the molecules"
            msg += " shown.\nPlease extend the range to allow for this!"
            msg += "\n\nMax charged molecule = %i" % max_act_mol
            msg += "\tMax molecule plotted = %i" % max_plot_mol
            EXC.WARN(msg, True)
        else:
            msg = "Something went wrong and I don't know what sorry!"
            msg += "\nThe length of the active_coords array "
            msg += "is %i. It should be >0" % len(self.active_coords)
            raise SystemExit(msg)
        return False

def _find_adiab_or_diab(self, txt):
    if "psi" in txt:
        return "a"
    elif "phi" in txt:
        return "d"
    else:
        EXC.WARN("I don't know whether %s is diabatic or adiabatic!" % txt)

def _find_adiab_or_diab(self, txt):
    if "c" in txt.lower():
        return "a"
    elif "u" in txt.lower():
        return "d"
    else:
        EXC.WARN("I don't know whether %s is diabatic or adiabatic!" % txt)

def open_read(filename, throw_error=True, max_size=1):
    filename = folder_correct(filename)
    if path_leads_somewhere(filename):
        check_size = True
        try:
            import psutil
        except ModuleNotFoundError:
            check_size = False

        if check_size:
            if os.path.getsize(filename) >= psutil.virtual_memory().available * max_size:
                raise IOError("\n\nFilesize too big.\n\t* "
                              + f"Filepath: '{filename}'" + "\n\t* "
                              + f"Avail Mem: {psutil.virtual_memory().available}"
                              + "\n\t* "
                              + f"Filesize: {os.path.getsize(filename)}"
                              + "\n\n\n")

        with open(filename, 'r') as f:
            txt = f.read()
        return txt
    else:
        if throw_error:
            EXC.ERROR("The %s file doesn't exist!" % filename)
        return ''

def correct_steps_to_read_startMaxStride(all_settings):
    """
    Will adjust the steps to read so that no steps outside of the start_time,
    end_time and stride are included (subject to caveats).

    Caveats:
        * If 'missing_pos_steps' is used then we don't adjust the pos steps and
          allow another function to fix those later. This doesn't apply if the
          'skip' setting is used in missing pos steps.

    Will change everything in place in the all_settings dictionary.
    """
    start_time = all_settings['start_time']
    end_time = all_settings['end_time']
    stride = all_settings['stride']
    var = all_settings['missing_pos_steps']

    # Using no correction for missing position steps
    if var == "skip":
        names = ('nucl_tsteps_to_read', 'coeff_tsteps_to_read')
    else:
        names = ('coeff_tsteps_to_read', )

    # Only allow steps allowed by min_step, max_step and stride
    for name in names:
        corr_stride_steps = set(all_settings[name][::stride])
        tmp = []
        for i in all_settings[name]:
            if i in corr_stride_steps and start_time <= i <= end_time:
                tmp.append(i)

        if len(tmp) == 0 and all_settings['missing_pos_steps'] == 'skip':
            common_timesteps = set(
                all_settings['nucl_tsteps_to_read']).intersection(
                    set(all_settings['coeff_tsteps_to_read']))
            EXC.ERROR("Can't find any nucl and coeff timesteps to read."
                      "\n\nPlease adjust your settings.inp file."
                      "\nAvailable steps are: %s"
                      % ', '.join(map(str, common_timesteps)))
        elif len(tmp) == 0 and all_settings['missing_pos_steps'] != 'skip':
            EXC.ERROR("Can't find any coeff timesteps to read."
                      "\n\nPlease adjust your settings.inp file."
                      "\nAvailable steps are: %s"
                      % ', '.join(map(str, all_settings['coeff_tsteps_to_read'])))

        all_settings[name] = tmp

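# A small illustrative sketch (hypothetical numbers, not taken from a real run):
# with start_time=2, end_time=8, stride=2 and
# coeff_tsteps_to_read = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# the stride first keeps {0, 2, 4, 6, 8} and the time window then trims this to
# [2, 4, 6, 8], which is what gets written back into all_settings.
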
def __convert_to_float(string, var_name):
    """
    Will convert a number to a float and, if it can't be done, raise an error.
    """
    try:
        val = float(string)
    except (ValueError, TypeError):
        EXC.ERROR("'%s' must be a float" % var_name)
    return val

def settings_update(all_settings):
    """
    Reads/parses the VMD log file. Then we decide to put the rotations in the
    include.vmd file and combine the zooms/scalings and translations into a
    single operation. These are then written into the settings file.
    """
    vmd_log_text = open_read(all_settings['vmd_log_file'], False)
    if vmd_log_text:
        os.remove(all_settings['vmd_log_file'])
        new_transforms = vmd_log_text[
            vmd_log_text.find(consts.end_of_vmd_file)
            + len(consts.end_of_vmd_file):].split('\n')

        # First handle the scalings
        new_zoom = (txt_lib.combine_vmd_scalings(new_transforms)
                    * all_settings['zoom_value'])
        inp_zoom = all_settings['clean_settings_dict'].get('zoom_value')
        if inp_zoom is not None:
            # If the settings file declares a zoom value use the comments from it
            inp_zoom[0] = new_zoom
        else:
            # else use a standard comment
            inp_zoom = [new_zoom, '# How much to zoom by']
        all_settings['clean_settings_dict']['zoom_value'] = inp_zoom

        # Now handle translations
        new_translations = (np.array(txt_lib.combine_vmd_translations(new_transforms))
                            + all_settings['translate_by'])
        inp_translate = all_settings['clean_settings_dict'].get("translate_by")
        if inp_translate is not None:
            # If the settings file declares a translation use the comments from it
            inp_translate[0] = new_translations
        else:
            # else use a standard comment
            inp_translate = [new_translations,
                             '# How much to translate in xyz directions']
        all_settings['clean_settings_dict']['translate_by'] = inp_translate

        # Now save only certain actions to the include.vmd file to be sourced later
        whitelist = ['rotate']
        new_transforms = [line for line in new_transforms
                          if any(j in line for j in whitelist)]
        new_include = (open_read(all_settings['tcl']['vmd_source_file'], False)
                       + '\n' * 2 + '\n'.join(new_transforms))
        open_write(all_settings['tcl']['vmd_source_file'], new_include)

        write_cleaned_orig_settings(all_settings['clean_settings_dict'],
                                    'settings.inp')
    else:
        EXC.WARN("VMD hasn't created a logfile!", all_settings['verbose_output'])

    return all_settings

def stitch_mp4(files, files_folder, output_name, length, ffmpeg_binary,
               Acodec='aac', Vcodec='libx264',
               extra_flags="-pix_fmt yuv420p -preset slow -qscale 14",
               log_file="a.log", err_file="a.err"):
    if all(i in files for i in ['%', 'd', '.']):
        ext = files.split('.')[-1]
        num_of_nums = int(files.split('%')[-1].split('d')[0])
        prefix = files.split('%')[0]
        all_files = os.listdir(files_folder)
        # Remove files that don't have the correct extension
        all_files = [i for i in all_files if ext in i.split('.')[-1]]
        # Only keep files with the correct amount of numbers
        all_files = [i for i in all_files
                     if len(i[len(prefix):i.find('.')]) == num_of_nums]
        num_files = len(all_files)
        framerate = int(np.round(num_files / length, 0))
        if framerate == 0:
            framerate = 1
        in_files = files_folder + files
        pre_flags = ""

    elif "*" in files and '.' in files:
        ext = files.split('.')[-1]
        all_files = os.listdir(files_folder)
        # Remove files that don't have the correct extension
        all_files = [i for i in all_files if ext in i.split('.')[-1]]
        num_files = len(all_files)
        framerate = int(np.round(num_files / length, 0))
        if framerate == 0:
            framerate = 1
        pre_flags = '-pattern_type glob'  # Glob type input files
        in_files = '"%s"' % (files_folder + files)  # Input files must be inside a string

    else:
        EXC.ERROR("Input format for image files is incorrect.\nPlease input them "
                  "in the format:\n\n\t'pre%0Xd.ext'\n\nwhere pre is the prefix "
                  "(can be nothing), X is the number of numbers in the filename, "
                  "and ext is the file extension (e.g. png or tga).")

    if path_leads_somewhere(output_name + '.mp4'):
        # Remove the file before starting to prevent hanging on
        # 'are you sure you want to overwrite ...'
        os.remove(output_name + '.mp4')

    options = (ffmpeg_binary, pre_flags, framerate, in_files, Vcodec, Acodec,
               extra_flags, output_name, log_file, err_file)
    Stitch_cmd = ("%s -f image2 %s -framerate %s -i %s -vcodec %s -acodec %s "
                  "%s %s.mp4 > %s 2> %s" % options)
    print(Stitch_cmd)
    return Stitch_cmd, log_file, err_file

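# A minimal usage sketch (the paths and the 'ffmpeg' binary name are
# hypothetical, adjust to your setup). For frames named img0001.tga ...
# img0240.tga in ./img/ that should become a 24 s movie:
#
#     cmd, log, err = stitch_mp4("img%04d.tga", "./img/", "movie", 24, "ffmpeg")
#     subprocess.call(cmd, shell=True)
#
# The returned command uses ffmpeg's image2 demuxer; the glob branch (e.g.
# "img*.tga") is available when the frame numbers aren't zero-padded.
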
def _find_index(self, txt):
    if "_" not in txt:
        EXC.WARN("Can't find any indices.\nTxt = %s" % txt)

    txt = txt[txt.find('_'):]
    if txt[1] != '{':
        end_ind = 2
        index = [txt[1]]
    else:
        end_ind = txt.find('}')
        index = txt[txt.find('{') + 1:end_ind].split(',')

    return index, txt[end_ind:]

def init_bounding_box(all_settings):
    if isinstance(all_settings['bounding_box_scale'], (int, float)):
        all_settings['bounding_box_scale'] = [
            all_settings['bounding_box_scale']] * 3

    if (not isinstance(all_settings['bounding_box_scale'], list)
            and all_settings['verbose_output']):
        EXC.WARN("The 'bounding_box_scale' variable doesn't seem to be set correctly!"
                 "\nCorrect options are:"
                 "\n\t* integer or float"
                 "\n\t* list of ints or floats (for x,y,z dimensions)")

def open_read(filename, throw_error=True):
    filename = folder_correct(filename)
    if path_leads_somewhere(filename):
        with open(filename, 'r') as f:
            txt = f.read()
        return txt
    else:
        if throw_error:
            EXC.ERROR("The %s file doesn't exist!" % filename)
        return False

def setting_typo_check(line, defaults, setting_file_settings, replacer_settings):
    """
    Inputs:
        * line <str>                   => The settings-file line to check.
        * defaults <list<str>>         => The valid (default) setting names.
        * setting_file_settings <list> => Accumulates the original (possibly misspelt) names.
        * replacer_settings <list>     => Accumulates the corrected names.
    """
    sett = line.split('=')[0].strip()
    poss_setts = fuzzy_variable_translate(sett, list(defaults), False, False, 0.6)

    if sum(poss_setts) == 1:
        new_sett = defaults[poss_setts]
        if len(new_sett) == 1:
            new_sett = new_sett[0]
        if new_sett != sett:
            line = line.replace(sett, new_sett)
            setting_file_settings.append(sett)
            replacer_settings.append(new_sett)
    elif sum(poss_setts) > 1:
        EXC.WARN("There are too many possible settings for '%s'. These are:\n\t* %s."
                 "\n\nI do not want to assume which one it is, please correct it in "
                 "the input file!" % (sett, '\n\t* '.join(defaults[poss_setts])), True)
    elif sum(poss_setts) < 1 and sett != 'path':
        EXC.WARN("I can't find any setting matching '%s'. "
                 "Please correct it in the input file!" % sett, True)
    return line

def import_and_check(str_lib, error=True):
    """A function to import modules and check if they exist."""
    try:
        i = __import__(str_lib)
        return i
    except ImportError:
        if error:
            EXC.ERROR("You need the library named '%s' to run this program please "
                      "install it!\n\n\t* If you are using conda use the command "
                      "conda install <lib>\n\n\t* If you are using pip use the "
                      "command sudo pip install <lib>\n\netc..." % str_lib)
            return None
        else:
            return False

def __split_by(self, line, by, length=2, congeal_last=False, min_lines=2):
    """
    Split a string ('line') by the 'by' variable.

    Inputs:
        * line         = line to split
        * by           = the splitter
        * length       = the max length the split list should be
        * congeal_last = Whether to join all but the 1st item in the split list
        * min_lines    = minimum length of the lines
    """
    split = line.split(by)
    if len(split) < min_lines:
        EXC.ERROR("""
ERROR: The length of line (%s) split by '%s' is %i, it should be 2.

This is probably due to something being entered incorrectly in the
Templates/defaults file.

Each line needs to have the format:
    'setting' : 'default' , # Explanation | ['list of accepted settings'] | 'not-tested' or 'tested'

In the Templates/defaults.py file look for the line:
\t'%s'
and check the substring:
\t'%s'
if there.
""" % (line, by, len(split), line, by))

    if len(split) > length:
        msg = "\n\nWarning docs entry entered incorrectly.\nDetails:"
        msg += "\n\t* Line = %s" % line
        msg += "\n\t* Length after split by %s = %i" % (by, len(split))
        EXC.ERROR(msg)

    if congeal_last:
        split = split[0], by.join(split[1:])
    return split

def text_search(txt, start_find, end_find="\n", error_on=True):
    start_ind = txt.find(start_find)
    if start_ind != -1:
        txt = txt[start_ind:]
        end_ind = txt.find(end_find)
        if end_ind != -1:
            txt = txt[:end_ind]
        else:
            txt = txt[0:20]
        return txt, start_ind, end_ind

    if error_on:
        EXC.WARN("No instance of '%s' in the txt!" % start_find)
    return False

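# A quick illustration (made-up text, just to show the return values):
#
#     >>> text_search("a = 1\nb = 2\n", "b =")
#     ('b = 2', 6, 5)
#
# i.e. the matched snippet, the index where 'b =' starts in the original text,
# and the index of the end delimiter relative to the trimmed text.
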
def _find_matching_str(self, txt, left_str, right_str):
    count2 = False
    count = 0
    for i, letter in enumerate(txt):
        if letter == left_str:
            count += 1
            count2 = True
        if letter == right_str:
            count -= 1
        count = abs(count)
        if count == 0 and count2:
            return i + 1

    EXC.WARN("Couldn't find the enclosing brace, txt = %s" % txt)

def VMD_visualise(step_info, PID):
    os.system("touch %s" % step_info['vmd_junk'][PID])
    os.system("touch %s" % step_info['vmd_err'][PID])

    vmd_exe = step_info['vmd_exe']
    vmd_script = step_info['vmd_script'][PID]
    vmd_junk = step_info['vmd_junk'][PID]
    vmd_err = step_info['vmd_err'][PID]
    vmd_command = "%s -nt -dispdev none -e %s > %s 2> %s &" % (
        vmd_exe, vmd_script, vmd_junk, vmd_err)

    print(f"VMD Command: '{vmd_command}'")
    # Maybe subprocess.call would be better as this would open VMD in a new thread?
    os.system(vmd_command)

    # Wait for VMD to have finished its stuff to prevent race conditions
    made_file = False
    race_start = time.time()
    while not made_file:
        # This checks if VMD has finished, preventing race conditions
        made_file = bool(
            os.path.isfile(step_info['tcl']['pic_filename'][PID])
            * vmd_finished_check(step_info['vmd_junk'][PID]))

        race_time = time.time() - race_start
        if race_time > step_info['vmd_timeout']:
            if (not os.path.isfile(step_info['tcl']['pic_filename'][PID])
                    and vmd_finished_check(step_info['vmd_junk'][PID])):
                EXC.ERROR("\n\nVMD finished, but hasn't rendered a file! "
                          "Check the VMD script at %s"
                          % step_info['vmd_script'][PID])
                os._exit(0)
            else:
                EXC.ERROR("\n\nVMD is taking a long time! I think there may be a bug "
                          "in the VMD script. Try compiling the script manually with "
                          "the command 'source ./src/TCL/script_4_vmd.vmd' within the "
                          "tkconsole in VMD.\nIf everything works there then try "
                          "increasing the 'vmd_timeout' setting in the python "
                          "main.py settings.")

        time.sleep(0.1)

def init_missing_pos_step_vars(all_settings):
    """
    Will initialise the missing_pos_steps variable. There are 3 options:
        'skip'    -> Will simply ignore the steps that don't have positions.
        'closest' -> Will use the closest known position to the coeff timestep.
        'use N'   -> Will use a specified position timestep.

    This will change the setting in the all_settings dictionary.
    """
    var = all_settings['missing_pos_steps'].lower()
    varSplit = var.strip().split()
    if len(varSplit) == 0:
        EXC.ERROR("Please set the variable 'missing_pos_steps'.\n\n"
                  + "It is currently: '%s'" % var)

    poss_vars = ('skip', 'closest', 'use')
    varFixed = txt_lib.fuzzy_variable_helper(varSplit[0], poss_vars)

    if varFixed == 'use':
        if len(varSplit) != 2:
            EXC.ERROR("The correct syntax for using the 'use' keyword in the "
                      "'missing_pos_steps' is `missing_pos_steps = 'use N'` where N "
                      "represents the step you wish to use as the atomic coords for "
                      "the full visualisation.")
        else:
            try:
                all_settings['use_missing_pos_step'] = int(varSplit[1])
            except ValueError:
                EXC.ERROR("Can't find position step '%s'. Please choose an integer."
                          % varSplit[1])

    all_settings['missing_pos_steps'] = varFixed

def fix_missing_pos_steps(all_settings):
    """
    Will initialise which timesteps to carry out based on which steps are
    available. This is dependent on the method chosen to correct for missing
    position steps.

    If 'skip' is chosen then the following applies:
        if nuclear timesteps = [1,2,3, 6] and coeff timesteps = [1,2,3,4,5,6]
        we would carry out [1,2,3, 6]...

    If 'closest' or 'use ' is chosen we correct for missing position steps as
    outlined in the documentation or the docstr on the function
    `missing_pos_steps`.

    We do this by finding which steps aren't common to all 3 lists as that is
    the way the xyz reader works.
    """
    if all_settings['missing_pos_steps'] == 'skip':
        common_timesteps = np.intersect1d(all_settings['nucl_tsteps_to_read'],
                                          all_settings['coeff_tsteps_to_read'])

        all_settings['nucl_tsteps_to_read'] = common_timesteps
        all_settings['coeff_tsteps_to_read'] = common_timesteps
        all_settings['pos_step_inds'] = np.arange(len(common_timesteps))

    elif all_settings['missing_pos_steps'] == 'use':
        use_step = all_settings['use_missing_pos_step']
        pos_steps = all_settings['nucl_tsteps_to_read']
        mol_steps = all_settings['coeff_tsteps_to_read']

        if use_step < 0 or use_step >= len(pos_steps):
            EXC.ERROR("Step %i is out of bounds to use as the correction to missing "
                      "position steps." % use_step
                      + "\n\nPlease choose a step 0 <= N < %i as "
                        "`missing_pos_steps = 'use N'`" % len(pos_steps))

        all_settings['nucl_tsteps_to_read'] = [pos_steps[use_step]]
        all_settings['pos_step_inds'] = [0] * len(mol_steps)

    elif all_settings['missing_pos_steps'] == 'closest':
        pos_steps = all_settings['nucl_tsteps_to_read']
        mol_steps = all_settings['coeff_tsteps_to_read']

        all_settings['pos_step_inds'] = get_closest_inds(pos_steps, mol_steps)
        pos_inds = sorted(np.unique(all_settings['pos_step_inds']))
        all_settings['nucl_tsteps_to_read'] = [pos_steps[i] for i in pos_inds]

        all_settings['pos_step_inds'] = np.array(
            sorted(all_settings['pos_step_inds']))
        all_settings['pos_step_inds'] -= all_settings['pos_step_inds'][0]

        if all_settings['calibrate'] and pos_inds:
            all_settings['pos_step_inds'] = [0]

def check_mkdir(path, max_depth=2):
    path = folder_correct(path)
    lpath = path.split('/')

    # Which of the parent folders already exist?
    act_folders = []
    for i in range(2, len(lpath)):
        sub_path = '/'.join(lpath[:i])
        if not path_leads_somewhere(sub_path):
            act_folders.append(False)
        else:
            act_folders.append(True)

    if not all(act_folders[:-max_depth]):
        EXC.ERROR("Too many folders need to be created please check the filepaths, "
                  "or increase the amount of folders I am allowed to create "
                  "(check_mkdir).")
    else:
        for i in range(2, len(lpath) + 1):
            sub_path = '/'.join(lpath[:i])
            if (not os.path.isdir(sub_path)
                    and '.' not in sub_path[sub_path.rfind('/'):]):
                os.mkdir(sub_path)
        return True

def _create_struct(self, lIst, obj_types, parent="root", level=0,
                   struct={}, levels={0: 0}, names={}):
    # N.B. the mutable default arguments persist between calls; the caller in
    # _simplify_U passes fresh dicts explicitly.
    if len(lIst) != len(obj_types):
        EXC.WARN("%s._create_struct has a different number of objects and object "
                 "types.\n\tobj_types = %s\n\tobjs = %s"
                 % (self.__name__, str(obj_types), str(lIst)))

    for i, item in enumerate(lIst):
        if parent == "root":
            parent = self
        if levels.get(level) is None:
            levels[level] = 0
        if struct.get(level) is None:
            struct[level] = []
            names[level] = []

        struct[level].append((item, parent))
        names[level].append(item.__name__)
        levels[level] += 1

        if obj_types[i] not in child_math_objects:
            new_list = item.objs
            self._create_struct(new_list, item.obj_typ, item, level + 1,
                                struct, levels, names)

    return struct, names

def xyz_step_writer(positions, atom_nums, timestep, step, filepath, conv=0.52918):
    natom = len(positions)
    positions *= conv
    if natom != len(atom_nums):
        EXC.ERROR("The length of the positions array and atomic numbers array in "
                  "the xyz_writer are not the same. Please fix this to use the "
                  "'background_mols' feature.\n\tlen(positions) = %i"
                  "\n\tlen(atom_nums) = %i" % (natom, len(atom_nums)))

    s = "%i\ni = %i, time = %.3f\n" % (natom, step, timestep)
    s += '\n'.join(['\t'.join([str(atom_nums[i])] + pos.tolist())
                    for i, pos in enumerate(positions.astype(str))])
    open_write(filepath, s)

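# For reference, a 2-atom call such as (values are illustrative only)
#
#     xyz_step_writer(np.array([[0.0, 0.0, 0.0], [1.1, 0.0, 0.0]]),
#                     [6, 1], timestep=0.5, step=10, filepath="frame.xyz")
#
# writes a single xyz frame of the form:
#
#     2
#     i = 10, time = 0.500
#     6	0.0	0.0	0.0
#     1	0.582...	0.0	0.0
#
# note the coordinates are multiplied by conv (Bohr -> Angstrom) before writing.
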
def _display_img(self):
    """
    Displays the created image in the default viewer. Only works in linux!
    """
    if self.all_settings['mols_plotted'] > 0:
        if self.all_settings['load_in_vmd']:
            self.all_settings['tcl']['pic_filename'][self.PID] = \
                self.tga_filepath
            io.vmd_variable_writer(self.all_settings, self.PID)

            vmd_bin = self.all_settings['vmd_exe']
            os.system(f"{vmd_bin} -nt -e "
                      f"{self.all_settings['vmd_script'][self.PID]}")
            io.settings_update(self.all_settings)

        if self.all_settings['show_img_after_vmd']:
            open_pic_cmd = "xdg-open %s" % (self.tga_filepath)
            subprocess.call(open_pic_cmd, shell=True)
    else:
        EXC.WARN("There were no wavefunctions plotted on the molecules!")

def init_colors(all_settings):
    """
    Initialises the colors of the wavefunction e.g. whether to use density, a
    purely real phase (neg and pos) or full complex phase (pos, neg, imag, real).
    """
    density, real_phase, full_phase = txt_lib.fuzzy_variable_translate(
        all_settings['type_of_wavefunction'],
        ["density", "real-phase", "phase"],
        all_settings['verbose_output'])

    if density:
        all_settings['color_type'] = 'density'
    elif real_phase:
        all_settings['color_type'] = 'real-phase'
    elif full_phase:
        all_settings['color_type'] = 'phase'
    else:
        EXC.WARN("Sorry I'm not sure what type of color I should use, defaulting "
                 "to %s" % dft.defaults['type_of_wavefunction'])
        all_settings['color_type'] = dft.defaults['type_of_wavefunction']
