def _get_arguments(arguments: list, holes: list):
    """Greedily match `arguments` against the typed `holes`.

    Repeatedly scans the (deep-copied) argument list for the first
    argument whose type equals the first remaining hole; a match
    consumes both the hole and the argument and moves the argument to
    `relevant_arguments`.  When a full pass matches nothing, every
    remaining argument is simplified once and the positional penalty
    (`fine`) grows by 4.  The loop stops once a pass is idle AND all
    remaining argument types are primitive, or either list is empty.

    NOTE: `holes` is mutated in place (the caller sees the consumed
    holes); the caller's `arguments` list is untouched thanks to the
    deepcopy.

    Returns (relevant_arguments, mass) where `mass` accumulates
    (position + 1) * fine per match — lower means a better fit.
    """
    arguments = list(deepcopy(arguments))
    relevant_arguments = []
    mass = 0
    primitives = False
    idle = False
    fine = 1
    while not (idle and primitives) and len(holes) > 0 and len(arguments) > 0:
        primitives = True
        idle = True
        for i, argument in enumerate(arguments):
            if not argument.type.isprimitive():
                primitives = False
            if argument.type == holes[0]:
                del holes[0]
                del arguments[i]  # safe: we break right after mutating
                relevant_arguments.append(argument)
                mass += (i + 1) * fine
                idle = False
                break
        if idle:
            # Nothing matched this pass: retry with simplified forms,
            # at a higher positional cost.
            arguments = [argument.simplify() for argument in arguments]
            fine += 4
        IO.debug("primitives = {}", primitives)
        IO.debug("idle = {}", idle)
        IO.debug("holes = {}", holes)
        IO.debug("arguments = {}", arguments)
        IO.debug("---------------------------")
    return relevant_arguments, mass
def _vmd_visualise(self, step):
    """
    Visualises the data. This fills in the variables in the vmd
    template, writes the script and runs it in vmd.

    Parameters
    ----------
    step : hashable
        Key into the per-step timing table; the elapsed time of this
        call is accumulated under 'VMD Visualisation'.
    """
    start_vmd_time = time.time()

    # Every cube file listed in the tcl settings must exist on disk.
    for i in self.all_settings['tcl']['cube_files'].split(' '):
        if not io.path_leads_somewhere(i.strip()):
            msg = "Sorry I couldn't find the following cube file:"
            msg += "\n%s" % i.strip()
            EXC.ERROR(msg)

    self.all_settings['tcl']['pic_filename'][self.PID] = self.tga_filepath
    io.vmd_variable_writer(self.all_settings, self.PID)

    # Check the vmd script was actually written before running it.
    tmp = os.path.isfile(self.all_settings['vmd_script'][self.PID])
    if not tmp:
        msg = "Sorry I can't find the vmd script!"
        msg += "It hasn't been created (or created in the wrong place)."
        EXC.ERROR(msg)

    # Queue the tga for deletion unless the user keeps tga files or we
    # are calibrating.  (Fixed: `all_settings` was referenced without
    # `self.`, which raises NameError at runtime.)
    cond = ('tga' not in self.all_settings['files_to_keep']
            and not self.all_settings['calibrate'])
    if cond:
        self.all_settings['delete_these'].append(self.tga_filepath)

    io.VMD_visualise(self.all_settings, self.PID)

    end_time = time.time() - start_vmd_time
    self.all_settings['times']['VMD Visualisation'][step] += end_time
def init_permanent_settings(all_settings):
    """Refresh the permanent-settings file for this run.

    Ensures the docs have been generated once, then records the current
    runtime, the run path, and whether this run is a calibration run.
    """
    # Create the docs if they haven't already been created
    if not ps.created_docs:
        os.system("python3 Create_docs.py")
        io.read_write_perm_settings(all_settings['ps_filepath'],
                                    "created_docs", True)

    # Save the previous runtime
    io.read_write_perm_settings(
        all_settings['ps_filepath'],
        "previous_runtime",
        datetime.datetime.strftime(datetime.datetime.now(), ps.time_format))

    # # Checking Tachyon Renderer Path
    # new_tachyon_path = io.find_tachyon(ps.tachyon_path)
    # if new_tachyon_path != ps.tachyon_path:
    #     io.read_write_perm_settings(all_settings['ps_filepath'], "tachyon_path", new_tachyon_path)
    #     tachyon_path = new_tachyon_path
    # else:
    #     tachyon_path = ps.tachyon_path
    # all_settings['tcl']['tachyon_path'] = tachyon_path

    # Remember the path used this run, and whether we calibrated
    io.read_write_perm_settings(all_settings['ps_filepath'],
                                "previous_path", all_settings['path'])
    if all_settings['calibrate']:
        io.read_write_perm_settings(all_settings['ps_filepath'],
                                    "previous_calibrate", True)
    else:
        io.read_write_perm_settings(all_settings['ps_filepath'],
                                    "previous_calibrate", False)
def test_get_tachyon_path(tachyon_func):
    '''Test finding the path to the tachyon renderer.

    This needs to work for Mac and Linux
    '''
    # The mocked "is this tachyon?" check simply tests file existence.
    tachyon_func.side_effect = lambda i: i.is_file()

    # Set up a directory that mocks the linux VMD directory:
    # tachyon lives under lib/tachyon/, vmd under LINUXAMD64/.
    _tmp_dir = tempfile.TemporaryDirectory()
    tmp_dir = Path(_tmp_dir.name)
    tachyon_dir = tmp_dir / 'lib/tachyon'
    tachyon_dir.mkdir(parents=True)
    tachyon_path = tachyon_dir / 'tachyon_LINUXAMD64'
    tachyon_path.touch()
    vmd_dir = tmp_dir / 'LINUXAMD64'
    vmd_dir.mkdir()
    vmd_path = vmd_dir / 'vmd_LINUXAMD64'
    vmd_path.touch()

    # Test the linux get_tachyon_path
    path = IO.get_tachyon_path(vmd_path)
    assert path == tachyon_path
    del _tmp_dir  # drops the TemporaryDirectory and its tree

    # Set up the Mac directory: tachyon sits next to the vmd binary.
    _tmp_dir = tempfile.TemporaryDirectory()
    tmp_dir = Path(_tmp_dir.name)
    tachyon_path = tmp_dir / 'tachyon_MACOSXARM64'
    vmd_path = tmp_dir / 'vmd_MACOSXARM64'
    tachyon_path.touch()

    # Test the Mac Path
    path = IO.get_tachyon_path(vmd_path)
    assert path == tachyon_path
def check_VMD_TEMP(all_settings):
    """Truncate the vmd template just after its tachyon render line,
    then re-append the canonical end-of-file marker and write it back."""
    template_path = all_settings['vmd_temp']
    lines = io.open_read(template_path).strip('\n').split('\n')

    # Keep everything up to (and including) the render/tachyon line;
    # if no such line exists the whole template is kept.
    keep = len(lines)
    for idx, line in enumerate(lines):
        lowered = line.lower()
        if 'render' in lowered and 'tachyon' in lowered:
            keep = idx + 1
            break

    trimmed = lines[:keep]
    trimmed.append(consts.end_of_vmd_file)
    io.open_write(template_path, '\n'.join(trimmed))
def parse(self, string: str) -> Node:
    """Parse `string` with the Stanford parser into the project Node tree.

    Returns None for blank input.  If the sentence's label is one of
    the "fixable" labels (or plain "S") yet it has no VP child, the
    sentence is re-parsed with a leading "show " to coerce an
    imperative reading.  The (possibly re-parsed) tree is logged.
    """
    if len(string.split()) == 0:
        return None
    root = next(StanfordParser.parser.raw_parse(string))[0]
    label = root.label()
    # Direct VP children only; leaf strings inside the tree are skipped.
    children = [node for node in root
                if not isinstance(node, str) and node.label() in ["VP"]]
    if label in StanfordParser.fixable_sentence | {"S"} and len(children) == 0:
        root = next(StanfordParser.parser.raw_parse("show " + string))[0]
    # Log the tree under a file named after its (lowercased) label.
    IO.log(root.label().lower() + ".txt", str(root))
    return StanfordParser.convert(root)
def _finalise(self, num_steps):
    """
    Updates the setting file with changes that occured in the vmd file.
    Will also display the img or stitch the movie. It will finally
    collect garbage.

    Parameters
    ----------
    num_steps : int
        Total number of steps in the run (not used directly here).
    """
    io.settings_update(self.all_settings)
    self._copy_settings_file()
    self._store_imgs()
    # Fixed: `all_settings` was referenced without `self.`, which
    # raises NameError at runtime.
    if not self.all_settings['calibrate']:
        self._stitch_movie()
    else:
        self._display_img()
    self._garbage_collector()
def TrainModel(sOutDir, Model, GenTrain, GenVal, Fit_args, np1TrainDir,
               np1ValDir, np1TestDir, kFold):
    """
    Trains Model using GenTrain and GenVal data. These are
    imagedatagenerators.  Uses TestData, a numpy array, as the
    prediction values.

    A checkpoint is written after every epoch (overwriting), then the
    training history and plots are saved alongside a pickled history
    dict.  Returns the trained Model.
    """
    # All per-fold artifacts live under <sOutDir>/<kFold>thFold.
    fold_dir = os.path.join(sOutDir, str(kFold) + 'thFold')
    # exist_ok=True already handles "already exists"; the previous
    # os.path.exists() pre-check was redundant and race-prone.
    os.makedirs(fold_dir, exist_ok=True)

    # Saves the model after each epoch, overwriting the previous file.
    callbacks = [
        ModelCheckpoint(os.path.join(fold_dir, 'trainedModel.h5'),
                        monitor='val_acc')
    ]

    # fit model
    history = Model.fit_generator(GenTrain, validation_data=GenVal,
                                  callbacks=callbacks, **Fit_args)

    # saves unet, and some training plots
    IO.SaveTrainingHistory(fold_dir, Model, history, np1TrainDir,
                           np1ValDir, np1TestDir)

    # `with` already closes the file; the explicit close() was redundant.
    with open(os.path.join(fold_dir, 'trainHistoryDict.pkl'), 'wb') as file:
        pickle.dump([history.history], file)

    return Model
def test_run_e_works(self):
    """Run eprover on the extended problem file and expect success.

    Fixed: '--memory-limit=2048--auto-schedule' was missing a space, so
    the two options were fused into one unrecognised argument.
    """
    eprover_path = os.path.join(GlobalVars.EPATH, 'eprover')
    command = ' '.join([
        eprover_path,
        '--cpu-limit=10 --tstp-format -s --proof-object',
        '--memory-limit=2048 --auto-schedule',
        self.p_file_extended,
    ])
    resultcode, _stdout, _stderr = IO.run_command(command, 10)
    self.assertTrue(resultcode)
def input_createntials(config):
    """
    Read user input and check if credentials are valid.

    Loops until IO.input_jira_credentials + BaseFactory.create_jira
    succeed.  On failure the user may retry; answering "no" raises
    click.Abort (abort=True) and exits.  Returns the validated config.
    """
    while True:
        try:
            config = IO.input_jira_credentials(config.url, config.username,
                                               config.password)
            # Raises if JIRA rejects the credentials.
            BaseFactory.create_jira(config)
        except Exception:
            IO.error('Credentials not valid')
            # abort=True: a "no" answer raises click.Abort (no fallthrough).
            if click.confirm('Try again?', default=True, abort=True):
                continue
        return config
def _writeCubeFile(self, step, molID, numCube=-1):
    """
    Converts each molecular wavefunction to a cube file to be loaded
    in vmd.

    Parameters
    ----------
    step : int
        Step index used for naming, timing bookkeeping.
    molID : int
        Molecule identifier used in the cube filenames.
    numCube : int, optional
        Kept for interface compatibility (unused here).
    """
    self.__createCubeFileTxt(step, molID)
    start_data_write_time = time.time()

    # Fixed: `all_settings` was referenced without `self.` below, which
    # raises NameError at runtime.
    if self.all_settings['keep_cube_files']:
        # Permanent, uniquely-named cube files.
        RDataFName = "%s-%i-%i.cube" % ('Real', step, molID)
        IDataFName = "%s-%i-%i.cube" % ('Imag', step, molID)
    else:
        # Reusable temp names (one pair per molecule).
        RDataFName = "tmp%s-%i.cube" % ('Real', molID)
        IDataFName = "tmp%s-%i.cube" % ('Imag', molID)

    RDataFPath = self.all_settings['data_fold'] / RDataFName
    IDataFPath = self.all_settings['data_fold'] / IDataFName

    if not self.all_settings['keep_cube_files']:
        self.all_settings['delete_these'].append(RDataFPath)
        self.all_settings['delete_these'].append(IDataFPath)

    if self.writeRealCube:
        self.data_files_to_visualise.append(RDataFPath)
    if self.writeImagCube:
        self.data_files_to_visualise.append(IDataFPath)

    # (Removed a duplicated assignment of tcl['cube_files'] — it was
    # set twice with the same value.)
    self.all_settings['tcl']['cube_files'] = \
        ' '.join(map(str, self.data_files_to_visualise))

    self.tga_folderpath, _, self.tga_filepath = io.file_handler(
        self.all_settings['img_prefix'], 'tga', self.all_settings)

    # if all_settings['draw_time']:
    #     replace = str(self.all_settings['Mtime-steps'][self.step])
    #     tLabelTxt = self.all_settings['time_lab_txt'].replace("*", replace)
    #     self.all_settings['tcl']['time_step'] = '"%s"' % (tLabelTxt)

    if self.writeRealCube:
        io.open_write(RDataFPath, self.RCubeTxt)
    if self.writeImagCube:
        io.open_write(IDataFPath, self.ICubeTxt)

    end_time = time.time() - start_data_write_time
    self.all_settings['times']['Write Cube File'][step] += end_time
def config():
    """ Change credentials of JIRAs. """
    # PUB JIRA first.
    click.echo('Jira-pub')
    validated = input_createntials(AppConfig.read_pub_config())
    if validated:
        IO.success('PUB credentials are valid')
        AppConfig.write_pub_config(validated)

    # Then the SK JIRA.
    click.echo('\nSK Jira')
    validated = input_createntials(AppConfig.read_sk_config())
    if validated:
        IO.success('SK credentials are valid')
        AppConfig.write_sk_config(validated)
def _display_img(self):
    """
    Displays the created image in the default viewer.

    Only works in linux! (relies on xdg-open and shell calls)
    """
    if self.all_settings['mols_plotted'] > 0:
        if self.all_settings['load_in_vmd']:
            # Re-run vmd interactively on the freshly written script.
            self.all_settings['tcl']['pic_filename'][self.PID] = \
                self.tga_filepath
            io.vmd_variable_writer(self.all_settings, self.PID)
            vmd_bin = self.all_settings['vmd_exe']
            os.system(f"{vmd_bin} -nt -e {self.all_settings['vmd_script'][self.PID]}")
            io.settings_update(self.all_settings)
        if self.all_settings['show_img_after_vmd']:
            open_pic_cmd = "xdg-open %s" % (self.tga_filepath)
            subprocess.call(open_pic_cmd, shell=True)
    else:
        EXC.WARN("There were no wavefunctions plotted on the molecules!")
def build_tex(tex_folder, mod_name):
    """Compile `<tex_folder><mod_name>.tex` with pdflatex and open the pdf.

    pdflatex's aux/log files (written into the cwd) are removed, and
    the pdf is moved next to the tex source before being opened with
    xdg-open.
    """
    base_filepath = tex_folder + mod_name
    extensions = ['tex', 'pdf', 'aux', 'log']
    filepaths = {
        i: io.folder_correct(base_filepath + '.' + i)
        for i in extensions
    }
    # pdflatex drops aux/log in the cwd, not next to the source file.
    remove_filepaths = [
        io.folder_correct(os.getcwd() + '/' + mod_name + '.' + i)
        for i in ['aux', 'log']
    ]
    # Throwaway file that swallows pdflatex's stdout.
    jnk_filepath = io.folder_correct(base_filepath + '6sfdsafo7re_4adsfsd.tmp')
    sb.call("pdflatex %s > %s" % (filepaths['tex'], jnk_filepath), shell=True)
    os.remove(jnk_filepath)
    for i in remove_filepaths:
        os.remove(i)
    # Move the pdf (also written to the cwd) next to the tex source.
    os.rename(io.folder_correct(os.getcwd() + '/' + mod_name + '.pdf'),
              filepaths['pdf'])
    sb.call("xdg-open %s" % filepaths['pdf'], shell=True)
def _garbage_collector(self):
    """ Deletes temporary files queued in all_settings['delete_these']. """
    self.all_settings['delete_these'].append(
        io.folder_correct('./vmdscene.dat')
    )
    # Fixed: `all_settings` was referenced without `self.`, which
    # raises NameError at runtime.
    for f in self.all_settings['delete_these']:
        if os.path.isfile(f):
            os.remove(f)
def _write_background_mols(self): """ Will write the background molecules as a seperate xyz file. The background molecules are the inactive FIST molecules as part of the wider crystal. """ # Dealing with the background molecules largest_dim = np.argmax( [np.max(self.all_settings['coords'][self.posStepInd][:, i]) for i in range(3)] ) # dims = [Xdims, Ydims, Zdims][largest_dim] # Find maximum coordinate at_crds = self.all_settings['coords'][self.posStepInd] at_crds = at_crds[self.all_settings['atoms_to_plot']][:, largest_dim] max_coord = np.max(at_crds) max_coord += self.all_settings['background_mols_end_extend'] # Find coordinates within below this max_coord mask = self.all_settings['coords'] mask = mask[self.step][:, largest_dim] < max_coord # Apply the mask to get data. Find metadata. background_mols_pos = self.all_settings['coords'][self.posStepInd][mask] background_mols_at_num = self.all_settings['at_num'][mask] backgrnd_mols_filepath = self.all_settings['data_fold'] + \ "bckgrnd_mols-%s.xyz" % self.PID # Write the background mols io.xyz_step_writer(background_mols_pos, background_mols_at_num, self.all_settings['Mtime-steps'][self.step], self.step, backgrnd_mols_filepath, consts.bohr2ang) tcl_load_xyz_cmd = 'mol new {%s}' % backgrnd_mols_filepath tcl_load_xyz_cmd += ' type {xyz} first 0 last -1 step 1 waitfor 1' self.all_settings['tcl']['backgrnd_mols'] = tcl_load_xyz_cmd
def __print_timings(self, step, num_steps, start_step_time):
    """ Will pretty print the timings for one trajectory step.

    Parameters
    ----------
    step : int
        Zero-based step index (displayed 1-based).
    num_steps : int
        Total number of steps, for the "i/N" display.
    start_step_time : float
        time.time() stamp taken when the step started.
    """
    tmpTime = time.time() - start_step_time
    timeTaken = typ.seconds_to_minutes_hours(tmpTime, "CPU: ")
    timeStep = self.all_settings['Mtime-steps'][self.step]
    msg = "Trajectory %i/%i %s Timestep %s" % (step + 1, num_steps,
                                               timeTaken, timeStep)
    traj_print = "\n"+txt_lib.align(msg, 69, "l") + "*"
    if self.all_settings['verbose_output']:
        print("*"*70)
        print(traj_print)
        # Full per-category timing breakdown.
        io.times_print(self.all_settings['times'], step, 70, tmpTime)
    else:
        # Quiet mode: overwrite the same console line each step.
        io.print_same_line(traj_print, sys, print)
    if self.all_settings['verbose_output']:
        print("*"*70, "\n")
    self.all_settings['times_taken'].append(time.time() - start_step_time)
def main(sConfigFileDir, ithFold, iNumFoldsTotal):
    """
    Parameters
    ----------
    sConfigFileDir : string
        path to config.ini file that specifies training and testing params
    ithFold : int
        indicates which fold this experiment is on
    iNumFoldsTotal : int
        indicates total number of folds in this experiment

    Raises
    ------
    ValueError
        If no config.ini file is found, or ithFold >= iNumFoldsTotal.

    Returns
    -------
    pdResults : dataframe
        contains error metrics from experiment
    """
    # Validate arguments before doing any work.
    if ithFold >= iNumFoldsTotal:
        raise ValueError('cannot do {}th fold when there are only {} folds in total'.format(ithFold, iNumFoldsTotal))
    if os.path.exists(sConfigFileDir) != True:
        raise ValueError('{} not found'.format(sConfigFileDir))

    # set up config files
    oConfig = configparser.ConfigParser()
    oConfig.read(sConfigFileDir)

    # copy code, and config file to new output directory
    #TODO: replace the lSubDir with a function that just recursively checks for all dirs
    IO.CopyToOutDir(ithFold, iNumFoldsTotal, oConfig, sConfigFileDir,
                    lSubDirs = ['src', 'tests', 'tests/ErrorMetrics', 'models'])

    # set up image generator
    GenTrain, GenVal, Gen_args, np1TrainDir, np1ValDir, np1TestDir = \
        DataGenFun.setupGenerators(oConfig, iNumFoldsTotal, ithFold)

    # set up model
    NewModel = CompileModel(oConfig, iNumFoldsTotal)
    print('Model Created \n{:.0f}th fold \n Data generated with {:.0f} folds in total'.format(ithFold+1, iNumFoldsTotal))

    # train data and do error metricsi
    pdResults = train.TrainAndTestUNet(oConfig, iNumFoldsTotal, ithFold,
                                       NewModel, GenTrain = GenTrain,
                                       GenVal = GenVal,
                                       np1TrainDir = np1TrainDir,
                                       np1ValDir = np1ValDir,
                                       np1TestDir = np1TestDir)
    # Free the Keras/TensorFlow session between folds.
    K.clear_session()
    return pdResults
def _build(self, node: NounPhrase, args: list) -> list:
    """Recursively build runtime objects from a noun-phrase subtree.

    For a leaf noun phrase the noun and adjectives are simplified, a
    matching shell/function is looked up and executed; if nothing can
    be constructed the raw string becomes an Object literal.  Non-leaf
    phrases collect arguments from their prepositional phrases and
    recurse into the nested noun phrases.
    """
    if isinstance(node, LeafNounPhrase):
        string = node.nn.text
        noun = Simplifier.simplify_word(string)
        adjectives = Simplifier.simplify_adjectives(node.jjs)
        type_names = [str(type) for type in Types.Type.extract(adjectives)]
        # If the single extracted type is itself a builder while the
        # noun is not, treat the type as the noun and demote the
        # original noun to an adjective/datum.
        if len(type_names) == 1 and type_names[0] in self._builders \
                and noun not in self._builders:
            adjectives.remove(type_names[0])
            adjectives.append(string)
            noun = type_names[0]
        constructed_object = None
        function = NullFunction()
        shell = self._get_relevant_shell(noun, adjectives)
        if shell is not None:
            adjectives = shell.difference(adjectives)
            function = self._get_relevant_function(noun, adjectives,
                                                   shell.functions, args)
            # Finite mass means every argument hole could be filled.
            if function.mass < float("inf"):
                constructed_object = self._execute(function,
                                                   function.relevant_args)
        if constructed_object is None:
            constructed_object = Object.valueOf(string)
        IO.debug("relevant_function = {}", function)
        IO.debug("===========================")
        return [constructed_object]
    else:
        # Arguments contributed by prepositional phrases, marked with
        # their pretext, are passed down alongside the inherited args.
        _args = [arg.mark(pp.pretext)
                 for pp in node.pps for np in pp.nps
                 for arg in self._build(np, [])]
        return [arg for np in node.nps
                for arg in self._build(np, args + _args)]
def init_all_settings_other(all_settings):
    """Populate miscellaneous settings entries, normalise the boolean
    flags, then fill in any missing position-step variables."""
    all_settings['any_extra_raw_tcl_code'] = ''
    all_settings['clean_settings_dict'] = settings_file.final_orig_settings
    all_settings['settings_file'] = settings_file.settings_file
    all_settings['orig_settings'] = io.open_read(all_settings['settings_file'])
    all_settings['defaults'] = dft.defaults

    # Misc required step data
    all_settings['mols_plotted'] = ''

    # Normalise string-ish flags into real booleans.
    for flag in ('verbose_output', 'restart_vis', 'do_transition_state'):
        all_settings[flag] = typ.translate_to_bool(all_settings[flag], flag)

    init_missing_pos_step_vars(all_settings)
def _plot(self, step): """ Unsupported. Will plot a graph along side the visualisation. """ # Plotting if required start_plot_time = time.time() import matplotlib.pyplot as plt files = {'name': "G%i" % step, 'tga_fold': self.tga_filepath} self.all_settings['delete_these'].append(io.plot(self.all_settings, self.step, files, plt)) end_time = time.time() - start_plot_time self.all_settings['times']['Plot and Save Img'][step] += end_time
def _store_imgs(self):
    """
    Will convert images from tga to jpg (for storage).

    jpg creates smaller images than tga. We also add any leading zeros
    to files as this makes them easier to stitch together later on.
    """
    # First add leading zeros to files
    new_files, tga_files = io.add_leading_zeros(self.tga_folderpath)

    # Queue tga files for deletion unless the user keeps them or we are
    # calibrating.  (Fixed: `all_settings` was referenced without
    # `self.` in two places, which raises NameError at runtime.)
    cond = ('tga' not in self.all_settings['files_to_keep']
            and not self.all_settings['calibrate'])
    if cond:
        self.all_settings['delete_these'].extend(set(new_files))

    # Convert all .tga to .<img>
    if 'img' in self.all_settings['files_to_keep']:
        cnvt_command = ["mogrify", "-format",
                        self.all_settings['img_format'],
                        f'{self.tga_folderpath}/*.tga']
        subprocess.run(cnvt_command)
def __init__(self, templateFilePath, replacers, defaults, getPathOnly=False):
    """Build an HTML page from a template.

    Parameters
    ----------
    templateFilePath : str
        Path to the HTML template file.
    replacers : dict
        Mapping of `*placeholder*` -> replacement text (mutated:
        '*topnavStyle*' is set here).
    defaults : object
        Object carrying the default `params` for the page.
    getPathOnly : bool, optional
        When True only the title/save path are computed; the template
        is not read or substituted.
    """
    self.filePath = templateFilePath
    self.replacers = replacers
    self.defaults = defaults
    self.params = defaults.params
    self._getTitle()
    # NOTE(review): `static_folder` is assumed to be a module-level
    # constant — confirm it is defined in this module.
    self._determineSavePath(static_folder)
    if getPathOnly is not True:
        self.fileTxt = io.open_read(templateFilePath)
        self.replacers['*topnavStyle*'] = ' '
        self.fileTxt = self._replaceVars(self.fileTxt, self.replacers)
def issues(days_ago):
    """Migrate non-synchronized tickets from SK to PUB since from DAYS_AGO till NOW
    \b
    Finds non-synchronized tickets by using worklogs and assigned tasks
    in SK for current user. Provides git-like interface to choose which
    tickets have to be migrated. After that, it uses `issue` command for
    each of a task.

    Can synchronize maximum 1000 days.
    """
    sk, pub = JiraFactory.create()
    # Fixed off-by-one: `days_ago == 1` previously fell through to the
    # interactive prompt (`1 < days_ago`); the sibling `time` command
    # accepts its lower bound with `<=`.
    if days_ago and 1 <= days_ago < 1000:
        started = day_ago_to_datetime(days_ago)
    else:
        started = IO.input_days_ago(default=14, limit=1000)
    IssueSync(sk, pub).migrate_issues(started)
def time(days_ago):
    """Time synchronization between JIRAs from DAYS_AGO till NOW

    Finds existing worklogs in SK and PUB JIRA. Compares them by using
    `External ID`. Migrates all worklogs from PUB to SK if time
    differences exists. Uses PUB as a primary source. Migrates comments
    for worklogs as well.

    Can synchronize maximum 100 days.
    """
    # NOTE(review): this command shadows the name `time`; avoid
    # importing the stdlib `time` module in this file.
    sk, pub = JiraFactory.create()
    # Valid range given on the command line; otherwise prompt the user.
    if days_ago and 1 <= days_ago < 100:
        started = day_ago_to_datetime(days_ago)
    else:
        started = IO.input_days_ago(default=5, limit=100)
    TimeSynchronizer(sk, pub).do(started)
def __init__(self, templateFilePath, sectionParams, title, replacers, getPathOnly=False): self.filePath = templateFilePath # The template HTML of the table self.title = title # What the section is called self.params = sectionParams # All the parameters in the table # Borrowed from Parent (HTMLFile) self._determineSavePath(tables_folder) # Sets the self.savePath if getPathOnly is not True: self.fileTxt = io.open_read(self.filePath) # Text in table file self._create_table() repl = {i: replacers[i] for i in replacers} repl['*table_data*'] = self.tableTxt repl['*table_name*'] = self.title.title() repl['*topnavStyle*'] = 'style="margin-left: 205px; width:100%;"' self.fileTxt = self._replaceVars(self.fileTxt, repl)
def __init__(self, filePath):
    """Parse the defaults dictionary out of the file at `filePath`.

    Sections are discovered first, then each section's raw lines
    ('ltxt') are parsed into setting -> params entries; sections with a
    blank name are dropped afterwards.
    """
    self.fPath = filePath
    self.params = OrderedDict()
    self.fTxt = io.open_read(filePath)

    # Ignore the docstring at the top and get only the defaults dictionary
    defaultsTxt = self._getDefaultsDict()
    self._separateSections(defaultsTxt)  # Get sections in defaults dict

    # Parse each line
    remKeys = []
    for section in self.params:
        if section.strip():
            for line in self.params[section]['ltxt']:
                setting, params = self._parseLine(line)
                if setting:
                    self.params[section][setting] = params
            # Raw line text is no longer needed once parsed.
            self.params[section].pop('ltxt')
        else:
            remKeys.append(section)

    # Remove redundant keys
    for key in remKeys:
        self.params.pop(key)
def _stitch_movie(self):
    """
    Stitches the individual images together into a movie using the
    ffmpeg binary in the bin/ folder.

    Raises SystemExit for any movie format other than mp4.
    """
    files = "*.%s" % self.all_settings['img_format']

    # Creating the ffmpeg and convert commands for stitching
    if self.all_settings['movie_format'] == 'mp4':
        # Make sure the bundled ffmpeg is executable (chmod 755).
        os.chmod(self.all_settings['ffmpeg_bin'], int("755", base=8))
        title_path = self.tga_folderpath + self.all_settings['title']
        Stitch_cmd, tmp, _ = io.stitch_mp4(
            files, self.tga_folderpath, title_path,
            self.all_settings['movie_length'],
            self.all_settings['ffmpeg_bin']
        )
        # Both temporary artifacts are queued for deletion.
        self.all_settings['delete_these'].append(tmp)
        self.all_settings['delete_these'].append(_)
    else:
        raise SystemExit("Unrecognised `movie_format`. Please choose from:\n\t* mp4")

    subprocess.call(Stitch_cmd, shell=True)  # Actually stitch the movie
def _custom_read(self, prompt: str):
    """Read one line using a caller-supplied, nick-formatted prompt."""
    label = Utils.format_nick(prompt, self._max_nick_len)
    return IO.readln(label + " :: ")
def _hide_read(self) -> str:
    """Read one line under a fixed "password" prompt label."""
    label = Utils.format_nick("password", self._max_nick_len)
    return IO.readln(label + " :: ")
def _read(self) -> str:
    """Read one line prompted with this participant's nick."""
    prefix = Utils.format_nick(self._nick, self._max_nick_len)
    return IO.readln(prefix + " :: ")
def _print(self, obj):
    """Write `obj` line by line under the bot's nick, skipping blanks."""
    for line in str(obj).split("\n"):
        if not line:
            continue
        IO.writeln(Utils.format_nick(self._bot_nick, self._max_nick_len)
                   + " :: " + line)
type=str, help='an integer for the accumulator') parser.add_argument("-c", action='store_true', default=False, help="Prints the functions called within functions") parser.add_argument( "-f", action='store_true', default=False, help= "Creates a tikz concept map of the subroutines and functions in the file") args = parser.parse_args() filepath = io.folder_correct(args.input) ltxt = io.open_read_close(filepath) subroutines, sub_beg_indices, _ = txt_lib.splicing(ltxt, "subroutine", "(", "end subroutine") if args.c: calls = { i: txt_lib.splicing(subroutines[i][1][:], "call", "(", ")", d_on=False)[0] for i in subroutines } functions, sub_beg_indices, _ = txt_lib.splicing( ltxt, "function", "(", "end function", start_d_index=len(subroutines) + 1) if args.c: for i in functions: calls[i] = txt_lib.splicing(functions[i][1][:],
# Don't handle the path variable as it hasn't got a default if not path_done and dfl.SequenceMatcher(None, setting, 'path').ratio() > 0.8: path_done = True all_settings['path'] = clean_settings_dict[setting] final_orig_settings_dict['path'] = orig_settings_dict[setting] continue actual_setting = get_act_setting(setting, all_setting_names) if type(orig_settings_dict[setting][0]) == str: final_orig_settings_dict[actual_setting] = ["'%s'"%orig_settings_dict[setting][0], orig_settings_dict[setting][1]] else: final_orig_settings_dict[actual_setting] = orig_settings_dict[setting] return final_orig_settings_dict settings_file = io.folder_correct(consts.settings_filepath) orig_settings_ltxt = io.open_read(settings_file).split('\n') settings_ltxt = remove_bad_lines(orig_settings_ltxt) clean_settings_dict = create_clean_settings(settings_ltxt) orig_settings_dict = create_orig_settings(orig_settings_ltxt) all_settings = grab_defaults(clean_settings_dict) final_orig_settings = find_final_orig_settings(orig_settings_dict, all_settings) io.write_cleaned_orig_settings(final_orig_settings, settings_file) if 'path' not in all_settings: raise SystemExit("""Sorry I can't find the path variable.... Are you sure you set this? Use path='...' in the settings.inp file. """) if not io.path_leads_somewhere(all_settings['path']): raise SystemExit("\nThe specified path doesn't lead anywhere:\n%s\n\n\t Where is my data?! "%all_settings['path'])
def _get_relevant_function(self, noun: str, adjectives: list,
                           functions: list, arguments: list) -> WFunction:
    """Pick the candidate function whose argument holes can all be
    filled at the lowest cost (mass).

    Each candidate's holes are matched first against `arguments`, then
    against Object literals made from the reversed adjectives.  A
    candidate only competes when every hole was filled
    (len(holes) == 0).  Returns a NullFunction when nothing fits.
    """
    relevant_function = NullFunction()
    for function in functions:
        holes = list(deepcopy(function.args))
        IO.debug(holes)
        IO.debug(arguments)
        IO.debug(adjectives)
        IO.debug(list(reversed([Object.valueOf(jj) for jj in adjectives])))
        relevant_arguments, mass = self._get_arguments(arguments, holes)
        # Single-hole type builders registered for the noun replace the
        # candidate when their signature matches the remaining hole.
        if noun in self._type_builders and len(holes) == 1 \
                and len(function.args) == 1:
            temp_function = self._type_builders[noun]
            if holes == list(temp_function.args):
                function = temp_function
        pair = self._get_arguments(
            list(reversed([Object.valueOf(jj) for jj in adjectives])),
            holes)
        relevant_arguments.extend(pair[0])
        # Discount of 2 per declared argument rewards adjective matches.
        mass += pair[1] - 2 * len(function.args)
        IO.debug("function = {}", function)
        IO.debug("relevant_arguments = {}", relevant_arguments)
        IO.debug("relevant_function = {}", relevant_function)
        IO.debug("mass = {}", mass if len(holes) == 0 else float("inf"))
        IO.debug("+++++++++++++++++++++++++++")
        if len(holes) == 0 and relevant_function.mass > mass:
            relevant_function = WFunction(mass, relevant_arguments, function)
    return relevant_function
msg = "Trajectory %i/%i %s Timestep %s" % (step + 1, num_steps, timeTaken, timeStep) traj_print = "\n"+txt_lib.align(msg, 69, "l") + "*" if self.all_settings['verbose_output']: print("*"*70) print(traj_print) io.times_print(self.all_settings['times'], step, 70, tmpTime) else: io.print_same_line(traj_print, sys, print) if self.all_settings['verbose_output']: print("*"*70, "\n") self.all_settings['times_taken'].append(time.time() - start_step_time) if __name__ == '__main__': all_settings['img_prefix'] = consts.Orig_img_prefix.replace("$fs_", "") tgaFiles = [io.file_handler(all_settings['img_prefix'], 'tga', all_settings)[2] for step in INIT.all_steps] all_settings['to_stitch'] = '\n'.join(map(str, tgaFiles)) errors = {} step_data = MainLoop(INIT.all_settings, INIT.all_steps, errors)
def test_file_does_exist(self):
    """An already-extended filename should pass through unchanged."""
    expanded = IO.expand_filename(self.p_file_extended)
    self.assertEqual(self.p_file_extended, expanded)
def _parse(self):
    """Open file and parse it.

    Reads only the steps in self.steps_to_read (skipping others line
    by line) and, optionally, only the atoms in self.atoms_to_read.
    Fills self.data (float data columns) and self.cols (metadata
    columns), each shaped (num_read_steps, num_atoms_read, ...).
    """
    cols = []
    data = []
    timings = {}  # NOTE(review): never populated in this method

    if self.atoms_to_read is not None:
        assert isinstance(self.atoms_to_read, set)
        # Boolean mask over atom indices: True -> keep this atom's line.
        ats_to_read = [
            i in self.atoms_to_read for i in range(self.num_atoms)
        ]

    with open(self.filepath, 'r') as f:
        # If we want to read all atoms
        for istep in range(self.num_steps):
            if istep not in self.steps_to_read:
                # Past the last wanted step: stop reading entirely.
                if istep > self._max_step_to_read:
                    break

                # Skip that whole step (with loop unrolling)
                for i in range(self.num_lines_in_step // 5):
                    next(f)
                    next(f)
                    next(f)
                    next(f)
                    next(f)
                for i in range(self.num_lines_in_step % 5):
                    next(f)
                continue

            # Skip the title lines
            for i in range(self.num_title_lines):
                next(f)

            # Create the filetxt string to be read in
            if self.atoms_to_read is None:
                filetxt = ''.join((next(f) for i in range(self.num_atoms)))
            else:
                # Keep wanted atoms, consume (discard) the rest.
                l = []
                for i in ats_to_read:
                    if i:
                        l.append(next(f))
                    else:
                        next(f)
                filetxt = ''.join(l)

            # Read 1 step: last num_data_cols columns are float data,
            # the leading columns are kept as objects (metadata).
            headers = list(map(str, range(self.num_cols)))
            dtypes = {i: object for i in headers}
            data_cols = headers[-self.num_data_cols:]
            col_cols = headers[:-self.num_data_cols]
            for i in data_cols:
                dtypes[i] = np.float32
            step = pd.read_csv(io.StringIO(filetxt), names=headers,
                               delim_whitespace=True, dtype=dtypes)

            # Split data and other cols
            if len(col_cols) == 1:
                cols.append(step.loc[:, col_cols[0]].to_numpy())
            else:
                cols.append(step.loc[:, col_cols].to_numpy())
            data.append(step.loc[:, data_cols].to_numpy())

    self.data = np.array(data)
    self.cols = np.array(cols)
#import sympy as sp # Use this to print things prettyily eventually (maybe to simplify certain bits too)
import sys
#import os


class Transform(object):
    """Parse an equation string into MATH_OBJECTS and print its LaTeX."""

    def __init__(self, equation_in):
        steps_taken = ""
        begin_eq = "\n"
        end_eq = "\n"
        math_objs = MO.MATH_OBJECTS(equation_in, steps_taken, begin_eq,
                                    end_eq)
        print(math_objs.latex())


# Read and clean the equation text to transform.
transform_path = io.folder_correct("./To_Transform")
transform_txt = io.open_read(transform_path)
transform_txt = txt_lib.clean_eq(transform_txt)
if not len(transform_txt):
    print("Can't find any text")
    sys.exit()

# Fixed: use a raw string — "\s" and "\d" are invalid escape sequences
# (SyntaxWarning since Python 3.12); the value itself is unchanged.
Transform(r"\sum{_k \delta_{jk}}")

#tex_folderpath = io.folder_correct('./steps')
#tex_filepath = tex_folderpath + "steps.tex"
#
#io.open_write(tex_filepath, MO.steps_taken)
#latex_cmd = "pdflatex --output-directory='%s'"%tex_folderpath
#os.system("%s %s"%(latex_cmd, tex_filepath))
def init_tcl_dict(all_settings):
    """Populate all_settings['tcl'] with every variable substituted into
    the vmd tcl template: colours, iso settings, box dims, image size,
    background/translation/rotation vectors and the tachyon path.
    """
    all_settings['tcl']['any_extra_raw_tcl_code'] = ""
    all_settings['tcl']['isoval'] = all_settings['isosurface_to_plot']
    all_settings['tcl']['Ccol'] = all_settings['carbon_color']
    all_settings['tcl']['Hcol'] = all_settings['hydrogen_color']
    all_settings['tcl']['zoom_val'] = all_settings['zoom_value']
    all_settings['tcl']['Necol'] = all_settings['neon_color']
    all_settings['tcl']['atom_style'] = all_settings['mol_style']
    all_settings['tcl']['mol_material'] = all_settings['mol_material']
    all_settings['tcl']['iso_material'] = all_settings['iso_material']
    all_settings['tcl']['time_step'] = '" "'
    all_settings['tcl']['iso_type'] = 0

    # Convert "(r, g, b)" tuples to tcl '"r g b"' strings.
    def _tcl_color(col):
        return str(col).replace("(", '"').replace(")", '"').replace(",", '')

    all_settings['tcl']['density_color'] = _tcl_color(
        all_settings['density_iso_col'])
    all_settings['tcl']['imag_pos_col'] = _tcl_color(
        all_settings['pos_imag_iso_col'])
    all_settings['tcl']['imag_neg_col'] = _tcl_color(
        all_settings['neg_imag_iso_col'])
    all_settings['tcl']['real_pos_col'] = _tcl_color(
        all_settings['pos_real_iso_col'])
    all_settings['tcl']['real_neg_col'] = _tcl_color(
        all_settings['neg_real_iso_col'])

    all_settings['tcl']['maxX'] = all_settings['xdims'][1]
    all_settings['tcl']['minX'] = all_settings['xdims'][0]
    all_settings['tcl']['maxY'] = all_settings['ydims'][1]
    all_settings['tcl']['minY'] = all_settings['ydims'][0]
    all_settings['tcl']['maxZ'] = all_settings['zdims'][1]
    all_settings['tcl']['minZ'] = all_settings['zdims'][0]
    all_settings['tcl']['tachyon_path'] = io.get_tachyon_path(
        all_settings['vmd_exe'])

    # Resolve 'auto' image size; calibration runs get a larger canvas.
    # (Fixed: isinstance instead of `type(imgSize) == str`.)
    imgSize = all_settings['img_size']
    if isinstance(imgSize, str):
        if imgSize.lower() == 'auto':
            if all_settings['calibrate']:
                imgSize = [1000, 1000]
            else:
                imgSize = [650, 650]
        else:
            raise SystemExit("Unknown setting %s for the `img_size'"
                             % imgSize)
    all_settings['tcl']['pic_sizex'] = imgSize[0]
    all_settings['tcl']['pic_sizey'] = imgSize[1]

    all_settings['tcl']['backgrnd_mols'] = ""
    # '#' comments the background-mols tcl lines out of the template.
    if all_settings['background_mols']:
        all_settings['tcl']['bckg_mols_on_off'] = ''
    else:
        all_settings['tcl']['bckg_mols_on_off'] = '#'
    if all_settings['show_box']:
        all_settings['tcl']['iso_type'] = 2

    all_settings['tcl'] = txt_lib.tcl_3D_input(
        all_settings['background_color'], ['R', 'G', 'B'],
        all_settings['tcl'], "backgrnd_")
    all_settings['tcl'] = txt_lib.tcl_3D_input(
        all_settings['translate_by'], ['x', 'y', 'z'],
        all_settings['tcl'], "trans_")
    all_settings['tcl'] = txt_lib.tcl_3D_input(
        [0, 0, 0], ['x', 'y', 'z'], all_settings['tcl'], "time_lab_")
    all_settings['tcl'] = txt_lib.tcl_3D_input(
        all_settings['rotation'], ['x', 'y', 'z'],
        all_settings['tcl'], "rot")

    all_settings['tcl']['pic_filename'] = {}  # filled in per-PID later
    all_settings['tcl']['vmd_log_file'] = all_settings['vmd_log_file']
def test_file_does_not_exist(self):
    """expand_filename must raise IOError (errno 11 or 12) for a
    missing file."""
    with self.assertRaises(IOError) as context:
        IO.expand_filename('xx')
    self.assertIn(context.exception.errno, (11, 12))