def main(col_energy, folder, out_report_name, format_out, nProcessors, output_folder, new_report, reportName, trajs_to_select):
    """
    Standardize the energy values of the reports

    :param col_energy: Column corresponding to the energy in the reports
    :type col_energy: int
    :param folder: Path to the simulation
    :type folder: str
    :param out_report_name: Name of the output file
    :type out_report_name: str
    :param format_out: String with the format of the output
    :type format_out: str
    :param nProcessors: Number of processors to use
    :type nProcessors: int
    :param output_folder: Path where to store the new reports
    :type output_folder: str
    :param new_report: Whether to create new reports
    :type new_report: bool
    :param reportName: Name of the report files, if not using the default
    :type reportName: str
    :param trajs_to_select: Numbers of the reports to read, if not all of them should be selected
    :type trajs_to_select: set
    """
    # Constants
    if output_folder is not None:
        out_report_name = os.path.join(output_folder, out_report_name)
    outputFilename = "_".join([out_report_name, "%d"])
    trajName = "*traj*"
    if reportName is None:
        reportName = "report_%d"
    else:
        reportName += "_%d"
    if nProcessors is None:
        nProcessors = utilities.getCpuCount()
    nProcessors = max(1, nProcessors)
    print("Standardizing energy with %d processors" % nProcessors)
    epochs = utilities.get_epoch_folders(folder)
    files = []
    if not epochs:
        # path does not contain an adaptive simulation, we'll try to retrieve
        # trajectories from the specified path
        files = analysis_utils.process_folder(None, folder, trajName, reportName,
                                              os.path.join(folder, outputFilename), None, trajs_to_select)
    for epoch in epochs:
        print("Epoch", epoch)
        files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName,
                                                   os.path.join(folder, epoch, outputFilename), None, trajs_to_select))
    pool = mp.Pool(nProcessors)
    results = [pool.apply_async(process_file, args=(info[1], info[4], format_out, new_report, info[3], col_energy))
               for info in files]
    pool.close()
    pool.join()
    for res in results:
        res.get()
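# Minimal, self-contained sketch (not part of the original module) of the fan-out
# pattern used above: submit one task per file with apply_async, close and join the
# pool, then call get() on every result so that any worker exception is re-raised in
# the parent process. The helper _square is a made-up stand-in for process_file.
import multiprocessing as mp


def _square(x):
    # stand-in worker; an exception raised here would surface at res.get()
    return x * x


if __name__ == "__main__":
    pool = mp.Pool(2)
    results = [pool.apply_async(_square, args=(i,)) for i in range(4)]
    pool.close()
    pool.join()
    print([res.get() for res in results])  # [0, 1, 4, 9]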
def main(residues, folder, top, out_report_name, format_out, nProcessors, output_folder, new_report, trajs_to_select):
    """
    Calculate the distances between pairs of atoms

    :param residues: Pairs of atoms to calculate distances
    :type residues: list
    :param folder: Path to the simulation
    :type folder: str
    :param top: Path to the topology
    :type top: str
    :param out_report_name: Name of the output file
    :type out_report_name: str
    :param format_out: String with the format of the output
    :type format_out: str
    :param nProcessors: Number of processors to use
    :type nProcessors: int
    :param output_folder: Path where to store the new reports
    :type output_folder: str
    :param new_report: Whether to create new reports
    :type new_report: bool
    :param trajs_to_select: Numbers of the reports to read, if not all of them should be selected
    :type trajs_to_select: set
    """
    # Constants
    if output_folder is not None:
        out_report_name = os.path.join(output_folder, out_report_name)
    outputFilename = "_".join([out_report_name, "%d"])
    trajName = "*traj*"
    reportName = "*report*_%d"
    distances_label = "\t".join(residues)
    residues = parse_selection(residues)
    if nProcessors is None:
        nProcessors = utilities.getCpuCount()
    nProcessors = max(1, nProcessors)
    print("Calculating distances with %d processors" % nProcessors)
    epochs = utilities.get_epoch_folders(folder)
    if top is not None:
        top_obj = utilities.getTopologyObject(top)
    else:
        top_obj = None
    files = []
    if not epochs:
        # path does not contain an adaptive simulation, we'll try to retrieve
        # trajectories from the specified path
        files = analysis_utils.process_folder(None, folder, trajName, reportName,
                                              os.path.join(folder, outputFilename), top_obj, trajs_to_select)
    for epoch in epochs:
        print("Epoch", epoch)
        files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName,
                                                   os.path.join(folder, epoch, outputFilename), top_obj, trajs_to_select))
    print("Starting to process files!")
    pool = mp.Pool(nProcessors)
    results = [pool.apply_async(process_file, args=(info[0], info[2], residues, info[1], info[4], format_out,
                                                    new_report, info[3], distances_label))
               for info in files]
    pool.close()
    pool.join()
    for res in results:
        res.get()
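# Illustrative usage sketch (hypothetical, not part of the original module): one way
# main() above could be called directly. The residue-pair selection strings, folder
# name and format string are made-up placeholders; the exact selection syntax expected
# by parse_selection() is not shown here, so treat them only as an assumption.
if __name__ == "__main__":
    main(residues=["A:145:CA", "L:1:C1"],          # hypothetical atom-pair selections
         folder="output_simulation",                # hypothetical simulation path
         top=None,
         out_report_name="report_dist",
         format_out="%.4f",
         nProcessors=2,
         output_folder=None,
         new_report=True,
         trajs_to_select=None)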
def main(controlFile, trajName, reportName, folder, top, outputFilename, nProcessors, output_folder, format_str, new_report, trajs_to_select):
    """
    Calculate the corrected rmsd values of the conformations, taking into account molecule symmetries

    :param controlFile: Control file
    :type controlFile: str
    :param trajName: Name of the trajectory files, if not using the default
    :type trajName: str
    :param reportName: Name of the report files, if not using the default
    :type reportName: str
    :param folder: Path to the simulation
    :type folder: str
    :param top: Path to the topology
    :type top: str
    :param outputFilename: Name of the output file
    :type outputFilename: str
    :param nProcessors: Number of processors to use
    :type nProcessors: int
    :param output_folder: Path where to store the new reports
    :type output_folder: str
    :param format_str: String with the format of the report
    :type format_str: str
    :param new_report: Whether to write rmsd to a new report file
    :type new_report: bool
    :param trajs_to_select: Numbers of the reports to read, if not all of them should be selected
    :type trajs_to_select: set
    """
    if trajName is None:
        trajName = "*traj*"
    else:
        trajName += "_*"
    if reportName is None:
        reportName = "report_%d"
    else:
        reportName += "_%d"
    if output_folder is not None:
        outputFilename = os.path.join(output_folder, outputFilename)
    outputFilename += "_%d"
    if nProcessors is None:
        nProcessors = utilities.getCpuCount()
    nProcessors = max(1, nProcessors)
    print("Calculating RMSDs with %d processors" % nProcessors)
    epochs = utilities.get_epoch_folders(folder)
    if top is not None:
        top_obj = utilities.getTopologyObject(top)
    else:
        top_obj = None
    resname, nativeFilename, symmetries, rmsdColInReport = readControlFile(controlFile)
    nativePDB = atomset.PDB()
    nativePDB.initialise(nativeFilename, resname=resname)
    files = []
    if not epochs:
        # path does not contain an adaptive simulation, we'll try to retrieve
        # trajectories from the specified path
        files = analysis_utils.process_folder(None, folder, trajName, reportName,
                                              os.path.join(folder, outputFilename), top_obj, trajs_to_select)
    for epoch in epochs:
        print("Epoch", epoch)
        files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName,
                                                   os.path.join(folder, epoch, outputFilename), top_obj, trajs_to_select))
    pool = mp.Pool(nProcessors)
    results = [pool.apply_async(calculate_rmsd_traj, args=(nativePDB, resname, symmetries, rmsdColInReport,
                                                           info[0], info[1], info[2], info[3], info[4],
                                                           format_str, new_report))
               for info in files]
    pool.close()
    pool.join()
    for res in results:
        res.get()
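# Small self-contained illustration (not from the original module) of the "%d"
# filename templates built above: the epoch folder, template name and trajectory
# number are made-up example values, and the downstream workers are assumed to fill
# the placeholder with the trajectory index in this way.
import os

epoch, outputFilename, trajectory_number = "0", "fixedReport_%d", 3
report_template = os.path.join("simulation_folder", epoch, outputFilename)
print(report_template % trajectory_number)  # simulation_folder/0/fixedReport_3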
def main(resname, folder, top, out_report_name, format_out, nProcessors, output_folder, new_report):
    """
    Calculate the relative SASA values of the ligand

    :param resname: Ligand resname
    :type resname: str
    :param folder: Path to the simulation
    :type folder: str
    :param top: Path to the topology
    :type top: str
    :param out_report_name: Name of the output file
    :type out_report_name: str
    :param format_out: String with the format of the output
    :type format_out: str
    :param nProcessors: Number of processors to use
    :type nProcessors: int
    :param output_folder: Path where to store the new reports
    :type output_folder: str
    :param new_report: Whether to create new reports
    :type new_report: bool
    """
    # Constants
    if output_folder is not None:
        out_report_name = os.path.join(output_folder, out_report_name)
    outputFilename = "_".join([out_report_name, "%d"])
    trajName = "*traj*"
    reportName = "*report*_%d"
    if nProcessors is None:
        nProcessors = utilities.getCpuCount()
    nProcessors = max(1, nProcessors)
    print("Calculating SASA with %d processors" % nProcessors)
    pool = mp.Pool(nProcessors)
    epochs = utilities.get_epoch_folders(folder)
    if top is not None:
        top_obj = utilities.getTopologyObject(top)
    else:
        top_obj = None
    files = []
    if not epochs:
        # path does not contain an adaptive simulation, we'll try to retrieve
        # trajectories from the specified path
        files = analysis_utils.process_folder(None, folder, trajName, reportName,
                                              os.path.join(folder, outputFilename), top_obj)
    for epoch in epochs:
        print("Epoch", epoch)
        files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName,
                                                   os.path.join(folder, epoch, outputFilename), top_obj))
    results = []
    for info in files:
        results.append(pool.apply_async(process_file, args=(info[0], info[2], resname, info[1], info[4],
                                                            format_out, new_report, info[3])))
    for res in results:
        res.get()
    pool.close()
    pool.terminate()
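# Self-contained sketch (illustrative only) of how wildcard patterns such as "*traj*"
# are typically expanded. The actual matching happens inside
# analysis_utils.process_folder, which is not shown here, so glob-style expansion is
# only an assumption about its behaviour; the folder name is a made-up example.
import glob
import os

folder = "output_simulation/0"  # hypothetical epoch folder
trajectory_files = glob.glob(os.path.join(folder, "*traj*"))
print(sorted(trajectory_files))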