The input dataset is an .oedb file of the aggregated confs MD results with Traj OEMols + IntE + PBSA """

# job.uuid = "7cacc2af-cae7-4dc7-8956-fcf539861e3d"

# Input: OERecord dataset holding the aggregated per-conformer MD results.
ifs = DatasetReaderCube("ifs")
ifs.promote_parameter("data_in", promoted_name="in",
                      title="System Input OERecord",
                      description="OERecord file name")

# Processing stages: regroup records by conformer, merge the conformer
# trajectories into a single per-ligand trajectory, then concatenate the
# trajectory MMPBSA results.
confGather = ConformerGatheringData("Gathering Conformer Records")
ligTrajCube = ParallelConfTrajsToLigTraj("ConfTrajsToLigTraj")
ligMMPBSA = ParallelConcatenateTrajMMPBSACube('ConcatenateTrajMMPBSACube')

# Success and failure dataset writers.
ofs = DatasetWriterCube('ofs', title='OFS-Success')
ofs.promote_parameter("data_out", promoted_name="out",
                      title="System Output OERecord",
                      description="OERecord file name")
fail = DatasetWriterCube('fail', title='Failures')
fail.promote_parameter("data_out", promoted_name="fail")

job.add_cubes(ifs, confGather, ligTrajCube, ligMMPBSA, ofs, fail)

# Linear pipeline: reader -> gather -> ligand traj -> MMPBSA -> output;
# only the final cube's failure port is routed to the failure writer.
ifs.success.connect(confGather.intake)
confGather.success.connect(ligTrajCube.intake)
ligTrajCube.success.connect(ligMMPBSA.intake)
ligMMPBSA.success.connect(ofs.intake)
ligMMPBSA.failure.connect(fail.intake)
# Protein Reading cube. The protein prefix parameter is used to select a name for the # output system files iprot = DatasetReaderCube("ProteinReader", title="Protein Reader") iprot.promote_parameter("data_in", promoted_name="protein", title='Protein Input Dataset', description="Protein Dataset") # Complex cube used to assemble the ligands and the solvated protein complx = ComplexPrepCube("Complex", title="Complex Preparation") complx.set_parameters(lig_res_name='LIG') # Protein Setting protset = MDComponentCube("ProteinSetting", title="Protein Setting") ofs = DatasetWriterCube('ofs', title='MD Out') ofs.promote_parameter("data_out", promoted_name="out", title="MD Out", description="MD Dataset out") fail = DatasetWriterCube('fail', title='Failures') fail.promote_parameter("data_out", promoted_name="fail", title="Failures", description="MD Dataset Failures out") job.add_cubes(iligs, ligset, ligid, iprot, protset, chargelig, complx, ofs, fail) iligs.success.connect(ligset.intake)
-------- out (.oedb file): file of the Analysis results for all ligands. """ job.uuid = "2717cf39-5bdd-4a1e-880e-5208bb232959" job.classification = [['Analysis']] job.tags = [tag for lists in job.classification for tag in lists] ifs = DatasetReaderCube("ifs") ifs.promote_parameter("data_in", promoted_name="in", title="System Input OERecord", description="OERecord file name") ofs = DatasetWriterCube('ofs', title='MD Out') ofs.promote_parameter("data_out", promoted_name="out") fail = DatasetWriterCube('fail', title='Failures') fail.promote_parameter("data_out", promoted_name="fail") IntECube = ParallelTrajInteractionEnergyCube("TrajInteractionEnergyCube") PBSACube = ParallelTrajPBSACube("TrajPBSACube") report = MDFloeReportCube("report", title="Floe Report") job.add_cubes(ifs, IntECube, PBSACube, ofs, fail) ifs.success.connect(IntECube.intake) IntECube.success.connect(PBSACube.intake) IntECube.failure.connect(fail.intake)
job.classification = [['MD Data']]
job.uuid = "6665ca20-6014-4f3b-8d02-4b5d15b75ee3"
job.tags = [tag for lists in job.classification for tag in lists]

# Reads the dataset written by the Short Trajectory MD with Analysis floe.
source = DatasetReaderCube("SystemReader", title="System Reader")
source.promote_parameter(
    "data_in",
    promoted_name="system",
    title='STMDA Input File',
    description="The Dataset produced by the Short Trajectory MD with Analysis floe")

# Collects any records that could not be processed.
failures = DatasetWriterCube('fail', title='Failures')
failures.promote_parameter("data_out", promoted_name="fail",
                           description="Fail Data Set")

# Pulls the raw MD data out of each record and packs it into an archive.
extractor = ExtractMDDataCube("MDData", title="Extract MD Data")
extractor.promote_parameter('out_file_name',
                            promoted_name='out_file_name',
                            description="Output File name",
                            default="md_data.tar.gz")

job.add_cubes(source, extractor, failures)

# The extractor writes its archive itself, so only the reader feed and the
# failure route need wiring.
source.success.connect(extractor.intake)
extractor.failure.connect(failures.intake)

if __name__ == "__main__":
    job.run()
from orionplatform.cubes import DatasetReaderCube, DatasetWriterCube from cubes.perses import PersesCube # Declare and document floe job = WorkFloe("Peres Floe", title="Perses Floe") job.description = ("Run a star-map relative free energy calculation") job.classification = [['Molecular Dynamics']] job.uuid = "155b90cf-90fd-4068-8558-3eac7c01c615" job.tags = [tag for lists in job.classification for tag in lists] # Declare Cubes protein_input_cube = DatasetReaderCube("protein_input_cube") reference_ligand_input_cube = DatasetReaderCube("reference_ligand_input_cube") target_ligands_input_cube = DatasetReaderCube("target_ligands_input_cube") perses_cube = PersesCube("perses_cube") success_output_cube = DatasetWriterCube("success_output_cube", title='success') failure_output_cube = DatasetWriterCube("failure_output_cube", title='failure') # Add cubes to floe job.add_cube(protein_input_cube) job.add_cube(reference_ligand_input_cube) job.add_cube(target_ligands_input_cube) job.add_cube(perses_cube) job.add_cube(success_output_cube) job.add_cube(failure_output_cube) # Promote parameters protein_input_cube.promote_parameter("data_in", promoted_name="protein", title="Protein") reference_ligand_input_cube.promote_parameter("data_in",
# or its use.

from floe.api import WorkFloe
from orionplatform.cubes import DatasetReaderCube, DatasetWriterCube
from am1bcc_charge.am1bcc_charge import AM1BCCCharge

# Declare and document floe
job = WorkFloe("am1bcc_charge", title="am1bcc charge")
job.description = ("AM1BCC Charge")
job.classification = [["Charge"]]
job.tags = ["Charge"]

# Declare Cubes
input_cube = DatasetReaderCube("input_cube")
# FIX: the charge cube was instantiated via the undefined name 'MyCube';
# the class imported above is AM1BCCCharge.
charge_cube = AM1BCCCharge("charge_cube")
output_cube = DatasetWriterCube("output_cube")

# Add cubes to floe
job.add_cube(input_cube)
job.add_cube(charge_cube)
job.add_cube(output_cube)

# Promote parameters
input_cube.promote_parameter("data_in", promoted_name="in",
                             title="Input data set of records")
# FIX: the method is promote_parameter (not 'promoted_parameter'), and the
# promoted name 'in' collided with the input cube's promoted parameter.
charge_cube.promote_parameter('max_confs', promoted_name='max_confs',
                              title='Maximum number of conformers.')
# NOTE(review): cube connections and the output promotion are not in the
# visible span -- presumably they follow; verify the downstream wiring.