def _get_fragments(project: Project) -> Dict[Fragment, List]:
    """Group a project's crystals by the fragment they were soaked with.

    Args:
        project: project whose crystals are enumerated.

    Returns:
        Mapping of Fragment -> list of crystals carrying that fragment.
        Apo crystals are skipped, as they have no fragment.
    """
    fragments: Dict = {}
    for crystal in project.get_crystals():
        if crystal.is_apo():
            # apo crystals have no fragment to group under
            continue
        frag = get_crystals_fragment(crystal)
        # setdefault replaces the original get/append/re-assign round-trip
        fragments.setdefault(frag, []).append(crystal)
    return fragments
def compare_poses(request, result_id):
    """Render the dual-density view comparing rhofit and ligandfit poses for a refine result."""
    project = current_project(request)
    result = get_refine_result_by_id(project, result_id)

    context = {
        "result": result,
        "rhofit_result": result.get_ligfit_result("rhofit"),
        "ligandfit_result": result.get_ligfit_result("ligandfit"),
        "fragment": get_crystals_fragment(result.dataset.crystal),
    }
    return render(request, "dual_density.html", context)
def show(request, result_id):
    """Render the density view for a refine result, including prev/next navigation."""
    project = current_project(request)
    result = get_refine_result_by_id(project, result_id)

    context = {
        "result": result,
        "rhofit_result": result.get_ligfit_result("rhofit"),
        "ligandfit_result": result.get_ligfit_result("ligandfit"),
        "fragment": get_crystals_fragment(result.dataset.crystal),
        "previous_result": result.previous(),
        "next_result": result.next(),
    }
    return render(request, "density.html", context)
def pandda_analyse(request, method: str, dataset_name: str):
    """Render the PanDDA analyse density view for one dataset of a method."""
    project = current_project(request)
    analysis_dir = Path(project.pandda_method_dir(method), "pandda", "analyses")

    # load analyse events
    events = PanddaAnalyseEvents(analysis_dir / "pandda_analyse_events.csv")
    event = events.get_first_event(dataset_name)

    # load analyse sites
    sites = PanddaAnalyseSites(analysis_dir / "pandda_analyse_sites.csv")

    # load pandda dataset description
    all_datasets = PanddaAllDatasetInfo(analysis_dir / "all_datasets_info.csv")
    dataset = PanddaDataset(all_datasets.get_dataset(dataset_name))

    # fetch fragment object from the database
    db_dataset = get_dataset_by_name(project, dataset_name)
    fragment = get_crystals_fragment(db_dataset.crystal)

    context = {
        "event": PanddaEvent(event),
        "method": method,
        "dataset": dataset,
        "fragment": fragment,
        # no event found for this dataset means it was used as ground model
        "ground_model": event is None,
        "centroids": list(sites.get_native_centroids()),
        "summary": get_summary_url(project, method, dataset_name),
    }
    return render(request, "pandda_densityA.html", context)
def _write_prepare_script(
    project: Project,
    rn,
    method,
    dataset,
    pdb,
    mtz,
    resHigh,
    free_r_flag,
    native_f,
    sigma_fp,
    cif_method,
):
    """Build and save an HPC batch script that prepares one dataset for PanDDA.

    The script fetches the refined PDB/MTZ into a scratch dir, optionally
    generates a ligand CIF (elbow or grade), re-cuts resolution, regenerates
    the free-R set, computes map coefficients with phenix.maps, and uploads
    the result to the method's output directory.

    Returns:
        The saved batch file object.
    """
    # timestamp makes the log file names unique per run
    epoch = round(time.time())
    output_dir = Path(project.pandda_method_dir(method), dataset.name)
    hpc = SITE.get_hpc_runner()
    batch = hpc.new_batch_file(
        f"PnD{rn}",
        project_script(project, f"pandda_prepare_{method}_{dataset.name}.sh"),
        project_log_path(project, f"{dataset.name}_PanDDA_{epoch}_%j_out.txt"),
        project_log_path(project, f"{dataset.name}_PanDDA_{epoch}_%j_err.txt"),
        cpus=1,
    )
    batch.set_options(time=Duration(minutes=15), memory=DataSize(gigabyte=5))
    batch.add_command(crypt_shell.crypt_cmd(project))
    batch.assign_variable("DEST_DIR", output_dir)
    # all work happens in a throw-away scratch dir, uploaded to DEST_DIR at the end
    batch.assign_variable("WORK_DIR", "`mktemp -d`")
    batch.add_commands(
        "cd $WORK_DIR",
        crypt_shell.fetch_file(project, pdb, "final.pdb"),
        crypt_shell.fetch_file(project, mtz, "final.mtz"),
    )
    batch.purge_modules()
    batch.load_modules(
        ["gopresto", versions.PHENIX_MOD, versions.CCP4_MOD, versions.BUSTER_MOD]
    )
    if not dataset.crystal.is_apo():
        fragment = get_crystals_fragment(dataset.crystal)
        # non-apo crystal should have a fragment
        assert fragment
        if cif_method == "elbow":
            cif_cmd = f"phenix.elbow --smiles='{fragment.smiles}' --output=$WORK_DIR/{fragment.code} --opt\n"
        else:
            # only 'elbow' and 'grade' are supported CIF generators here
            assert cif_method == "grade"
            cif_cmd = (
                f"grade '{fragment.smiles}' -ocif $WORK_DIR/{fragment.code}.cif "
                f"-opdb $WORK_DIR/{fragment.code}.pdb -nomogul\n"
            )
        batch.add_command(cif_cmd)
    batch.add_commands(
        # cut the data at the dataset's high-resolution limit
        f'printf "monitor BRIEF\\n labin file 1 -\\n ALL\\n resolution file 1 999.0 {resHigh}\\n" | \\\n'
        " cad hklin1 $WORK_DIR/final.mtz hklout $WORK_DIR/final.mtz",
        "uniqueify -f FreeR_flag $WORK_DIR/final.mtz $WORK_DIR/final.mtz",
        # regenerate the free-R flags with the requested free set fraction
        f'printf "COMPLETE FREE={free_r_flag} \\nEND\\n" | \\\n'
        " freerflag hklin $WORK_DIR/final.mtz hklout $WORK_DIR/final_rfill.mtz",
        f"phenix.maps final_rfill.mtz final.pdb maps.input.reflection_data.labels='{native_f},{sigma_fp}'",
        # keep the original MTZ and expose the map coefficients as final.mtz
        "mv final.mtz final_original.mtz",
        "mv final_map_coeffs.mtz final.mtz",
        "rm -rf $DEST_DIR",
        crypt_shell.upload_dir(project, "$WORK_DIR", "$DEST_DIR"),
        "rm -rf $WORK_DIR",
    )
    batch.save()
    return batch
def auto_ligand_fit(project, useLigFit, useRhoFit, filters, cifMethod,
                    custom_ligfit, custom_rhofit):
    """Submit HPC jobs that fit each dataset's fragment into its refined density.

    For every matching refine result (excluding apo crystals) a batch job is
    created that generates the ligand CIF/PDB (elbow, acedrg or grade) and then
    runs rhofit and/or phenix.ligandfit, followed by chained database-update jobs.
    """
    # Modules for HPC env
    softwares = ["gopresto", versions.BUSTER_MOD, versions.PHENIX_MOD]
    jobs = JobsSet("Ligand Fit")
    hpc = SITE.get_hpc_runner()
    refine_results = _get_refine_results(project, filters, useLigFit, useRhoFit)
    for num, result in enumerate(refine_results):
        dataset = result.dataset
        if dataset.crystal.is_apo():
            # don't try to fit ligand to an apo crystal
            continue
        fragment = get_crystals_fragment(dataset.crystal)
        result_dir = project.get_refine_result_dir(result)
        pdb = Path(result_dir, "final.pdb")
        clear_tmp_cmd = ""
        # common output path stem for the generated ligand CIF/PDB files
        cif_out = Path(result_dir, fragment.code)
        if cifMethod == "elbow":
            cif_cmd = f"phenix.elbow --smiles='{fragment.smiles}' --output={cif_out}\n"
        elif cifMethod == "acedrg":
            cif_cmd = f"acedrg -i '{fragment.smiles}' -o {cif_out}\n"
            # acedrg leaves a *_TMP scratch dir behind; remove it after the run
            clear_tmp_cmd = f"rm -rf {cif_out}_TMP/\n"
        elif cifMethod == "grade":
            cif_cmd = (
                f"rm -f {cif_out}.cif {cif_out}.pdb\n"
                f"grade '{fragment.smiles}' -ocif {cif_out}.cif -opdb {cif_out}.pdb -nomogul\n"
            )
        else:
            # unknown method: skip CIF generation, assume files already exist
            cif_cmd = ""
        rhofit_cmd = ""
        ligfit_cmd = ""
        ligCIF = f"{cif_out}.cif"
        ligPDB = f"{cif_out}.pdb"
        rhofit_outdir = Path(result_dir, "rhofit")
        ligfit_outdir = Path(result_dir, "ligfit")
        mtz_input = Path(result_dir, "final.mtz")
        if useRhoFit:
            # rhofit refuses to overwrite; clear any previous output first
            if rhofit_outdir.exists():
                rhofit_cmd += f"rm -rf {rhofit_outdir}\n"
            rhofit_cmd += f"rhofit -l {ligCIF} -m {mtz_input} -p {pdb} -d {rhofit_outdir} {custom_rhofit}\n"
        if useLigFit:
            if ligfit_outdir.exists():
                ligfit_cmd += f"rm -rf {ligfit_outdir}\n"
            ligfit_cmd += f"mkdir -p {ligfit_outdir}\n"
            # phenix.ligandfit writes into the current working directory
            ligfit_cmd += f"cd {ligfit_outdir} \n"
            ligfit_cmd += (
                f"phenix.ligandfit data={mtz_input} model={pdb} ligand={ligPDB} "
                f"fill=True clean_up=True {custom_ligfit}\n")
        batch = hpc.new_batch_file(
            "autoLigfit",
            project_script(project, f"autoligand_{dataset.name}_{num}.sh"),
            project_log_path(project, "auto_ligfit_%j_out.txt"),
            project_log_path(project, "auto_ligfit_%j_err.txt"),
            cpus=1,
        )
        batch.set_options(time=Duration(hours=1))
        batch.purge_modules()
        batch.load_modules(softwares)
        batch.add_commands(
            cif_cmd,
            rhofit_cmd,
            ligfit_cmd,
        )
        batch.add_commands(clear_tmp_cmd)
        batch.save()
        jobs.add_job(batch)
        # NOTE: all the update commands needs to be chained to run after each other,
        # due to limitations (bugs!) in jobsd handling of 'run_after' dependencies.
        # Currently it does not work to specify that multiple jobs should be run after
        # a job is finished.
        #
        if useRhoFit:
            batch = add_update_job(jobs, hpc, project, "rhofit", dataset, batch)
        if useLigFit:
            add_update_job(jobs, hpc, project, "ligandfit", dataset, batch)
    jobs.submit()
def pandda_consensus(request, method, dataset_name, site_idx, event_idx):
    """Render the PanDDA consensus density view for one inspect event.

    Looks up the row matching (dataset, event, site) in
    pandda_inspect_events.csv and maps its fields into template context,
    including the radio-button states for the inspect annotations.
    """
    project = current_project(request)
    # fetch fragment object from the database
    db_dataset = get_dataset_by_name(project, dataset_name)
    fragment = get_crystals_fragment(db_dataset.crystal)
    centroids = find_site_centroids(project, method)
    pandda_res_dir = Path(project.pandda_method_dir(method), "pandda")
    events_csv = Path(pandda_res_dir, "analyses", "pandda_inspect_events.csv")
    with events_csv.open("r") as inp:
        inspect_events = inp.readlines()
    # naive CSV scan: find the row for this dataset/event/site combination
    # NOTE(review): if no row matches, 'k' below is unbound -> NameError; confirm
    # callers only reach this view for events known to be in the CSV
    for i in inspect_events:
        if dataset_name in i:
            line = i.split(",")
            if (dataset_name == line[0] and event_idx == line[1]
                    and site_idx == line[11]):
                k = line
    headers = inspect_events[0].split(",")
    # positional columns of pandda_inspect_events.csv — verify against the header row
    bdc = k[2]
    site_idx = k[11]
    center = "[" + k[12] + "," + k[13] + "," + k[14] + "]"
    resolution = k[18]
    rfree = k[20]
    rwork = k[21]
    spg = k[35]
    # annotation columns are located by header name rather than position
    analysed = k[headers.index("analysed")]
    interesting = k[headers.index("Interesting")]
    ligplaced = k[headers.index("Ligand Placed")]
    ligconfid = k[headers.index("Ligand Confidence")]
    comment = k[headers.index("Comment")]
    # translate CSV booleans/levels into the radio-button element ids used by the template
    if "true" in ligplaced.lower():
        ligplaced = "lig_radio"
    else:
        ligplaced = "nolig_radio"
    if "true" in interesting.lower():
        interesting = "interesting_radio"
    else:
        interesting = "notinteresting_radio"
    if "high" in ligconfid.lower():
        ligconfid = "high_conf_radio"
    elif "medium" in ligconfid.lower():
        ligconfid = "medium_conf_radio"
    else:
        ligconfid = "low_conf_radio"
    prev_event, next_event = Inspects.find(
        project.pandda_dir, Inspect(dataset_name, method, site_idx, event_idx))
    return render(
        request,
        "pandda_densityC.html",
        {
            "protein": project.protein,
            "siten": site_idx,
            "event": event_idx,
            "dataset": dataset_name,
            "method": method,
            "rwork": rwork,
            "rfree": rfree,
            "resolution": resolution,
            "spg": spg,
            "fragment": fragment,
            "center": center,
            "centroids": centroids,
            "analysed": analysed,
            "interesting": interesting,
            "ligplaced": ligplaced,
            "ligconfid": ligconfid,
            "comment": comment,
            "bdc": bdc,
            "summary": get_summary_url(project, method, dataset_name),
            "prev": prev_event,
            "next": next_event,
            "panddatype": "consensus",
        },
    )
def pandda(request):
    """Render the PanDDA inspection density view for a modelled dataset.

    The 'structure' GET parameter is a ';'-separated string of either
    5 fields (method;dataset;event;site;nav) or 3 fields (method;dataset;nav),
    where nav is a prev/next navigation hint. Navigation moves through the
    datasets that have a modelled structure, wrapping around at the ends.
    """
    project = current_project(request)
    panddaInput = str(request.GET.get("structure"))
    if len(panddaInput.split(";")) == 5:
        method, dataset, event, site, nav = panddaInput.split(";")
    if len(panddaInput.split(";")) == 3:
        method, dataset, nav = panddaInput.split(";")
    datasets_dir = project.pandda_processed_datasets_dir(method)
    # dataset names extracted from the paths of all modelled structures
    mdl = [
        x.split("/")[-3]
        for x in sorted(
            glob(f"{datasets_dir}/*/modelled_structures/*model.pdb"))
    ]
    if len(mdl) != 0:
        # position of the current dataset in the modelled list
        indices = [i for i, s in enumerate(mdl) if dataset in s][0]
        if "prev" in nav:
            try:
                dataset = mdl[indices - 1]
            except IndexError:
                # wrap around to the last modelled dataset
                dataset = mdl[-1]
        if "next" in nav:
            try:
                dataset = mdl[indices + 1]
            except IndexError:
                # wrap around to the first modelled dataset
                dataset = mdl[0]
        centroids = find_site_centroids(project, method)
        pandda_res_dir = Path(project.pandda_method_dir(method), "pandda")
        with open(
                path.join(pandda_res_dir, "analyses",
                          "pandda_inspect_events.csv"), "r") as inp:
            inspect_events = inp.readlines()
        # naive CSV scan: first row mentioning the dataset name wins
        # NOTE(review): if no row matches, 'k' is unbound -> NameError; confirm
        # every modelled dataset has a row in the inspect events CSV
        for i in inspect_events:
            if dataset in i:
                k = i.split(",")
                break
        headers = inspect_events[0].split(",")
        # positional columns of pandda_inspect_events.csv — verify against the header row
        bdc = k[2]
        site_idx = k[11]
        center = "[" + k[12] + "," + k[13] + "," + k[14] + "]"
        resolution = k[18]
        rfree = k[20]
        rwork = k[21]
        spg = k[35]
        # annotation columns are located by header name rather than position
        analysed = k[headers.index("analysed")]
        interesting = k[headers.index("Interesting")]
        ligplaced = k[headers.index("Ligand Placed")]
        ligconfid = k[headers.index("Ligand Confidence")]
        comment = k[headers.index("Comment")]
        if len(panddaInput.split(";")) == 3:
            # short input form carries no event id; take it from the CSV row
            event = k[1]
        # translate CSV booleans/levels into the radio-button element ids used by the template
        if "true" in ligplaced.lower():
            ligplaced = "lig_radio"
        else:
            ligplaced = "nolig_radio"
        if "true" in interesting.lower():
            interesting = "interesting_radio"
        else:
            interesting = "notinteresting_radio"
        if "high" in ligconfid.lower():
            ligconfid = "high_conf_radio"
        elif "medium" in ligconfid.lower():
            ligconfid = "medium_conf_radio"
        else:
            ligconfid = "low_conf_radio"
        # fetch fragment object from the database
        db_dataset = get_dataset_by_name(project, dataset)
        fragment = get_crystals_fragment(db_dataset.crystal)
        return render(
            request,
            "pandda_density.html",
            {
                "method": method,
                "siten": site_idx,
                "event": event,
                "centroids": centroids,
                "rwork": rwork,
                "rfree": rfree,
                "resolution": resolution,
                "spg": spg,
                "dataset": dataset,
                "fragment": fragment,
                "center": center,
                "analysed": analysed,
                "interesting": interesting,
                "ligplaced": ligplaced,
                "ligconfid": ligconfid,
                "comment": comment,
                "bdc": bdc,
                "summary": get_summary_url(project, method, dataset),
                "panddatype": "inspection",
            },
        )
    else:
        # no modelled structures exist for this method at all
        return render(
            request, "error.html", {
                "issue":
                "No modelled structure for " + method + "_" + dataset +
                " was found."
            })
def fragment(self) -> Optional[Fragment]:
    """Look up the fragment associated with this entry's crystal (may be None)."""
    crystal = self.orig.crystal
    return get_crystals_fragment(crystal)