def delete_report(request, method, date):
    """
    Delete the PanDDA analysis directory for the given method and date,
    then redirect back to the analyse page.
    """
    proj = current_project(request)
    report_dir = get_analysis_dir(proj, method, date)

    # removes the whole analysis directory tree on disk
    shutil.rmtree(report_dir)

    return HttpResponseRedirect(reverse("pandda_analyse"))
def refined_map(request, result_id, type):
    """
    Serve the 'final' density file of the requested type for a refine
    result as a file download, named after the result.
    """
    proj = current_project(request)
    res = get_refine_result_by_id(proj, result_id)

    density_file = Path(
        proj.get_refine_result_dir(res), _density_filename("final", type)
    )

    return download_http_response(density_file, f"{res.name}{density_file.suffix}")
def jobhistory(request): project = current_project(request) # new logs are saved with a pattern based name # Dataset_software_epochTime_jobID_err.txt logHistory = list() logs_dir = project.logs_dir relative_log_dir = Path(logs_dir).relative_to(project.project_dir) for file in logs_dir.glob("*out.txt"): fpath = str(file) if "_" in fpath: try: epoch = int(fpath.split("/")[-1].split("_")[-3]) except (IndexError, ValueError): # handle old-style log files, # where epoch was not included into the filename epoch = path.getmtime(fpath) jobID = fpath.split("_")[-2] logName = fpath.split("/")[-1].split(f"_{jobID}")[0] errFile = Path(relative_log_dir, f"{logName}_{jobID}_err.txt") outFile = Path(relative_log_dir, f"{logName}_{jobID}_out.txt") logHistory.append( [logName, jobID, datetime.fromtimestamp(epoch), errFile, outFile] ) # sort jobs by date, newest first logHistory.sort(key=lambda e: e[2], reverse=True) return render(request, "jobhistory.html", {"logHistory": logHistory})
def rfactor(request):
    """
    return rfactors statistics for datasets in the results, in Json format,
    suitable for drawing interactive plots
    """
    r_factors = ["r_work", "r_free"]
    data = _get_results_data(current_project(request), r_factors)

    frames = []
    for name in r_factors:
        data[name] = pandas.to_numeric(data[name])
        # per-dataset mean of this r-factor
        mean_frame = (
            data.groupby("dataset")[name]
            .mean()
            .round(2)
            .to_frame(name=name)
            .reset_index()
        )
        frames.append(mean_frame)
        # per-dataset standard deviation of this r-factor
        std_frame = (
            data.groupby("dataset")[name]
            .std()
            .round(2)
            .to_frame(name="std_" + name)
            .reset_index()
        )
        frames.append(std_frame)

    # join all statistics into one table, keyed on the 'dataset' column
    merged = frames[0]
    for frame in frames[1:]:
        merged = merged.merge(frame)

    return HttpResponse(merged.to_json(), content_type="application/json")
def show(request):
    """Render the results page listing the project's refine results."""
    proj = current_project(request)
    results = wrap_refine_results(proj.get_refine_results())

    return render(request, "results.html", {"refine_results": results})
def image(request, dataset_id: str, angle: str):
    """
    generated diffraction jpeg picture for given dataset and specified angle

    Angle is relative to the first frame. First frame is defined to be at
    angle 0°, next frame at 0° + <angle increment>, etc. That is, each
    frames angle is defined as:

        <angle> = <frame number> * <angle increment>
    """
    proj = current_project(request)
    dset = get_dataset_by_id(proj, dataset_id)

    jpeg_file = _jpeg_image_path(proj, dset, angle)
    if not jpeg_file.is_file():
        # picture not generated yet: request worker to run diffraction
        # jpeg generation command and wait until it's completed
        jpeg_file.parent.mkdir(parents=True, exist_ok=True)
        cmd = get_diffraction_picture_command(proj, dset, int(angle), jpeg_file)
        make_diffraction_jpeg.delay(cmd).wait()

    return jpeg_http_response(proj, jpeg_file)
def datasets(request):
    """
    Validate the processing form and launch the selected processing
    tools (dials / xds / xdsapp / autoproc) in background threads.

    Responds with 400 if the form arguments are invalid.
    """
    proj = current_project(request)

    form = ProcessForm(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest(f"invalid processing arguments {form.errors}")

    filters = form.datasets_filter
    options = {
        "spacegroup": form.space_group,
        "cellparam": form.cell_params,
        "friedel_law": form.friedel_law,
        "customxds": form.custom_xds,
        "customautoproc": form.custom_autoproc,
        "customdials": form.custom_dials,
        "customxdsapp": form.custom_xdsapp,
    }

    # start one background thread per selected processing tool
    tools = [
        (form.use_dials, run_dials),
        (form.use_xds, run_xds),
        (form.use_xdsapp, run_xdsapp),
        (form.use_autoproc, run_autoproc),
    ]
    for enabled, runner in tools:
        if enabled:
            start_thread(runner, proj, filters, options)

    return render(request, "jobs_submitted.html")
def show(request, dataset_id):
    """
    Render the dataset info page: processing statistics, refine results,
    derived acquisition numbers, logs and beamline details.
    """
    project = current_project(request)
    dataset = get_dataset_by_id(project, dataset_id)

    # must be a list, as template needs to iterate over this multiple times
    stats = list(_get_processing_info(project, dataset))
    refine_results = wrap_refine_results(
        project.get_datasets_refine_results(dataset)
    )

    context = {
        "dataset": DatasetInfo(dataset),
        "processing_stats": stats,
        "refine_results": refine_results,
        # assumes wavelength in Å, yielding energy in keV — TODO confirm units
        "energy": 12.4 / dataset.wavelength,
        "total_exposure": dataset.exposure_time * dataset.images,
        "total_rotation": dataset.images * dataset.angle_increment,
        "corner_resolution": dataset.resolution * 0.75625,
        "proc_logs": _get_processing_logs(project, dataset),
        "refine_logs": _get_refine_logs(project, dataset),
        "site": SITE,
        "beamline": SITE.get_beamline_info(),
    }

    return render(request, "dataset_info.html", context)
def pandda_input(request, dataset, method):
    """
    Serve the PanDDA input PDB file for given dataset and method
    as a file download.

    Responds with 404 if no input PDB file exists.
    """
    project = current_project(request)
    processed_dir = project.pandda_processed_dataset_dir(method, dataset)

    # fix: bare next() raised StopIteration (a 500 error) when the
    # input PDB is missing; answer with an explicit 404 instead
    pdb_path = next(processed_dir.glob("*pandda-input.pdb"), None)
    if pdb_path is None:
        return HttpResponseNotFound(
            f"no PanDDA input PDB found for {method}/{dataset}"
        )

    return download_http_response(pdb_path)
def page(request):
    """
    render download form page
    """
    proj = current_project(request)
    context = {"pandda_dirs": _get_method_dirs(proj)}

    return render(request, "download.html", context)
def test_current_project(self):
    """
    test the case where we successfully can get the current project
    """
    req = self._setup_request_mock(self.proposals)

    current = projects.current_project(req)

    self.assertEqual(self.project.id, current.id)
def forget_key(request):
    """
    Drop the currently uploaded encryption key and redirect to the
    encryption page; responds with 400 if the project is not encrypted.
    """
    proj = current_project(request)

    if not proj.encrypted:
        return HttpResponseBadRequest(ENCRYPTION_DISABLED_MSG)

    proj.forget_key()

    return _redirect_to_encryption()
def test_current_project_no_access(self):
    """
    test the case when the user is not part of their current
    project's proposal
    """
    req = self._setup_request_mock([self.OTHER_PROPOSAL])

    self.assertIsNone(projects.current_project(req))
def _show_html_log(request, html_file_url):
    """
    render a HTML log
    """
    proj = current_project(request)

    # serve the file via the /logs/htmldata/ endpoint, addressed
    # relative to the project directory
    rel_path = html_file_url.relative_to(proj.project_dir)

    return render(
        request, "html_log.html", {"html_url": f"/logs/htmldata/{rel_path}"}
    )
def htmldata(request, data_file):
    """
    Serve a project-relative HTML log data file; 404 if it does not exist.
    """
    proj = current_project(request)
    log_path = Path(proj.project_dir, data_file)

    if not log_path.is_file():
        return HttpResponseNotFound()

    return HttpResponse(read_proj_file(proj, log_path))
def pandda_consensus_zmap(request, dataset, method):
    """
    Serve the native Z-map (.ccp4) for given dataset and method
    as a file download.
    """
    proj = current_project(request)
    dataset_dir = proj.pandda_processed_dataset_dir(method, dataset)
    zmap = Path(dataset_dir, f"{dataset}-z_map.native.ccp4")

    return download_http_response(zmap)
def pandda_average(request, dataset, method):
    """
    Serve the native ground-state average map (.ccp4) for given dataset
    and method as a file download.
    """
    proj = current_project(request)
    dataset_dir = proj.pandda_processed_dataset_dir(method, dataset)
    avg_map = Path(dataset_dir, f"{dataset}-ground-state-average-map.native.ccp4")

    return download_http_response(avg_map)
def imported_htmldata(request, data_file):
    """
    Serve an HTML log data file from the proposal's raw data directory;
    404 if it does not exist.
    """
    project = current_project(request)
    log_path = Path(SITE.RAW_DATA_DIR, project.proposal, data_file)

    if not log_path.is_file():
        return HttpResponseNotFound()

    return HttpResponse(read_proj_file(project, log_path))
def download(request, log_file):
    """
    Serve a project log file as a binary download;
    renders an error page if the file does not exist.
    """
    proj = current_project(request)
    log_path = Path(proj.project_dir, log_file)

    if not log_path.is_file():
        return _log_not_found_resp(log_file)

    body = read_proj_file(proj, log_path)
    return HttpResponse(body, content_type="application/octet-stream")
def show(request, log_file):
    """
    Render a project log file, as HTML or plain text depending on its
    type; renders an error page if the file does not exist.
    """
    proj = current_project(request)
    log_path = Path(proj.project_dir, log_file)

    if not log_path.is_file():
        return _log_not_found_resp(log_file)

    if _is_html(log_path):
        return _show_html_log(request, log_path)

    return _show_text_log(request, proj, log_file, log_path)
def show_autoproc(request, dataset, log_file):
    """
    Render an imported autoproc log file for a dataset, as HTML or plain
    text depending on its type; renders an error page if it does not exist.
    """
    project = current_project(request)
    log_path = Path(autoproc.get_logs_dir(project, dataset), log_file)

    if not log_path.is_file():
        return _log_not_found_resp(log_file)

    if _is_html(log_path):
        return _show_imported_html_log(request, log_path)

    return _show_text_log(request, project, log_file, log_path)
def pandda_bdc(request, dataset, method):
    """
    Serve one of the BDC maps (.ccp4) for given dataset and method
    as a file download.

    Responds with 404 if no BDC map exists.
    """
    project = current_project(request)
    dataset_dir = project.pandda_processed_dataset_dir(method, dataset)

    # pick one of the matching .ccp4 files,
    # TODO: this gives us random ccp4 file of any
    # TODO: potentional BDC files, is this a good way to roll?
    #
    # fix: bare next() raised StopIteration (a 500 error) when no
    # BDC file exists; answer with an explicit 404 instead
    ccp4_path = next(dataset_dir.glob("*BDC*.ccp4"), None)
    if ccp4_path is None:
        return HttpResponseNotFound(f"no BDC map found for {method}/{dataset}")

    return download_http_response(ccp4_path)
def show(request, dataset_id, snapshot_index):
    """
    Serve the JPEG crystal snapshot with given index for a dataset;
    404 if no such snapshot is known.
    """
    proj = current_project(request)

    snapshot = proj.get_dataset_snapshot(dataset_id, snapshot_index)
    if snapshot is None:
        return HttpResponseNotFound(
            f"no snapshot '{snapshot_index}' for dataset '{dataset_id}' found"
        )

    snapshot_path = proj.get_dataset_snapshot_path(snapshot)
    return jpeg_http_response(proj, snapshot_path)
def edit(request, id):
    """
    GET request shows the 'PDB info' page

    POST request will delete the PDB
    """
    proj = current_project(request)
    pdb = get_pdb_by_id(proj, id)

    if request.method != "POST":
        return render(request, "pdb.html", {"pdb": pdb})

    _delete_pdb(proj, pdb)
    return redirect(urls.reverse("manage_pdbs"))
def download_key(request):
    """
    Serve the project's encryption key as a file download,
    named '<protein>_<proposal>_key'; responds with 400 on key errors.
    """
    proj = current_project(request)

    try:
        key = _get_key(proj)
    except CryptoKeyError as e:
        return HttpResponseBadRequest(e.error_message())

    filename = f"{proj.protein}_{proj.proposal}_key"

    resp = HttpResponse(key, content_type="application/force-download")
    resp["Content-Disposition"] = f'attachment; filename="{filename}"'
    return resp
def pandda_fitted(request, dataset: str, method: str):
    """
    Serve the latest fitted model PDB for given dataset and method
    as a file download.

    Responds with 404 if no fitted model exists.
    """
    project = current_project(request)
    modelled_structures_dir = Path(
        project.pandda_processed_dataset_dir(method, dataset),
        "modelled_structures",
    )

    #
    # pick 'fitted-vNNNN.pdb' file, with highest NNNN number
    #
    # fix: max() on an empty glob raised ValueError (a 500 error) when
    # no fitted model exists; answer with an explicit 404 instead
    #
    pdb_path = max(modelled_structures_dir.glob("*fitted*.pdb"), default=None)
    if pdb_path is None:
        return HttpResponseNotFound(
            f"no fitted PDB found for {method}/{dataset}"
        )

    return download_http_response(pdb_path)
def show(request):
    """
    Render the encryption page; which template is used depends on
    whether an encryption key is currently uploaded.

    Responds with 400 if the project is not encrypted.
    """
    project = current_project(request)

    if not project.encrypted:
        return HttpResponseBadRequest(ENCRYPTION_DISABLED_MSG)

    # 'encryption.html' when a key is uploaded,
    # 'upload_enc_key.html' when one still needs to be uploaded
    template = (
        "encryption.html"
        if project.has_encryption_key()
        else "upload_enc_key.html"
    )

    return render(request, template)
def compare_poses(request, result_id):
    """
    Render the dual density view comparing the rhofit and ligandfit
    poses for a refine result.
    """
    proj = current_project(request)
    result = get_refine_result_by_id(proj, result_id)

    context = {
        "result": result,
        "rhofit_result": result.get_ligfit_result("rhofit"),
        "ligandfit_result": result.get_ligfit_result("ligandfit"),
        "fragment": get_crystals_fragment(result.dataset.crystal),
    }

    return render(request, "dual_density.html", context)
def cluster_image(request, method, cluster):
    """
    Serve the dendrogram PNG for a clustered-datasets cluster;
    404 if no such image exists.
    """
    proj = current_project(request)

    png_path = Path(
        proj.pandda_dir,
        method,
        "clustered-datasets",
        "dendrograms",
        f"{cluster}.png",
    )
    if not png_path.is_file():
        return HttpResponseNotFound(f"no dendrogram image for {method}/{cluster} found")

    return png_http_response(proj, png_path)
def _show_imported_html_log(request, log_file):
    """
    render a HTML log that is outside the fragmax folder,
    that is, in one of the shift folders

    this is used to display logs for auto-processing tools,
    which are imported into fragmax projects
    """
    proj = current_project(request)

    # address the file relative to the proposal's raw data directory,
    # served via the /logs/imported/htmldata/ endpoint
    shift_root = Path(SITE.RAW_DATA_DIR, proj.proposal)
    rel_path = log_file.relative_to(shift_root)

    return render(
        request, "html_log.html", {"html_url": f"/logs/imported/htmldata/{rel_path}"}
    )