def makeUnmergedTable(body, tableId, holderId, data, row):
    """Render a summary table for an imported unmerged-reflection dataset.

    Args:
        body:     report object providing putTableLine().
        tableId:  unique RVAPI id for the new table.
        holderId: RVAPI id of the holder (grid) receiving the table.
        data:     imported data object (files, dname, dataset metadata).
        row:      grid row in the holder where the table is placed.
    """
    pyrvapi.rvapi_add_table(tableId, "<h2>Summary</h2>", holderId, row, 0, 1, 1, 0)
    pyrvapi.rvapi_set_table_style(tableId, "table-blue", "text-align:left;")
    r = body.putTableLine(tableId, "File name", "Imported file name", data.files[0], 0)
    r = body.putTableLine(tableId, "Assigned name", "Assigned data name", data.dname, r)
    r = body.putTableLine(tableId, "Dataset name", "Original data name", data.dataset.name, r)
    r = body.putTableLine(tableId, "Resolution (Å)", "Dataset resolution in angstroms", data.dataset.reso, r)
    r = body.putTableLine(tableId, "Wavelength (Å)", "Beam wavelength in angstroms", data.dataset.wlen, r)
    # The space group may be absent from the imported metadata.
    if data.HM:
        r = body.putTableLine(tableId, "Space group", "Space group", data.HM, r)
    else:
        r = body.putTableLine(tableId, "Space group", "Space group", "unspecified", r)
    # Six cell parameters (a b c alpha beta gamma) joined into one display string;
    # indexing via range(6) preserves the original IndexError on short cells.
    cell_spec = " ".join(data.dataset.cell[i] for i in range(6))
    r = body.putTableLine(tableId, "Cell", "Cell parameters", cell_spec, r)
    # Collect (first, last) image numbers for every run of the dataset.
    ranges = [(int(run[1]), int(run[2])) for run in data.dataset.runs]
    r = body.putTableLine(tableId, "Ranges", "Image range(s)", str(ranges), r)
    pyrvapi.rvapi_flush()
    return
def import_all(self):
    """Build the import summary table, scan the upload directory and run every registered importer."""
    # ------------------------------------------------------------------
    # Start page construction: summary table plus a spacer line below it.
    pyrvapi.rvapi_add_table(self.import_summary_id(),
                            "<font size='+1'>Import Summary</font>",
                            self.report_page_id(), self.rvrow + 1, 0, 1, 1, 0)
    pyrvapi.rvapi_set_table_style(self.import_summary_id(),
                                  "table-blue", "text-align:left;")
    pyrvapi.rvapi_add_text(" ", self.report_page_id(), self.rvrow + 2, 0, 1, 1)
    self.rvrow += 3
    for col, (label, tooltip) in enumerate((
            ("Imported file", "Name of imported file"),
            ("Type", "Dataset type"),
            ("Generated dataset(s)", "List of generated datasets"))):
        pyrvapi.rvapi_put_horz_theader(self.import_summary_id(), label, tooltip, col)
    # ------------------------------------------------------------------
    # Collect the path (relative to the import directory) of every uploaded file.
    top_dir = self.importDir()
    prefix_len = len(top_dir) + 1
    uploaded = []
    for dir_path, _subdirs, file_names in os.walk(top_dir, topdown=False):
        rel_dir = dir_path[prefix_len:]
        for file_name in file_names:
            uploaded.append(os.path.join(rel_dir, file_name))
    self.files_all = uploaded
    # ------------------------------------------------------------------
    # Each importer consumes the files it recognises from self.files_all.
    for importer in importers:
        importer.run(self)
    # ------------------------------------------------------------------
    # Anything left over was not recognised by any importer: log and flag it.
    if self.files_all:
        self.file_stdout.write ( "\n\n" + "="*80 + \
            "\n*** The following files are not recognised and will be ignored:\n" )
        for leftover in self.files_all:
            self.file_stdout.write(" " + leftover + "\n")
        self.file_stdout.write("\n")
        for leftover in self.files_all:
            self.putSummaryLine_red(leftover, "UNKNOWN", "Failed to recognise, ignored")
def makeTable(tableDict, tableId, holderId, row, col, rowSpan, colSpan):
    """Create an RVAPI table widget from a dictionary description.

    Table dictionary example:

    { title: "Table Title",            # empty string by default
      state: 0,                        # -1,0,1, -100,100
      class: "table-blue",             # "table-blue" by default
      css  : "text-align:right;",      # "text-align:rigt;" by default
      horzHeaders : [                  # either empty list or full header structures for all columns
        { label: "Size"  , tooltip: "" },
        { label: "Weight", tooltip: "" },
        .....
      ],
      rows : [
        { header: { label: "1st row", tooltip: "" },  # header may be missing
          data  : [ "string1","string2", ... ]
        },
        ......
      ]
    }
    """
    pyrvapi.rvapi_add_table(tableId, __get_item("title", tableDict, ""), holderId,
                            row, col, rowSpan, colSpan,
                            __get_item("state", tableDict, 0))
    if ("class" in tableDict) or ("css" in tableDict):
        pyrvapi.rvapi_set_table_style(
            tableId,
            __get_item("class", tableDict, "table-blue"),
            __get_item("css", tableDict, "text-align:right;"))
    # Column headers: either absent or one entry per column.
    for i, header in enumerate(tableDict.get("horzHeaders", [])):
        pyrvapi.rvapi_put_horz_theader(tableId, header["label"],
                                       header["tooltip"], i)
    # Body rows; the vertical (row) header is optional per row.
    for i, trow in enumerate(tableDict.get("rows", [])):
        if "header" in trow:
            pyrvapi.rvapi_put_vert_theader(tableId, trow["header"]["label"],
                                           trow["header"]["tooltip"], i)
        for j, cell in enumerate(trow["data"]):
            pyrvapi.rvapi_put_table_string(tableId, cell, i, j)
    return
def results_section(self, results_tab_id, mrb_results, ensemble_results, section_title):
    """Populate the results tab with one tree node per MrBUMP ensemble result.

    Args:
        results_tab_id:   RVAPI id of the tab receiving this section.
        mrb_results:      list of MrBUMP result dictionaries.
        ensemble_results: ensemble data used to locate the search-model PDB.
        section_title:    title shown for the whole section.
    """
    if not mrb_results:
        return
    # Create unique identifier for this section by using the id.
    # All ids will have this appended to avoid clashes.
    uid = str(uuid.uuid4())
    section_id = section_title.replace(" ", "_") + uid
    self.results_tab_sections.append(section_id)  # kept so we can remove it on update
    pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
    pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title), section_id, 0, 0, 1, 1)
    results_tree = "results_tree" + section_id
    pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id, 0, 0, 1, 1)
    for r in mrb_results:
        name = r['ensemble_name']
        container_id = "sec_{0}".format(name) + uid
        pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)
        header = "<h3>Results for ensemble: {0}</h3>".format(name)
        pyrvapi.rvapi_add_text(header, container_id, 0, 0, 1, 1)
        # Summary table for this ensemble.
        sec_table = "sec_table_{0}".format(name) + uid
        title = "Summary"
        pyrvapi.rvapi_add_section(sec_table, title, container_id, 0, 0, 1, 1, True)
        table_id = "table_{0}".format(name) + uid
        pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
        tdata = mrbump_util.ResultsSummary().results_table([r])
        self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)
        # Ensemble search model, when one can be located.
        if ensemble_results:
            epdb = self.ensemble_pdb(r, ensemble_results)
            if epdb:
                sec_ensemble = "sec_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model",
                                          container_id, 0, 0, 1, 1, False)
                data_ensemble = "data_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(data_ensemble, "Ensemble PDB",
                                       self.fix_path(epdb), "XYZOUT", sec_ensemble,
                                       2, 0, 1, 1, True)
        # Every program's outputs get an identically-shaped section, so the
        # repetition is driven by a (result-key prefix, section title) table.
        for key_prefix, program_section_title in (
                ('PHASER', 'PHASER Outputs'),
                ('REFMAC', 'REFMAC Outputs'),
                ('BUCC', 'BUCCANEER Outputs'),
                ('ARP', 'ARPWARP Outputs'),
                ('SHELXE', 'SHELXE Outputs'),
                ('SXRBUCC', 'BUCCANEER SHELXE Trace Rebuild Outputs'),
                ('SXRARP', 'ARPWARP SHELXE Trace Redbuild Outputs')):
            self._put_program_outputs(r, name, uid, container_id,
                                      key_prefix, program_section_title)
        pyrvapi.rvapi_set_tree_node(results_tree, container_id,
                                    "{0}".format(name), "auto", "")
    return

def _put_program_outputs(self, r, name, uid, container_id, key_prefix, section_title):
    """Add a collapsed section holding the PDB/MTZ pair and/or logfile for one program.

    Does nothing when neither a logfile nor a complete PDB+MTZ pair exists on disk.
    """
    logfile = str(r[key_prefix + '_logfile'])
    pdbout = str(r[key_prefix + '_pdbout'])
    mtzout = str(r[key_prefix + '_mtzout'])
    have_xyz = os.path.isfile(pdbout) and os.path.isfile(mtzout)
    if not (os.path.isfile(logfile) or have_xyz):
        return
    tag = key_prefix.lower()
    sec_id = "sec_{0}_{1}".format(tag, name) + uid
    pyrvapi.rvapi_add_section(sec_id, section_title, container_id, 0, 0, 1, 1, False)
    if have_xyz:
        data_id = "data_{0}_out_{1}".format(tag, name) + uid
        pyrvapi.rvapi_add_data(data_id, "{0} PDB".format(key_prefix),
                               os.path.splitext(self.fix_path(pdbout))[0],
                               "xyz:map", sec_id, 2, 0, 1, 1, True)
        pyrvapi.rvapi_append_to_data(data_id, self.fix_path(mtzout), "xyz:map")
    # NOTE: the logfile data id deliberately carries no uid suffix, matching
    # the original widget layout.
    if os.path.isfile(logfile):
        pyrvapi.rvapi_add_data("data_{0}_logfile_{1}".format(tag, name),
                               "{0} Logfile".format(key_prefix),
                               self.fix_path(logfile), "text", sec_id,
                               2, 0, 1, 1, True)
def makeHKLTable(body, tableId, holderId, original_data, new_data, truncation, trunc_msg, row):
    """Render a summary table for an imported merged HKL dataset, including the truncation outcome."""
    pyrvapi.rvapi_add_table(tableId, "<h2>Summary</h2>", holderId, row, 0, 1, 1, 0)
    pyrvapi.rvapi_set_table_style(tableId, "table-blue", "text-align:left;")
    r = body.putTableLine(tableId, "File name", "Imported file name", new_data.files[0], 0)
    r = body.putTableLine(tableId, "Dataset name", "Original dataset name", new_data.getDataSetName(), r)
    r = body.putTableLine(tableId, "Assigned name", "Assigned dataset name", new_data.dname, r)
    r = body.putTableLine(tableId, "Wavelength", "Wavelength", str(new_data.getMeta("DWAVEL", "unspecified")), r)
    r = body.putTableLine(tableId, "Space group", "Space group", new_data.getMeta("HM", "unspecified"), r)
    # Cell parameters: three lengths, a double space, then three angles.
    dcell = new_data.getMeta("DCELL", "*")
    if dcell == "*":
        cell_spec = "not specified"
    else:
        cell_spec = "{0} {1} {2}  {3} {4} {5}".format(
            dcell[0], dcell[1], dcell[2], dcell[3], dcell[4], dcell[5])
    r = body.putTableLine(tableId, "Cell", "Cell parameters", cell_spec, r)
    r = body.putTableLine(tableId, "Resolution low", "Low resolution limit", new_data.getLowResolution(), r)
    r = body.putTableLine(tableId, "Resolution high", "High resolution limit", new_data.getHighResolution(), r)
    anom = "Present" if dtype_hkl.subtypeAnomalous() in new_data.subtype else "Not present"
    r = body.putTableLine(tableId, "Anomalous scattering", "Presence of anomalous data", anom, r)
    # Truncation outcome: failed, skipped (no intensity data), or applied.
    if trunc_msg:
        r = body.putTableLine(tableId, "Original columns", "Original data columns",
                              original_data.getColumnNames(), r)
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Failed: " + trunc_msg + "<br>The dataset cannot be used", r)
    elif truncation == 0:
        r = body.putTableLine(tableId, "Original columns", "Original data columns",
                              original_data.getColumnNames(), r)
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Was not performed due to the absence of intensity data.<br>"
                              "The dataset will be used untruncated", r)
    else:
        r = body.putTableLine(tableId, "Original columns", "Original data columns",
                              original_data.getColumnNames(), r)
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Truncated dataset will be used instead of the original one.", r)
    r = body.putTableLine(tableId, "Columns to be used",
                          "Data columns which will be used further on",
                          new_data.getColumnNames(), r)
    pyrvapi.rvapi_flush()
    return
def results_section(self, results_tab_id, mrb_results, ensemble_results, section_title):
    """Results Tab"""
    if not mrb_results:
        return
    # A fresh uuid keeps every widget id in this section unique, so an updated
    # section never clashes with widgets left over from a previous call.
    uid = str(uuid.uuid4())
    section_id = section_title.replace(" ", "_") + uid
    # Remember the section id so it can be removed on the next update.
    self.results_tab_sections.append(section_id)
    pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
    pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title), section_id, 0, 0, 1, 1)
    results_tree = "results_tree" + section_id
    pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id, 0, 0, 1, 1)
    for result in mrb_results:
        ensemble_name = result['ensemble_name']
        container_id = "sec_{0}".format(ensemble_name) + uid
        pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)
        pyrvapi.rvapi_add_text(
            "<h3>Results for ensemble: {0}</h3>".format(ensemble_name),
            container_id, 0, 0, 1, 1)
        # Collapsible summary table for this ensemble.
        sec_table = "sec_table_{0}".format(ensemble_name) + uid
        pyrvapi.rvapi_add_section(sec_table, "Summary", container_id, 0, 0, 1, 1, True)
        table_id = "table_{0}".format(ensemble_name) + uid
        pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
        tdata = mrbump_util.ResultsSummary().results_table([result])
        self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)
        # Ensemble search model (only when an ensemble PDB can be located).
        if ensemble_results:
            epdb = self.ensemble_pdb(result, ensemble_results)
            if epdb:
                sec_ensemble = "sec_ensemble_{0}".format(ensemble_name) + uid
                pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model",
                                          container_id, 0, 0, 1, 1, False)
                data_ensemble = "data_ensemble_{0}".format(ensemble_name) + uid
                pyrvapi.rvapi_add_data(data_ensemble, "Ensemble PDB",
                                       self.fix_path(epdb), "XYZOUT",
                                       sec_ensemble, 2, 0, 1, 1, True)
        # Each program's outputs are added the same way, so the calls are
        # driven by a (display name, result-key stem) table.
        for program_name, key_stem in (
                ('PHASER', 'PHASER'),
                ('Refmac', 'REFMAC'),
                ('BUCCANEER', 'BUCC'),
                ('ArpWarp', 'ARP'),
                ('SHELXE', 'SHELXE'),
                ('BUCCANEER SHELXE Trace Rebuild', 'SXRBUCC'),
                ('ARPWARP SHELXE Trace Rebuild', 'SXRARP')):
            self.add_results_section(
                result_dict=result,
                ensemble_name=ensemble_name,
                program_name=program_name,
                logfile_key=key_stem + '_logfile',
                pdb_key=key_stem + '_pdbout',
                mtz_key=key_stem + '_mtzout',
                uid=uid,
                container_id=container_id,
            )
        pyrvapi.rvapi_set_tree_node(results_tree, container_id,
                                    "{0}".format(ensemble_name), "auto", "")
    return
def run(self):
    """Demo task body: echo the chosen input data objects and render a table of task parameters."""
    # put message
    self.putMessage("Hello World!")
    # print in standard output and standard error streams
    self.file_stdout.write("Hello World!\n")
    self.file_stderr.write("Hello World!\n")

    # ---- process input data
    if hasattr(self.input_data.data, "xyz"):  # check if any data was chosen
        xyz = self.input_data.data.xyz
        self.putMessage ( "<p><b>Total " + str(len(xyz)) +\
                          " data objects chosen by user.</b>" )
        for i, xyz_meta in enumerate(xyz):
            self.putMessage("<i>Process dataset #" + str(i) + "</i>")
            self.file_stdout.write ( "\n========= Data Object (metadata) #" + str(i) + "\n" +\
                                     xyz_meta.to_JSON() + "\n" )
            xyzi = self.makeClass(xyz_meta)
            filepath = xyzi.getFilePath(self.inputDir())
            # 'with' guarantees the file handle is closed even if read() raises.
            with open(filepath, 'r') as filei:
                self.file_stdout.write ( "\n========= Data Content (file " + xyzi.files[0] +\
                                         ") #" + str(i) + "\n\n" + filei.read() + "\n" )
    else:
        self.putMessage("<p><b>No input data was chosen by user.</b>")

    # ---- process input parameters
    self.putMessage(" ")  # spaceholder
    sec1 = self.task.parameters.sec1.contains

    # ---- make table
    tableId = "report_table1"  # make sure all PyRVAPI Ids are unique
    pyrvapi.rvapi_add_table(
        tableId, "<font size='+1'>Summary of Parameters</font>",
        self.report_page_id(), self.rvrow, 0, 1, 1, 0)
    pyrvapi.rvapi_set_table_style(tableId, "table-blue", "text-align:left;")
    self.rvrow += 1  # important or table will be lost!

    # make table headers
    pyrvapi.rvapi_put_horz_theader(tableId, "Type", "Parameter type", 0)
    pyrvapi.rvapi_put_horz_theader(tableId, "Python type", "Python type", 1)
    pyrvapi.rvapi_put_horz_theader(tableId, "Label", "Parameter label", 2)
    pyrvapi.rvapi_put_horz_theader(tableId, "Visibility", "visibility", 3)
    pyrvapi.rvapi_put_horz_theader(tableId, "Value", "Parameter value", 4)

    # fill rows with parameter data and metadata
    def makeRow(name, parameter, row):
        """Fill one table row with a parameter's metadata and current value."""
        pyrvapi.rvapi_put_vert_theader(tableId, name, parameter.tooltip, row)
        pyrvapi.rvapi_put_table_string(tableId, parameter.type, row, 0)
        pyrvapi.rvapi_put_table_string(tableId, type(parameter.value).__name__, row, 1)
        pyrvapi.rvapi_put_table_string(tableId, parameter.label, row, 2)
        pyrvapi.rvapi_put_table_string(tableId, str(parameter.visible), row, 3)
        pyrvapi.rvapi_put_table_string(tableId, str(parameter.value), row, 4)
        return

    makeRow("NCLUST", sec1.NCLUST, 0)
    makeRow("HATOM", sec1.HATOM, 1)
    makeRow("PROGRAM", sec1.PROGRAM, 2)
    makeRow("HANDDET_DO", sec1.HANDDET_DO, 3)

    # close execution logs and quit
    self.success()
    return
def results_section(self, results_tab_id, mrb_results, ensemble_results, section_title):
    """Build the Results tab content: one tree node per MrBUMP ensemble result.

    Args:
        results_tab_id:   RVAPI id of the tab receiving this section.
        mrb_results:      list of MrBUMP result dictionaries.
        ensemble_results: ensemble data used to locate the search-model PDB.
        section_title:    title shown for the whole section.
    """
    if not mrb_results:
        return
    # Create unique identifier for this section by using the id.
    # All ids will have this appended to avoid clashes.
    uid = str(uuid.uuid4())
    section_id = section_title.replace(" ", "_") + uid
    self.results_tab_sections.append(section_id)  # so it can be removed on update
    pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
    pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title), section_id, 0, 0, 1, 1)
    results_tree = "results_tree" + section_id
    pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id, 0, 0, 1, 1)
    for r in mrb_results:
        name = r['ensemble_name']
        container_id = "sec_{0}".format(name) + uid
        pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)
        header = "<h3>Results for ensemble: {0}</h3>".format(name)
        pyrvapi.rvapi_add_text(header, container_id, 0, 0, 1, 1)
        # Summary table for this ensemble.
        sec_table = "sec_table_{0}".format(name) + uid
        title = "Summary"
        pyrvapi.rvapi_add_section(sec_table, title, container_id, 0, 0, 1, 1, True)
        table_id = "table_{0}".format(name) + uid
        pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
        tdata = mrbump_util.ResultsSummary().results_table([r])
        self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)
        # Ensemble search model, when one can be located.
        if ensemble_results:
            epdb = self.ensemble_pdb(r, ensemble_results)
            if epdb:
                sec_ensemble = "sec_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model",
                                          container_id, 0, 0, 1, 1, False)
                data_ensemble = "data_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(data_ensemble, "Ensemble PDB",
                                       self.fix_path(epdb), "XYZOUT",
                                       sec_ensemble, 2, 0, 1, 1, True)
        # All program-output sections share the same layout; drive them from a
        # (result-key prefix, section title) table instead of copy-pasted blocks.
        for stem, outputs_title in (
                ('PHASER', 'PHASER Outputs'),
                ('REFMAC', 'REFMAC Outputs'),
                ('BUCC', 'BUCCANEER Outputs'),
                ('ARP', 'ARPWARP Outputs'),
                ('SHELXE', 'SHELXE Outputs'),
                ('SXRBUCC', 'BUCCANEER SHELXE Trace Rebuild Outputs'),
                ('SXRARP', 'ARPWARP SHELXE Trace Redbuild Outputs')):
            self._add_program_output_section(r, name, uid, container_id,
                                             stem, outputs_title)
        pyrvapi.rvapi_set_tree_node(results_tree, container_id,
                                    "{0}".format(name), "auto", "")
    return

def _add_program_output_section(self, r, name, uid, container_id, stem, section_title):
    """Add a collapsed section with one program's PDB/MTZ pair and/or logfile.

    Skipped entirely when neither a logfile nor a complete PDB+MTZ pair exists.
    """
    log_path = str(r[stem + '_logfile'])
    pdb_path = str(r[stem + '_pdbout'])
    mtz_path = str(r[stem + '_mtzout'])
    pair_present = os.path.isfile(pdb_path) and os.path.isfile(mtz_path)
    if not (os.path.isfile(log_path) or pair_present):
        return
    tag = stem.lower()
    sec_id = "sec_{0}_{1}".format(tag, name) + uid
    pyrvapi.rvapi_add_section(sec_id, section_title, container_id, 0, 0, 1, 1, False)
    if pair_present:
        data_id = "data_{0}_out_{1}".format(tag, name) + uid
        pyrvapi.rvapi_add_data(data_id, "{0} PDB".format(stem),
                               os.path.splitext(self.fix_path(pdb_path))[0],
                               "xyz:map", sec_id, 2, 0, 1, 1, True)
        pyrvapi.rvapi_append_to_data(data_id, self.fix_path(mtz_path), "xyz:map")
    # NOTE: the logfile data id carries no uid suffix, matching the original layout.
    if os.path.isfile(log_path):
        pyrvapi.rvapi_add_data("data_{0}_logfile_{1}".format(tag, name),
                               "{0} Logfile".format(stem),
                               self.fix_path(log_path), "text", sec_id,
                               2, 0, 1, 1, True)
def _replace_file(temp_filename, filename):
    """Move *temp_filename* over *filename* (near-atomic update).

    If *filename* already exists it is first renamed to a unique temporary
    name and removed, so the final os.rename() cannot fail on platforms
    where renaming onto an existing file is not permitted (e.g. Windows).
    """
    if os.path.exists(filename):
        import uuid
        tmpfile = str(uuid.uuid4())
        os.rename(filename, tmpfile)
        os.remove(tmpfile)
    os.rename(temp_filename, filename)


def _top_scores(solutions):
    """Return (top_llg, top_tfz, top_acornCC) for a list of solution dicts.

    Solutions are ranked by Phaser LLG; top_acornCC is the largest numeric
    ACORN correlation coefficient, or None when no solution has a numeric
    CC yet ('Running', '-' and None placeholders are ignored).  Using max()
    over the filtered values avoids sorting a mixed list of numbers and
    None, which raises TypeError on Python 3.
    """
    best = sorted(solutions, key=lambda r: r['llg'], reverse=True)[0]
    ccs = [
        solution['acornCC'] for solution in solutions
        if solution['acornCC'] not in ['Running', '-', None]
    ]
    return best['llg'], best['tfz'], (max(ccs) if ccs else None)


def write_output(items, json_file=None, xml_file=None, xmlroot=None, docid=None, output=None):
    """Report progress/results of a Fragon run through one of three channels.

    Exactly one mode is used per call, selected by the keyword arguments:

    * ``json_file`` -- non-i2 mode: merge *items* into *output* and dump the
      result to *json_file* (written via a ``.tmp`` file and renamed into
      place); returns the updated *output* dict.
    * ``xml_file`` with ``xmlroot is None`` -- i2 mode bootstrap: create and
      return a fresh ``<Fragon>`` element tree root.
    * ``docid is None`` (and no json/xml target) -- standalone GUI bootstrap:
      initialise the RVAPI document and return ``('tab1', output)``.
    * ``xml_file`` with an *xmlroot* -- i2 mode update: mirror *items* into
      the etree (preserving element order) and rewrite *xml_file*.
    * ``docid is not None`` -- live RVAPI report update.
    """
    # in non-i2 mode items are added to the output dictionary which is dumped to json
    if json_file is not None:
        if 'result' in items:
            # patch the ACORN CC into the matching stored solution record
            result = items['result']
            for solution in output['solutions']:
                if solution['id'] == result['id']:
                    solution.update({'acornCC': result['acornCC']})
        else:
            output.update(items)
        temp_filename = json_file + '.tmp'
        with open(temp_filename, 'w') as jsonfile:
            print(json.dumps(output, sort_keys=True, indent=2,
                             separators=(',', ': ')), file=jsonfile)
        _replace_file(temp_filename, json_file)
        return output
    elif xmlroot is None and xml_file is not None:
        xmlroot = etree.Element('Fragon')
        return xmlroot
    elif docid is None:
        # first call in standalone GUI mode: set up the RVAPI document
        jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
        pyrvapi.rvapi_init_document('fragon_results', os.getcwd(),
                                    'Fragon %s results' % items['Fragon'],
                                    1, 7, jsrview_dir, None, None, None, None)
        pyrvapi.rvapi_add_tab('tab1', 'Fragon results', True)
        pyrvapi.rvapi_add_section('status', 'Current status', 'tab1',
                                  0, 0, 1, 1, True)
        pyrvapi.rvapi_add_text(
            'The job is currently running. Updates will be shown here after fragment placement and density modification.',
            'status', 0, 0, 1, 1)
        pyrvapi.rvapi_flush()
        output.update(items)
        return 'tab1', output
    elif xml_file is not None:
        # in i2 mode new items are added to the etree as this preserves the order in the xml
        for key in items:
            if key == 'Fragon':
                version_node = etree.SubElement(xmlroot, 'Version')
                version_node.text = output['Fragon']
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    # reuse the existing progress node if present
                    try:
                        progress_node = xmlroot.xpath(
                            '//Fragon/phaser_progress')[0]
                    except IndexError:
                        progress_node = etree.SubElement(
                            xmlroot, 'phaser_progress')
                    progress_node.text = callback[1]
                elif callback[0] == 'Best LLG/TFZ':
                    best_llg_node = etree.SubElement(xmlroot, 'best_llg')
                    best_llg_node.text = callback[1]['llg']
                    best_tfz_node = etree.SubElement(xmlroot, 'best_tfz')
                    best_tfz_node.text = callback[1]['tfz']
            elif key == 'solutions':
                solutions = items['solutions']
                try:
                    solutions_node = xmlroot.xpath('//Fragon/solutions')[0]
                except IndexError:
                    solutions_node = etree.SubElement(xmlroot, 'solutions')
                if len(solutions) > 0:
                    solutions_node.text = json.dumps(solutions)
            else:
                node = etree.SubElement(xmlroot, key)
                node.text = str(items[key])
        temp_filename = 'program.xml.tmp'
        # lxml's tostring() returns bytes by default; decode so the write to
        # a text-mode file works on Python 3 as well as Python 2
        xml_text = etree.tostring(xmlroot, pretty_print=True)
        if isinstance(xml_text, bytes):
            xml_text = xml_text.decode('utf-8')
        with open(temp_filename, 'w') as xmlfile:
            xmlfile.write(xml_text)
        _replace_file(temp_filename, xml_file)
    elif docid is not None:
        # live RVAPI report update
        for key in items:
            if key == 'copies':
                if items['copies'] > 1:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place %d fragments' % items['copies'],
                        'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place the fragment',
                        'status', 0, 0, 1, 1)
                pyrvapi.rvapi_add_tab('tab2', 'Phaser log file', False)
                pyrvapi.rvapi_append_content(output['root'] + '_Phaser.log',
                                             True, 'tab2')
                pyrvapi.rvapi_flush()
                output.update(items)
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    pyrvapi.rvapi_set_text(
                        'Current Phaser stage: %s' % callback[1],
                        'status', 1, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best LLG':
                    pyrvapi.rvapi_set_text(
                        'Current best solution Log Likelihood Gain (LLG): %s Translation Function Z-score (TFZ): %s'
                        % (callback[1], output['best_tfz']),
                        'status', 2, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best TFZ':
                    # TFZ arrives first; stash it for the 'Best LLG' message
                    output.update({'best_tfz': callback[1]})
            elif key == 'solutions':
                solutions = items['solutions']
                top_llg, top_tfz, top_acornCC = _top_scores(solutions)
                if len(solutions) == 1:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found a single solution with Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                        % (top_llg, top_tfz), 'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found %d solutions. The top solution has Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TF Z-score) of %0.2f'
                        % (output['num_phaser_solutions'], top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                if output['num_phaser_solutions'] > len(solutions):
                    # BUGFIX: was len(solns) -- undefined name (NameError)
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases for the top %d solutions by density modification with ACORN'
                        % len(solutions), 'status', 1, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases by density modification with ACORN',
                        'status', 1, 0, 1, 1)
                if top_acornCC is not None:
                    pyrvapi.rvapi_set_text(
                        'The best solution so far has a correlation coefficient from density modification of %0.3f'
                        % top_acornCC, 'status', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('', 'status', 2, 0, 1, 1)
                # (re)build the solutions table
                pyrvapi.rvapi_add_table('results_table', 'Phaser solutions',
                                        'tab1', 1, 0, 1, 1, 1)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Solution number', '', 0)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Space group', '', 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'LLG',
                                               'Phaser Log Likelihood Gain', 2)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'TF Z-score',
                    'Phaser Translation Function Z-score', 3)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'CC',
                    'CC from ACORN density modification', 4)
                for solution in solutions:
                    # rows are indexed by (1-based) solution number
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%d' % solution['number'],
                                                   solution['number'] - 1, 0)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   solution['sg'],
                                                   solution['number'] - 1, 1)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['llg'],
                                                   solution['number'] - 1, 2)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['tfz'],
                                                   solution['number'] - 1, 3)
                    if solution['acornCC'] in ['Running', '-']:
                        # '-' renders as an empty cell
                        pyrvapi.rvapi_put_table_string(
                            'results_table',
                            solution['acornCC'].replace('-', ''),
                            solution['number'] - 1, 4)
                    elif solution['acornCC'] is None:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', 'Not tested',
                            solution['number'] - 1, 4)
                    else:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', '%0.3f' % solution['acornCC'],
                            solution['number'] - 1, 4)
                output.update(items)
                pyrvapi.rvapi_flush()
            elif key == 'cc_best':
                solutions = output['solutions']
                top_llg, top_tfz, top_acornCC = _top_scores(solutions)
                pyrvapi.rvapi_set_section_state('status', False)
                pyrvapi.rvapi_add_section('results', 'Results', 'tab1',
                                          2, 0, 1, 1, True)
                pyrvapi.rvapi_add_text(
                    'Phaser found %d solutions. The top solution had Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                    % (output['num_phaser_solutions'], top_llg, top_tfz),
                    'results', 0, 0, 1, 1)
                pyrvapi.rvapi_add_text(
                    'The best solution has a correlation coefficient from density modification of %0.3f'
                    % top_acornCC, 'results', 1, 0, 1, 1)
                if top_acornCC > 0.15:
                    pyrvapi.rvapi_add_text(
                        'This suggests the structure has been solved and the phases from ACORN will enable automated model building',
                        'results', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_add_text(
                        'Sorry this does not suggest a solution',
                        'results', 3, 0, 1, 1)
                pyrvapi.rvapi_flush()
            elif key == 'best_solution_id':
                pdbout = output['name'] + '_phaser_solution.pdb'
                mtzout = output['name'] + '_acorn_phases.mtz'
                pyrvapi.rvapi_add_data(
                    'best', 'Best fragment placement and electron density',
                    pdbout, 'xyz', 'tab1', 3, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data('best', mtzout, 'hkl:map')
            else:
                output.update(items)
    return output
def run(self):
    """Run gesamt and render its results in the RVAPI report.

    With a single input structure this performs a PDB-archive scan
    (GESAMT_ARCHIVE); with two or more structures it performs a
    pairwise/multiple superposition.  Ends by calling self.success().
    """
    # Prepare gesamt job
    # Just in case (of repeated run) remove the output xyz file. When gesamt
    # succeeds, this file is created.
    if os.path.isfile(self.gesamt_xyz()):
        os.remove(self.gesamt_xyz())
    if os.path.isfile(self.gesamt_json()):
        os.remove(self.gesamt_json())

    # Prepare gesamt input
    # fetch input data
    xyz = self.input_data.data.xyz
    nXYZ = len(xyz)

    # make command-line parameters: one "<file> -s <selection>" pair per
    # input structure
    cmd = []
    for i in range(nXYZ):
        cmd += [
            os.path.join(self.inputDir(), xyz[i].files[0]), "-s",
            xyz[i].chainSel
        ]

    if nXYZ < 2:
        # single structure: scan the configured GESAMT archive
        if not "GESAMT_ARCHIVE" in os.environ:
            self.fail(
                "<b> *** Error: jsCofe is not configured to work " + \
                "with GESAMT Archive</b><br>" + \
                "<i> Please look for support</i><br>",
                "No GESAMT Archive configured")
        cmd += [
            "-archive", os.environ["GESAMT_ARCHIVE"], "-nthreads=auto",
            "-min1=" +
            self.getParameter(self.task.parameters.sec1.contains.MIN1),
            "-min2=" +
            self.getParameter(self.task.parameters.sec1.contains.MIN2),
            "-trim-size=1",
            "-trim-Q=" +
            self.getParameter(self.task.parameters.sec1.contains.QSCORE),
            "--json", self.gesamt_json()
        ]
        # progress bar plus estimated-time-remaining label for the scan;
        # gesamt updates them through the stored report document
        self.rvrow += 1
        pyrvapi.rvapi_add_grid(self.progress_grid_id(), False,
                               self.report_page_id(), self.rvrow, 0, 1, 1)
        pyrvapi.rvapi_add_progress_bar(self.progress_bar_id(),
                                       self.progress_grid_id(), 0, 0, 1, 1)
        pyrvapi.rvapi_add_text(" ETR: ", self.progress_grid_id(), 0, 1, 1, 1)
        pyrvapi.rvapi_add_label(self.etr_label_id(), self.progress_grid_id(),
                                "--:--:--", 0, 2, 1, 1)
        self.storeReportDocument(self.progress_bar_id() + ";" +
                                 self.etr_label_id())
    else:
        # superposition mode: write the superposed coordinates
        cmd += [
            "-o", self.gesamt_xyz(), "-o-cs",
            self.task.parameters.sec1.contains.MODE.value
        ]
        if nXYZ == 2:
            # pairwise alignment also produces per-domain output
            cmd += ["-domains"]
        self.putPanel(self.gesamt_report_id())
        self.storeReportDocument(
            self.gesamt_report_id())  # self.job_id.zfill(4) )

    # optional tuning parameters
    r0 = self.getParameter(self.task.parameters.sec1.contains.R0)
    if r0:
        cmd += ["-r0=" + r0]
    sigma = self.getParameter(self.task.parameters.sec1.contains.SIGMA)
    if sigma:
        cmd += ["-sigma=" + sigma]
    cmd += ["--rvapi-rdoc", self.reportDocumentName()]

    # run gesamt
    self.runApp("gesamt", cmd)

    if nXYZ < 2:  # PDB scan
        # the live progress widgets are no longer needed
        pyrvapi.rvapi_remove_widget(self.progress_grid_id())
        pyrvapi.rvapi_reset_task()
        pyrvapi.rvapi_flush()

        # gesamt writes the hit list as JSON only on success
        if os.path.isfile(self.gesamt_json()):
            hitlist = jsonut.readjObject(self.gesamt_json())

            # summary table describing the query structure
            pyrvapi.rvapi_add_table(self.query_table_id(), "Query structure",
                                    self.report_page_id(), self.rvrow,
                                    0, 1, 1, 0)
            pyrvapi.rvapi_put_horz_theader(self.query_table_id(), "Name",
                                           "Structure name", 0)
            pyrvapi.rvapi_put_horz_theader(
                self.query_table_id(), "Size",
                "Structure size in number of residues", 1)
            pyrvapi.rvapi_put_table_string(
                self.query_table_id(),
                hitlist.query.file + " (" + hitlist.query.selection + ")",
                0, 0)
            pyrvapi.rvapi_put_table_string(self.query_table_id(),
                                           hitlist.query.size, 0, 1)
            self.rvrow += 1
            self.putMessage("&nbsp;")

            querySize = float(hitlist.query.size)

            # hit count: columns[0].value is a list for many hits, a scalar
            # for a single hit, and may be absent for no hits; the count is
            # capped by the MAXHITS task parameter
            nColumns = len(hitlist.columns)
            if nColumns < 1 or not hasattr(hitlist.columns[0], "value"):
                nHits = 0
            elif type(hitlist.columns[0].value) is list:
                nHits = min(
                    len(hitlist.columns[0].value),
                    self.task.parameters.sec1.contains.MAXHITS.value)
            else:
                nHits = 1

            if nHits < 1:
                self.putTitle("No PDB matches found")
                self.putMessage(
                    "<i>Hint:</i> try to reduce report thresholds " +
                    "(ultimately down to 0) in order to see any hits;<br>" +
                    "doing so will increase computation time and report " +
                    "lower-quality (less relevant) matches.")
            else:
                # table of hits, one row per hit, one column per gesamt field
                self.putSection(self.hits_table_sec_id(), "PDB Hits Table",
                                False)
                pyrvapi.rvapi_add_table(self.hits_table_id(),
                                        "PDB hits found",
                                        self.hits_table_sec_id(),
                                        0, 0, 1, 1, 100)
                pyrvapi.rvapi_set_table_type(self.hits_table_id(), True, True)
                pyrvapi.rvapi_set_table_style(self.hits_table_id(), "",
                                              "text-align:center;")
                for j in range(nHits):
                    pyrvapi.rvapi_put_vert_theader(self.hits_table_id(),
                                                   str(j + 1), "Hit number",
                                                   j)
                    pyrvapi.rvapi_shape_vert_theader(
                        self.hits_table_id(), j, "text-align:right;", "",
                        1, 1)
                for i in range(nColumns):
                    column = hitlist.columns[i]
                    pyrvapi.rvapi_put_horz_theader(self.hits_table_id(),
                                                   column.title,
                                                   column.tooltip, i)
                    # per-column cell styling: monospace for the first
                    # (PDB code) column, smaller left-aligned text for the
                    # last (description) column
                    if i == 0:
                        td_css = "font-family:courier;"
                    elif i == nColumns - 1:
                        td_css = "text-align:left;font-size:80%;"
                        # NOTE(review): the header is reshaped only for the
                        # last column, not the first -- confirm intentional
                        pyrvapi.rvapi_shape_horz_theader(
                            self.hits_table_id(), i, td_css, "", 1, 1)
                    else:
                        td_css = ""
                    for j in range(nHits):
                        # scalar vs list access depends on hit count (see
                        # nHits computation above)
                        if nHits == 1:
                            pyrvapi.rvapi_put_table_string(
                                self.hits_table_id(), column.value, j, i)
                        else:
                            pyrvapi.rvapi_put_table_string(
                                self.hits_table_id(), column.value[j], j, i)
                        if td_css:
                            pyrvapi.rvapi_shape_table_cell(
                                self.hits_table_id(), j, i, "", td_css, "",
                                1, 1)
                pyrvapi.rvapi_add_button(
                    "hits_dnl_btn", "Export hit list", "{function}",
                    "window.parent.downloadJobFile(" + self.job_id +
                    ",'hits.txt')", False, self.hits_table_sec_id(),
                    1, 0, 1, 1)

                if nHits > 1:
                    # score profile and pairwise correlation plots
                    self.putSection(self.hits_graph_sec_id(), "Score Plots",
                                    False)
                    pyrvapi.rvapi_add_text("<h3>Alignment scores</h3>",
                                           self.hits_graph_sec_id(),
                                           0, 0, 1, 1)
                    pyrvapi.rvapi_add_graph(self.hits_graph_id(),
                                            self.hits_graph_sec_id(),
                                            1, 0, 1, 1)
                    pyrvapi.rvapi_set_graph_size(self.hits_graph_id(),
                                                 700, 400)
                    pyrvapi.rvapi_add_text(" <p><hr/>",
                                           self.hits_graph_sec_id(),
                                           2, 0, 1, 1)
                    pyrvapi.rvapi_add_text("<h3>Correlation plots</h3>",
                                           self.hits_graph_sec_id(),
                                           3, 0, 1, 1)
                    pyrvapi.rvapi_add_loggraph(self.corr_graph_id(),
                                               self.hits_graph_sec_id(),
                                               4, 0, 1, 1)

                    # both graphs share one dataset ("data") holding the
                    # same five per-hit series
                    pyrvapi.rvapi_add_graph_data("data",
                                                 self.hits_graph_id(),
                                                 "Scores")
                    pyrvapi.rvapi_add_graph_data("data",
                                                 self.corr_graph_id(),
                                                 "Score correlations")

                    def addDatasets(ref, name):
                        # register one named series in both graphs
                        pyrvapi.rvapi_add_graph_dataset(
                            ref, "data", self.hits_graph_id(), name, name)
                        pyrvapi.rvapi_add_graph_dataset(
                            ref, "data", self.corr_graph_id(), name, name)
                        return

                    addDatasets("hno", "Hit number")
                    addDatasets("qscore", "Q-score")
                    addDatasets("rmsd", "R.m.s.d.")
                    addDatasets("nalign", "Nalign/n0")
                    addDatasets("seqid", "Seq. Id.")

                    def addData(ref, value):
                        # append one real value to a series in both graphs
                        pyrvapi.rvapi_add_graph_real(
                            ref, "data", self.hits_graph_id(), value, "%g")
                        pyrvapi.rvapi_add_graph_real(
                            ref, "data", self.corr_graph_id(), value, "%g")
                        return

                    # assumes gesamt's JSON column order is:
                    # [2]=Q-score, [3]=r.m.s.d., [4]=Nalign, [5]=seq. id.
                    # -- TODO confirm against gesamt output
                    for j in range(nHits):
                        pyrvapi.rvapi_add_graph_int(
                            "hno", "data", self.hits_graph_id(), j)
                        addData("qscore",
                                float(hitlist.columns[2].value[j]))
                        addData("rmsd", float(hitlist.columns[3].value[j]))
                        addData(
                            "nalign",
                            float(hitlist.columns[4].value[j]) / querySize)
                        addData("seqid",
                                float(hitlist.columns[5].value[j]))

                    # score-vs-hit-number profile plot
                    pyrvapi.rvapi_add_graph_plot("plot",
                                                 self.hits_graph_id(),
                                                 "Score profiles",
                                                 "Hit number", "Scores")

                    def addLine(xset, yset, color):
                        # one solid line per score series in the profile plot
                        pyrvapi.rvapi_add_plot_line(
                            "plot", "data", self.hits_graph_id(), xset, yset)
                        pyrvapi.rvapi_set_line_options(
                            yset, "plot", "data", self.hits_graph_id(),
                            color, "solid", "off", 2.5, True)
                        return

                    addLine("hno", "qscore", "#00008B")
                    addLine("hno", "rmsd", "#8B0000")
                    addLine("hno", "nalign", "#8B8B00")
                    addLine("hno", "seqid", "#008B00")
                    pyrvapi.rvapi_set_plot_legend("plot",
                                                  self.hits_graph_id(),
                                                  "e", "")

                    def addPlot(plotId, name, xname, yname, xset, yset,
                                color):
                        # one score-vs-score scatter plot in the loggraph
                        pyrvapi.rvapi_add_graph_plot(
                            plotId, self.corr_graph_id(), name, xname, yname)
                        pyrvapi.rvapi_add_plot_line(
                            plotId, "data", self.corr_graph_id(), xset, yset)
                        pyrvapi.rvapi_set_line_options(
                            yset, plotId, "data", self.corr_graph_id(),
                            color, "off", "filledCircle", 2.5, True)
                        return

                    addPlot("p1", "R.m.s.d. vs Seq. Id", "Seq. Id",
                            "R.m.s.d.", "seqid", "rmsd", "#8B0000")
                    addPlot("p2", "R.m.s.d. vs Q-score", "Q-score",
                            "R.m.s.d.", "qscore", "rmsd", "#8B0000")
                    addPlot("p3", "R.m.s.d. vs Nalign",
                            "Normalised alignment length", "R.m.s.d.",
                            "nalign", "rmsd", "#8B0000")
                    addPlot("p4", "Seq. Id. vs Q-score", "Q-score",
                            "Seq. Id.", "qscore", "seqid", "#008B00")
                    addPlot("p5", "Seq. Id. vs Nalign",
                            "Normalised alignment length", "Seq. Id.",
                            "nalign", "seqid", "#008B00")
                    addPlot("p6", "Nalign vs. Q-score", "Q-score",
                            "Normalised alignment length", "qscore",
                            "nalign", "#8B8B00")
        else:
            self.putTitle("No PDB matches found")

    else:  # pairwise or multiple alignment
        self.rvrow += 1
        if nXYZ == 2:
            # pairwise run: gesamt reported its output files through the
            # rvapi document (one path per line)
            outFiles = self.restoreReportDocument().split("\n")
        elif nXYZ > 2:
            # multiple alignment: single superposed ensemble file
            outFiles = [self.gesamt_xyz()]

        if len(outFiles) > 0:
            self.putTitle("Gesamt Output")
            # register output data from temporary location (files will be moved
            # to output directory by the registration procedure)
            ensemble = self.registerEnsemble(
                dtype_template.subtypeProtein(), outFiles[0])
            if ensemble:
                self.putEnsembleWidget("ensemble_btn",
                                       "Superposed ensemble&nbsp;",
                                       ensemble, -1)
            # remaining files are per-domain superpositions
            # NOTE(review): range stops at len(outFiles) - 1, so the last
            # entry is never registered -- confirm this is intentional
            for i in range(1, len(outFiles) - 1):
                self.rvrow += 1
                ensemble = self.registerEnsemble(
                    dtype_template.subtypeProtein(), outFiles[i])
                if ensemble:
                    self.putEnsembleWidget("ensemble_" + str(i) + "_btn",
                                           "Superposed domain #" + str(i),
                                           ensemble, -1)
        else:
            self.putTitle("No Output Files Generated")

    # close execution logs and quit
    self.success()
    return