def fill_table(self, table_id, tdata, tooltips={}):
    # Make column headers
    for i in range(len(tdata[0])):
        # Skip name as it's the row header
        h = tdata[0][i]
        tt = tooltips[h] if h in tooltips else ""
        pyrvapi.rvapi_put_horz_theader(table_id, h.encode('utf-8'), tt, i)
    # Add table data
    for i in range(1, len(tdata)):
        for j in range(len(tdata[i])):
            pyrvapi.rvapi_put_table_string(table_id, str(tdata[i][j]), i - 1, j)
    # Now colour the ensemble name cells
    for i in range(len(tdata) - 1):
        pyrvapi.rvapi_shape_table_cell(table_id,       # tableId
                                       i,              # row
                                       0,              # column
                                       "",             # tooltip
                                       "",             # cell_css
                                       # "table-blue-vh",  # cell_style
                                       "table-blue",   # cell_style
                                       1,              # rowSpan
                                       1)              # colSpan
    return
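A minimal sketch of how fill_table might be driven, assuming the method is available on a reporting object (self) and that a report holder identified by holder_id already exists; the table id, data and tooltip text below are illustrative only, not taken from the original project.

# Illustrative driver only: table_id, holder_id, tdata and tooltips are assumptions.
table_id = "ensemble_table"
pyrvapi.rvapi_add_table(table_id, "Ensembles", holder_id, 0, 0, 1, 1, 1)

tdata = [
    ["Name", "Residues", "Score"],   # header row consumed by rvapi_put_horz_theader
    ["ens_1", "120", "0.83"],        # data rows written with rvapi_put_table_string
    ["ens_2", "98", "0.71"],
]
tooltips = {"Score": "Estimated quality of the ensemble"}
self.fill_table(table_id, tdata, tooltips)
pyrvapi.rvapi_flush()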
def _refresh_table(self):
    identifier = self._identifier.split("/")[1]
    if self._column_sort_index:
        self._sort_table_content_by_index()
    for i, row in enumerate(self._content):
        for j, cell_content in enumerate(row):
            pyrvapi.rvapi_put_table_string(identifier, str(cell_content), i, j)
def create_table(self, df, table_id):
    """Function to create/display tables

    Parameters
    ----------
    df : :obj:`~pandas.DataFrame`
       Input :obj:`~pandas.DataFrame` containing data to be plotted
    table_id : str
       Table ID

    Returns
    -------
    object
       table containing the results from SIMBAD
    """
    num_labels = 0
    for i, l in enumerate(df):
        if i == 0:
            pyrvapi.rvapi_put_horz_theader(table_id, "PDB_code",
                                           self._simbad_tooltips["PDB_code"], 0)
        else:
            pyrvapi.rvapi_put_horz_theader(table_id, l, self._simbad_tooltips[l], i)
            num_labels = i

    ir = len(df)
    for i in range(0, ir):
        for j in range(num_labels + 1):
            if j == 0:
                pyrvapi.rvapi_put_table_string(
                    table_id,
                    '<a href="http://www.ebi.ac.uk/pdbe/entry/pdb/{0}" '
                    'target="_blank">{1}</a>'.format(df.loc[i][j][0:4], df.loc[i][j]),
                    i, j)
            else:
                pyrvapi.rvapi_put_table_string(table_id, str(df.loc[i][j]), i, j)
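A hedged usage sketch for create_table, assuming self is the SIMBAD output object that provides the method and the tooltip dictionary; the column names, values and table/holder ids are illustrative, and the example targets the older positional pandas indexing the function above relies on.

import pandas as pd

# Illustrative data only; real SIMBAD results carry more columns.
df = pd.DataFrame({
    "PDB_code": ["1abc_A", "2xyz_B"],
    "final_r_fact": [0.32, 0.41],
    "final_r_free": [0.36, 0.45],
})
# create_table expects a tooltip entry for every column header it writes.
self._simbad_tooltips = {
    "PDB_code": "PDB entry of the search model",
    "final_r_fact": "R-factor after refinement",
    "final_r_free": "R-free after refinement",
}
pyrvapi.rvapi_add_table("lattice_table", "Lattice search results",
                        "results_tab", 0, 0, 1, 1, 1)
self.create_table(df, "lattice_table")
pyrvapi.rvapi_flush()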
def makeRow ( name,parameter,row ):
    pyrvapi.rvapi_put_vert_theader ( tableId,name,parameter.tooltip,row )
    pyrvapi.rvapi_put_table_string ( tableId,parameter.type,row,0 )
    pyrvapi.rvapi_put_table_string ( tableId,type(parameter.value).__name__,row,1 )
    pyrvapi.rvapi_put_table_string ( tableId,parameter.label,row,2 )
    pyrvapi.rvapi_put_table_string ( tableId,str(parameter.visible),row,3 )
    pyrvapi.rvapi_put_table_string ( tableId,str(parameter.value),row,4 )
    return
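makeRow assumes a table named tableId with five data columns already exists in the enclosing scope. A sketch of that setup follows; the table id, holder id and header labels are illustrative assumptions, not taken from the original project.

# Assumed setup for makeRow above; ids and header labels are illustrative.
tableId = "parameters_table"
pyrvapi.rvapi_add_table(tableId, "Task parameters", "report_page", 0, 0, 1, 1, 0)
for col, (label, tooltip) in enumerate([
        ("Type", "Parameter widget type"),
        ("Value type", "Python type of the stored value"),
        ("Label", "Label shown in the task dialog"),
        ("Visible", "Whether the parameter is shown"),
        ("Value", "Current value"),
]):
    pyrvapi.rvapi_put_horz_theader(tableId, label, tooltip, col)

# makeRow(name, parameter, row) then fills one row per parameter object.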
def fill_table(self, table_id, tdata, tooltips={}):
    # Make column headers
    for i in range(len(tdata[0])):
        # Skip name as it's the row header
        h = tdata[0][i]
        tt = tooltips[h] if h in tooltips else ""
        pyrvapi.rvapi_put_horz_theader(table_id, h.encode('utf-8'), tt, i)
    # Add table data
    for i in range(1, len(tdata)):
        for j in range(len(tdata[i])):
            pyrvapi.rvapi_put_table_string(table_id, str(tdata[i][j]), i - 1, j)
    # REM - can use pyrvapi.rvapi_shape_table_cell to format cells if required
    return
def run(body):  # body is reference to the main Import class

    files_map = []
    for f in body.files_all:
        fl = f.lower()
        if fl.endswith(('.map', '.mrc')):
            files_map.append(f)

    if len(files_map) <= 0:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%%  Map volume data\n")
    body.file_stdout.write("%" * 80 + "\n")

    mapSecId = "map_sec_" + str(body.widget_no)
    body.widget_no += 1
    pyrvapi.rvapi_add_section(mapSecId, "Map", body.report_page_id(),
                              body.rvrow, 0, 1, 1, False)

    k = 0
    for f in files_map:
        body.files_all.remove(f)
        pyrvapi.rvapi_put_table_string(body.import_summary_id(), f, body.summary_row, 0)

        fpath = os.path.join(body.importDir(), f)
        with mrcfile.mmap(fpath) as mrc:
            msg = "MAP {0} x {1} x {2}".format(mrc.header.nx, mrc.header.ny, mrc.header.nz)
        pyrvapi.rvapi_put_table_string(body.import_summary_id(), msg, body.summary_row, 1)

        map_ = dtype_map.DType(body.job_id)
        map_.subtype = ['final_map']
        map_.setFile(f)
        body.dataSerialNo += 1
        map_.makeDName(body.dataSerialNo)
        body.outputDataBox.add_data(map_)

        # Essential to rename uploaded file to put it in output directory
        # Might be better to use the standard register() method instead if possible?
        # Currently the file ends up remaining in the upload directory on the front end,
        # even though it's removed on the number cruncher...
        os.rename(fpath, os.path.join(body.outputDir(), f))

        body.file_stdout.write("... processed: " + f + "\n")
        k += 1

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
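A self-contained sketch of the mrcfile header read used above, outside the import framework; the file path is a placeholder.

import mrcfile

# Placeholder path; any CCP4/MRC format map file will do.
map_path = "example.mrc"
# mmap avoids loading the whole volume just to inspect the header.
with mrcfile.mmap(map_path) as mrc:
    nx, ny, nz = int(mrc.header.nx), int(mrc.header.ny), int(mrc.header.nz)
print("MAP {0} x {1} x {2}".format(nx, ny, nz))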
def makeTable(tableDict, tableId, holderId, row, col, rowSpan, colSpan):
    #
    # Table dictionary example:
    #
    # { title: "Table Title",        # empty string by default
    #   state: 0,                    # -1,0,1, -100,100
    #   class: "table-blue",         # "table-blue" by default
    #   css  : "text-align:right;",  # "text-align:right;" by default
    #   horzHeaders : [  # either empty list or full header structures for all columns
    #     { label: "Size"  , tooltip: "" },
    #     { label: "Weight", tooltip: "" },
    #     .....
    #   ],
    #   rows : [
    #     { header: { label: "1st row", tooltip: "" },  # header may be missing
    #       data  : [ "string1", "string2", ... ]
    #     },
    #     ......
    #   ]
    # }
    #
    pyrvapi.rvapi_add_table(tableId, __get_item("title", tableDict, ""), holderId,
                            row, col, rowSpan, colSpan,
                            __get_item("state", tableDict, 0))
    if ("class" in tableDict) or ("css" in tableDict):
        pyrvapi.rvapi_set_table_style(tableId,
                                      __get_item("class", tableDict, "table-blue"),
                                      __get_item("css", tableDict, "text-align:right;"))
    if "horzHeaders" in tableDict:
        for i in range(len(tableDict["horzHeaders"])):
            header = tableDict["horzHeaders"][i]
            pyrvapi.rvapi_put_horz_theader(tableId, header["label"], header["tooltip"], i)
    if "rows" in tableDict:
        for i in range(len(tableDict["rows"])):
            trow = tableDict["rows"][i]
            if "header" in trow:
                pyrvapi.rvapi_put_vert_theader(tableId, trow["header"]["label"],
                                               trow["header"]["tooltip"], i)
            data = trow["data"]
            for j in range(len(data)):
                pyrvapi.rvapi_put_table_string(tableId, data[j], i, j)
    return
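A usage sketch for makeTable, following the dictionary layout documented in the comment above; the holder id "report_page" and the table contents are illustrative assumptions.

# Illustrative tableDict following the documented structure.
tableDict = {
    "title": "Refinement summary",
    "state": 0,
    "class": "table-blue",
    "css": "text-align:right;",
    "horzHeaders": [
        {"label": "R-work", "tooltip": "Working-set R factor"},
        {"label": "R-free", "tooltip": "Free-set R factor"},
    ],
    "rows": [
        {"header": {"label": "Start", "tooltip": ""}, "data": ["0.42", "0.45"]},
        {"header": {"label": "Final", "tooltip": ""}, "data": ["0.21", "0.25"]},
    ],
}
makeTable(tableDict, "refine_table", "report_page", 0, 0, 1, 1)
pyrvapi.rvapi_flush()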
def run(self):

    # Prepare seqalign input

    # fetch input data
    seq = self.input_data.data.seq

    seqfile = open ( self.file_seq_path(),'wb' )
    nseq    = 0   # number of sequences
    smap    = {}  # map of sequence names
    seqtype = ""  # for checking sequence types
    for s in seq:
        s1 = self.makeClass ( s )
        if s1._type=='DataSequence':
            nseq += 1
            seqname = "s" + str(nseq).zfill(3)
            smap[seqname] = {}
            smap[seqname]["name" ] = s1.dname
            smap[seqname]["align"] = ""
            seqfile.write ( "\n>" + seqname + "\n" +
                            s1.getSequence(self.inputDir()) + "\n" )
            stype = s1.getType()
            if not seqtype:
                seqtype = stype
            elif seqtype!=stype:
                seqtype = "x"
        else:
            chains = s1.xyzmeta.xyz[0].chains
            for c in chains:
                if s1.chainSel=="(all)" or s1.chainSel==c.id:
                    nseq += 1
                    seqname = "s" + str(nseq).zfill(3)
                    smap[seqname] = {}
                    smap[seqname]["name" ] = s1.dname + ":" + c.id
                    smap[seqname]["align"] = ""
                    seqfile.write ( "\n>" + seqname + "\n" + c.seq + "\n" )
                    stype = c.type.lower()
                    if not seqtype:
                        seqtype = stype
                    elif seqtype!=stype:
                        seqtype = "x"
    seqfile.close()

    if nseq<2:
        self.putTitle   ( "Input Error" )
        self.putMessage ( "Number of sequences is less than 2" )
    elif seqtype=="" or seqtype=="x":
        self.putTitle   ( "Input Error" )
        self.putMessage ( "Inconsistent sequence types (mixed protein, dna and rna)" )
    else:
        if seqtype!="protein":
            seqtype = "dna"
        cmd = [ self.file_seq_path(),"-type="+seqtype,"-stats="+self.file_stat_path() ]

        # Start clustalw2
        self.runApp ( os.path.join(os.environ["CCP4"],"libexec","clustalw2"),cmd )

        # check solution file and display results
        if os.path.isfile(self.file_aln_path()) and os.path.isfile(self.file_stat_path()):

            len_max = "0"
            len_min = "0"
            len_avg = "0"
            len_dev = "0"
            len_med = "0"
            id_max  = "0"
            id_min  = "0"
            id_avg  = "0"
            id_dev  = "0"
            id_med  = "0"
            lines = [line.rstrip('\n') for line in open(self.file_stat_path(),'r')]
            for l in lines:
                w = l.split(" ")
                if w[0]=="seqlen":
                    if w[1]=="longest:" : len_max = w[2]
                    if w[1]=="shortest:": len_min = w[2]
                    if w[1]=="avg:"     : len_avg = w[2]
                    if w[1]=="std-dev:" : len_dev = w[2]
                    if w[1]=="median:"  : len_med = w[2]
                if w[0]=="aln":
                    if w[1]=="pw-id":
                        if w[2]=="highest:": id_max = w[3]
                        if w[2]=="lowest:" : id_min = w[3]
                        if w[2]=="avg:"    : id_avg = w[3]
                        if w[2]=="std-dev:": id_dev = w[3]
                        if w[2]=="median:" : id_med = w[3]

            lines = [line.rstrip('\n') for line in open(self.file_aln_path(),'r')]
            smap["align"] = ""
            i = 0
            while i < len(lines):
                if lines[i].startswith("s"):
                    for j in range(nseq):
                        smap[lines[i][:4]]["align"] += lines[i][16:]
                        i += 1
                    smap["align"] += lines[i][16:]
                i += 1

            len_cmb = len(smap["align"])
            id_cmb  = 0
            for c in smap["align"]:
                if c=="*":
                    id_cmb += 1
            if float(len_avg)>0:
                id_cmb = id_cmb/float(len_avg)
            id_cmb = "%.2f" % id_cmb

            tableId = "stat_table"
            self.putTable ( tableId,"<span style='font-size:1.25em'>" +
                                    "Alignment statistics</span>",
                            self.report_page_id(),self.rvrow,0 )
            self.setTableHorzHeaders ( tableId,["Length","Score"],
                                       ["Sequence length","Sequence ID"] )
            self.putTableLine2 ( tableId,"Highest" ,len_max,id_max,0 )
            self.putTableLine2 ( tableId,"Lowest"  ,len_min,id_min,1 )
            self.putTableLine2 ( tableId,"Average" ,len_avg,id_avg,2 )
            self.putTableLine2 ( tableId,"Std-Dev" ,len_dev,id_dev,3 )
            self.putTableLine2 ( tableId,"Median"  ,len_med,id_med,4 )
            self.putTableLine2 ( tableId,"Combined",str(len_cmb),
                                 "<b>"+id_cmb+"%</b>",5 )
            self.rvrow += 2
            self.putMessage ( " " )

            tableId = "align_table"
            self.putTable ( tableId,"<span style='font-size:1.25em'>" +
                                    "Aligned sequences</span>",
                            self.report_page_id(),self.rvrow,100 )
            self.setTableHorzHeaders ( tableId,["Sequence","Alignment"],
                                       ["Sequence","Alignment"] )
            for r in range(nseq):
                sname = "s" + str(r+1).zfill(3)
                self.putTableLine ( tableId,str(r+1),"",smap[sname]["name"],r )
                pyrvapi.rvapi_put_table_string ( tableId,
                        self.seqHTML(smap[sname]["align"],smap["align"]),r,1 )
                pyrvapi.rvapi_shape_table_cell ( tableId,r,1,"",
                        "background:black;color:yellow","",1,1 )

        else:
            self.putTitle ( "Alignment was not generated" )

    # close execution logs and quit
    self.success()

    return
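A hypothetical illustration of the alignment-file layout the parsing loop above assumes: each block holds one line per sequence (name in the first four characters, alignment text from column 16) followed by a conservation line. The column positions and sample content below are assumptions made for illustration only.

# Illustrative only; real clustalw2 .aln output may differ in padding.
sample_lines = [
    "s001            MKT-AILVA",
    "s002            MKTSAILVA",
    "                *** *****",
]
for line in sample_lines:
    name = line[:4].strip()
    print(name if name else "cons", line[16:])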
def putTableLine2 ( self,tableId,header,v1,v2,row ):
    self.putTableLine ( tableId,header,"",v1,row )
    pyrvapi.rvapi_put_table_string ( tableId,v2,row,1 )
    return
def write_output(items, json_file=None, xml_file=None, xmlroot=None, docid=None, output=None):
    # in non-i2 mode items are added to the output dictionary which is dumped to json
    if json_file is not None:
        if 'result' in items:
            result = items['result']
            for solution in output['solutions']:
                if solution['id'] == result['id']:
                    solution.update({'acornCC': result['acornCC']})
        else:
            output.update(items)
        temp_filename = json_file + '.tmp'
        with open(temp_filename, 'w') as jsonfile:
            print(json.dumps(output, sort_keys=True, indent=2, separators=(',', ': ')),
                  file=jsonfile)
        if os.path.exists(json_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(json_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, json_file)
        return output
    elif xmlroot is None and xml_file is not None:
        xmlroot = etree.Element('Fragon')
        return xmlroot
    elif docid is None:
        jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
        pyrvapi.rvapi_init_document('fragon_results', os.getcwd(),
                                    'Fragon %s results' % items['Fragon'],
                                    1, 7, jsrview_dir, None, None, None, None)
        pyrvapi.rvapi_add_tab('tab1', 'Fragon results', True)
        pyrvapi.rvapi_add_section('status', 'Current status', 'tab1', 0, 0, 1, 1, True)
        pyrvapi.rvapi_add_text(
            'The job is currently running. Updates will be shown here after '
            'fragment placement and density modification.',
            'status', 0, 0, 1, 1)
        pyrvapi.rvapi_flush()
        output.update(items)
        return 'tab1', output
    elif xml_file is not None:
        # in i2 mode new items are added to the etree as this preserves the order in the xml
        for key in items:
            if key == 'Fragon':
                version_node = etree.SubElement(xmlroot, 'Version')
                version_node.text = output['Fragon']
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    try:
                        progress_node = xmlroot.xpath('//Fragon/phaser_progress')[0]
                    except IndexError:
                        progress_node = etree.SubElement(xmlroot, 'phaser_progress')
                    progress_node.text = callback[1]
                elif callback[0] == 'Best LLG/TFZ':
                    best_llg_node = etree.SubElement(xmlroot, 'best_llg')
                    best_llg_node.text = callback[1]['llg']
                    best_tfz_node = etree.SubElement(xmlroot, 'best_tfz')
                    best_tfz_node.text = callback[1]['tfz']
            elif key == 'solutions':
                solutions = items['solutions']
                try:
                    solutions_node = xmlroot.xpath('//Fragon/solutions')[0]
                except IndexError:
                    solutions_node = etree.SubElement(xmlroot, 'solutions')
                if len(solutions) > 0:
                    solutions_node.text = json.dumps(solutions)
            else:
                node = etree.SubElement(xmlroot, key)
                node.text = items[key].__str__()
        temp_filename = 'program.xml.tmp'
        with open(temp_filename, 'w') as xmlfile:
            xmlfile.write(etree.tostring(xmlroot, pretty_print=True))
        if os.path.exists(xml_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(xml_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, xml_file)
    elif docid is not None:
        for key in items:
            if key == 'copies':
                if items['copies'] > 1:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place %d fragments' % items['copies'],
                        'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('Running Phaser to place the fragment',
                                           'status', 0, 0, 1, 1)
                pyrvapi.rvapi_add_tab('tab2', 'Phaser log file', False)
                pyrvapi.rvapi_append_content(output['root'] + '_Phaser.log', True, 'tab2')
                pyrvapi.rvapi_flush()
                output.update(items)
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    pyrvapi.rvapi_set_text('Current Phaser stage: %s' % callback[1],
                                           'status', 1, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best LLG':
                    pyrvapi.rvapi_set_text(
                        'Current best solution Log Likelihood Gain (LLG): %s '
                        'Translation Function Z-score (TFZ): %s'
                        % (callback[1], output['best_tfz']),
                        'status', 2, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best TFZ':
                    output.update({'best_tfz': callback[1]})
            elif key == 'solutions':
                solutions = items['solutions']
                top_llg = sorted(solutions, key=lambda r: r['llg'], reverse=True)[0]['llg']
                top_tfz = sorted(solutions, key=lambda r: r['llg'], reverse=True)[0]['tfz']
                top_acornCC = sorted(
                    [solution['acornCC']
                     if solution['acornCC'] not in ['Running', '-', None] else None
                     for solution in solutions],
                    reverse=True)[0]
                if len(solutions) == 1:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found a single solution with Log Likelihood Gain (LLG) '
                        'of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                        % (top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found %d solutions. The top solution has Log Likelihood '
                        'Gain (LLG) of %0.2f and Translation Function Z-score (TF Z-score) '
                        'of %0.2f'
                        % (output['num_phaser_solutions'], top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                if output['num_phaser_solutions'] > len(solutions):
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases for the top %d solutions by density '
                        'modification with ACORN' % len(solutions),
                        'status', 1, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases by density modification with ACORN',
                        'status', 1, 0, 1, 1)
                if top_acornCC is not None:
                    pyrvapi.rvapi_set_text(
                        'The best solution so far has a correlation coefficient from density '
                        'modification of %0.3f' % top_acornCC,
                        'status', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('', 'status', 2, 0, 1, 1)
                pyrvapi.rvapi_add_table('results_table', 'Phaser solutions', 'tab1',
                                        1, 0, 1, 1, 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'Solution number', '', 0)
                pyrvapi.rvapi_put_horz_theader('results_table', 'Space group', '', 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'LLG',
                                               'Phaser Log Likelihood Gain', 2)
                pyrvapi.rvapi_put_horz_theader('results_table', 'TF Z-score',
                                               'Phaser Translation Function Z-score', 3)
                pyrvapi.rvapi_put_horz_theader('results_table', 'CC',
                                               'CC from ACORN density modification', 4)
                for solution in solutions:
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%d' % solution['number'],
                                                   solution['number'] - 1, 0)
                    pyrvapi.rvapi_put_table_string('results_table', solution['sg'],
                                                   solution['number'] - 1, 1)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['llg'],
                                                   solution['number'] - 1, 2)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['tfz'],
                                                   solution['number'] - 1, 3)
                    if solution['acornCC'] in ['Running', '-']:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', solution['acornCC'].replace('-', ''),
                            solution['number'] - 1, 4)
                    elif solution['acornCC'] is None:
                        pyrvapi.rvapi_put_table_string('results_table', 'Not tested',
                                                       solution['number'] - 1, 4)
                    else:
                        pyrvapi.rvapi_put_table_string('results_table',
                                                       '%0.3f' % solution['acornCC'],
                                                       solution['number'] - 1, 4)
                output.update(items)
                pyrvapi.rvapi_flush()
            elif key == 'cc_best':
                solutions = output['solutions']
                top_llg = sorted(solutions, key=lambda r: r['llg'], reverse=True)[0]['llg']
                top_tfz = sorted(solutions, key=lambda r: r['llg'], reverse=True)[0]['tfz']
                top_acornCC = sorted(
                    [solution['acornCC']
                     if solution['acornCC'] not in ['Running', '-', None] else None
                     for solution in solutions],
                    reverse=True)[0]
                pyrvapi.rvapi_set_section_state('status', False)
                pyrvapi.rvapi_add_section('results', 'Results', 'tab1', 2, 0, 1, 1, True)
                pyrvapi.rvapi_add_text(
                    'Phaser found %d solutions. The top solution had Log Likelihood Gain '
                    '(LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                    % (output['num_phaser_solutions'], top_llg, top_tfz),
                    'results', 0, 0, 1, 1)
                pyrvapi.rvapi_add_text(
                    'The best solution has a correlation coefficient from density '
                    'modification of %0.3f' % top_acornCC,
                    'results', 1, 0, 1, 1)
                if top_acornCC > 0.15:
                    pyrvapi.rvapi_add_text(
                        'This suggests the structure has been solved and the phases from '
                        'ACORN will enable automated model building',
                        'results', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_add_text('Sorry this does not suggest a solution',
                                           'results', 3, 0, 1, 1)
                pyrvapi.rvapi_flush()
            elif key == 'best_solution_id':
                pdbout = output['name'] + '_phaser_solution.pdb'
                mtzout = output['name'] + '_acorn_phases.mtz'
                pyrvapi.rvapi_add_data('best',
                                       'Best fragment placement and electron density',
                                       pdbout, 'xyz', 'tab1', 3, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data('best', mtzout, 'hkl:map')
            else:
                output.update(items)
    return output
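The JSON and XML writes above follow a write-temp-then-swap pattern so a concurrent reader never sees a half-written report. A minimal sketch of the same idea using os.replace, which is an assumption of mine and not the function's own uuid-based shuffle:

import json
import os

def atomic_json_dump(data, json_file):
    # Write to a temporary file first, then atomically swap it into place.
    temp_filename = json_file + '.tmp'
    with open(temp_filename, 'w') as handle:
        json.dump(data, handle, sort_keys=True, indent=2, separators=(',', ': '))
    os.replace(temp_filename, json_file)  # atomic rename on the same filesystem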
def run(self):

    # Prepare gesamt job

    # Just in case (of repeated run) remove the output xyz file. When gesamt
    # succeeds, this file is created.
    if os.path.isfile(self.gesamt_xyz()):
        os.remove(self.gesamt_xyz())
    if os.path.isfile(self.gesamt_json()):
        os.remove(self.gesamt_json())

    # Prepare gesamt input

    # fetch input data
    xyz = self.input_data.data.xyz
    nXYZ = len(xyz)

    # make command-line parameters
    cmd = []
    for i in range(nXYZ):
        cmd += [os.path.join(self.inputDir(), xyz[i].files[0]), "-s", xyz[i].chainSel]

    if nXYZ < 2:
        if not "GESAMT_ARCHIVE" in os.environ:
            self.fail("<b> *** Error: jsCofe is not configured to work " +
                      "with GESAMT Archive</b><br>" +
                      "<i> Please look for support</i><br>",
                      "No GESAMT Archive configured")

        cmd += ["-archive", os.environ["GESAMT_ARCHIVE"], "-nthreads=auto",
                "-min1=" + self.getParameter(self.task.parameters.sec1.contains.MIN1),
                "-min2=" + self.getParameter(self.task.parameters.sec1.contains.MIN2),
                "-trim-size=1",
                "-trim-Q=" + self.getParameter(self.task.parameters.sec1.contains.QSCORE),
                "--json", self.gesamt_json()]

        self.rvrow += 1
        pyrvapi.rvapi_add_grid(self.progress_grid_id(), False,
                               self.report_page_id(), self.rvrow, 0, 1, 1)
        pyrvapi.rvapi_add_progress_bar(self.progress_bar_id(),
                                       self.progress_grid_id(), 0, 0, 1, 1)
        pyrvapi.rvapi_add_text(" ETR: ", self.progress_grid_id(), 0, 1, 1, 1)
        pyrvapi.rvapi_add_label(self.etr_label_id(), self.progress_grid_id(),
                                "--:--:--", 0, 2, 1, 1)
        self.storeReportDocument(self.progress_bar_id() + ";" + self.etr_label_id())

    else:
        cmd += ["-o", self.gesamt_xyz(), "-o-cs",
                self.task.parameters.sec1.contains.MODE.value]
        if nXYZ == 2:
            cmd += ["-domains"]
        self.putPanel(self.gesamt_report_id())
        self.storeReportDocument(self.gesamt_report_id())  # self.job_id.zfill(4) )

    r0 = self.getParameter(self.task.parameters.sec1.contains.R0)
    if r0:
        cmd += ["-r0=" + r0]

    sigma = self.getParameter(self.task.parameters.sec1.contains.SIGMA)
    if sigma:
        cmd += ["-sigma=" + sigma]

    cmd += ["--rvapi-rdoc", self.reportDocumentName()]

    # run gesamt
    self.runApp("gesamt", cmd)

    if nXYZ < 2:  # PDB scan

        pyrvapi.rvapi_remove_widget(self.progress_grid_id())
        pyrvapi.rvapi_reset_task()
        pyrvapi.rvapi_flush()

        if os.path.isfile(self.gesamt_json()):

            hitlist = jsonut.readjObject(self.gesamt_json())

            pyrvapi.rvapi_add_table(self.query_table_id(), "Query structure",
                                    self.report_page_id(), self.rvrow, 0, 1, 1, 0)
            pyrvapi.rvapi_put_horz_theader(self.query_table_id(), "Name",
                                           "Structure name", 0)
            pyrvapi.rvapi_put_horz_theader(self.query_table_id(), "Size",
                                           "Structure size in number of residues", 1)
            pyrvapi.rvapi_put_table_string(
                self.query_table_id(),
                hitlist.query.file + " (" + hitlist.query.selection + ")", 0, 0)
            pyrvapi.rvapi_put_table_string(self.query_table_id(),
                                           hitlist.query.size, 0, 1)
            self.rvrow += 1
            self.putMessage(" ")

            querySize = float(hitlist.query.size)

            nColumns = len(hitlist.columns)
            if nColumns < 1 or not hasattr(hitlist.columns[0], "value"):
                nHits = 0
            elif type(hitlist.columns[0].value) is list:
                nHits = min(len(hitlist.columns[0].value),
                            self.task.parameters.sec1.contains.MAXHITS.value)
            else:
                nHits = 1

            if nHits < 1:
                self.putTitle("No PDB matches found")
                self.putMessage("<i>Hint:</i> try to reduce report thresholds " +
                                "(ultimately down to 0) in order to see any hits;<br>" +
                                "doing so will increase computation time and report " +
                                "lower-quality (less relevant) matches.")
            else:
                self.putSection(self.hits_table_sec_id(), "PDB Hits Table", False)

                pyrvapi.rvapi_add_table(self.hits_table_id(), "PDB hits found",
                                        self.hits_table_sec_id(), 0, 0, 1, 1, 100)
                pyrvapi.rvapi_set_table_type(self.hits_table_id(), True, True)
                pyrvapi.rvapi_set_table_style(self.hits_table_id(), "",
                                              "text-align:center;")
                for j in range(nHits):
                    pyrvapi.rvapi_put_vert_theader(self.hits_table_id(), str(j + 1),
                                                   "Hit number", j)
                    pyrvapi.rvapi_shape_vert_theader(self.hits_table_id(), j,
                                                     "text-align:right;", "", 1, 1)

                for i in range(nColumns):
                    column = hitlist.columns[i]
                    pyrvapi.rvapi_put_horz_theader(self.hits_table_id(), column.title,
                                                   column.tooltip, i)
                    if i == 0:
                        td_css = "font-family:courier;"
                    elif i == nColumns - 1:
                        td_css = "text-align:left;font-size:80%;"
                        pyrvapi.rvapi_shape_horz_theader(self.hits_table_id(), i,
                                                         td_css, "", 1, 1)
                    else:
                        td_css = ""
                    for j in range(nHits):
                        if nHits == 1:
                            pyrvapi.rvapi_put_table_string(self.hits_table_id(),
                                                           column.value, j, i)
                        else:
                            pyrvapi.rvapi_put_table_string(self.hits_table_id(),
                                                           column.value[j], j, i)
                        if td_css:
                            pyrvapi.rvapi_shape_table_cell(self.hits_table_id(), j, i,
                                                           "", td_css, "", 1, 1)

                pyrvapi.rvapi_add_button("hits_dnl_btn", "Export hit list", "{function}",
                                         "window.parent.downloadJobFile(" + self.job_id +
                                         ",'hits.txt')", False,
                                         self.hits_table_sec_id(), 1, 0, 1, 1)

                if nHits > 1:

                    self.putSection(self.hits_graph_sec_id(), "Score Plots", False)

                    pyrvapi.rvapi_add_text("<h3>Alignment scores</h3>",
                                           self.hits_graph_sec_id(), 0, 0, 1, 1)
                    pyrvapi.rvapi_add_graph(self.hits_graph_id(),
                                            self.hits_graph_sec_id(), 1, 0, 1, 1)
                    pyrvapi.rvapi_set_graph_size(self.hits_graph_id(), 700, 400)

                    pyrvapi.rvapi_add_text(" <p><hr/>", self.hits_graph_sec_id(),
                                           2, 0, 1, 1)
                    pyrvapi.rvapi_add_text("<h3>Correlation plots</h3>",
                                           self.hits_graph_sec_id(), 3, 0, 1, 1)
                    pyrvapi.rvapi_add_loggraph(self.corr_graph_id(),
                                               self.hits_graph_sec_id(), 4, 0, 1, 1)

                    pyrvapi.rvapi_add_graph_data("data", self.hits_graph_id(), "Scores")
                    pyrvapi.rvapi_add_graph_data("data", self.corr_graph_id(),
                                                 "Score correlations")

                    def addDatasets(ref, name):
                        pyrvapi.rvapi_add_graph_dataset(ref, "data",
                                                        self.hits_graph_id(), name, name)
                        pyrvapi.rvapi_add_graph_dataset(ref, "data",
                                                        self.corr_graph_id(), name, name)
                        return

                    addDatasets("hno", "Hit number")
                    addDatasets("qscore", "Q-score")
                    addDatasets("rmsd", "R.m.s.d.")
                    addDatasets("nalign", "Nalign/n0")
                    addDatasets("seqid", "Seq. Id.")

                    def addData(ref, value):
                        pyrvapi.rvapi_add_graph_real(ref, "data",
                                                     self.hits_graph_id(), value, "%g")
                        pyrvapi.rvapi_add_graph_real(ref, "data",
                                                     self.corr_graph_id(), value, "%g")
                        return

                    for j in range(nHits):
                        pyrvapi.rvapi_add_graph_int("hno", "data",
                                                    self.hits_graph_id(), j)
                        addData("qscore", float(hitlist.columns[2].value[j]))
                        addData("rmsd", float(hitlist.columns[3].value[j]))
                        addData("nalign", float(hitlist.columns[4].value[j]) / querySize)
                        addData("seqid", float(hitlist.columns[5].value[j]))

                    pyrvapi.rvapi_add_graph_plot("plot", self.hits_graph_id(),
                                                 "Score profiles", "Hit number", "Scores")

                    def addLine(xset, yset, color):
                        pyrvapi.rvapi_add_plot_line("plot", "data",
                                                    self.hits_graph_id(), xset, yset)
                        pyrvapi.rvapi_set_line_options(yset, "plot", "data",
                                                       self.hits_graph_id(), color,
                                                       "solid", "off", 2.5, True)
                        return

                    addLine("hno", "qscore", "#00008B")
                    addLine("hno", "rmsd", "#8B0000")
                    addLine("hno", "nalign", "#8B8B00")
                    addLine("hno", "seqid", "#008B00")

                    pyrvapi.rvapi_set_plot_legend("plot", self.hits_graph_id(), "e", "")

                    def addPlot(plotId, name, xname, yname, xset, yset, color):
                        pyrvapi.rvapi_add_graph_plot(plotId, self.corr_graph_id(),
                                                     name, xname, yname)
                        pyrvapi.rvapi_add_plot_line(plotId, "data",
                                                    self.corr_graph_id(), xset, yset)
                        pyrvapi.rvapi_set_line_options(yset, plotId, "data",
                                                       self.corr_graph_id(), color,
                                                       "off", "filledCircle", 2.5, True)
                        return

                    addPlot("p1", "R.m.s.d. vs Seq. Id", "Seq. Id", "R.m.s.d.",
                            "seqid", "rmsd", "#8B0000")
                    addPlot("p2", "R.m.s.d. vs Q-score", "Q-score", "R.m.s.d.",
                            "qscore", "rmsd", "#8B0000")
                    addPlot("p3", "R.m.s.d. vs Nalign", "Normalised alignment length",
                            "R.m.s.d.", "nalign", "rmsd", "#8B0000")
                    addPlot("p4", "Seq. Id. vs Q-score", "Q-score", "Seq. Id.",
                            "qscore", "seqid", "#008B00")
                    addPlot("p5", "Seq. Id. vs Nalign", "Normalised alignment length",
                            "Seq. Id.", "nalign", "seqid", "#008B00")
                    addPlot("p6", "Nalign vs. Q-score", "Q-score",
                            "Normalised alignment length", "qscore", "nalign", "#8B8B00")

        else:
            self.putTitle("No PDB matches found")

    else:  # pairwise or multiple alignment

        self.rvrow += 1
        if nXYZ == 2:
            outFiles = self.restoreReportDocument().split("\n")
        elif nXYZ > 2:
            outFiles = [self.gesamt_xyz()]

        if len(outFiles) > 0:
            self.putTitle("Gesamt Output")
            # register output data from temporary location (files will be moved
            # to output directory by the registration procedure)
            ensemble = self.registerEnsemble(dtype_template.subtypeProtein(),
                                             outFiles[0])
            if ensemble:
                self.putEnsembleWidget("ensemble_btn", "Superposed ensemble ",
                                       ensemble, -1)
            for i in range(1, len(outFiles) - 1):
                self.rvrow += 1
                ensemble = self.registerEnsemble(dtype_template.subtypeProtein(),
                                                 outFiles[i])
                if ensemble:
                    self.putEnsembleWidget("ensemble_" + str(i) + "_btn",
                                           "Superposed domain #" + str(i),
                                           ensemble, -1)
        else:
            self.putTitle("No Output Files Generated")

    # close execution logs and quit
    self.success()

    return