def create_citation_tab(self, ample_dict):
    if self.citation_tab_id:
        return
    self.citation_tab_id = "citation_tab"
    pyrvapi.rvapi_insert_tab(self.citation_tab_id, "Citation", self.log_tab_id, False)

    refMgr = reference_manager.ReferenceManager(ample_dict)
    bibtex_file = refMgr.save_citations_to_file(ample_dict)
    if self.ccp4i2:
        # The horror of ccp4i2 means that this all gets dumped into xml so we can't use any markup tags
        tdata = refMgr.citations_as_text
    else:
        tdata = refMgr.methods_as_html
        tdata += refMgr.citations_as_html
        tdata += '<hr><p>A bibtex file with the relevant citations has been saved to: {}</p>'.format(bibtex_file)
    pyrvapi.rvapi_add_text(tdata, self.citation_tab_id, 0, 0, 1, 1)
    if not self.ccp4i2:
        pyrvapi.rvapi_add_data(
            "bibtex_file",
            "Citations as BIBTEX",
            self.fix_path(bibtex_file),
            "text",
            self.citation_tab_id,
            2, 0, 1, 1, True,
        )
    return self.citation_tab_id
def display_citation_tab(self):
    """Function to display citations for programs used within SIMBAD

    Returns
    -------
    object
        Section containing the relevant citations
    """
    self._create_citation_tab()
    args = self.get_arguments_from_log(self.logfile)
    refMgr = reference_manager.ReferenceManager(args)
    bibtex_file = refMgr.save_citations_to_file(self.work_dir)
    if self.ccp4i2:
        # The horror of ccp4i2 means that this all gets dumped into xml so we can't use any markup tags
        tdata = refMgr.citations_as_text
    else:
        tdata = refMgr.methods_as_html
        tdata += refMgr.citations_as_html
        tdata += '<hr><p>A bibtex file with the relevant citations has been saved to: {}</p>'.format(bibtex_file)
    pyrvapi.rvapi_add_text(tdata, self.citation_tab_id, 0, 0, 1, 1)
    if not self.ccp4i2:
        pyrvapi.rvapi_add_data("bibtex_file", "Citations as BIBTEX",
                               self.fix_path(bibtex_file), "text",
                               self.citation_tab_id, 2, 0, 1, 1, True)
def add_results_section(self, result_dict=None, ensemble_name=None,
                        program_name=None, logfile_key=None, pdb_key=None,
                        mtz_key=None, uid=None, container_id=None):
    assert (result_dict and ensemble_name and program_name and logfile_key
            and pdb_key and mtz_key and uid and container_id)
    have_logfile = have_files(result_dict, logfile_key)
    have_pdb_and_mtz = have_files(result_dict, pdb_key, mtz_key)
    if not (have_logfile or have_pdb_and_mtz):
        return
    program_id = program_name.lower().replace(' ', '_')
    this_sec_id = "sec_{0}_{1}".format(program_id, ensemble_name) + uid
    pyrvapi.rvapi_add_section(this_sec_id, "{} Outputs".format(program_name),
                              container_id, 0, 0, 1, 1, False)
    if have_pdb_and_mtz:
        data_id = "o{0}{1}".format(program_id, ensemble_name) + uid
        pyrvapi.rvapi_add_data(
            data_id,
            "{} OUTPUTS".format(program_name),
            self.fix_path(result_dict[pdb_key]),
            "xyz",
            this_sec_id,
            2, 0, 1, 1, True,
        )
        pyrvapi.rvapi_append_to_data(data_id,
                                     self.fix_path(result_dict[mtz_key]),
                                     "hkl:map")
    if have_logfile:
        data_id = "l{0}{1}".format(program_id, ensemble_name) + uid
        pyrvapi.rvapi_add_data(
            data_id,
            "{} Logfile".format(program_name),
            self.fix_path(result_dict[logfile_key]),
            # "summary",
            "text",
            this_sec_id,
            2, 0, 1, 1, True,
        )
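# NOTE: have_files() is called above but not defined in this listing. Judging
# from the call sites (and the os.path.isfile(str(...)) pattern used elsewhere
# in this module), it is assumed to check that every named key in a result
# dict points at an existing file. A minimal sketch under that assumption:
def have_files(result_dict, *keys):
    """Return True if result_dict[key] names an existing file for every key."""
    import os
    return all(os.path.isfile(str(result_dict.get(key))) for key in keys)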
def putStructureWidget(self, title_str, fpath_list, openState):
    wId = self.page_cursor[0] + "_" + "structure" + str(self.page_cursor[1])
    pyrvapi.rvapi_add_data(wId, title_str, fpath_list[0], "xyz",
                           self.page_cursor[0], self.page_cursor[1],
                           0, 1, 1, openState)
    if len(fpath_list) > 1:
        pyrvapi.rvapi_append_to_data(wId, fpath_list[1], "hkl:map")
    if len(fpath_list) > 2:
        pyrvapi.rvapi_append_to_data(wId, fpath_list[2], "hkl:ccp4_map")
    if len(fpath_list) > 3:
        pyrvapi.rvapi_append_to_data(wId, fpath_list[3], "hkl:ccp4_dmap")
    self.page_cursor[1] += 1
    return
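# Usage sketch (hypothetical file names): the first entry of fpath_list is the
# coordinate file; optional second, third and fourth entries are appended to
# the same widget as map coefficients, a CCP4 map and a CCP4 difference map.
#
#     self.putStructureWidget("Refined model",
#                             ["refined.pdb", "refined_map_coeffs.mtz"],
#                             True)   # True: widget starts opened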
def run(body):  # body is reference to the main Import class

    files_xyz = []
    for f in body.files_all:
        fl = f.lower()
        if fl.endswith(('.pdb', '.cif', '.mmcif', '.ent')):
            files_xyz.append(f)

    if len(files_xyz) <= 0:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%% IMPORT OF XYZ COORDINATES\n")
    body.file_stdout.write("%" * 80 + "\n")

    xyzSecId = "xyz_sec_" + str(body.widget_no)
    body.widget_no += 1
    pyrvapi.rvapi_add_section(xyzSecId, "XYZ Coordinates",
                              body.report_page_id(), body.rvrow, 0, 1, 1, False)

    k = 0
    for f in files_xyz:

        body.files_all.remove(f)
        fpath = os.path.join(body.importDir(), f)
        #coor.stripLigWat ( fpath,fpath )  # strip ligands and waters

        # split input file to chains
        scr_file = open("pdbcur.script", "w")
        scr_file.write("SPLITTOCHAINS\nEND\n")
        scr_file.close()

        # Start pdbcur
        rc = command.call("pdbcur", ['XYZIN', fpath], "./", "pdbcur.script",
                          body.file_stdout, body.file_stderr)

        # read pdbcur's json
        fnamesplit = os.path.splitext(f)
        fpathsplit = os.path.join(body.importDir(), fnamesplit[0]) + ".json"

        if not os.path.isfile(fpathsplit):
            body.putSummaryLine_red(f, "UNKNOWN", "Failed to recognise, ignored")

        else:
            with open(fpathsplit, 'r') as json_file:
                json_str = json_file.read()
            #xyzmeta = eval(json_str)
            xyzMeta = xyzmeta.XYZMeta(json_str)

            if len(xyzMeta["xyz"]) <= 0:
                body.putSummaryLine_red(f, "XYZ", "Empty file -- ignored")

            else:
                subSecId = xyzSecId
                if len(files_xyz) > 1:
                    subSecId = xyzSecId + str(k)
                    pyrvapi.rvapi_add_section(subSecId, "Import " + f,
                                              xyzSecId, k, 0, 1, 1, False)

                xyz = dtype_xyz.DType(body.job_id)
                xyz.setFile(f)
                dtype_xyz.setXYZMeta(xyz, xyzMeta)
                body.dataSerialNo += 1
                xyz.makeDName(body.dataSerialNo)

                os.rename(fpath, os.path.join(body.outputDir(), f))
                xyz.makeUniqueFNames(body.outputDir())

                body.outputDataBox.add_data(xyz)

                xyzTableId = "xyz_" + str(k) + "_table"
                body.putTable(xyzTableId, "", subSecId, 0)
                jrow = 0
                if len(files_xyz) <= 1:
                    body.putTableLine(xyzTableId, "File name",
                                      "Imported file name", f, jrow)
                    jrow += 1
                body.putTableLine(xyzTableId, "Assigned name",
                                  "Assigned data name", xyz.dname, jrow)
                crystData = getCrystData(xyzMeta)
                body.putTableLine(xyzTableId, "Space group",
                                  "Space group", crystData[0], jrow + 1)
                body.putTableLine(xyzTableId, "Cell parameters",
                                  "Cell parameters (a,b,c, α,β,γ)",
                                  crystData[1], jrow + 2)

                contents = ""
                nChains = 0
                for model in xyzMeta["xyz"]:
                    for chain in model["chains"]:
                        if chain["type"] != "UNK":
                            nChains += 1
                        if len(contents) > 0:
                            contents += "<br>"
                        contents += "Model " + str(model['model']) + ", chain " + \
                                    chain['id'] + ": " + str(chain['size']) + \
                                    " residues, type: " + chain['type']
                if len(xyzMeta["ligands"]) > 0:
                    if len(contents) > 0:
                        contents += "<br>"
                    contents += "Ligands:"
                    for name in xyzMeta["ligands"]:
                        contents += " " + name
                body.putTableLine(xyzTableId, "Contents",
                                  "File contents", contents, jrow + 3)

                pyrvapi.rvapi_add_data(
                    xyzTableId + "_structure_btn", xyz.dname + " ",
                    # always relative to job_dir from job_dir/html
                    os.path.join("..", body.outputDir(), xyz.files[0]),
                    "xyz", subSecId, 1, 0, 1, 1, -1)

                body.putSummaryLine(f, "XYZ", xyz.dname)

                """
                if nChains > 1:
                    irow = 2
                    for model in xyzMeta["xyz"]:
                        for chain in model['chains']:
                            if chain["type"] != "UNK":

                                fname = fnamesplit[0] + "_" + str(model['model']) + "_" + \
                                        chain['id'] + fnamesplit[1]

                                xyz = dtype_xyz.DType ( body.job_id )
                                xyz.setFile ( fname )
                                mdl = {}
                                mdl['model']  = model['model']
                                mdl['chains'] = [chain]
                                xyz_meta = {}
                                xyz_meta["cryst"]   = xyzMeta["cryst"]
                                xyz_meta["xyz"]     = [mdl]
                                xyz_meta["ligands"] = chain["ligands"]
                                dtype_xyz.setXYZMeta ( xyz,xyz_meta )
                                body.dataSerialNo += 1
                                xyz.makeDName ( body.dataSerialNo )

                                os.rename ( os.path.join(body.importDir(),fname),
                                            os.path.join(body.outputDir(),fname) )
                                xyz.makeUniqueFNames ( body.outputDir() )

                                body.outputDataBox.add_data ( xyz )

                                xyzTableId = "xyz_" + str(k) + "_" + str(model['model']) + \
                                             "_" + chain['id'] + "_table"
                                body.putMessage1 ( subSecId," ",irow )
                                body.putTable ( xyzTableId,"",subSecId,irow+1 )
                                body.putTableLine ( xyzTableId,"Assigned name",
                                                    "Assigned data name",xyz.dname,0 )
                                crystData = getCrystData ( xyz_meta )
                                body.putTableLine ( xyzTableId,"Space group",
                                                    "Space group",crystData[0],1 )
                                body.putTableLine ( xyzTableId,"Cell parameters",
                                                    "Cell parameters (a,b,c, α,β,γ)",
                                                    crystData[1],2 )
                                contents  = "Model " + str(model['model']) + ", chain " + \
                                            chain['id'] + ": " + str(chain['size'])
                                contents += " residues, type: " + chain['type']
                                if len(xyz.xyzmeta["ligands"]) > 0:
                                    contents += "<br>Ligands:"
                                    for name in xyz.xyzmeta["ligands"]:
                                        contents += " " + name
                                body.putTableLine ( xyzTableId,"Contents",
                                                    "File contents",contents,3 )
                                pyrvapi.rvapi_add_data ( xyzTableId+"_structure_btn",xyz.dname,
                                        # always relative to job_dir from job_dir/html
                                        os.path.join("..",body.outputDir(),xyz.files[0]),
                                        "xyz",subSecId,irow+2,0,1,1,-1 )
                                #fdebug = open ( "_debug.txt",'a' )
                                #fdebug.write ( fname + "\n")
                                #fdebug.close()
                                body.addSummaryLine ( "XYZ",xyz.dname )
                                irow += 3
                """

        body.file_stdout.write("... processed: " + f + "\n")
        k += 1

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
def results_section(self, results_tab_id, mrb_results, ensemble_results,
                    section_title):
    #
    # Results Tab
    #
    if not mrb_results:
        return

    # Create unique identifier for this section by using the id
    # All ids will have this appended to avoid clashes
    uid = str(uuid.uuid4())
    section_id = section_title.replace(" ", "_") + uid
    self.results_tab_sections.append(section_id)  # Add to list so we can remove if we update

    pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
    pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title),
                           section_id, 0, 0, 1, 1)
    results_tree = "results_tree" + section_id
    pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id,
                                  0, 0, 1, 1)

    for r in mrb_results:
        name = r['ensemble_name']
        # container_id="sec_{0}".format(name)
        # pyrvapi.rvapi_add_section(container_id,"Results for: {0}".format(name),results_tree,0,0,1,1,True)
        container_id = "sec_{0}".format(name) + uid
        pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)

        header = "<h3>Results for ensemble: {0}</h3>".format(name)
        pyrvapi.rvapi_add_text(header, container_id, 0, 0, 1, 1)

        sec_table = "sec_table_{0}".format(name) + uid
        title = "Results table: {0}".format(name)
        title = "Summary"
        pyrvapi.rvapi_add_section(sec_table, title, container_id,
                                  0, 0, 1, 1, True)
        table_id = "table_{0}".format(name) + uid
        pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
        tdata = mrbump_util.ResultsSummary().results_table([r])
        self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)

        # Ensemble
        if ensemble_results:
            epdb = self.ensemble_pdb(r, ensemble_results)
            if epdb:
                sec_ensemble = "sec_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model",
                                          container_id, 0, 0, 1, 1, False)
                data_ensemble = "data_ensemble_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(data_ensemble, "Ensemble PDB",
                                       self.fix_path(epdb), "XYZOUT",
                                       sec_ensemble, 2, 0, 1, 1, True)

        # PHASER
        if os.path.isfile(str(r['PHASER_logfile'])) or (
                os.path.isfile(str(r['PHASER_pdbout']))
                and os.path.isfile(str(r['PHASER_mtzout']))):
            sec_phaser = "sec_phaser_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_phaser, "PHASER Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['PHASER_pdbout'])) and os.path.isfile(
                    str(r['PHASER_mtzout'])):
                data_phaser = "data_phaser_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_phaser, "PHASER PDB",
                    os.path.splitext(self.fix_path(r['PHASER_pdbout']))[0],
                    "xyz:map", sec_phaser, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_phaser, self.fix_path(r['PHASER_mtzout']), "xyz:map")
            if os.path.isfile(str(r['PHASER_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_phaser_logfile_{0}".format(name), "PHASER Logfile",
                    self.fix_path(r['PHASER_logfile']), "text",
                    sec_phaser, 2, 0, 1, 1, True)

        # REFMAC
        if os.path.isfile(str(r['REFMAC_logfile'])) or (
                os.path.isfile(str(r['REFMAC_pdbout']))
                and os.path.isfile(str(r['REFMAC_mtzout']))):
            sec_refmac = "sec_refmac_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_refmac, "REFMAC Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['REFMAC_pdbout'])) and os.path.isfile(
                    str(r['REFMAC_mtzout'])):
                data_refmac = "data_refmac_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_refmac, "REFMAC PDB",
                    os.path.splitext(self.fix_path(r['REFMAC_pdbout']))[0],
                    "xyz:map", sec_refmac, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_refmac, self.fix_path(r['REFMAC_mtzout']), "xyz:map")
            if os.path.isfile(str(r['REFMAC_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_refmac_logfile_{0}".format(name), "REFMAC Logfile",
                    self.fix_path(r['REFMAC_logfile']), "text",
                    sec_refmac, 2, 0, 1, 1, True)

        # Buccaneer
        if os.path.isfile(str(r['BUCC_logfile'])) or (
                os.path.isfile(str(r['BUCC_pdbout']))
                and os.path.isfile(str(r['BUCC_mtzout']))):
            sec_bucc = "sec_bucc_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_bucc, "BUCCANEER Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['BUCC_pdbout'])) and os.path.isfile(
                    str(r['BUCC_mtzout'])):
                data_bucc = "data_bucc_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_bucc, "BUCC PDB",
                    os.path.splitext(self.fix_path(r['BUCC_pdbout']))[0],
                    "xyz:map", sec_bucc, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_bucc, self.fix_path(r['BUCC_mtzout']), "xyz:map")
            if os.path.isfile(str(r['BUCC_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_bucc_logfile_{0}".format(name), "BUCC Logfile",
                    self.fix_path(r['BUCC_logfile']), "text",
                    sec_bucc, 2, 0, 1, 1, True)

        # Arpwarp
        if os.path.isfile(str(r['ARP_logfile'])) or (
                os.path.isfile(str(r['ARP_pdbout']))
                and os.path.isfile(str(r['ARP_mtzout']))):
            sec_arp = "sec_arp_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_arp, "ARPWARP Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['ARP_pdbout'])) and os.path.isfile(
                    str(r['ARP_mtzout'])):
                data_arp = "data_arp_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_arp, "ARP PDB",
                    os.path.splitext(self.fix_path(r['ARP_pdbout']))[0],
                    "xyz:map", sec_arp, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_arp, self.fix_path(r['ARP_mtzout']), "xyz:map")
            if os.path.isfile(str(r['ARP_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_arp_logfile_{0}".format(name), "ARP Logfile",
                    self.fix_path(r['ARP_logfile']), "text",
                    sec_arp, 2, 0, 1, 1, True)

        # SHELXE
        if os.path.isfile(str(r['SHELXE_logfile'])) or (
                os.path.isfile(str(r['SHELXE_pdbout']))
                and os.path.isfile(str(r['SHELXE_mtzout']))):
            sec_shelxe = "sec_shelxe_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_shelxe, "SHELXE Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['SHELXE_pdbout'])) and os.path.isfile(
                    str(r['SHELXE_mtzout'])):
                data_shelxe = "data_shelxe_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_shelxe, "SHELXE PDB",
                    os.path.splitext(self.fix_path(r['SHELXE_pdbout']))[0],
                    "xyz:map", sec_shelxe, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_shelxe, self.fix_path(r['SHELXE_mtzout']), "xyz:map")
            if os.path.isfile(str(r['SHELXE_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_shelxe_logfile_{0}".format(name), "SHELXE Logfile",
                    self.fix_path(r['SHELXE_logfile']), "text",
                    sec_shelxe, 2, 0, 1, 1, True)

        # Buccaneer Rebuild
        if os.path.isfile(str(r['SXRBUCC_logfile'])) or (
                os.path.isfile(str(r['SXRBUCC_pdbout']))
                and os.path.isfile(str(r['SXRBUCC_mtzout']))):
            sec_sxrbucc = "sec_sxrbucc_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_sxrbucc,
                                      "BUCCANEER SHELXE Trace Rebuild Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['SXRBUCC_pdbout'])) and os.path.isfile(
                    str(r['SXRBUCC_mtzout'])):
                data_sxrbucc = "data_sxrbucc_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_sxrbucc, "SXRBUCC PDB",
                    os.path.splitext(self.fix_path(r['SXRBUCC_pdbout']))[0],
                    "xyz:map", sec_sxrbucc, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_sxrbucc, self.fix_path(r['SXRBUCC_mtzout']), "xyz:map")
            if os.path.isfile(str(r['SXRBUCC_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_sxrbucc_logfile_{0}".format(name), "SXRBUCC Logfile",
                    self.fix_path(r['SXRBUCC_logfile']), "text",
                    sec_sxrbucc, 2, 0, 1, 1, True)

        # Arpwarp Rebuild
        if os.path.isfile(str(r['SXRARP_logfile'])) or (
                os.path.isfile(str(r['SXRARP_pdbout']))
                and os.path.isfile(str(r['SXRARP_mtzout']))):
            sec_sxrarp = "sec_sxrarp_{0}".format(name) + uid
            pyrvapi.rvapi_add_section(sec_sxrarp,
                                      "ARPWARP SHELXE Trace Rebuild Outputs",
                                      container_id, 0, 0, 1, 1, False)
            if os.path.isfile(str(r['SXRARP_pdbout'])) and os.path.isfile(
                    str(r['SXRARP_mtzout'])):
                data_sxrarp = "data_sxrarp_out_{0}".format(name) + uid
                pyrvapi.rvapi_add_data(
                    data_sxrarp, "SXRARP PDB",
                    os.path.splitext(self.fix_path(r['SXRARP_pdbout']))[0],
                    "xyz:map", sec_sxrarp, 2, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data(
                    data_sxrarp, self.fix_path(r['SXRARP_mtzout']), "xyz:map")
            if os.path.isfile(str(r['SXRARP_logfile'])):
                pyrvapi.rvapi_add_data(
                    "data_sxrarp_logfile_{0}".format(name), "SXRARP Logfile",
                    self.fix_path(r['SXRARP_logfile']), "text",
                    sec_sxrarp, 2, 0, 1, 1, True)

        pyrvapi.rvapi_set_tree_node(results_tree, container_id,
                                    "{0}".format(name), "auto", "")
    return
def run(body,                       # body is reference to the main Import class
        sectionTitle="Reflection datasets created",
        sectionOpen=False,          # to keep result section closed if several datasets
        freeRflag=True              # will be run if necessary
        ):

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_merged', 'mtz_merged'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    mtzSecId = body.getWidgetId("mtz_sec") + "_"

    k = 0
    for f_orig, f_fmt in files_mtz:

        body.files_all.remove(f_orig)
        p_orig = os.path.join(body.importDir(), f_orig)
        p_mtzin = p_orig
        if not f_fmt.startswith('mtz_'):
            p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
            sp = subprocess.Popen('pointless', stdin=subprocess.PIPE,
                                  stdout=body.file_stdout,
                                  stderr=body.file_stderr)
            sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin + '\nCOPY\n')
            sp.stdin.close()
            if sp.wait():
                p_mtzin = None

        if p_mtzin:
            p_mtzout = p_mtzin
            rc = command.comrc()
            if freeRflag:
                p_mtzout = os.path.join(body.outputDir(),
                                        os.path.basename(f_orig))
                if k == 0:
                    scr_file = open(freerflag_script(), "w")
                    scr_file.write("UNIQUE\n")
                    scr_file.close()
                # run freerflag: generate FreeRFlag if it is absent, and expand
                # all reflections
                rc = command.call("freerflag",
                                  ["HKLIN", p_mtzin, "HKLOUT", p_mtzout],
                                  "./", freerflag_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser=None)

            if rc.msg:
                msg = "\n\n Freerflag failed with message:\n\n" + \
                      rc.msg + \
                      "\n\n File " + f_orig + \
                      " cannot be processed.\n\n"
                body.file_stdout.write(msg)
                body.file_stderr.write(msg)
                body.putSummaryLine_red(f_orig, "MTZ",
                                        "Failed to process/import, ignored")

            else:
                mf = mtz.mtz_file(p_mtzout)
                body.summary_row_0 = -1  # to signal the beginning of summary row

                for ds in mf:

                    if k == 0:
                        body.file_stdout.write("\n" + "%" * 80 + "\n")
                        body.file_stdout.write("%%%%% IMPORT REFLECTION DATA\n")
                        body.file_stdout.write("%" * 80 + "\n")

                    # make HKL dataset annotation
                    hkl = dtype_hkl.DType(body.job_id)
                    hkl.importMTZDataset(ds)
                    body.dataSerialNo += 1
                    hkl.makeDName(body.dataSerialNo)
                    datasetName = ""

                    if k == 0:
                        if sectionTitle:
                            pyrvapi.rvapi_add_section(mtzSecId, sectionTitle,
                                                      body.report_page_id(),
                                                      body.rvrow, 0, 1, 1,
                                                      sectionOpen)
                        else:
                            pyrvapi.rvapi_add_section(
                                mtzSecId,
                                "Reflection dataset created: " + hkl.dname,
                                body.report_page_id(),
                                body.rvrow, 0, 1, 1, sectionOpen)

                    subSecId = mtzSecId
                    if len(files_mtz) > 1 or len(mf) > 1:
                        subSecId = mtzSecId + str(k)
                        pyrvapi.rvapi_add_section(subSecId, hkl.dname,
                                                  mtzSecId, k, 0, 1, 1, False)
                        #pyrvapi.rvapi_add_section ( subSecId,
                        #            f_orig + " / " + hkl.getDataSetName(),
                        #            mtzSecId,k,0,1,1,False )

                    # run crtruncate
                    outFileName = os.path.join(body.outputDir(),
                                               hkl.dataId + ".mtz")
                    outXmlName = os.path.join("ctruncate" + hkl.dataId + ".xml")
                    cmd = ["-hklin", p_mtzout, "-hklout", outFileName]
                    amplitudes = ""

                    meanCols = hkl.getMeanColumns()
                    if meanCols[2] != "X":
                        cols = "/*/*/["
                        if meanCols[1] != None:
                            cols = cols + meanCols[0] + "," + meanCols[1]
                        else:
                            cols = cols + meanCols[0]
                        if meanCols[2] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colin", cols + "]"]

                    anomCols = hkl.getAnomalousColumns()
                    anomalous = False
                    if anomCols[4] != "X":
                        anomalous = True
                        cols = "/*/*/["
                        for i in range(0, 4):
                            if anomCols[i] != None:
                                if i > 0:
                                    cols = cols + ","
                                cols = cols + anomCols[i]
                        if anomCols[4] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colano", cols + "]"]

                    if amplitudes:
                        cmd += [amplitudes]

                    cmd += ["-xmlout", outXmlName]
                    cmd += ["-freein"]

                    pyrvapi.rvapi_add_text(" <p><h2>Data analysis (CTruncate)</h2>",
                                           subSecId, 1, 0, 1, 1)
                    pyrvapi.rvapi_add_panel(mtzSecId + str(k), subSecId,
                                            2, 0, 1, 1)

                    """
                    log_parser = pyrvapi_ext.parsers.generic_parser ( mtzSecId+str(k),
                                    False,body.generic_parser_summary,False )
                    rc = command.call ( "ctruncate",cmd,"./",None,
                                    body.file_stdout,body.file_stderr,log_parser )
                    """
                    body.file_stdin = None  # not clear why this is not None at
                                            # this point and needs to be forced,
                                            # or else runApp looks for input script
                    body.setGenericLogParser(mtzSecId + str(k), False)
                    body.runApp("ctruncate", cmd)
                    body.file_stdout.flush()

                    mtzTableId = body.getWidgetId("mtz") + "_" + str(k) + "_table"

                    if rc.msg:
                        msg = "\n\n CTruncate failed with message:\n\n" + \
                              rc.msg + \
                              "\n\n Dataset " + hkl.dname + \
                              " cannot be used.\n\n"
                        body.file_stdout.write(msg)
                        body.file_stderr.write(msg)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl,
                                     -1, msg, 0)
                        datasetName = hkl.dname

                    elif not os.path.exists(outFileName):
                        body.file_stdout.write(
                            "\n\n +++ Dataset " + hkl.dname +
                            "\n was not truncated and will be used as is\n\n")
                        hkl.makeUniqueFNames(body.outputDir())
                        body.outputDataBox.add_data(hkl)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl,
                                     0, "", 0)
                        datasetName = hkl.dname

                        srf.putSRFDiagram(body, hkl, body.outputDir(),
                                          body.reportDir(), subSecId,
                                          3, 0, 1, 1,
                                          body.file_stdout, body.file_stderr,
                                          None)

                        pyrvapi.rvapi_set_text(
                            " <br><hr/><h3>Created Reflection Data Set (merged)</h3>" +
                            "<b>Assigned name:</b> " + datasetName + "<br> ",
                            subSecId, 4, 0, 1, 1)
                        pyrvapi.rvapi_add_data(
                            "hkl_data_" + str(body.dataSerialNo),
                            "Merged reflections",
                            # always relative to job_dir from job_dir/html
                            os.path.join("..", body.outputDir(), hkl.files[0]),
                            "hkl:hkl", subSecId, 5, 0, 1, 1, -1)

                    else:
                        body.file_stdout.write(
                            "\n\n ... Dataset " + hkl.dname +
                            "\n was truncated and will substitute the " +
                            "original one\n\n")
                        mtzf = mtz.mtz_file(outFileName)
                        # ctruncate should create a single dataset here
                        for dset in mtzf:
                            dset.MTZ = os.path.basename(outFileName)
                            hkl_data = dtype_hkl.DType(body.job_id)
                            hkl_data.importMTZDataset(dset)
                            hkl_data.dname = hkl.dname
                            hkl_data.dataId = hkl.dataId
                            hkl_data.makeUniqueFNames(body.outputDir())
                            body.outputDataBox.add_data(hkl_data)
                            makeHKLTable(body, mtzTableId, subSecId, hkl,
                                         hkl_data, 1, "", 0)
                            datasetName = hkl_data.dname

                            srf.putSRFDiagram(body, hkl_data, body.outputDir(),
                                              body.reportDir(), subSecId,
                                              3, 0, 1, 1,
                                              body.file_stdout,
                                              body.file_stderr, None)

                            pyrvapi.rvapi_set_text(
                                " <br><hr/><h3>Created Reflection Data Set (merged)</h3>" +
                                "<b>Assigned name:</b> " + datasetName + "<br> ",
                                subSecId, 4, 0, 1, 1)
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Merged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             hkl_data.files[0]),
                                "hkl:hkl", subSecId, 5, 0, 1, 1, -1)

                    if body.summary_row_0 < 0:
                        body.putSummaryLine(f_orig, "HKL", datasetName)
                    else:
                        body.addSummaryLine("HKL", datasetName)
                    k += 1
                    pyrvapi.rvapi_flush()

                if len(mf) <= 0:
                    body.putSummaryLine_red(f_orig, "UNKNOWN", "-- ignored")

        body.file_stdout.write("... processed: " + f_orig + "\n ")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
def prepare_mtz(self, parent_branch_id):

    branch_data = None

    # create work directory for data reduction stage; even if data reduction
    # is not required, this directory may be used for reindexing reflection
    # data in other parts of CCP4go.
    sdir = os.path.join(self.workdir, self.datared_dir())
    if not os.path.isdir(sdir):
        os.mkdir(sdir)

    # check input data
    if not self.hklpath:
        self.stderr(" *** reflection file not given -- stop.\n")
        self.output_meta["retcode"] = "[02-001] hkl file not given"
        self.write_meta()
        return ""

    if not os.path.isfile(self.hklpath):
        self.stderr(" *** reflection file does not exist -- stop.\n")
        self.output_meta["retcode"] = "[02-002] hkl file not found"
        self.write_meta()
        return ""

    mf = mtz.mtz_file(self.hklpath, None)
    #mf.prn()

    if len(mf) <= 0:
        self.stderr(" *** reflection file is empty -- stop.\n")
        self.output_meta["retcode"] = "[02-003] hkl file empty"
        self.write_meta()
        return ""

    self.input_hkl = mf[0]

    if mf.is_merged():
        self.hkl = self.input_hkl
        #self.hkl.prn()
        self.mtzpath = self.hklpath
        self.stdout(" ... given reflections are merged\n")
        return ""

    # reflections should be merged, use pointless - aimless pipeline

    self.putWaitMessageLF("<b>" + str(self.stage_no + 1) +
                          ". Scaling and Merging (Pointless, Aimless)</b>")
    self.page_cursor[1] -= 1

    branch_data = self.start_branch(
        "Scaling and Merging",
        "CCP4go Automated Structure Solver: Scaling and Merging",
        self.datared_dir(), parent_branch_id)

    self.putMessage("<h3>1. Extracting images</h3>")

    self.open_script("pointless1")
    #self.write_script ( "NAME PROJECT " + self.input_hkl.PROJECT +\
    #                    " CRYSTAL " + self.input_hkl.CRYSTAL +\
    #                    " DATASET 1\n" )
    self.write_script("NAME PROJECT x CRYSTAL y DATASET z\n")
    self.write_script("HKLIN " + self.hklpath + "\n")
    for i in range(len(mf.BRNG)):
        self.write_script("RUN 1 FILE 1 BATCH " + str(mf.BRNG[i][0]) +
                          " to " + str(mf.BRNG[i][1] - 1) + "\n")
    self.write_script("LAUEGROUP HKLIN\n"
                      "SPACEGROUP HKLIN\n"
                      "HKLOUT " + self.joined_mtz() + "\n")
    self.close_script()
    self.setGenericLogParser(True)
    self.runApp("pointless", [])

    self.putMessage("<h3>2. Symmetry assignment</h3>")
    self.open_script("pointless2")
    self.write_script("HKLIN " + self.joined_mtz() + "\n"
                      "HKLOUT " + self.pointless_mtz() + "\n"
                      "XMLOUT " + self.junk_xml() + "\n")
    self.close_script()
    self.setGenericLogParser(True)
    self.runApp("pointless", [])

    self.putMessage("<h3>3. Generating symmetry tables</h3>")
    self.open_script("pointless3")
    self.write_script("HKLIN " + self.pointless_mtz() + "\n"
                      "HKLOUT " + self.junk_mtz() + "\n"
                      "XMLOUT " + self.pointless_xml() + "\n")
    self.close_script()
    panel_id = self.setGenericLogParser(True)
    self.runApp("pointless", [])

    cursor = self.setOutputPage([panel_id, 3])
    self.putSection(self.symm_det(), "Symmetry determination tables", False)

    try:
        table_list = datred_utils.parse_xmlout(self.pointless_xml())
    except:
        self.fail("failed parsing pointless xmlout: possible pointless failure",
                  "failed parsing pointless xmlout")
        self.output_meta["retcode"] = "[02-004] pointless failure"
        self.write_meta()
        self.end_branch(branch_data, self.datared_dir(),
                        "Data Scaling and Merging failed",
                        "pointless failure")
        return ""

    datred_utils.report(table_list, self.symm_det())
    self.setOutputPage(cursor)

    self.putMessage("<h3>4. Scaling and merging</h3>")
    self.open_script("aimless")
    self.write_script("XMLOUT " + self.aimless_xml() + "\n"
                      "END\n")
    self.close_script()
    self.setGenericLogParser(True)
    self.runApp("aimless",
                ["HKLIN", self.pointless_mtz(), "HKLOUT", self.aimless_mtz()])
    self.unsetLogParser()

    # checking merged file
    if not os.path.isfile(self.aimless_mtz()):
        self.stderr(" *** reflection file does not exist -- stop.\n")
        self.output_meta["retcode"] = "[02-005] aimless failure"
        self.write_meta()
        self.end_branch(branch_data, self.datared_dir(),
                        "Data Scaling and Merging failed",
                        "aimless failure")
        return ""

    # add free R-flag
    self.open_script("freerflag")
    self.write_script("UNIQUE\n")
    self.close_script()
    self.runApp("freerflag",
                ["HKLIN", self.aimless_mtz(), "HKLOUT", self.freer_mtz()])

    # checking output file
    if not os.path.isfile(self.freer_mtz()):
        self.stderr(" *** reflection file does not exist -- stop.\n")
        self.output_meta["retcode"] = "[02-006] failed to add free R-flag to merged hkl"
        self.write_meta()
        self.end_branch(branch_data, self.datared_dir(),
                        "Data Scaling and Merging failed",
                        "freerflag failure")
        return ""

    # truncate merged file
    self.putMessage("<h3>5. Data analysis</h3>")
    mf = mtz.mtz_file(self.freer_mtz(), None)
    #mf.prn()

    cmd = ["-hklin", self.freer_mtz(), "-hklout", self.merged_mtz(),
           "-colin", "/*/*/[IMEAN,SIGIMEAN]"]

    try:
        Ipm = mf[0].Ipm
        if Ipm:
            cmd += ["-colano", "/*/*/[" + Ipm.plus.value + "," +
                    Ipm.plus.sigma + "," + Ipm.minus.value + "," +
                    Ipm.minus.sigma + "]"]
    except:
        pass

    cmd += ["-xmlout", self.ctruncate_xml(), "-freein"]

    self.setGenericLogParser(True)
    self.runApp("ctruncate", cmd)

    # checking output file
    if not os.path.isfile(self.merged_mtz()):
        self.stderr(" *** reflection file does not exist -- stop.\n")
        self.output_meta["retcode"] = "[02-007] failed to truncate hkl"
        self.write_meta()
        self.end_branch(branch_data, self.datared_dir(),
                        "Data Scaling and Merging failed",
                        "ctruncate failure")
        return ""

    # get merged file metadata
    self.mtzpath = self.merged_mtz()
    mf = mtz.mtz_file(self.mtzpath, None)

    if len(mf) <= 0:
        self.stderr(" *** reflection file is empty -- stop.\n")
        self.output_meta["retcode"] = "[02-008] truncated hkl file empty"
        self.write_meta()
        self.end_branch(branch_data, self.datared_dir(),
                        "Data Scaling and Merging failed",
                        "empty merged file")
        return ""

    self.hkl = mf[0]
    #mf.prn()

    self.stdout("\n\n ... merged hkl file created\n\n")
    self.putMessage("<h3>Success</h3>")
    pyrvapi.rvapi_add_data(
        "merged_data_widget_id", "Merged reflections",
        # always relative to job_dir from job_dir/html
        os.path.join("../", self.mtzpath),
        "hkl:hkl", self.page_cursor[0], self.page_cursor[1], 0, 1, 1, -1)

    meta = {}
    meta["nResults"] = 1
    meta["mtz"] = self.mtzpath
    meta["merged"] = True
    meta["spg"] = self.hkl.HM
    self.output_meta["results"][self.datared_dir()] = meta

    self.quit_branch(branch_data, self.datared_dir(),
                     "Reflection data scaled and merged (Pointless, " +
                     "Aimless), <i>SpG=" + meta["spg"] + "</i>")

    return branch_data["pageId"]
def results_section(self, results_tab_id, mrb_results, ensemble_results,
                    section_title):
    """Results Tab"""
    if not mrb_results:
        return

    # Create unique identifier for this section by using the id
    # All ids will have this appended to avoid clashes
    uid = str(uuid.uuid4())
    section_id = section_title.replace(" ", "_") + uid
    self.results_tab_sections.append(section_id)  # Add to list so we can remove if we update

    pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
    pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title),
                           section_id, 0, 0, 1, 1)
    results_tree = "results_tree" + section_id
    pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id,
                                  0, 0, 1, 1)

    for r in mrb_results:
        ensemble_name = r['ensemble_name']
        container_id = "sec_{0}".format(ensemble_name) + uid
        pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)

        header = "<h3>Results for ensemble: {0}</h3>".format(ensemble_name)
        pyrvapi.rvapi_add_text(header, container_id, 0, 0, 1, 1)

        sec_table = "sec_table_{0}".format(ensemble_name) + uid
        title = "Results table: {0}".format(ensemble_name)
        title = "Summary"
        pyrvapi.rvapi_add_section(sec_table, title, container_id,
                                  0, 0, 1, 1, True)
        table_id = "table_{0}".format(ensemble_name) + uid
        pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
        tdata = mrbump_util.ResultsSummary().results_table([r])
        self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)

        # Ensemble
        if ensemble_results:
            epdb = self.ensemble_pdb(r, ensemble_results)
            if epdb:
                sec_ensemble = "sec_ensemble_{0}".format(ensemble_name) + uid
                pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model",
                                          container_id, 0, 0, 1, 1, False)
                data_ensemble = "data_ensemble_{0}".format(ensemble_name) + uid
                pyrvapi.rvapi_add_data(data_ensemble, "Ensemble PDB",
                                       self.fix_path(epdb), "XYZOUT",
                                       sec_ensemble, 2, 0, 1, 1, True)

        # PHASER
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='PHASER',
            logfile_key='PHASER_logfile',
            pdb_key='PHASER_pdbout',
            mtz_key='PHASER_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # REFMAC
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='Refmac',
            logfile_key='REFMAC_logfile',
            pdb_key='REFMAC_pdbout',
            mtz_key='REFMAC_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # Buccaneer
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='BUCCANEER',
            logfile_key='BUCC_logfile',
            pdb_key='BUCC_pdbout',
            mtz_key='BUCC_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # Arpwarp
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='ArpWarp',
            logfile_key='ARP_logfile',
            pdb_key='ARP_pdbout',
            mtz_key='ARP_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # SHELXE
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='SHELXE',
            logfile_key='SHELXE_logfile',
            pdb_key='SHELXE_pdbout',
            mtz_key='SHELXE_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # Buccaneer Rebuild
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='BUCCANEER SHELXE Trace Rebuild',
            logfile_key='SXRBUCC_logfile',
            pdb_key='SXRBUCC_pdbout',
            mtz_key='SXRBUCC_mtzout',
            uid=uid,
            container_id=container_id,
        )
        # Arpwarp Rebuild
        self.add_results_section(
            result_dict=r,
            ensemble_name=ensemble_name,
            program_name='ARPWARP SHELXE Trace Rebuild',
            logfile_key='SXRARP_logfile',
            pdb_key='SXRARP_pdbout',
            mtz_key='SXRARP_mtzout',
            uid=uid,
            container_id=container_id,
        )

        pyrvapi.rvapi_set_tree_node(results_tree, container_id,
                                    "{0}".format(ensemble_name), "auto", "")
    return
def run(body):  # body is reference to the main Import class

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_integrated', 'xds_scaled', 'mtz_integrated'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    unmergedSecId = "unmerged_mtz_sec_" + str(body.widget_no)
    body.widget_no += 1

    k = 0
    for f_orig, f_fmt in files_mtz:
        try:
            body.files_all.remove(f_orig)
            p_orig = os.path.join(body.importDir(), f_orig)
            p_mtzin = p_orig
            if not f_fmt.startswith('mtz_'):
                p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
                sp = subprocess.Popen('pointless', stdin=subprocess.PIPE,
                                      stdout=body.file_stdout,
                                      stderr=body.file_stderr)
                sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                               '\nCOPY\n')
                sp.stdin.close()
                if sp.wait():
                    p_mtzin = None

            if p_mtzin:

                if k == 0:
                    body.file_stdout.write("\n" + "%" * 80 + "\n")
                    body.file_stdout.write("%%%%% UNMERGED DATA IMPORT\n")
                    body.file_stdout.write("%" * 80 + "\n")
                    pyrvapi.rvapi_add_section(unmergedSecId,
                                              "Unmerged datasets",
                                              body.report_page_id(),
                                              body.rvrow, 0, 1, 1, False)
                    urow = 0

                fileSecId = unmergedSecId
                frow = 0
                if len(files_mtz) > 1:
                    fileSecId = unmergedSecId + "_" + str(k)
                    pyrvapi.rvapi_add_section(fileSecId, "File " + f_orig,
                                              unmergedSecId, urow, 0, 1, 1,
                                              False)
                    urow += 1
                    pyrvapi.rvapi_set_text("<h2>Data analysis (Pointless)</h2>",
                                           fileSecId, frow, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        "<h2>Data analysis (Pointless)</h2>" +
                        "<h3>File: " + f_orig + "</h3>",
                        fileSecId, frow, 0, 1, 1)

                reportPanelId = fileSecId + "_report"
                pyrvapi.rvapi_add_panel(reportPanelId, fileSecId,
                                        frow + 1, 0, 1, 1)
                frow += 2

                log_parser = pyrvapi_ext.parsers.generic_parser(reportPanelId,
                                                                False)

                body.file_stdin = open(pointless_script(), 'w')
                body.file_stdin.write("HKLIN " + p_mtzin + "\n" +
                                      "XMLOUT " + pointless_xml() + "\n")
                body.file_stdin.close()

                rc = command.call("pointless", [], "./", pointless_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser)
                body.unsetLogParser()

                symmTablesId = fileSecId + "_" + symm_det()
                pyrvapi.rvapi_add_section(symmTablesId,
                                          "Symmetry determination tables",
                                          fileSecId, frow, 0, 1, 1, True)
                pyrvapi.rvapi_set_text(" ", fileSecId, frow + 1, 0, 1, 1)
                frow += 2
                #body.putSection ( symmTablesId,"Symmetry determination tables",True )

                table_list = datred_utils.parse_xmlout(pointless_xml())
                datred_utils.report(table_list, symmTablesId)

                # dump_keyargs = dict(sort_keys=True, indent=4, separators=(',', ': '))
                # print json.dumps(datred_utils.tabs_as_dict(tab_list), **dump_keyargs)

                if rc.msg:
                    msg = "\n\n Pointless failed with message:\n\n" + \
                          rc.msg + \
                          "\n\n File " + f_orig + \
                          " cannot be processed.\n\n"
                    body.file_stdout.write(msg)
                    body.file_stderr.write(msg)
                    body.putSummaryLine_red(f_orig, "UNMERGED",
                                            "Failed to process/import, ignored")

                else:
                    mf = mtz.mtz_file(p_mtzin)
                    dset_list = datred_utils.point_symm_datasets(
                        pointless_xml(), f_fmt)
                    body.summary_row_0 = -1  # to signal the beginning of summary row

                    for dataset in dset_list:

                        # make HKL dataset annotation
                        unmerged = dtype_unmerged.DType(body.job_id)
                        dataset["symm_summary"] = table_list
                        unmerged.importUnmergedData(mf, dataset)
                        body.dataSerialNo += 1
                        unmerged.makeDName(body.dataSerialNo)

                        outFileName = unmerged.dataId + ".mtz"
                        body.file_stdin = open(pointless_script(), 'w')
                        body.file_stdin.write(
                            "NAME PROJECT x CRYSTAL y DATASET z\n" +
                            "HKLIN " + p_mtzin + "\n" +
                            "HKLOUT " + os.path.join(body.outputDir(),
                                                     outFileName) + "\n" +
                            "COPY\n" +
                            "ORIGINALLATTICE\n")
                        for offset, first, last in unmerged.dataset.runs:
                            body.file_stdin.write("RUN 1 FILE 1 BATCH " +
                                                  str(first) + " to " +
                                                  str(last) + "\n")
                        body.file_stdin.write("END\n")
                        body.file_stdin.close()

                        rc = command.call("pointless", [], "./",
                                          pointless_script(),
                                          body.file_stdout, body.file_stderr,
                                          None)

                        if rc.msg:
                            msg = "\n\n Pointless failed with message:\n\n" + \
                                  rc.msg + \
                                  "\n\n File " + outFileName + \
                                  " cannot be processed.\n\n"
                            body.file_stdout.write(msg)
                            body.file_stderr.write(msg)
                            body.putSummaryLine_red(
                                outFileName, "UNMERGED",
                                "Failed to process/import, ignored")

                        else:
                            unmerged.files[0] = outFileName

                            subSecId = fileSecId
                            if len(dset_list) > 1:
                                subSecId = fileSecId + str(k)
                                pyrvapi.rvapi_add_section(
                                    subSecId,
                                    "Import " + unmerged.dataset.name,
                                    fileSecId, frow, 0, 1, 1, False)
                                frow += 1

                            mtzTableId = "unmerged_mtz_" + str(k) + "_table"

                            unmerged.makeUniqueFNames(body.outputDir())
                            body.outputDataBox.add_data(unmerged)
                            makeUnmergedTable(body, mtzTableId, subSecId,
                                              unmerged, 0)

                            pyrvapi.rvapi_set_text(
                                " <br><hr/><h3>Created Reflection Data Set (unmerged)</h3>" +
                                "<b>Assigned name:</b> " + unmerged.dname +
                                "<br> ", subSecId, frow, 0, 1, 1)
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Unmerged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             unmerged.files[0]),
                                "hkl:hkl", subSecId, frow + 1, 0, 1, 1, -1)
                            frow += 2

                        if body.summary_row_0 < 0:
                            body.putSummaryLine(f_orig, "UNMERGED",
                                                unmerged.dname)
                        else:
                            body.addSummaryLine("UNMERGED", unmerged.dname)
                        k += 1
                        pyrvapi.rvapi_flush()

                # move imported file into output directory
                os.rename(p_mtzin,
                          os.path.join(body.outputDir(),
                                       os.path.basename(p_mtzin)))

                body.file_stdout.write("... processed: " + f_orig + "\n ")

            trace = ''

        except:
            trace = ''.join(traceback.format_exception(*sys.exc_info()))
            body.file_stdout.write(trace)

        if trace:
            body.fail(trace, 'import failed')

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
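# NOTE: pointless_script(), pointless_xml() and symm_det() are referenced
# above but not defined in this listing. They are assumed to be trivial
# helpers returning fixed scratch-file names / widget-id stems; minimal
# sketches under that assumption (the names themselves are guesses):
def pointless_script():
    return "pointless.script"

def pointless_xml():
    return "pointless.xml"

def symm_det():
    return "symm_det_table"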
def run(self):

    # Check the existence of PDB archive
    self.checkPDB()

    # Prepare mrbump input
    # fetch input data
    seq = self.input_data.data.seq[0]
    hkl = None
    if hasattr(self.input_data.data, 'hkl'):  # optional data parameter
        hkl = self.input_data.data.hkl[0]

    # make a file with input script
    self.open_stdin()
    if hkl:
        self.write_stdin(
            "JOBID " + self.outdir_name() + "\n" +
            "MDLS False\n" +
            "MDLC True\n" +
            "MDLD False\n" +
            "MDLP False\n" +
            "MDLM False\n" +
            "MDLU False\n" +
            "MRPROG molrep phaser\n" +
            "SHELX False\n" +
            "BUCC True\n" +
            "BCYC 5\n" +
            "ARPW False\n" +
            "CHECK False\n" +
            "UPDATE False\n" +
            "PICKLE False\n" +
            "MRNUM 10\n" +
            "USEE True\n" +
            "SCOP False\n" +
            "DEBUG False\n" +
            "RLEVEL 95\n" +
            "GESE False\n" +
            "GEST False\n" +
            "AMPT False\n" +
            "PDBLOCAL " + os.environ["PDB_DIR"] + "\n" +
            "LABIN F=" + hkl.dataset.Fmean.value +
            " SIGF=" + hkl.dataset.Fmean.sigma +
            " FreeR_flag=" + hkl.dataset.FREE + "\n" +
            "LITE False\n" +
            "END\n")
    else:
        self.write_stdin(
            "JOBID " + self.outdir_name() + "\n" +
            "MDLS False\n" +
            "MDLC True\n" +
            "MDLD False\n" +
            "MDLP False\n" +
            "MDLM False\n" +
            "MDLU False\n" +
            "CHECK False\n" +
            "UPDATE False\n" +
            "PICKLE False\n" +
            "MRNUM 5\n" +
            "USEE True\n" +
            "SCOP False\n" +
            "DEBUG False\n" +
            "RLEVEL 95\n" +
            "GESE True\n" +
            "GEST True\n" +
            "AMPT False\n" +
            "IGNORE 5tha\n" +
            "DOPHMMER True\n" +
            "PDBLOCAL " + os.environ["PDB_DIR"] + "\n" +
            "DOHHPRED False\n" +
            "END\n")
    self.close_stdin()

    # make command-line parameters for mrbump run on a SHELL-type node
    cmd = ["seqin", os.path.join(self.inputDir(), seq.files[0])]
    if hkl:
        cmd += ["hklin", os.path.join(self.inputDir(), hkl.files[0])]

    # prepare report parser
    self.setGenericLogParser(self.mrbump_report(), True)

    # Start mrbump
    self.runApp("mrbump", cmd)
    self.unsetLogParser()

    # check solution and register data
    search_dir = "search_" + self.outdir_name()
    if os.path.isdir(search_dir):

        if hkl:
            molrep_dir = os.path.join(search_dir, "results", "solution",
                                      "mr", "molrep", "refine")
            if os.path.isdir(molrep_dir):
                dirlist = os.listdir(molrep_dir)
                xyzfile = None
                #mtzfile = None
                for filename in dirlist:
                    if filename.startswith("refmac"):
                        if filename.endswith(".pdb"):
                            xyzfile = os.path.join(molrep_dir, filename)
                        #if filename.endswith(".mtz"):
                        #    mtzfile = os.path.join(molrep_dir,filename)

                structure = self.finaliseStructure(xyzfile, "mrbump", hkl,
                                                   None, [seq], 1, False)
                if structure:
                    # update structure revision
                    revision = self.makeClass(self.input_data.data.revision[0])
                    revision.setStructureData(structure)
                    self.registerRevision(revision)
                else:
                    self.putTitle("No solution found")

        else:
            models_found = False
            ensembles_found = False
            models_dir = os.path.join(search_dir, "models")
            if os.path.isdir(models_dir):
                mdirlist = os.listdir(models_dir)
                domainNo = 1
                dirName = "domain_" + str(domainNo)
                while dirName in mdirlist:

                    secrow = 0
                    domains_dir = os.path.join(models_dir, dirName)
                    dirlist = os.listdir(domains_dir)
                    for filename in dirlist:
                        if filename.endswith(".pdb"):
                            if not models_found:
                                models_found = True
                                self.putTitle("Results")
                            if secrow == 0:
                                secId = "domain_sec_" + str(domainNo)
                                self.putSection(secId, "Domain " + str(domainNo))
                                pyrvapi.rvapi_add_text("<h2>Models found:</h2>",
                                                       secId, secrow, 0, 1, 1)
                                secrow += 1
                            xyz = self.registerXYZ(os.path.join(domains_dir,
                                                                filename))
                            if xyz:
                                xyz.addDataAssociation(seq.dataId)
                                pyrvapi.rvapi_add_data(
                                    "model_" + str(self.dataSerialNo) + "_btn",
                                    "Model #" + str(self.dataSerialNo).zfill(2),
                                    # always relative to job_dir from job_dir/html
                                    os.path.join("..", self.outputDir(),
                                                 xyz.files[0]),
                                    "xyz", secId, secrow, 0, 1, 1, -1)
                                secrow += 1

                    ensembles_dir = os.path.join(domains_dir, "ensembles")
                    ensembleSerNo = 0
                    if os.path.isdir(ensembles_dir):
                        for filename in os.listdir(ensembles_dir):
                            if filename.endswith(".pdb"):
                                if not ensembles_found:
                                    pyrvapi.rvapi_add_text(
                                        "<h2>Ensembles made:</h2>",
                                        secId, secrow, 0, 1, 1)
                                    ensembles_found = True
                                    secrow += 1
                                ensembleSerNo += 1
                                ensemble = self.registerEnsemble(
                                    seq,
                                    os.path.join(ensembles_dir, filename))
                                if ensemble:
                                    ensemble.addDataAssociation(seq.dataId)
                                    self.putEnsembleWidget1(
                                        secId,
                                        "ensemble_" + str(ensembleSerNo) + "_btn",
                                        "Ensemble #" + str(ensembleSerNo).zfill(2),
                                        ensemble, -1, secrow, 1)
                                    secrow += 1

                    domainNo += 1
                    dirName = "domain_" + str(domainNo)

            # ----------------------------------------------------------------
            if not models_found:
                self.putTitle("No models found")

    else:
        self.putTitle("No results produced")

    # close execution logs and quit
    self.success()

    return
def write_output(items, json_file=None, xml_file=None, xmlroot=None,
                 docid=None, output=None):
    # in non-i2 mode items are added to the output dictionary which is dumped to json
    if json_file is not None:
        if 'result' in items:
            result = items['result']
            for solution in output['solutions']:
                if solution['id'] == result['id']:
                    solution.update({'acornCC': result['acornCC']})
        else:
            output.update(items)
        temp_filename = json_file + '.tmp'
        with open(temp_filename, 'w') as jsonfile:
            print(json.dumps(output, sort_keys=True, indent=2,
                             separators=(',', ': ')), file=jsonfile)
        if os.path.exists(json_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(json_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, json_file)
        return output

    elif xmlroot is None and xml_file is not None:
        xmlroot = etree.Element('Fragon')
        return xmlroot

    elif docid is None:
        jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
        pyrvapi.rvapi_init_document('fragon_results', os.getcwd(),
                                    'Fragon %s results' % items['Fragon'],
                                    1, 7, jsrview_dir, None, None, None, None)
        pyrvapi.rvapi_add_tab('tab1', 'Fragon results', True)
        pyrvapi.rvapi_add_section('status', 'Current status', 'tab1',
                                  0, 0, 1, 1, True)
        pyrvapi.rvapi_add_text(
            'The job is currently running. Updates will be shown here after '
            'fragment placement and density modification.',
            'status', 0, 0, 1, 1)
        pyrvapi.rvapi_flush()
        output.update(items)
        return 'tab1', output

    elif xml_file is not None:
        # in i2 mode new items are added to the etree as this preserves the order in the xml
        for key in items:
            if key == 'Fragon':
                version_node = etree.SubElement(xmlroot, 'Version')
                version_node.text = output['Fragon']
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    try:
                        progress_node = xmlroot.xpath('//Fragon/phaser_progress')[0]
                    except IndexError:
                        progress_node = etree.SubElement(xmlroot,
                                                         'phaser_progress')
                    progress_node.text = callback[1]
                elif callback[0] == 'Best LLG/TFZ':
                    best_llg_node = etree.SubElement(xmlroot, 'best_llg')
                    best_llg_node.text = callback[1]['llg']
                    best_tfz_node = etree.SubElement(xmlroot, 'best_tfz')
                    best_tfz_node.text = callback[1]['tfz']
            elif key == 'solutions':
                solutions = items['solutions']
                try:
                    solutions_node = xmlroot.xpath('//Fragon/solutions')[0]
                except IndexError:
                    solutions_node = etree.SubElement(xmlroot, 'solutions')
                if len(solutions) > 0:
                    solutions_node.text = json.dumps(solutions)
            else:
                node = etree.SubElement(xmlroot, key)
                node.text = items[key].__str__()
        temp_filename = 'program.xml.tmp'
        with open(temp_filename, 'w') as xmlfile:
            xmlfile.write(etree.tostring(xmlroot, pretty_print=True))
        if os.path.exists(xml_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(xml_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, xml_file)

    elif docid is not None:
        for key in items:
            if key == 'copies':
                if items['copies'] > 1:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place %d fragments' % items['copies'],
                        'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place the fragment',
                        'status', 0, 0, 1, 1)
                pyrvapi.rvapi_add_tab('tab2', 'Phaser log file', False)
                pyrvapi.rvapi_append_content(output['root'] + '_Phaser.log',
                                             True, 'tab2')
                pyrvapi.rvapi_flush()
                output.update(items)
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    pyrvapi.rvapi_set_text(
                        'Current Phaser stage: %s' % callback[1],
                        'status', 1, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best LLG':
                    pyrvapi.rvapi_set_text(
                        'Current best solution Log Likelihood Gain (LLG): %s '
                        'Translation Function Z-score (TFZ): %s'
                        % (callback[1], output['best_tfz']),
                        'status', 2, 0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best TFZ':
                    output.update({'best_tfz': callback[1]})
            elif key == 'solutions':
                solutions = items['solutions']
                top_llg = sorted(solutions, key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions, key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted(
                    [solution['acornCC']
                     if solution['acornCC'] not in ['Running', '-', None]
                     else None
                     for solution in solutions],
                    reverse=True)[0]
                if len(solutions) == 1:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found a single solution with Log Likelihood '
                        'Gain (LLG) of %0.2f and Translation Function Z-score '
                        '(TFZ) of %0.2f' % (top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found %d solutions. The top solution has '
                        'Log Likelihood Gain (LLG) of %0.2f and Translation '
                        'Function Z-score (TF Z-score) of %0.2f'
                        % (output['num_phaser_solutions'], top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                if output['num_phaser_solutions'] > len(solutions):
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases for the top %d solutions '
                        'by density modification with ACORN' % len(solutions),
                        'status', 1, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases by density modification '
                        'with ACORN',
                        'status', 1, 0, 1, 1)
                if top_acornCC is not None:
                    pyrvapi.rvapi_set_text(
                        'The best solution so far has a correlation coefficient '
                        'from density modification of %0.3f' % top_acornCC,
                        'status', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('', 'status', 2, 0, 1, 1)
                pyrvapi.rvapi_add_table('results_table', 'Phaser solutions',
                                        'tab1', 1, 0, 1, 1, 1)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Solution number', '', 0)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Space group', '', 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'LLG',
                                               'Phaser Log Likelihood Gain', 2)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'TF Z-score',
                    'Phaser Translation Function Z-score', 3)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'CC',
                    'CC from ACORN density modification', 4)
                for solution in solutions:
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%d' % solution['number'],
                                                   solution['number'] - 1, 0)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   solution['sg'],
                                                   solution['number'] - 1, 1)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['llg'],
                                                   solution['number'] - 1, 2)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['tfz'],
                                                   solution['number'] - 1, 3)
                    if solution['acornCC'] in ['Running', '-']:
                        pyrvapi.rvapi_put_table_string(
                            'results_table',
                            solution['acornCC'].replace('-', ''),
                            solution['number'] - 1, 4)
                    elif solution['acornCC'] is None:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', 'Not tested',
                            solution['number'] - 1, 4)
                    else:
                        pyrvapi.rvapi_put_table_string(
                            'results_table',
                            '%0.3f' % solution['acornCC'],
                            solution['number'] - 1, 4)
                output.update(items)
                pyrvapi.rvapi_flush()
            elif key == 'cc_best':
                solutions = output['solutions']
                top_llg = sorted(solutions, key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions, key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted(
                    [solution['acornCC']
                     if solution['acornCC'] not in ['Running', '-', None]
                     else None
                     for solution in solutions],
                    reverse=True)[0]
                pyrvapi.rvapi_set_section_state('status', False)
                pyrvapi.rvapi_add_section('results', 'Results', 'tab1',
                                          2, 0, 1, 1, True)
                pyrvapi.rvapi_add_text(
                    'Phaser found %d solutions. The top solution had Log '
                    'Likelihood Gain (LLG) of %0.2f and Translation Function '
                    'Z-score (TFZ) of %0.2f'
                    % (output['num_phaser_solutions'], top_llg, top_tfz),
                    'results', 0, 0, 1, 1)
                pyrvapi.rvapi_add_text(
                    'The best solution has a correlation coefficient from '
                    'density modification of %0.3f' % top_acornCC,
                    'results', 1, 0, 1, 1)
                if top_acornCC > 0.15:
                    pyrvapi.rvapi_add_text(
                        'This suggests the structure has been solved and the '
                        'phases from ACORN will enable automated model building',
                        'results', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_add_text(
                        'Sorry this does not suggest a solution',
                        'results', 3, 0, 1, 1)
                pyrvapi.rvapi_flush()
            elif key == 'best_solution_id':
                pdbout = output['name'] + '_phaser_solution.pdb'
                mtzout = output['name'] + '_acorn_phases.mtz'
                pyrvapi.rvapi_add_data(
                    'best', 'Best fragment placement and electron density',
                    pdbout, 'xyz', 'tab1', 3, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data('best', mtzout, 'hkl:map')
            else:
                output.update(items)
    return output
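# Minimal usage sketch (hypothetical values): in non-i2 mode, pass the new
# items together with the JSON path and the running output dict;
# write_output() merges the items into the dict and rewrites the JSON file
# via a temporary file.
#
#     output = {'solutions': []}
#     output = write_output({'Fragon': '1.0', 'name': 'job1'},
#                           json_file='fragon.json', output=output)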