def getXYZMeta ( fpath,file_stdout,file_stderr,log_parser=None ):
    # returns chain information as the following dictionary:
    """
    {
      'cryst' : { 'spaceGroup': 'P 21 21 21',
                  'a'     : 64.897,
                  'b'     : 78.323,
                  'c'     : 38.792,
                  'alpha' : 90.00,
                  'beta'  : 90.00,
                  'gamma' : 90.00
                },
      'xyz' : [
        { 'model' : 1,
          'chains': [
            { 'id':'A', 'file':'rnase_model_1_A.pdb', 'type':'AA',
              'seq':'DVSGTVCLSALPPEATDTLNLIASDGPFPYSQDGVVFQNRESVLPTQSYGYYHEYTVITPGARTRGTRRIICGEATQEDYYTGDHYATFSLIDQTC',
              'size':96, 'ligands':[] },
            { 'id':'B', 'file':'rnase_model_1_B.pdb', 'type':'AA',
              'seq':'DVSGTVCLSALPPEATDTLNLIASDGPFPYSQDGVVFQNRESVLPTQSYGYYHEYTVITPGARTRGTRRIICGEATQEDYYTGDHYATFSLIDQTC',
              'size':96, 'ligands':['35S'] }
          ]
        }
      ],
      'ligands': ['35S']
    }
    """

    # write keyword script for pdbcur
    scr_file = open ( "pdbcur.script","w" )
    scr_file.write ( "PDB_META\nEND\n" )
    scr_file.close ()

    # start pdbcur
    rc = command.call ( "pdbcur",['XYZIN',fpath],"./",
                        "pdbcur.script",file_stdout,file_stderr,log_parser )

    # read pdbcur's json
    jsonpath = os.path.splitext(fpath)[0] + ".json"
    if not os.path.isfile(jsonpath):
        return None

    with open(jsonpath,'r') as json_file:
        json_str = json_file.read()

    return XYZMeta ( json_str )
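# A minimal usage sketch for getXYZMeta (illustrative only, never called from
# this module; "model.pdb" and the log file names are hypothetical). It shows
# how the returned metadata object is typically inspected, following the
# structure documented in the docstring above.

def _sketch_getXYZMeta_usage():
    with open("stdout.log", "w") as out, open("stderr.log", "w") as err:
        meta = getXYZMeta("model.pdb", out, err)
        if meta is None:
            return  # no json from pdbcur: not a recognisable coordinate file
        out.write("space group: " + meta["cryst"]["spaceGroup"] + "\n")
        for model in meta["xyz"]:
            for chain in model["chains"]:
                out.write("model " + str(model["model"]) +
                          " chain " + chain["id"] + ": " +
                          str(chain["size"]) + " residues\n")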
def run(body):  # body is reference to the main Import class

    files_xyz = []
    for f in body.files_all:
        fl = f.lower()
        if fl.endswith(('.pdb', '.cif', '.mmcif', '.ent')):
            files_xyz.append(f)

    if len(files_xyz) <= 0:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%% IMPORT OF XYZ COORDINATES\n")
    body.file_stdout.write("%" * 80 + "\n")

    xyzSecId = "xyz_sec_" + str(body.widget_no)
    body.widget_no += 1

    pyrvapi.rvapi_add_section(xyzSecId, "XYZ Coordinates",
                              body.report_page_id(), body.rvrow,
                              0, 1, 1, False)

    k = 0
    for f in files_xyz:

        body.files_all.remove(f)
        fpath = os.path.join(body.importDir(), f)
        #coor.stripLigWat ( fpath,fpath )  # strip ligands and waters

        # split input file to chains
        scr_file = open("pdbcur.script", "w")
        scr_file.write("SPLITTOCHAINS\nEND\n")
        scr_file.close()

        # start pdbcur
        rc = command.call("pdbcur", ['XYZIN', fpath], "./", "pdbcur.script",
                          body.file_stdout, body.file_stderr)

        # read pdbcur's json
        fnamesplit = os.path.splitext(f)
        fpathsplit = os.path.join(body.importDir(), fnamesplit[0]) + ".json"
        if not os.path.isfile(fpathsplit):
            body.putSummaryLine_red(f, "UNKNOWN",
                                    "Failed to recognise, ignored")
        else:
            with open(fpathsplit, 'r') as json_file:
                json_str = json_file.read()
            #xyzmeta = eval(json_str)
            xyzMeta = xyzmeta.XYZMeta(json_str)

            if len(xyzMeta["xyz"]) <= 0:
                body.putSummaryLine_red(f, "XYZ", "Empty file -- ignored")
            else:
                subSecId = xyzSecId
                if len(files_xyz) > 1:
                    subSecId = xyzSecId + str(k)
                    pyrvapi.rvapi_add_section(subSecId, "Import " + f,
                                              xyzSecId, k, 0, 1, 1, False)

                xyz = dtype_xyz.DType(body.job_id)
                xyz.setFile(f)
                dtype_xyz.setXYZMeta(xyz, xyzMeta)
                body.dataSerialNo += 1
                xyz.makeDName(body.dataSerialNo)
                os.rename(fpath, os.path.join(body.outputDir(), f))
                xyz.makeUniqueFNames(body.outputDir())

                body.outputDataBox.add_data(xyz)

                xyzTableId = "xyz_" + str(k) + "_table"
                body.putTable(xyzTableId, "", subSecId, 0)
                jrow = 0
                if len(files_xyz) <= 1:
                    body.putTableLine(xyzTableId, "File name",
                                      "Imported file name", f, jrow)
                    jrow += 1
                body.putTableLine(xyzTableId, "Assigned name",
                                  "Assigned data name", xyz.dname, jrow)
                crystData = getCrystData(xyzMeta)
                body.putTableLine(xyzTableId, "Space group",
                                  "Space group", crystData[0], jrow + 1)
                body.putTableLine(xyzTableId, "Cell parameters",
                                  "Cell parameters (a,b,c, α,β,γ)",
                                  crystData[1], jrow + 2)

                contents = ""
                nChains = 0
                for model in xyzMeta["xyz"]:
                    for chain in model["chains"]:
                        if chain["type"] != "UNK":
                            nChains += 1
                        if len(contents) > 0:
                            contents += "<br>"
                        contents += "Model " + str(model['model']) + \
                                    ", chain " + chain['id'] + ": " + \
                                    str(chain['size']) + \
                                    " residues, type: " + chain['type']
                if len(xyzMeta["ligands"]) > 0:
                    if len(contents) > 0:
                        contents += "<br>"
                    contents += "Ligands:"
                    for name in xyzMeta["ligands"]:
                        contents += " " + name
                body.putTableLine(xyzTableId, "Contents",
                                  "File contents", contents, jrow + 3)

                pyrvapi.rvapi_add_data(
                    xyzTableId + "_structure_btn", xyz.dname + " ",
                    # always relative to job_dir from job_dir/html
                    os.path.join("..", body.outputDir(), xyz.files[0]),
                    "xyz", subSecId, 1, 0, 1, 1, -1)

                body.putSummaryLine(f, "XYZ", xyz.dname)

                """
                if nChains > 1:
                    irow = 2
                    for model in xyzMeta["xyz"]:
                        for chain in model['chains']:
                            if chain["type"] != "UNK":
                                fname = fnamesplit[0] + "_" + \
                                        str(model['model']) + "_" + \
                                        chain['id'] + fnamesplit[1]
                                xyz = dtype_xyz.DType ( body.job_id )
                                xyz.setFile ( fname )
                                mdl = {}
                                mdl['model']  = model['model']
                                mdl['chains'] = [chain]
                                xyz_meta = {}
                                xyz_meta["cryst"]   = xyzMeta["cryst"]
                                xyz_meta["xyz"]     = [mdl]
                                xyz_meta["ligands"] = chain["ligands"]
                                dtype_xyz.setXYZMeta ( xyz,xyz_meta )
                                body.dataSerialNo += 1
                                xyz.makeDName ( body.dataSerialNo )
                                os.rename ( os.path.join(body.importDir(),fname),
                                            os.path.join(body.outputDir(),fname) )
                                xyz.makeUniqueFNames ( body.outputDir() )
                                body.outputDataBox.add_data ( xyz )
                                xyzTableId = "xyz_" + str(k) + "_" + \
                                             str(model['model']) + "_" + \
                                             chain['id'] + "_table"
                                body.putMessage1 ( subSecId," ",irow )
                                body.putTable ( xyzTableId,"",subSecId,irow+1 )
                                body.putTableLine ( xyzTableId,"Assigned name",
                                                    "Assigned data name",
                                                    xyz.dname,0 )
                                crystData = getCrystData ( xyz_meta )
                                body.putTableLine ( xyzTableId,"Space group",
                                                    "Space group",crystData[0],1 )
                                body.putTableLine ( xyzTableId,"Cell parameters",
                                                    "Cell parameters (a,b,c, α,β,γ)",
                                                    crystData[1],2 )
                                contents = "Model " + str(model['model']) + \
                                           ", chain " + chain['id'] + ": " + \
                                           str(chain['size'])
                                contents += " residues, type: " + chain['type']
                                if len(xyz.xyzmeta["ligands"]) > 0:
                                    contents += "<br>Ligands:"
                                    for name in xyz.xyzmeta["ligands"]:
                                        contents += " " + name
                                body.putTableLine ( xyzTableId,"Contents",
                                                    "File contents",contents,3 )
                                pyrvapi.rvapi_add_data (
                                    xyzTableId+"_structure_btn",xyz.dname,
                                    # always relative to job_dir from job_dir/html
                                    os.path.join("..",body.outputDir(),xyz.files[0]),
                                    "xyz",subSecId,irow+2,0,1,1,-1 )
                                #fdebug = open ( "_debug.txt",'a' )
                                #fdebug.write ( fname + "\n")
                                #fdebug.close()
                                body.addSummaryLine ( "XYZ",xyz.dname )
                                irow += 3
                """

        body.file_stdout.write("... processed: " + f + "\n")
        k += 1

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
def run(body,                  # body is reference to the main Import class
        sectionTitle="Reflection datasets created",
        sectionOpen=False,     # to keep result section closed if several datasets
        freeRflag=True         # will be run if necessary
        ):

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_merged', 'mtz_merged'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    mtzSecId = body.getWidgetId("mtz_sec") + "_"

    k = 0
    for f_orig, f_fmt in files_mtz:
        body.files_all.remove(f_orig)
        p_orig = os.path.join(body.importDir(), f_orig)
        p_mtzin = p_orig
        if not f_fmt.startswith('mtz_'):
            # convert XDS reflection file to MTZ with pointless
            p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
            sp = subprocess.Popen('pointless', stdin=subprocess.PIPE,
                                  stdout=body.file_stdout,
                                  stderr=body.file_stderr)
            sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                           '\nCOPY\n')
            sp.stdin.close()
            if sp.wait():
                p_mtzin = None

        if p_mtzin:
            p_mtzout = p_mtzin
            rc = command.comrc()
            if freeRflag:
                p_mtzout = os.path.join(body.outputDir(),
                                        os.path.basename(f_orig))
                if k == 0:
                    scr_file = open(freerflag_script(), "w")
                    scr_file.write("UNIQUE\n")
                    scr_file.close()
                # run freerflag: generate FreeRFlag if it is absent, and
                # expand all reflections
                rc = command.call("freerflag",
                                  ["HKLIN", p_mtzin, "HKLOUT", p_mtzout],
                                  "./", freerflag_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser=None)

            if rc.msg:
                msg = "\n\n Freerflag failed with message:\n\n" + \
                      rc.msg + \
                      "\n\n File " + f_orig + \
                      " cannot be processed.\n\n"
                body.file_stdout.write(msg)
                body.file_stderr.write(msg)
                body.putSummaryLine_red(f_orig, "MTZ",
                                        "Failed to process/import, ignored")

            else:
                mf = mtz.mtz_file(p_mtzout)
                body.summary_row_0 = -1  # to signal the beginning of summary row

                for ds in mf:
                    if k == 0:
                        body.file_stdout.write("\n" + "%" * 80 + "\n")
                        body.file_stdout.write("%%%%% IMPORT REFLECTION DATA\n")
                        body.file_stdout.write("%" * 80 + "\n")

                    # make HKL dataset annotation
                    hkl = dtype_hkl.DType(body.job_id)
                    hkl.importMTZDataset(ds)
                    body.dataSerialNo += 1
                    hkl.makeDName(body.dataSerialNo)
                    datasetName = ""

                    if k == 0:
                        if sectionTitle:
                            pyrvapi.rvapi_add_section(mtzSecId, sectionTitle,
                                                      body.report_page_id(),
                                                      body.rvrow, 0, 1, 1,
                                                      sectionOpen)
                        else:
                            pyrvapi.rvapi_add_section(
                                mtzSecId,
                                "Reflection dataset created: " + hkl.dname,
                                body.report_page_id(),
                                body.rvrow, 0, 1, 1, sectionOpen)

                    subSecId = mtzSecId
                    if len(files_mtz) > 1 or len(mf) > 1:
                        subSecId = mtzSecId + str(k)
                        pyrvapi.rvapi_add_section(subSecId, hkl.dname,
                                                  mtzSecId, k, 0, 1, 1, False)
                        #pyrvapi.rvapi_add_section ( subSecId,
                        #            f_orig + " / " + hkl.getDataSetName(),
                        #            mtzSecId,k,0,1,1,False )

                    # run ctruncate
                    outFileName = os.path.join(body.outputDir(),
                                               hkl.dataId + ".mtz")
                    outXmlName = os.path.join("ctruncate" + hkl.dataId + ".xml")
                    cmd = ["-hklin", p_mtzout, "-hklout", outFileName]

                    amplitudes = ""
                    meanCols = hkl.getMeanColumns()
                    if meanCols[2] != "X":
                        cols = "/*/*/["
                        if meanCols[1] != None:
                            cols = cols + meanCols[0] + "," + meanCols[1]
                        else:
                            cols = cols + meanCols[0]
                        if meanCols[2] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colin", cols + "]"]

                    anomCols = hkl.getAnomalousColumns()
                    anomalous = False
                    if anomCols[4] != "X":
                        anomalous = True
                        cols = "/*/*/["
                        for i in range(0, 4):
                            if anomCols[i] != None:
                                if i > 0:
                                    cols = cols + ","
                                cols = cols + anomCols[i]
                        if anomCols[4] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colano", cols + "]"]

                    if amplitudes:
                        cmd += [amplitudes]

                    cmd += ["-xmlout", outXmlName]
                    cmd += ["-freein"]

                    pyrvapi.rvapi_add_text(
                        " <p><h2>Data analysis (CTruncate)</h2>",
                        subSecId, 1, 0, 1, 1)
                    pyrvapi.rvapi_add_panel(mtzSecId + str(k), subSecId,
                                            2, 0, 1, 1)

                    """
                    log_parser = pyrvapi_ext.parsers.generic_parser ( mtzSecId+str(k),
                                    False,body.generic_parser_summary,False )
                    rc = command.call ( "ctruncate",cmd,"./",None,
                                        body.file_stdout,body.file_stderr,log_parser )
                    """
                    body.file_stdin = None  # not clear why this is not None at
                                            # this point and needs to be forced,
                                            # or else runApp looks for input script
                    body.setGenericLogParser(mtzSecId + str(k), False)
                    body.runApp("ctruncate", cmd)

                    body.file_stdout.flush()

                    mtzTableId = body.getWidgetId("mtz") + "_" + str(k) + "_table"

                    if rc.msg:
                        msg = "\n\n CTruncate failed with message:\n\n" + \
                              rc.msg + \
                              "\n\n Dataset " + hkl.dname + \
                              " cannot be used.\n\n"
                        body.file_stdout.write(msg)
                        body.file_stderr.write(msg)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl,
                                     -1, msg, 0)
                        datasetName = hkl.dname

                    elif not os.path.exists(outFileName):
                        body.file_stdout.write(
                            "\n\n +++ Dataset " + hkl.dname +
                            "\n was not truncated and will be used as is\n\n")
                        hkl.makeUniqueFNames(body.outputDir())
                        body.outputDataBox.add_data(hkl)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl,
                                     0, "", 0)
                        datasetName = hkl.dname

                        srf.putSRFDiagram(body, hkl, body.outputDir(),
                                          body.reportDir(), subSecId,
                                          3, 0, 1, 1,
                                          body.file_stdout, body.file_stderr,
                                          None)

                        pyrvapi.rvapi_set_text(
                            " <br><hr/><h3>Created Reflection Data Set (merged)</h3>" +
                            "<b>Assigned name:</b> " + datasetName + "<br> ",
                            subSecId, 4, 0, 1, 1)
                        pyrvapi.rvapi_add_data(
                            "hkl_data_" + str(body.dataSerialNo),
                            "Merged reflections",
                            # always relative to job_dir from job_dir/html
                            os.path.join("..", body.outputDir(), hkl.files[0]),
                            "hkl:hkl", subSecId, 5, 0, 1, 1, -1)

                    else:
                        body.file_stdout.write(
                            "\n\n ... Dataset " + hkl.dname +
                            "\n was truncated and will substitute the " +
                            "original one\n\n")
                        mtzf = mtz.mtz_file(outFileName)
                        # ctruncate should create a single dataset here
                        for dset in mtzf:
                            dset.MTZ = os.path.basename(outFileName)
                            hkl_data = dtype_hkl.DType(body.job_id)
                            hkl_data.importMTZDataset(dset)
                            hkl_data.dname = hkl.dname
                            hkl_data.dataId = hkl.dataId
                            hkl_data.makeUniqueFNames(body.outputDir())
                            body.outputDataBox.add_data(hkl_data)
                            makeHKLTable(body, mtzTableId, subSecId,
                                         hkl, hkl_data, 1, "", 0)
                            datasetName = hkl_data.dname

                            srf.putSRFDiagram(body, hkl_data, body.outputDir(),
                                              body.reportDir(), subSecId,
                                              3, 0, 1, 1,
                                              body.file_stdout,
                                              body.file_stderr, None)

                            pyrvapi.rvapi_set_text(
                                " <br><hr/><h3>Created Reflection Data Set (merged)</h3>" +
                                "<b>Assigned name:</b> " + datasetName + "<br> ",
                                subSecId, 4, 0, 1, 1)
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Merged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             hkl_data.files[0]),
                                "hkl:hkl", subSecId, 5, 0, 1, 1, -1)

                    if body.summary_row_0 < 0:
                        body.putSummaryLine(f_orig, "HKL", datasetName)
                    else:
                        body.addSummaryLine("HKL", datasetName)
                    k += 1
                    pyrvapi.rvapi_flush()

                if len(mf) <= 0:
                    body.putSummaryLine_red(f_orig, "UNKNOWN", "-- ignored")

        body.file_stdout.write("... processed: " + f_orig + "\n ")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
def run(body):  # body is reference to the main Import class

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_integrated', 'xds_scaled', 'mtz_integrated'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    unmergedSecId = "unmerged_mtz_sec_" + str(body.widget_no)
    body.widget_no += 1

    k = 0
    for f_orig, f_fmt in files_mtz:
        try:
            body.files_all.remove(f_orig)
            p_orig = os.path.join(body.importDir(), f_orig)
            p_mtzin = p_orig
            if not f_fmt.startswith('mtz_'):
                # convert XDS reflection file to MTZ with pointless
                p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
                sp = subprocess.Popen('pointless', stdin=subprocess.PIPE,
                                      stdout=body.file_stdout,
                                      stderr=body.file_stderr)
                sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                               '\nCOPY\n')
                sp.stdin.close()
                if sp.wait():
                    p_mtzin = None

            if p_mtzin:
                if k == 0:
                    body.file_stdout.write("\n" + "%" * 80 + "\n")
                    body.file_stdout.write("%%%%% UNMERGED DATA IMPORT\n")
                    body.file_stdout.write("%" * 80 + "\n")
                    pyrvapi.rvapi_add_section(unmergedSecId,
                                              "Unmerged datasets",
                                              body.report_page_id(),
                                              body.rvrow, 0, 1, 1, False)
                    urow = 0

                fileSecId = unmergedSecId
                frow = 0
                if len(files_mtz) > 1:
                    fileSecId = unmergedSecId + "_" + str(k)
                    pyrvapi.rvapi_add_section(fileSecId, "File " + f_orig,
                                              unmergedSecId, urow,
                                              0, 1, 1, False)
                    urow += 1
                    pyrvapi.rvapi_set_text(
                        "<h2>Data analysis (Pointless)</h2>",
                        fileSecId, frow, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        "<h2>Data analysis (Pointless)</h2>" +
                        "<h3>File: " + f_orig + "</h3>",
                        fileSecId, frow, 0, 1, 1)

                reportPanelId = fileSecId + "_report"
                pyrvapi.rvapi_add_panel(reportPanelId, fileSecId,
                                        frow + 1, 0, 1, 1)
                frow += 2

                log_parser = pyrvapi_ext.parsers.generic_parser(reportPanelId,
                                                                False)

                body.file_stdin = open(pointless_script(), 'w')
                body.file_stdin.write("HKLIN " + p_mtzin + "\n" +
                                      "XMLOUT " + pointless_xml() + "\n")
                body.file_stdin.close()

                rc = command.call("pointless", [], "./", pointless_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser)
                body.unsetLogParser()

                symmTablesId = fileSecId + "_" + symm_det()
                pyrvapi.rvapi_add_section(symmTablesId,
                                          "Symmetry determination tables",
                                          fileSecId, frow, 0, 1, 1, True)
                pyrvapi.rvapi_set_text(" ", fileSecId, frow + 1, 0, 1, 1)
                frow += 2
                #body.putSection ( symmTablesId,"Symmetry determination tables",True )

                table_list = datred_utils.parse_xmlout(pointless_xml())
                datred_utils.report(table_list, symmTablesId)

                # dump_keyargs = dict(sort_keys=True, indent=4,
                #                     separators=(',', ': '))
                # print json.dumps(datred_utils.tabs_as_dict(tab_list),
                #                  **dump_keyargs)

                if rc.msg:
                    msg = "\n\n Pointless failed with message:\n\n" + \
                          rc.msg + \
                          "\n\n File " + f_orig + \
                          " cannot be processed.\n\n"
                    body.file_stdout.write(msg)
                    body.file_stderr.write(msg)
                    body.putSummaryLine_red(f_orig, "UNMERGED",
                                            "Failed to process/import, ignored")

                else:
                    mf = mtz.mtz_file(p_mtzin)
                    dset_list = datred_utils.point_symm_datasets(
                        pointless_xml(), f_fmt)
                    body.summary_row_0 = -1  # to signal the beginning of summary row

                    for dataset in dset_list:
                        # make HKL dataset annotation
                        unmerged = dtype_unmerged.DType(body.job_id)
                        dataset["symm_summary"] = table_list
                        unmerged.importUnmergedData(mf, dataset)
                        body.dataSerialNo += 1
                        unmerged.makeDName(body.dataSerialNo)

                        outFileName = unmerged.dataId + ".mtz"
                        body.file_stdin = open(pointless_script(), 'w')
                        body.file_stdin.write(
                            "NAME PROJECT x CRYSTAL y DATASET z\n" +
                            "HKLIN " + p_mtzin + "\n" +
                            "HKLOUT " + os.path.join(body.outputDir(),
                                                     outFileName) + "\n" +
                            "COPY\n" +
                            "ORIGINALLATTICE\n")

                        for offset, first, last in unmerged.dataset.runs:
                            body.file_stdin.write("RUN 1 FILE 1 BATCH " +
                                                  str(first) + " to " +
                                                  str(last) + "\n")
                        body.file_stdin.write("END\n")
                        body.file_stdin.close()

                        rc = command.call("pointless", [], "./",
                                          pointless_script(),
                                          body.file_stdout,
                                          body.file_stderr, None)

                        if rc.msg:
                            msg = "\n\n Pointless failed with message:\n\n" + \
                                  rc.msg + \
                                  "\n\n File " + outFileName + \
                                  " cannot be processed.\n\n"
                            body.file_stdout.write(msg)
                            body.file_stderr.write(msg)
                            body.putSummaryLine_red(outFileName, "UNMERGED",
                                                    "Failed to process/import, ignored")

                        else:
                            unmerged.files[0] = outFileName

                            subSecId = fileSecId
                            if len(dset_list) > 1:
                                subSecId = fileSecId + str(k)
                                pyrvapi.rvapi_add_section(
                                    subSecId,
                                    "Import " + unmerged.dataset.name,
                                    fileSecId, frow, 0, 1, 1, False)
                                frow += 1

                            mtzTableId = "unmerged_mtz_" + str(k) + "_table"
                            unmerged.makeUniqueFNames(body.outputDir())

                            body.outputDataBox.add_data(unmerged)
                            makeUnmergedTable(body, mtzTableId, subSecId,
                                              unmerged, 0)

                            pyrvapi.rvapi_set_text(
                                " <br><hr/><h3>Created Reflection Data Set (unmerged)</h3>" +
                                "<b>Assigned name:</b> " + unmerged.dname +
                                "<br> ", subSecId, frow, 0, 1, 1)
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Unmerged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             unmerged.files[0]),
                                "hkl:hkl", subSecId, frow + 1, 0, 1, 1, -1)
                            frow += 2

                        if body.summary_row_0 < 0:
                            body.putSummaryLine(f_orig, "UNMERGED",
                                                unmerged.dname)
                        else:
                            body.addSummaryLine("UNMERGED", unmerged.dname)

                        k += 1
                        pyrvapi.rvapi_flush()

                # move imported file into output directory
                os.rename(p_mtzin,
                          os.path.join(body.outputDir(),
                                       os.path.basename(p_mtzin)))

                body.file_stdout.write("... processed: " + f_orig + "\n ")

            trace = ''

        except:
            trace = ''.join(traceback.format_exception(*sys.exc_info()))
            body.file_stdout.write(trace)

        if trace:
            body.fail(trace, 'import failed')

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
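# Each dataset extraction above drives pointless with a small keyword script.
# The generator below is a hypothetical helper (not called by run()) that
# builds the same script text; "runs" follows the (offset, first, last)
# triplets of unmerged.dataset.runs, where offset is unused here just as in
# the loop above.

def _sketch_pointless_copy_script(hklin, hklout, runs):
    lines = ["NAME PROJECT x CRYSTAL y DATASET z",
             "HKLIN " + hklin,
             "HKLOUT " + hklout,
             "COPY",
             "ORIGINALLATTICE"]
    for offset, first, last in runs:
        lines.append("RUN 1 FILE 1 BATCH " + str(first) + " to " + str(last))
    lines.append("END")
    return "\n".join(lines) + "\n"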
def putSRFDiagram(
        body,              # reference on Basic class
        hkl,               # hkl data object
        dirPath,           # directory with hkl object files (outputDir)
        reportDir,         # directory with html report (reportDir)
        holderId,          # rvapi holder of SRF widget
        row, col,          # rvapi coordinates for SRF widget
        rowSpan, colSpan,  # coordinate spans for SRF widget
        file_stdout,       # standard output stream
        file_stderr,       # standard error stream
        log_parser=None    # log file parser
        ):

    fpath = hkl.getFilePath(dirPath, 0)
    Fmean = hkl.getMeta("Fmean.value", "")
    sigF = hkl.getMeta("Fmean.sigma", "")

    if Fmean == "" or sigF == "":
        file_stderr.write("Fmean and sigFmean columns not found in " +
                          hkl.files[0] + " -- SRF not calculated\n")
        return [-1, "Fmean and sigFmean columns not found"]

    scr_file = open("molrep_srf.script", "w")
    scr_file.write("file_f " + fpath +
                   "\nlabin F=" + Fmean + " SIGF=" + sigF + "\n")
    scr_file.close()

    """
    cols = hkl.getMeanColumns()
    if cols[2] != "F":
        file_stderr.write ( "Fmean and sigFmean columns not found in " +\
                            hkl.files[0] + " -- SRF not calculated\n" )
        return [-1,"Fmean and sigFmean columns not found"]
    scr_file = open ( "molrep_srf.script","w" )
    scr_file.write ( "file_f " + fpath +\
                     "\nlabin F=" + cols[0] + " SIGF=" + cols[1] + "\n" )
    scr_file.close ()
    """

    # start molrep
    rc = command.call("molrep", ["-i"], "./", "molrep_srf.script",
                      file_stdout, file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.ps"):
        file_stderr.write("\nSRF postscript was not generated for " +
                          hkl.files[0] + "\n")
        return [-2, rc.msg]

    rc = command.call("ps2pdf", ["molrep_rf.ps"], "./", None,
                      file_stdout, file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.pdf"):
        file_stderr.write("\nSRF pdf was not generated for " +
                          hkl.files[0] + "\n")
        return [-3, rc.msg]

    pdfpath = os.path.splitext(hkl.files[0])[0] + ".pdf"
    os.rename("molrep_rf.pdf", os.path.join(reportDir, pdfpath))

    subsecId = body.getWidgetId(holderId) + "_srf"
    pyrvapi.rvapi_add_section(subsecId, "Self-Rotation Function", holderId,
                              row, col, rowSpan, colSpan, False)
    pyrvapi.rvapi_set_text(
        "<object data=\"" + pdfpath +
        "\" type=\"application/pdf\" " +
        "style=\"border:none;width:100%;height:1000px;\"></object>",
        subsecId, 0, 0, 1, 1)
    pyrvapi.rvapi_flush()

    return [0, "Ok"]
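# A caller's-eye sketch for putSRFDiagram (illustrative only; "body" is any
# object exposing getWidgetId, and the stream, directory, and widget names are
# hypothetical). The first element of the returned pair distinguishes the
# outcomes coded above: 0 ok, -1 no Fmean/sigF columns, -2 molrep produced no
# postscript, -3 ps2pdf produced no pdf.

def _sketch_srf_call(body, hkl):
    with open("stdout.log", "w") as out, open("stderr.log", "w") as err:
        code, msg = putSRFDiagram(body, hkl, "output", "report",
                                  "report_holder", 0, 0, 1, 1, out, err)
        if code != 0:
            err.write("SRF diagram skipped: " + msg + "\n")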
        file_stdout.write(" wrong command specification " +
                          "'pycofe.tasks.rvapiapp' (" +
                          task.rvapi_command + ")")
        signal.TaskReadFailure().quitApp()

# ============================================================================
# Run job

file_stdout.write("[" + job_id.zfill(4) + "] RVAPI Application " +
                  app.upper() + "\n\n")
file_stderr.write(" ")

rc = command.call(app, args, "./", None, file_stdout, file_stderr,
                  log_parser=None)

# ============================================================================
# close execution logs and quit

file_stdout.close()
file_stderr.close()

if rc.msg == "":
    signal.Success().quitApp()
elif messagebox:
    messagebox.displayMessage(
        "Failed to launch",
        "<b>Failed to launch " + task.rvapi_command +