Ejemplo n.º 1
0
 def rm_pending_section(self):
     """Remove the 'pending' section widget from the summary tab, if present."""
     sec_id = self.summary_tab_pending_sec_id
     if not sec_id:
         return
     # Flush around the removal so the GUI reflects the change promptly.
     pyrvapi.rvapi_flush()
     pyrvapi.rvapi_remove_widget(sec_id)
     pyrvapi.rvapi_flush()
     self.summary_tab_pending_sec_id = None
Ejemplo n.º 2
0
    def display_results(self, summarize, results_to_display):
        """Create any result tabs whose csv files now exist, then refresh the GUI."""
        if not (self.display_gui or self.ccp4i2):
            return

        # (done-flag attribute, work subdir, search csv, MR csv, tab builder)
        searches = (
            ('lattice_search_results_displayed', 'latt',
             'lattice_search.csv', 'lattice_mr.csv',
             self.create_lattice_results_tab),
            ('contaminant_results_displayed', 'cont',
             'rot_search.csv', 'cont_mr.csv',
             self.create_contaminant_results_tab),
            ('morda_results_displayed', 'morda',
             'rot_search.csv', 'morda_mr.csv',
             self.create_morda_db_results_tab),
        )
        for done_flag, subdir, search_name, mr_name, build_tab in searches:
            if getattr(self, done_flag):
                continue
            search_csv = os.path.join(self.work_dir, subdir, search_name)
            mr_csv = os.path.join(self.work_dir, subdir, mr_name)
            # Only build the tab once at least one of the csv files exists.
            if os.path.isfile(search_csv) or os.path.isfile(mr_csv):
                build_tab(search_csv, mr_csv, results_to_display)
                setattr(self, done_flag, True)

        if summarize:
            self.display_summary_tab()

        pyrvapi.rvapi_flush()
Ejemplo n.º 3
0
 def create_results_tab(self, ample_dict):
     """Create/refresh the 'Results' tab with the top MRBUMP results.

     Parameters
     ----------
     ample_dict : dict
         An AMPLE job dictionary.

     Returns
     -------
     str or None
         The results tab id, or None if there is nothing (new) to display.
     """
     if self.ccp4i2 or not self.summary_tab_id or not self._got_mrbump_results(
             ample_dict):
         return
     mrb_results = ample_dict.get('mrbump_results')
     # Nothing new to show - avoid rebuilding an identical tab.
     if mrb_results == self.old_mrbump_results:
         return
     self.old_mrbump_results = mrb_results
     if not self.results_tab_id:
         self.results_tab_id = "results_tab"
         pyrvapi.rvapi_insert_tab(self.results_tab_id, "Results",
                                  self.summary_tab_id, False)
     # Delete old sections:
     pyrvapi.rvapi_flush()
     for section_id in self.results_tab_sections:
         pyrvapi.rvapi_remove_widget(section_id)
     pyrvapi.rvapi_flush()
     self.results_tab_sections = []
     # BUG FIX: the membership test was "'ensembles_data' in
     # ample_dict['ensembles_data']" - i.e. looking inside the value rather
     # than the dict, which raised KeyError when the key was absent (the very
     # case the guard was meant to handle). dict.get() gives the intended
     # "value or None" semantics.
     ensemble_results = ample_dict.get('ensembles_data')
     mrbsum = mrbump_util.ResultsSummary(
         results=mrb_results[0:min(len(mrb_results), mrbump_util.TOP_KEEP)])
     mrbsum.sortResults(prioritise="SHELXE_CC")
     self.results_section(
         self.results_tab_id, mrbsum.results, ensemble_results,
         "Top {0} SHELXE Results".format(mrbump_util.TOP_KEEP))
     mrbsum.sortResults(prioritise="PHASER_TFZ")
     # Add seperator between results - doesn't work as not deleted on refresh
     # pyrvapi.rvapi_add_text("<br/><hr/><br/>", self.results_tab_id, 0, 0, 1, 1)
     self.results_section(
         self.results_tab_id, mrbsum.results, ensemble_results,
         "Top {0} PHASER Results".format(mrbump_util.TOP_KEEP))
     return self.results_tab_id
Ejemplo n.º 4
0
 def rm_pending_section(self):
     """Drop the pending-results section from the summary tab if one exists."""
     if not self.summary_tab_pending_sec_id:
         return
     pyrvapi.rvapi_flush()
     pyrvapi.rvapi_remove_widget(self.summary_tab_pending_sec_id)
     pyrvapi.rvapi_flush()
     # Forget the id so a second call does not try to remove the widget again.
     self.summary_tab_pending_sec_id = None
Ejemplo n.º 5
0
 def create_results_tab(self, ample_dict):
     """Create/refresh the 'Results' tab with the top MRBUMP results.

     Parameters
     ----------
     ample_dict : dict
         An AMPLE job dictionary.

     Returns
     -------
     str or None
         The results tab id, or None if there is nothing (new) to display.
     """
     if self.ccp4i2 or not self.summary_tab_id or not self._got_mrbump_results(ample_dict):
         return
     mrb_results = ample_dict.get('mrbump_results')
     # Unchanged since last call - nothing to redraw.
     if mrb_results == self.old_mrbump_results:
         return
     self.old_mrbump_results = mrb_results
     if not self.results_tab_id:
         self.results_tab_id = "results_tab"
         pyrvapi.rvapi_insert_tab(self.results_tab_id,
                                  "Results", self.summary_tab_id, False)
     # Delete old sections:
     pyrvapi.rvapi_flush()
     for section_id in self.results_tab_sections:
         pyrvapi.rvapi_remove_widget(section_id)
     pyrvapi.rvapi_flush()
     self.results_tab_sections = []
     # BUG FIX: previously tested "'ensembles_data' in ample_dict['ensembles_data']",
     # i.e. membership in the value instead of the dict, raising KeyError when the
     # key was missing. dict.get() yields the intended value-or-None.
     ensemble_results = ample_dict.get('ensembles_data')
     mrbsum = mrbump_util.ResultsSummary(results=mrb_results[0:min(len(mrb_results), mrbump_util.TOP_KEEP)])
     mrbsum.sortResults(prioritise="SHELXE_CC")
     self.results_section(self.results_tab_id,
                          mrbsum.results,
                          ensemble_results,
                          "Top {0} SHELXE Results".format(mrbump_util.TOP_KEEP))
     mrbsum.sortResults(prioritise="PHASER_TFZ")
     self.results_section(self.results_tab_id,
                          mrbsum.results,
                          ensemble_results,
                          "Top {0} PHASER Results".format(mrbump_util.TOP_KEEP))
     return self.results_tab_id
Ejemplo n.º 6
0
    def display_results(self, summarize, results_to_display):
        """Build any result tabs whose csv files have appeared, then flush the GUI."""
        if not (self.display_gui or self.ccp4i2):
            return

        # (done-flag attribute, work subdir, search csv, MR csv, tab builder)
        pending = (
            ('lattice_search_results_displayed', 'latt',
             'lattice_search.csv', 'lattice_mr.csv',
             self.create_lattice_results_tab),
            ('contaminant_results_displayed', 'cont',
             'rot_search.csv', 'cont_mr.csv',
             self.create_contaminant_results_tab),
            ('morda_results_displayed', 'morda',
             'rot_search.csv', 'morda_mr.csv',
             self.create_morda_db_results_tab),
        )
        for done_flag, subdir, search_name, mr_name, build_tab in pending:
            if getattr(self, done_flag):
                continue
            search_csv = os.path.join(self.work_dir, subdir, search_name)
            mr_csv = os.path.join(self.work_dir, subdir, mr_name)
            # A tab is only worth creating once at least one csv exists.
            if os.path.isfile(search_csv) or os.path.isfile(mr_csv):
                build_tab(search_csv, mr_csv, results_to_display)
                setattr(self, done_flag, True)

        if summarize:
            self.display_summary_tab()

        self.display_citation_tab()

        pyrvapi.rvapi_flush()
Ejemplo n.º 7
0
def exit_error(msg, ample_tb=None):
    """Log a fatal error with as much context as possible, then exit.

    Parameters
    ----------
    msg : str
        The error message to display.
    ample_tb : traceback, optional
        A traceback object - this can be got from sys.exc_info()[2].
    """
    logger = logging.getLogger()

    # Logging may not have been configured yet if the failure happened early.
    if not logger.handlers:
        logging.basicConfig(format='%(message)s\n', level=logging.DEBUG)
        logger = logging.getLogger()

    banner = "*" * 70
    header = banner + "\n" \
        + "*" * 20 + " " * 10 + "AMPLE ERROR" + " " * 10 + "*" * 19 + "\n" \
        + banner + "\n\n"

    # Assemble the footer, mentioning the debug log file when one exists.
    footer_parts = ["\n\n" + banner + "\n\n"]
    debug_log = _debug_logfile(logger)
    if debug_log:
        footer_parts.append(
            "More information may be found in the debug log file: {0}\n".format(debug_log))
    footer_parts.append(
        "\nIf you believe that this is an error with AMPLE, please email: [email protected]\n")
    footer_parts.append(
        "providing as much information as you can about how you ran the program.\n")
    if debug_log:
        footer_parts.append(
            "\nPlease include the debug logfile with your email: {0}\n".format(debug_log))

    # Print out main message
    logger.critical(header + msg + "".join(footer_parts))

    # Use the traceback we were handed, or fall back to the current stack.
    if ample_tb:
        frames = traceback.extract_tb(ample_tb)
    else:
        frames = traceback.extract_stack()
    tb_msg = "AMPLE EXITING AT..." + os.linesep + "".join(traceback.format_list(frames))
    if debug_log:
        logger.debug(tb_msg)
    else:
        # No debug file, so show the traceback on the console instead.
        logger.info(tb_msg)

    # Make sure the error widget is updated
    if pyrvapi:
        pyrvapi.rvapi_flush()

    sys.exit(1)
Ejemplo n.º 8
0
def makeUnmergedTable(body, tableId, holderId, data, row):
    """Write a summary table describing an unmerged reflection dataset.

    Parameters
    ----------
    body : object
        Task body providing putTableLine().
    tableId : str
        rvapi id for the new table.
    holderId : str
        rvapi id of the widget that holds the table.
    data : object
        Imported data object (files, dname and dataset metadata).
    row : int
        Grid row at which to insert the table.
    """
    pyrvapi.rvapi_add_table(tableId, "<h2>Summary</h2>", holderId, row, 0, 1,
                            1, 0)
    pyrvapi.rvapi_set_table_style(tableId, "table-blue", "text-align:left;")
    r = body.putTableLine(tableId, "File name", "Imported file name",
                          data.files[0], 0)
    r = body.putTableLine(tableId, "Assigned name", "Assigned data name",
                          data.dname, r)
    r = body.putTableLine(tableId, "Dataset name", "Original data name",
                          data.dataset.name, r)
    r = body.putTableLine(tableId, "Resolution (&Aring;)",
                          "Dataset resolution in angstroms", data.dataset.reso,
                          r)
    r = body.putTableLine(tableId, "Wavelength (&Aring;)",
                          "Beam wavelength in angstroms", data.dataset.wlen, r)

    if data.HM:
        r = body.putTableLine(tableId, "Space group", "Space group", data.HM,
                              r)
    else:
        r = body.putTableLine(tableId, "Space group", "Space group",
                              "unspecified", r)

    # Cell lengths and angles, with a wide gap separating the two triplets.
    # (A dead "cell_spec = 'not specified'" default and a commented-out
    # data.CELL variant were removed - cell_spec was unconditionally
    # overwritten here.)
    cell_spec = data.dataset.cell[0] + "&nbsp;" + \
                data.dataset.cell[1] + "&nbsp;" + \
                data.dataset.cell[2] + "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;" + \
                data.dataset.cell[3] + "&nbsp;" + \
                data.dataset.cell[4] + "&nbsp;" + \
                data.dataset.cell[5]

    r = body.putTableLine(tableId, "Cell", "Cell parameters", cell_spec, r)

    # Collect each run's (first, last) image numbers. Renamed from 'range',
    # which shadowed the builtin.
    image_ranges = []
    for run in data.dataset.runs:
        image_ranges.append((int(run[1]), int(run[2])))
    r = body.putTableLine(tableId, "Ranges", "Image range(s)",
                          str(image_ranges), r)

    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 9
0
 def display_results(self, ample_dict):
     """Render AMPLE results via pyrvapi; returns True once rendering has run."""
     no_gui = 'no_gui' in ample_dict and ample_dict['no_gui']
     if not (pyrvapi or no_gui):
         return
     # The page header only needs adding once per run.
     if not self.header:
         pyrvapi.rvapi_add_header("AMPLE Results")
         self.header = True
     if not self.ccp4i2:
         self.create_log_tab(ample_dict)
     self.create_summary_tab(ample_dict)
     self.create_results_tab(ample_dict)
     pyrvapi.rvapi_flush()
     return True
Ejemplo n.º 10
0
 def setGenericLogParser(self, split_sections_bool, graphTables=False):
     """Create a fresh generic log parser in its own panel and return the panel id."""
     self.log_parser_cnt += 1
     panel_id = "genlogparser_{0}".format(self.log_parser_cnt)
     self.putPanel(panel_id)
     # A new summary dict gives the new parser a clean slate.
     self.generic_parser_summary = {}
     self.log_parser = pyrvapi_ext.parsers.generic_parser(
         panel_id,
         split_sections_bool,
         summary=self.generic_parser_summary,
         graph_tables=graphTables)
     pyrvapi.rvapi_flush()
     return panel_id
Ejemplo n.º 11
0
def run(body):  # body is reference to the main Import class
    """Import uploaded map volume files (.map/.mrc).

    Matching files are removed from body.files_all, registered in
    body.outputDataBox and moved to the output directory; a 'Map' report
    section and summary-table rows are written for them.
    """
    # Filter once with a comprehension (was a manual append loop with a
    # stray semicolon); also removes the unused 'k' counter.
    files_map = [f for f in body.files_all if f.lower().endswith(('.map', '.mrc'))]

    if not files_map:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%%  Map volume data\n")
    body.file_stdout.write("%" * 80 + "\n")

    mapSecId = "map_sec_" + str(body.widget_no)
    body.widget_no += 1

    pyrvapi.rvapi_add_section(mapSecId, "Map", body.report_page_id(),
                              body.rvrow, 0, 1, 1, False)
    for f in files_map:

        body.files_all.remove(f)

        pyrvapi.rvapi_put_table_string(body.import_summary_id(), f, body.summary_row, 0)

        fpath = os.path.join(body.importDir(), f)

        # Memory-map the MRC file just to report its grid dimensions.
        with mrcfile.mmap(fpath) as mrc:
            msg = "MAP {0} x {1} x {2}".format(mrc.header.nx, mrc.header.ny, mrc.header.nz)
            pyrvapi.rvapi_put_table_string(body.import_summary_id(), msg, body.summary_row, 1)

        map_ = dtype_map.DType(body.job_id)
        map_.subtype = ['final_map']
        map_.setFile(f)
        body.dataSerialNo += 1
        map_.makeDName(body.dataSerialNo)
        body.outputDataBox.add_data(map_)

        # Essential to rename uploaded file to put it in output directory
        # Might be better to use the standard register() method instead if possible?
        # Currently the file ends up remaining in the upload directory on the front end,
        # even though it's removed on the number cruncher...
        os.rename(fpath, os.path.join(body.outputDir(), f))

        body.file_stdout.write("... processed: " + f + "\n")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 12
0
    def display_results(self, ample_dict):
        """Display the results of an AMPLE run using pyrvapi

        Parameters
        ----------
        ample_dict : dict
          An AMPLE job dictionary

        Returns
        -------
        bool
          True when the display code ran (even if it failed and was logged).
        """
        if not (pyrvapi or self.generate_output):
            return
        try:
            if not self.header:
                pyrvapi.rvapi_add_header("AMPLE Results")
                self.header = True
            self.create_log_tab(ample_dict)
            self.create_citation_tab(ample_dict)
            self.create_summary_tab(ample_dict)
            self.create_results_tab(ample_dict)
            pyrvapi.rvapi_flush()
        except Exception:
            # Display problems must never kill the run; log the full traceback
            # and carry on. (The bound exception variable was unused and has
            # been dropped.)
            logger.critical("Error displaying results!\n%s", traceback.format_exc())
        return True
Ejemplo n.º 13
0
    def create_results_tab(self, ample_dict):
        """Create/refresh the 'Results' tab with the top MRBUMP results.

        Parameters
        ----------
        ample_dict : dict
          An AMPLE job dictionary

        Returns
        -------
        str or None
          The results tab id, or None when there is nothing (new) to display.
        """
        if self.ccp4i2 or not self.summary_tab_id: return
        if not self._got_mrbump_results(ample_dict): return

        mrb_results = ample_dict['mrbump_results']
        # Unchanged since last call - nothing to redraw.
        if mrb_results == self.old_mrbump_results: return
        self.old_mrbump_results = mrb_results

        if not self.results_tab_id:
            self.results_tab_id = "results_tab"
            # Insert results tab before summary tab
            pyrvapi.rvapi_insert_tab(
                self.results_tab_id, "Results", self.summary_tab_id,
                False)  # Last arg is "open" - i.e. show or hide

        # Delete old sections:
        pyrvapi.rvapi_flush()
        for section_id in self.results_tab_sections:
            pyrvapi.rvapi_remove_widget(section_id)
        pyrvapi.rvapi_flush()
        self.results_tab_sections = []

        # BUG FIX: the membership test was "'ensembles_data' in
        # ample_dict['ensembles_data']" - i.e. looking inside the value rather
        # than the dict, which raised KeyError when the key was absent (the
        # very case the guard was meant to handle). dict.get() gives the
        # intended value-or-None semantics.
        ensemble_results = ample_dict.get('ensembles_data')
        mrbsum = mrbump_util.ResultsSummary(
            results=mrb_results[0:min(len(mrb_results), mrbump_util.TOP_KEEP)])
        mrbsum.sortResults(prioritise="SHELXE_CC")
        self.results_section(
            self.results_tab_id, mrbsum.results, ensemble_results,
            "Top {0} SHELXE Results".format(mrbump_util.TOP_KEEP))
        mrbsum.sortResults(prioritise="PHASER_TFZ")
        self.results_section(
            self.results_tab_id, mrbsum.results, ensemble_results,
            "Top {0} PHASER Results".format(mrbump_util.TOP_KEEP))

        return self.results_tab_id
Ejemplo n.º 14
0
def exit_error(exc_type, exc_value, exc_traceback):
    """Exit on error collecting as much information as we can.

    Parameters
    ----------
    exc_type : type
       The exception type
    exc_value : Exception
       The exception value
    exc_traceback
       The exception traceback

    Warnings
    --------
    This function terminates the program after printing appropriate
    error messages.

    """
    # Get the root logger
    logger = logging.getLogger(__name__)

    # Traceback info
    traceback_value_msg = exc_value
    traceback_full_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)

    # Find debug log file
    debug_log = _debug_logfile(logger)

    # Construct the message as a %-style template filled from a mapping
    main_msg = "%(sep)s%(hashish)s%(sep)s"\
             + "%(short_hash)s%(msg)s%(short_hash)s%(sep)s"\
             + "%(hashish)s%(sep)s%(sep)s"\
             + "SIMBAD exited with message: %(tb_value)s"\
             + "%(sep)s%(sep)s%(hashish)s%(sep)s%(sep)s"
    if debug_log:
        main_msg += "More information may be found in the debug log file: %(logfile)s%(sep)s"
    main_msg += "%(sep)sIf you believe that this is an error with SIMBAD, please email: %(email)s%(sep)s"
    main_msg += "providing as much information as you can about how you ran the program.%(sep)s"
    if debug_log:
        # BUG FIX: the message read "Please static the debug logfile" - a typo
        # for "attach".
        main_msg += "%(sep)sPlease attach the debug logfile with your email: %(logfile)s%(sep)s"

    nhashes = 70
    main_msg_kwargs = {
        'sep': os.linesep,
        'hashish': '*' * nhashes,
        'short_hash': '*' * 19,
        'msg': "SIMBAD_ERROR".center(32, " "),
        'tb_value': traceback_value_msg,
        'logfile': debug_log,
        'email': '*****@*****.**'
    }

    # logging treats a single dict argument as a mapping for the %(name)s
    # placeholders above.
    logger.critical(main_msg, main_msg_kwargs)

    logger.critical("SIMBAD EXITING AT...")
    logger.critical("".join(traceback_full_msg))

    # Make sure the error widget is updated
    if pyrvapi:
        pyrvapi.rvapi_flush()

    sys.exit(1)
Ejemplo n.º 15
0
def run(body):  # body is reference to the main Import class
    """Import uploaded XYZ coordinate files (.pdb, .cif, .mmcif, .ent).

    Recognised files are removed from ``body.files_all``, split into chains
    with pdbcur, registered in ``body.outputDataBox`` and moved to the output
    directory. A report section plus a summary table entry is written for
    each file; unrecognised or empty files are flagged in red instead.
    """

    # Collect the candidate coordinate files by extension.
    files_xyz = []
    for f in body.files_all:
        fl = f.lower()
        if fl.endswith(('.pdb', '.cif', '.mmcif', '.ent')):
            files_xyz.append(f)

    if len(files_xyz) <= 0:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%%  IMPORT OF XYZ COORDINATES\n")
    body.file_stdout.write("%" * 80 + "\n")

    xyzSecId = "xyz_sec_" + str(body.widget_no)
    body.widget_no += 1

    pyrvapi.rvapi_add_section(xyzSecId, "XYZ Coordinates",
                              body.report_page_id(), body.rvrow, 0, 1, 1,
                              False)
    # k indexes per-file sub-sections/tables when several files are imported.
    k = 0
    for f in files_xyz:

        body.files_all.remove(f)

        fpath = os.path.join(body.importDir(), f)
        #coor.stripLigWat ( fpath,fpath )  #  strip ligands and waters

        # split input file to chains
        scr_file = open("pdbcur.script", "w")
        scr_file.write("SPLITTOCHAINS\nEND\n")
        scr_file.close()

        # Start pdbcur
        rc = command.call("pdbcur", ['XYZIN', fpath], "./", "pdbcur.script",
                          body.file_stdout, body.file_stderr)

        # read pdbcur's json
        fnamesplit = os.path.splitext(f)
        fpathsplit = os.path.join(body.importDir(), fnamesplit[0]) + ".json"

        # No json next to the input means pdbcur did not recognise the file.
        if not os.path.isfile(fpathsplit):

            body.putSummaryLine_red(f, "UNKNOWN",
                                    "Failed to recognise, ignored")

        else:

            with open(fpathsplit, 'r') as json_file:
                json_str = json_file.read()
            json_file.close()  # NOTE(review): redundant - the with-block already closed it

            #xyzmeta = eval(json_str)
            xyzMeta = xyzmeta.XYZMeta(json_str)

            if len(xyzMeta["xyz"]) <= 0:

                body.putSummaryLine_red(f, "XYZ", "Empty file -- ignored")

            else:

                # With several input files each one gets its own sub-section.
                subSecId = xyzSecId
                if len(files_xyz) > 1:
                    subSecId = xyzSecId + str(k)
                    pyrvapi.rvapi_add_section(subSecId, "Import " + f,
                                              xyzSecId, k, 0, 1, 1, False)

                xyz = dtype_xyz.DType(body.job_id)
                xyz.setFile(f)
                dtype_xyz.setXYZMeta(xyz, xyzMeta)
                body.dataSerialNo += 1
                xyz.makeDName(body.dataSerialNo)

                # Move the file into the output directory under a unique name.
                os.rename(fpath, os.path.join(body.outputDir(), f))
                xyz.makeUniqueFNames(body.outputDir())

                body.outputDataBox.add_data(xyz)

                xyzTableId = "xyz_" + str(k) + "_table"
                body.putTable(xyzTableId, "", subSecId, 0)
                jrow = 0
                if len(files_xyz) <= 1:
                    body.putTableLine(xyzTableId, "File name",
                                      "Imported file name", f, jrow)
                    jrow += 1
                body.putTableLine(xyzTableId, "Assigned name",
                                  "Assigned data name", xyz.dname, jrow)
                crystData = getCrystData(xyzMeta)
                body.putTableLine(xyzTableId, "Space group", "Space group",
                                  crystData[0], jrow + 1)
                body.putTableLine(
                    xyzTableId, "Cell parameters",
                    "Cell parameters (a,b,c, &alpha;,&beta;,&gamma;)",
                    crystData[1], jrow + 2)
                # Build a human-readable summary of chains (and any ligands).
                contents = ""
                nChains = 0
                for model in xyzMeta["xyz"]:
                    for chain in model["chains"]:
                        if chain["type"] != "UNK":
                            nChains += 1
                            if len(contents) > 0:
                                contents += "<br>"
                            contents += "Model " + str(model['model']) + ", chain " + \
                                        chain['id'] + ": " + str(chain['size']) + \
                                        " residues, type: " + chain['type']
                if len(xyzMeta["ligands"]) > 0:
                    if len(contents) > 0:
                        contents += "<br>"
                    contents += "Ligands:"
                    for name in xyzMeta["ligands"]:
                        contents += "&nbsp;&nbsp;" + name
                body.putTableLine(xyzTableId, "Contents", "File contents",
                                  contents, jrow + 3)
                pyrvapi.rvapi_add_data(
                    xyzTableId + "_structure_btn",
                    xyz.dname + "&nbsp;&nbsp;&nbsp;&nbsp;",
                    # always relative to job_dir from job_dir/html
                    os.path.join("..", body.outputDir(), xyz.files[0]),
                    "xyz",
                    subSecId,
                    1,
                    0,
                    1,
                    1,
                    -1)

                body.putSummaryLine(f, "XYZ", xyz.dname)
                """
                if nChains>1:
                    irow = 2
                    for model in xyzMeta["xyz"]:
                        for chain in model['chains']:
                            if chain["type"] != "UNK":
                                fname = fnamesplit[0] + "_" + str(model['model']) + "_" + \
                                        chain['id'] + fnamesplit[1]
                                xyz = dtype_xyz.DType ( body.job_id )
                                xyz.setFile   ( fname )
                                mdl = {}
                                mdl['model']  = model['model']
                                mdl['chains'] = [chain]
                                xyz_meta = {}
                                xyz_meta["cryst"]   = xyzMeta["cryst"]
                                xyz_meta["xyz"]     = [mdl]
                                xyz_meta["ligands"] = chain["ligands"]
                                dtype_xyz.setXYZMeta ( xyz,xyz_meta )
                                body.dataSerialNo += 1
                                xyz.makeDName ( body.dataSerialNo )

                                os.rename ( os.path.join(body.importDir(),fname),
                                            os.path.join(body.outputDir(),fname) )
                                xyz.makeUniqueFNames ( body.outputDir() )

                                body.outputDataBox.add_data ( xyz )

                                xyzTableId = "xyz_" + str(k) + "_" + str(model['model']) + \
                                             "_" + chain['id'] + "_table"
                                body.putMessage1  ( subSecId  ,"&nbsp;",irow )
                                body.putTable     ( xyzTableId,"",subSecId,irow+1 )
                                body.putTableLine ( xyzTableId,"Assigned name",
                                                    "Assigned data name",xyz.dname,0 )
                                crystData = getCrystData ( xyz_meta )
                                body.putTableLine ( xyzTableId,"Space group",
                                                    "Space group",crystData[0],1 )
                                body.putTableLine ( xyzTableId,"Cell parameters",
                                    "Cell parameters (a,b,c, &alpha;,&beta;,&gamma;)",
                                                    crystData[1],2 )
                                contents  = "Model " + str(model['model']) + ", chain " + \
                                            chain['id'] + ": " + str(chain['size'])
                                contents += " residues, type: " + chain['type']
                                if len(xyz.xyzmeta["ligands"])>0:
                                    contents += "<br>Ligands:"
                                    for name in xyz.xyzmeta["ligands"]:
                                        contents += "&nbsp;&nbsp;" + name
                                body.putTableLine ( xyzTableId,"Contents",
                                                    "File contents",contents,3 )
                                pyrvapi.rvapi_add_data ( xyzTableId+"_structure_btn",xyz.dname,
                                                 # always relative to job_dir from job_dir/html
                                                 os.path.join("..",body.outputDir(),xyz.files[0]),
                                                 "xyz",subSecId,irow+2,0,1,1,-1 )

                                #fdebug = open ( "_debug.txt",'a' )
                                #fdebug.write ( fname + "\n")
                                #fdebug.close()

                                body.addSummaryLine ( "XYZ",xyz.dname )

                                irow += 3
                """

        body.file_stdout.write("... processed: " + f + "\n")
        k += 1

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 16
0
def putSRFDiagram(
        body,  # reference on Basic class
        hkl,  # hkl data object
        dirPath,  # directory with hkl object files (outputDir)
        reportDir,  # directory with html report (reportDir)
        holderId,  # rvapi holder of SRF widget
        row,
        col,  # rvapi coordinates for SRF widget
        rowSpan,
        colSpan,  # coordinate spans for SRF widget
        file_stdout,  # standard output stream
        file_stderr,  # standard error stream
        log_parser=None  # log file parser
):
    """Compute a self-rotation function for *hkl* and embed it in the report.

    Runs molrep to produce a postscript SRF plot, converts it to pdf with
    ps2pdf, moves the pdf into *reportDir* and embeds it in a new rvapi
    section under *holderId*.

    Returns a [code, message] pair: [0, "Ok"] on success, or a negative code
    on failure (-1: Fmean/sigF columns missing, -2: molrep produced no
    postscript, -3: ps2pdf produced no pdf).
    """

    fpath = hkl.getFilePath(dirPath, 0)
    Fmean = hkl.getMeta("Fmean.value", "")
    sigF = hkl.getMeta("Fmean.sigma", "")

    # Without amplitude columns molrep cannot compute the SRF.
    if Fmean == "" or sigF == "":
        file_stderr.write ( "Fmean and sigFmean columns not found in " +\
                            hkl.files[0] + " -- SRF not calculated\n" )
        return [-1, "Fmean and sigFmean columns not found"]

    # Write the molrep keyword script naming the input file and columns.
    scr_file = open("molrep_srf.script", "w")
    scr_file.write ( "file_f " + fpath +\
                     "\nlabin F=" + Fmean + " SIGF=" + sigF + "\n" )
    scr_file.close()
    """
    cols  = hkl.getMeanColumns()
    if cols[2]!="F":
        file_stderr.write ( "Fmean and sigFmean columns not found in " +\
                            hkl.files[0] + " -- SRF not calculated\n" )
        return [-1,"Fmean and sigFmean columns not found"]

    scr_file = open ( "molrep_srf.script","w" )
    scr_file.write ( "file_f " + fpath +\
                     "\nlabin F=" + cols[0] + " SIGF=" + cols[1] + "\n" )
    scr_file.close ()
    """

    # Start molrep
    rc = command.call("molrep", ["-i"], "./", "molrep_srf.script", file_stdout,
                      file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.ps"):
        file_stderr.write ( "\nSRF postscript was not generated for " +\
                            hkl.files[0] + "\n" )
        return [-2, rc.msg]

    # Convert the postscript plot to pdf for embedding in the html report.
    rc = command.call("ps2pdf", ["molrep_rf.ps"], "./", None, file_stdout,
                      file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.pdf"):
        file_stderr.write ( "\nSRF pdf was not generated for " +\
                            hkl.files[0] + "\n" )
        return [-3, rc.msg]

    # Name the pdf after the hkl file and move it into the report directory.
    pdfpath = os.path.splitext(hkl.files[0])[0] + ".pdf"
    os.rename("molrep_rf.pdf", os.path.join(reportDir, pdfpath))

    subsecId = body.getWidgetId(holderId) + "_srf"
    pyrvapi.rvapi_add_section(subsecId, "Self-Rotation Function", holderId,
                              row, col, rowSpan, colSpan, False)

    # Embed the pdf inline via an <object> element.
    pyrvapi.rvapi_set_text ( "<object data=\"" + pdfpath +\
            "\" type=\"application/pdf\" " +\
            "style=\"border:none;width:100%;height:1000px;\"></object>",
            subsecId,0,0,1,1 )
    pyrvapi.rvapi_flush()

    return [0, "Ok"]
Ejemplo n.º 17
0
def exit_error(*args, **kwargs):
    """Exit on error collecting as much information as we can.

    Parameters
    ----------
    message : str, optional
      An error message to print.

    Notes
    -----
    This previously accepted two arguments: a string to print as an error
    message and an exception traceback. We now just use sys.exc_info() so the
    message argument is no longer required but optional. While we refactor the
    code, we'll use *args to accept a message passed positionally.

    """
    # Get the root logger
    logger = logging.getLogger()

    # An error may have occured before we started logging so we need to create one here
    if not logger.handlers:
        logging.basicConfig(format='%(message)s\n', level=logging.DEBUG)
        logger = logging.getLogger()

    exc_type, exc_value, exc_traceback = sys.exc_info()
    msg = kwargs.get('message')
    if msg is None:
        if args:  # Fix for old cases: message passed positionally
            msg = args[0]
        else:
            # BUG FIX: exceptions have no .message attribute in Python 3, so
            # "exc_value.message" raised AttributeError here; formatting the
            # exception object directly is equivalent and portable.
            msg = "{0}: {1}".format(exc_type.__name__, exc_value)

    header = "*" * 70 + "\n"
    header += "*" * 20 + " " * 10 + "AMPLE ERROR" + " " * 10 + "*" * 19 + "\n"
    header += "*" * 70 + "\n\n"

    # Create the footer
    footer = "\n\n" + "*" * 70 + "\n\n"

    # Get the name of the debug log file
    debug_log = _debug_logfile(logger)
    if debug_log:
        footer += "More information may be found in the debug log file: {0}\n".format(debug_log)

    footer += "\nIf you believe that this is an error with AMPLE, please email: [email protected]\n"
    footer += "providing as much information as you can about how you ran the program.\n"
    if debug_log:
        footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)

    # String it all together
    msg = header + msg + footer

    # Print out main message
    logger.critical(msg)

    # If we were called without an exception being raised, we just print the current stack
    if exc_traceback is None:
        traceback_str = "".join(traceback.format_stack())
    else:
        traceback_str = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))

    msg = "AMPLE EXITING AT2..." + os.linesep + traceback_str
    if debug_log:
        logger.debug(msg)
    else:
        # If we don't have a debug file we want to output the traceback to the console
        logger.info(msg)

    # Make sure the error widget is updated
    if pyrvapi:
        pyrvapi.rvapi_flush()

    sys.exit(1)
Ejemplo n.º 18
0
def makeHKLTable(body, tableId, holderId, original_data, new_data, truncation,
                 trunc_msg, row):
    """Build the summary table describing an imported HKL dataset.

    Shows file/dataset identification, wavelength, space group, cell,
    resolution limits, anomalous-data presence and the outcome of the
    ctruncate step (failed / skipped / performed).
    """
    pyrvapi.rvapi_add_table(tableId, "<h2>Summary</h2>", holderId, row, 0, 1,
                            1, 0)
    pyrvapi.rvapi_set_table_style(tableId, "table-blue", "text-align:left;")

    # Identification rows as (label, tooltip, value) triplets.
    id_rows = [
        ("File name", "Imported file name", new_data.files[0]),
        ("Dataset name", "Original dataset name", new_data.getDataSetName()),
        ("Assigned name", "Assigned dataset name", new_data.dname),
        ("Wavelength", "Wavelength",
         str(new_data.getMeta("DWAVEL", "unspecified"))),
        ("Space group", "Space group",
         new_data.getMeta("HM", "unspecified")),
    ]
    r = 0
    for label, tooltip, value in id_rows:
        r = body.putTableLine(tableId, label, tooltip, value, r)

    # Unit cell: three lengths, a non-breaking-space gap, three angles.
    dcell = new_data.getMeta("DCELL", "*")
    if dcell == "*":
        cell_spec = "not specified"
    else:
        lengths = " ".join(str(dcell[i]) for i in range(3))
        angles = " ".join(str(dcell[i]) for i in range(3, 6))
        cell_spec = lengths + " " + "&nbsp;" * 6 + angles

    r = body.putTableLine(tableId, "Cell", "Cell parameters", cell_spec, r)

    r = body.putTableLine(tableId, "Resolution low", "Low resolution limit",
                          new_data.getLowResolution(), r)
    r = body.putTableLine(tableId, "Resolution high", "High resolution limit",
                          new_data.getHighResolution(), r)

    # Anomalous signal presence.
    if dtype_hkl.subtypeAnomalous() in new_data.subtype:
        anom = "Present"
    else:
        anom = "Not present"
    r = body.putTableLine(tableId, "Anomalous scattering",
                          "Presence of anomalous data", anom, r)

    # print new_data.getColumnNames()

    # Every outcome reports the original column set first.
    r = body.putTableLine(tableId, "Original columns",
                          "Original data columns",
                          original_data.getColumnNames(), r)

    if trunc_msg:
        # ctruncate failed -- the dataset is unusable.
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Failed: " + trunc_msg +
                              "<br>The dataset cannot be used", r)
    elif truncation == 0:
        # No intensity data -- truncation skipped, dataset used untouched.
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Was not performed due to the absence of "
                              "intensity data.<br>"
                              "The dataset will be used untruncated", r)
    else:
        # Truncation succeeded -- truncated data replaces the original.
        r = body.putTableLine(tableId, "Truncation", "Truncation result",
                              "Truncated dataset will be used instead of "
                              "the original one.", r)
        r = body.putTableLine(tableId, "Columns to be used",
                              "Data columns which will be used further on",
                              new_data.getColumnNames(), r)

    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 19
0
def exit_error(exc_type, exc_value, exc_traceback):
    """Exit on error collecting as much information as we can.

    Parameters
    ----------
    exc_type : str
       The exception type
    exc_value : str
       The exception value
    exc_traceback
       The exception traceback

    Warnings
    --------
    This function terminates the program (``sys.exit(1)``) after printing
    appropriate error messages.

    """
    # Get the logger for this module
    logger = logging.getLogger(__name__)

    # Traceback info
    traceback_value_msg = exc_value
    traceback_full_msg = traceback.format_exception(exc_type, exc_value,
                                                    exc_traceback)

    # Find debug log file (None if no file handler is configured)
    debug_log = _debug_logfile(logger)

    # Construct the message; the %(name)s placeholders are filled by the
    # logging call below from the main_msg_kwargs mapping.
    main_msg = "%(sep)s%(hashish)s%(sep)s"\
             + "%(short_hash)s%(msg)s%(short_hash)s%(sep)s"\
             + "%(hashish)s%(sep)s%(sep)s"\
             + "SIMBAD exited with message: %(tb_value)s"\
             + "%(sep)s%(sep)s%(hashish)s%(sep)s%(sep)s"
    if debug_log:
        main_msg += "More information may be found in the debug log file: %(logfile)s%(sep)s"
    main_msg += "%(sep)sIf you believe that this is an error with SIMBAD, please email: %(email)s%(sep)s"
    main_msg += "providing as much information as you can about how you ran the program.%(sep)s"
    if debug_log:
        # Fixed garbled wording ("Please static the debug logfile ...") to
        # match the phrasing used by the sibling AMPLE error handler.
        main_msg += "%(sep)sPlease include the debug logfile with your email: %(logfile)s%(sep)s"

    nhashes = 70
    main_msg_kwargs = {
        'sep': os.linesep,
        'hashish': '*' * nhashes,
        'short_hash': '*' * 19,
        'msg': "SIMBAD_ERROR".center(32, " "),
        'tb_value': traceback_value_msg,
        'logfile': debug_log,
        'email': '*****@*****.**'
    }

    # String it all together -- logging %-formats main_msg using the
    # mapping passed as its single argument.
    logger.critical(main_msg, main_msg_kwargs)

    logger.critical("SIMBAD EXITING AT...")
    logger.critical("".join(traceback_full_msg))

    # Make sure the error widget is updated
    if pyrvapi:
        pyrvapi.rvapi_flush()

    sys.exit(1)
Ejemplo n.º 20
0
def run(
    body,  # body is reference to the main Import class
    sectionTitle="Reflection datasets created",
    sectionOpen=False,  # to keep result section closed if several datasets
    freeRflag=True  # will be run if necessary
):
    """Import merged reflection data files (MTZ, or XDS converted to MTZ).

    Scans ``body.files_all`` for merged reflection files, converts XDS
    files to MTZ with ``pointless``, optionally generates/expands a
    FreeRFlag column with ``freerflag``, runs ``ctruncate`` on every
    dataset found in each file, registers the resulting HKL data objects
    with ``body.outputDataBox`` and reports progress in the pyrvapi GUI.
    Files that are claimed here are removed from ``body.files_all``.
    """

    # Select the merged reflection files from everything that was uploaded.
    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_merged', 'mtz_merged'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    mtzSecId = body.getWidgetId("mtz_sec") + "_"

    # k counts imported datasets across all files; several widget ids and
    # the one-off section/banner creation are keyed on k == 0.
    k = 0
    for f_orig, f_fmt in files_mtz:
        body.files_all.remove(f_orig)  # this importer now owns the file
        p_orig = os.path.join(body.importDir(), f_orig)
        p_mtzin = p_orig
        if not f_fmt.startswith('mtz_'):
            # XDS format: convert to MTZ by piping a script into pointless.
            p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
            sp = subprocess.Popen('pointless',
                                  stdin=subprocess.PIPE,
                                  stdout=body.file_stdout,
                                  stderr=body.file_stderr)
            # NOTE(review): writes str to the child's stdin -- assumes a
            # Python 2 / text-mode pipe; confirm before porting to Python 3.
            sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                           '\nCOPY\n')
            sp.stdin.close()
            if sp.wait():
                p_mtzin = None  # non-zero exit: conversion failed, skip file

        if p_mtzin:

            p_mtzout = p_mtzin
            rc = command.comrc()  # default (empty) return code holder

            if freeRflag:

                p_mtzout = os.path.join(body.outputDir(),
                                        os.path.basename(f_orig))

                # The freerflag keyword script is written once and reused.
                if k == 0:
                    scr_file = open(freerflag_script(), "w")
                    scr_file.write("UNIQUE\n")
                    scr_file.close()

                # run freerflag: generate FreeRFlag if it is absent, and expand
                # all reflections

                rc = command.call("freerflag",
                                  ["HKLIN", p_mtzin, "HKLOUT", p_mtzout],
                                  "./",
                                  freerflag_script(),
                                  body.file_stdout,
                                  body.file_stderr,
                                  log_parser=None)

            if rc.msg:
                # freerflag failed: log to both streams and flag the file
                # as ignored in the summary table.
                msg = "\n\n Freerflag failed with message:\n\n" + \
                      rc.msg + \
                      "\n\n File " + f_orig + \
                      " cannot be processed.\n\n"
                body.file_stdout.write(msg)
                body.file_stderr.write(msg)
                body.putSummaryLine_red(f_orig, "MTZ",
                                        "Failed to process/import, ignored")

            else:

                mf = mtz.mtz_file(p_mtzout)
                body.summary_row_0 = -1  # to signal the beginning of summary row

                # One MTZ file may contain several datasets; import each.
                for ds in mf:

                    # Banner written once, before the first dataset.
                    if k == 0:
                        body.file_stdout.write("\n" + "%" * 80 + "\n")
                        body.file_stdout.write(
                            "%%%%%  IMPORT REFLECTION DATA\n")
                        body.file_stdout.write("%" * 80 + "\n")

                    # make HKL dataset annotation
                    hkl = dtype_hkl.DType(body.job_id)
                    hkl.importMTZDataset(ds)
                    body.dataSerialNo += 1
                    hkl.makeDName(body.dataSerialNo)
                    datasetName = ""

                    # Report section created once, titled either with the
                    # given sectionTitle or with the first dataset's name.
                    if k == 0:
                        if sectionTitle:
                            pyrvapi.rvapi_add_section(mtzSecId, sectionTitle,
                                                      body.report_page_id(),
                                                      body.rvrow, 0, 1, 1,
                                                      sectionOpen)
                        else:
                            pyrvapi.rvapi_add_section(
                                mtzSecId,
                                "Reflection dataset created: " + hkl.dname,
                                body.report_page_id(), body.rvrow, 0, 1, 1,
                                sectionOpen)

                    # With multiple files/datasets, nest each dataset in
                    # its own collapsed sub-section.
                    subSecId = mtzSecId
                    if len(files_mtz) > 1 or len(mf) > 1:
                        subSecId = mtzSecId + str(k)
                        pyrvapi.rvapi_add_section(subSecId, hkl.dname,
                                                  mtzSecId, k, 0, 1, 1, False)
                        #pyrvapi.rvapi_add_section ( subSecId,
                        #            f_orig + " / " + hkl.getDataSetName(),
                        #            mtzSecId,k,0,1,1,False )

                    # run crtruncate
                    outFileName = os.path.join(body.outputDir(),
                                               hkl.dataId + ".mtz")
                    outXmlName = os.path.join("ctruncate" + hkl.dataId +
                                              ".xml")
                    cmd = ["-hklin", p_mtzout, "-hklout", outFileName]
                    amplitudes = ""

                    # Mean-intensity (or amplitude) columns -> -colin.
                    meanCols = hkl.getMeanColumns()
                    if meanCols[2] != "X":
                        cols = "/*/*/["
                        if meanCols[1] != None:
                            cols = cols + meanCols[0] + "," + meanCols[1]
                        else:
                            cols = cols + meanCols[0]
                        if meanCols[2] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colin", cols + "]"]

                    # Anomalous columns -> -colano.
                    anomCols = hkl.getAnomalousColumns()
                    anomalous = False
                    if anomCols[4] != "X":
                        anomalous = True
                        cols = "/*/*/["
                        for i in range(0, 4):
                            if anomCols[i] != None:
                                if i > 0:
                                    cols = cols + ","
                                cols = cols + anomCols[i]
                        if anomCols[4] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colano", cols + "]"]

                    if amplitudes:
                        cmd += [amplitudes]

                    cmd += ["-xmlout", outXmlName]
                    cmd += ["-freein"]

                    pyrvapi.rvapi_add_text(
                        "&nbsp;<p><h2>Data analysis (CTruncate)</h2>",
                        subSecId, 1, 0, 1, 1)
                    pyrvapi.rvapi_add_panel(mtzSecId + str(k), subSecId, 2, 0,
                                            1, 1)
                    """
                    log_parser = pyrvapi_ext.parsers.generic_parser ( mtzSecId+str(k),
                            False,body.generic_parser_summary,False )
                    rc = command.call ( "ctruncate",cmd,"./",None,
                                        body.file_stdout,body.file_stderr,log_parser )
                    """
                    body.file_stdin = None  # not clear why this is not None at
                    # this point and needs to be forced,
                    # or else runApp looks for input script
                    body.setGenericLogParser(mtzSecId + str(k), False)
                    body.runApp("ctruncate", cmd)

                    body.file_stdout.flush()

                    mtzTableId = body.getWidgetId("mtz") + "_" + str(
                        k) + "_table"

                    # Three outcomes: ctruncate errored, produced no output
                    # file (dataset used as-is), or produced truncated data.
                    if rc.msg:
                        msg = "\n\n CTruncate failed with message:\n\n" + \
                              rc.msg + \
                              "\n\n Dataset " + hkl.dname + \
                              " cannot be used.\n\n"
                        body.file_stdout.write(msg)
                        body.file_stderr.write(msg)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl, -1,
                                     msg, 0)
                        datasetName = hkl.dname

                    elif not os.path.exists(outFileName):
                        # No truncated output: register the original dataset.
                        body.file_stdout.write ( "\n\n +++ Dataset " + hkl.dname + \
                            "\n was not truncated and will be used as is\n\n" )
                        hkl.makeUniqueFNames(body.outputDir())
                        body.outputDataBox.add_data(hkl)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl, 0,
                                     "", 0)
                        datasetName = hkl.dname

                        srf.putSRFDiagram(body, hkl, body.outputDir(),
                                          body.reportDir(), subSecId, 3, 0, 1,
                                          1, body.file_stdout,
                                          body.file_stderr, None)

                        pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (merged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + datasetName + "<br>&nbsp;",
                                subSecId,4,0,1,1 )
                        pyrvapi.rvapi_add_data(
                            "hkl_data_" + str(body.dataSerialNo),
                            "Merged reflections",
                            # always relative to job_dir from job_dir/html
                            os.path.join("..", body.outputDir(), hkl.files[0]),
                            "hkl:hkl",
                            subSecId,
                            5,
                            0,
                            1,
                            1,
                            -1)

                    else:
                        # Truncated output replaces the original dataset but
                        # keeps its assigned name and data id.
                        body.file_stdout.write ( "\n\n ... Dataset " + hkl.dname + \
                            "\n was truncated and will substitute the " + \
                            "original one\n\n" )
                        mtzf = mtz.mtz_file(outFileName)
                        # ctruncate should create a single dataset here
                        for dset in mtzf:
                            dset.MTZ = os.path.basename(outFileName)
                            hkl_data = dtype_hkl.DType(body.job_id)
                            hkl_data.importMTZDataset(dset)
                            hkl_data.dname = hkl.dname
                            hkl_data.dataId = hkl.dataId
                            hkl_data.makeUniqueFNames(body.outputDir())
                            body.outputDataBox.add_data(hkl_data)
                            makeHKLTable(body, mtzTableId, subSecId, hkl,
                                         hkl_data, 1, "", 0)
                            datasetName = hkl_data.dname

                            srf.putSRFDiagram(body, hkl_data, body.outputDir(),
                                              body.reportDir(), subSecId, 3, 0,
                                              1, 1, body.file_stdout,
                                              body.file_stderr, None)

                            pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (merged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + datasetName + "<br>&nbsp;",
                                subSecId,4,0,1,1 )
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Merged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             hkl_data.files[0]),
                                "hkl:hkl",
                                subSecId,
                                5,
                                0,
                                1,
                                1,
                                -1)

                    # First dataset of a file starts a new summary row;
                    # subsequent ones append to it.
                    if body.summary_row_0 < 0:
                        body.putSummaryLine(f_orig, "HKL", datasetName)
                    else:
                        body.addSummaryLine("HKL", datasetName)
                    k += 1
                    pyrvapi.rvapi_flush()

                if len(mf) <= 0:
                    body.putSummaryLine_red(f_orig, "UNKNOWN", "-- ignored")

            body.file_stdout.write("... processed: " + f_orig + "\n    ")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 21
0
def write_output(items,
                 json_file=None,
                 xml_file=None,
                 xmlroot=None,
                 docid=None,
                 output=None):
    """Route Fragon status/result items to the active reporting backend.

    Depending on which keyword arguments are supplied, this either updates
    a JSON results file (non-i2 mode), builds/updates an XML tree (ccp4i2
    mode), or drives a pyrvapi report document (GUI mode).

    Parameters
    ----------
    items : dict
        New results/status entries to record.
    json_file : str, optional
        Path of the JSON results file (non-i2 mode).
    xml_file : str, optional
        Path of the i2 ``program.xml`` file.
    xmlroot : lxml.etree._Element, optional
        Root node of the XML report; created on the first i2-mode call.
    docid : str, optional
        pyrvapi document/tab id; the document is created when this is None.
    output : dict, optional
        Accumulated output state; updated and returned in most modes.

    Returns
    -------
    The updated ``output`` dict (json/GUI modes), the new ``xmlroot``
    (first i2-mode call), ``(tab_id, output)`` when the pyrvapi document
    is first created, or ``None`` when only the xml file is rewritten.
    """
    # in non-i2 mode items are added to the output dictionary which is dumped to json
    if json_file is not None:
        if 'result' in items:
            # Merge an ACORN CC result into the matching solution record.
            result = items['result']
            for solution in output['solutions']:
                if solution['id'] == result['id']:
                    solution.update({'acornCC': result['acornCC']})
        else:
            output.update(items)
        # Write to a temp file first so readers never see a partial file,
        # then swap it into place.
        temp_filename = json_file + '.tmp'
        with open(temp_filename, 'w') as jsonfile:
            print(json.dumps(output,
                             sort_keys=True,
                             indent=2,
                             separators=(',', ': ')),
                  file=jsonfile)
        if os.path.exists(json_file):
            # os.rename cannot overwrite on Windows: move the old file to a
            # unique name, delete it, then rename the new file into place.
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(json_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, json_file)
        return output
    elif xmlroot is None and xml_file is not None:
        # First i2-mode call: create and return the XML root element.
        xmlroot = etree.Element('Fragon')
        return xmlroot
    elif docid is None:
        # First GUI-mode call: create the pyrvapi document and status tab.
        jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
        pyrvapi.rvapi_init_document('fragon_results', os.getcwd(),
                                    'Fragon %s results' % items['Fragon'], 1,
                                    7, jsrview_dir, None, None, None, None)
        pyrvapi.rvapi_add_tab('tab1', 'Fragon results', True)
        pyrvapi.rvapi_add_section('status', 'Current status', 'tab1', 0, 0, 1,
                                  1, True)
        pyrvapi.rvapi_add_text(
            'The job is currently running. Updates will be shown here after fragment placement and density modification.',
            'status', 0, 0, 1, 1)
        pyrvapi.rvapi_flush()
        output.update(items)
        return 'tab1', output
    elif xml_file is not None:
        # in i2 mode new items are added to the etree as this preserves the order in the xml
        for key in items:
            if key == 'Fragon':
                version_node = etree.SubElement(xmlroot, 'Version')
                version_node.text = output['Fragon']
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    try:
                        progress_node = xmlroot.xpath(
                            '//Fragon/phaser_progress')[0]
                    except IndexError:
                        progress_node = etree.SubElement(
                            xmlroot, 'phaser_progress')
                    progress_node.text = callback[1]
                elif callback[0] == 'Best LLG/TFZ':
                    best_llg_node = etree.SubElement(xmlroot, 'best_llg')
                    best_llg_node.text = callback[1]['llg']
                    best_tfz_node = etree.SubElement(xmlroot, 'best_tfz')
                    best_tfz_node.text = callback[1]['tfz']
            elif key == 'solutions':
                solutions = items['solutions']
                try:
                    solutions_node = xmlroot.xpath('//Fragon/solutions')[0]
                except IndexError:
                    solutions_node = etree.SubElement(xmlroot, 'solutions')
                if len(solutions) > 0:
                    solutions_node.text = json.dumps(solutions)
            else:
                node = etree.SubElement(xmlroot, key)
                node.text = items[key].__str__()
        # Atomic-ish replace of program.xml, as for the json file above.
        temp_filename = 'program.xml.tmp'
        with open(temp_filename, 'w') as xmlfile:
            # NOTE(review): etree.tostring returns bytes under Python 3;
            # this text-mode write assumes Python 2 -- confirm when porting.
            xmlfile.write(etree.tostring(xmlroot, pretty_print=True))
        if os.path.exists(xml_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(xml_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, xml_file)
    elif docid is not None:
        # GUI mode: update the existing pyrvapi document.
        for key in items:
            if key == 'copies':
                if items['copies'] > 1:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place %d fragments' %
                        items['copies'], 'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place the fragment', 'status', 0, 0,
                        1, 1)
                pyrvapi.rvapi_add_tab('tab2', 'Phaser log file', False)
                pyrvapi.rvapi_append_content(output['root'] + '_Phaser.log',
                                             True, 'tab2')
                pyrvapi.rvapi_flush()
                output.update(items)
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    pyrvapi.rvapi_set_text(
                        'Current Phaser stage: %s' % callback[1], 'status', 1,
                        0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best LLG':
                    pyrvapi.rvapi_set_text(
                        'Current best solution Log Likelihood Gain (LLG): %s Translation Function Z-score (TFZ): %s'
                        % (callback[1], output['best_tfz']), 'status', 2, 0, 1,
                        1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best TFZ':
                    output.update({'best_tfz': callback[1]})
            elif key == 'solutions':
                # Summarize current solutions and (re)build the table.
                solutions = items['solutions']
                top_llg = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted([
                    solution['acornCC'] if solution['acornCC']
                    not in ['Running', '-', None] else None
                    for solution in solutions
                ],
                                     reverse=True)[0]
                if len(solutions) == 1:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found a single solution with Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                        % (top_llg, top_tfz), 'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found %d solutions. The top solution has Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TF Z-score) of %0.2f'
                        % (output['num_phaser_solutions'], top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                if output['num_phaser_solutions'] > len(solutions):
                    # BUGFIX: was len(solns) -- an undefined name (NameError).
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases for the top %d solutions by density modification with ACORN'
                        % len(solutions), 'status', 1, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases by density modification with ACORN',
                        'status', 1, 0, 1, 1)
                if top_acornCC is not None:
                    pyrvapi.rvapi_set_text(
                        'The best solution so far has a correlation coefficient from density modification of %0.3f'
                        % top_acornCC, 'status', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('', 'status', 2, 0, 1, 1)
                pyrvapi.rvapi_add_table('results_table', 'Phaser solutions',
                                        'tab1', 1, 0, 1, 1, 1)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Solution number', '', 0)
                pyrvapi.rvapi_put_horz_theader('results_table', 'Space group',
                                               '', 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'LLG',
                                               'Phaser Log Likelihood Gain', 2)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'TF Z-score',
                    'Phaser Translation Function Z-score', 3)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'CC',
                    'CC from ACORN density modification', 4)
                for solution in solutions:
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%d' % solution['number'],
                                                   solution['number'] - 1, 0)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   solution['sg'],
                                                   solution['number'] - 1, 1)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['llg'],
                                                   solution['number'] - 1, 2)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['tfz'],
                                                   solution['number'] - 1, 3)
                    if solution['acornCC'] in ['Running', '-']:
                        pyrvapi.rvapi_put_table_string(
                            'results_table',
                            solution['acornCC'].replace('-', ''),
                            solution['number'] - 1, 4)
                    elif solution['acornCC'] is None:
                        pyrvapi.rvapi_put_table_string('results_table',
                                                       'Not tested',
                                                       solution['number'] - 1,
                                                       4)
                    else:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', '%0.3f' % solution['acornCC'],
                            solution['number'] - 1, 4)
                output.update(items)
                pyrvapi.rvapi_flush()
            elif key == 'cc_best':
                # Final results section once density modification finishes.
                solutions = output['solutions']
                top_llg = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted([
                    solution['acornCC'] if solution['acornCC']
                    not in ['Running', '-', None] else None
                    for solution in solutions
                ],
                                     reverse=True)[0]
                pyrvapi.rvapi_set_section_state('status', False)
                pyrvapi.rvapi_add_section('results', 'Results', 'tab1', 2, 0,
                                          1, 1, True)
                pyrvapi.rvapi_add_text(
                    'Phaser found %d solutions. The top solution had Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                    % (output['num_phaser_solutions'], top_llg, top_tfz),
                    'results', 0, 0, 1, 1)
                pyrvapi.rvapi_add_text(
                    'The best solution has a correlation coefficient from density modification of %0.3f'
                    % top_acornCC, 'results', 1, 0, 1, 1)
                if top_acornCC > 0.15:
                    pyrvapi.rvapi_add_text(
                        'This suggests the structure has been solved and the phases from ACORN will enable automated model building',
                        'results', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_add_text(
                        'Sorry this does not suggest a solution', 'results', 3,
                        0, 1, 1)
                pyrvapi.rvapi_flush()
            elif key == 'best_solution_id':
                pdbout = output['name'] + '_phaser_solution.pdb'
                mtzout = output['name'] + '_acorn_phases.mtz'
                pyrvapi.rvapi_add_data(
                    'best', 'Best fragment placement and electron density',
                    pdbout, 'xyz', 'tab1', 3, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data('best', mtzout, 'hkl:map')
            else:
                output.update(items)
        return output
Ejemplo n.º 22
0
 def wrapper(*args, **kwargs):
     """Call the wrapped function, then flush pyrvapi so the GUI updates."""
     # 'func' comes from the enclosing decorator's scope (not visible here).
     # NOTE(review): func's return value is discarded -- confirm callers
     # of decorated functions never need it.
     func(*args, **kwargs)
     pyrvapi.rvapi_flush()
Ejemplo n.º 23
0
def run(body):  # body is reference to the main Import class
    """Import unmerged reflection data (XDS or unmerged MTZ files).

    Candidate files are picked from ``body.files_all`` by extension and
    format; XDS files are first converted to MTZ with pointless, then
    pointless is run again for symmetry determination, each dataset found
    is copied into its own MTZ file and registered in
    ``body.outputDataBox``.  Progress is mirrored to the pyrvapi report
    pages and to the job's stdout/stderr logs.
    """

    # Collect (filename, format) pairs for every importable unmerged file.
    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_integrated', 'xds_scaled', 'mtz_integrated'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    unmergedSecId = "unmerged_mtz_sec_" + str(body.widget_no)
    body.widget_no += 1

    k = 0  # counts imported datasets; also used to make widget ids unique
    for f_orig, f_fmt in files_mtz:
        try:
            body.files_all.remove(f_orig)
            p_orig = os.path.join(body.importDir(), f_orig)
            p_mtzin = p_orig
            if not f_fmt.startswith('mtz_'):
                # XDS input: convert to MTZ first via pointless' COPY mode.
                p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
                sp = subprocess.Popen('pointless',
                                      stdin=subprocess.PIPE,
                                      stdout=body.file_stdout,
                                      stderr=body.file_stderr)

                # NOTE(review): writes str to the pipe — assumes a Python-2
                # style text pipe; confirm if this is ever run on Python 3.
                sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                               '\nCOPY\n')
                sp.stdin.close()
                if sp.wait():
                    # Non-zero exit code: conversion failed, skip this file.
                    p_mtzin = None

            if p_mtzin:
                if k == 0:
                    # First successful file: write the log banner and create
                    # the enclosing report section.
                    body.file_stdout.write("\n" + "%" * 80 + "\n")
                    body.file_stdout.write("%%%%%  UNMERGED DATA IMPORT\n")
                    body.file_stdout.write("%" * 80 + "\n")

                    pyrvapi.rvapi_add_section(unmergedSecId,
                                              "Unmerged datasets",
                                              body.report_page_id(),
                                              body.rvrow, 0, 1, 1, False)
                    urow = 0

                fileSecId = unmergedSecId
                frow = 0
                if len(files_mtz) > 1:
                    # One sub-section per file when importing several files.
                    fileSecId = unmergedSecId + "_" + str(k)
                    pyrvapi.rvapi_add_section(fileSecId, "File " + f_orig,
                                              unmergedSecId, urow, 0, 1, 1,
                                              False)
                    urow += 1
                    pyrvapi.rvapi_set_text(
                        "<h2>Data analysis (Pointless)</h2>", fileSecId, frow,
                        0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text ( "<h2>Data analysis (Pointless)</h2>" + \
                                             "<h3>File: " + f_orig + "</h3>",
                                             fileSecId,frow,0,1,1 )
                reportPanelId = fileSecId + "_report"
                pyrvapi.rvapi_add_panel(reportPanelId, fileSecId, frow + 1, 0,
                                        1, 1)

                frow += 2

                log_parser = pyrvapi_ext.parsers.generic_parser(
                    reportPanelId, False)

                # Run pointless for symmetry determination, with XML output
                # for later parsing.
                body.file_stdin = open(pointless_script(), 'w')
                body.file_stdin.write (
                    "HKLIN "  + p_mtzin + "\n" + \
                    "XMLOUT " + pointless_xml() + "\n"
                )
                body.file_stdin.close()

                rc = command.call("pointless", [], "./", pointless_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser)

                body.unsetLogParser()

                symmTablesId = fileSecId + "_" + symm_det()
                pyrvapi.rvapi_add_section(symmTablesId,
                                          "Symmetry determination tables",
                                          fileSecId, frow, 0, 1, 1, True)
                pyrvapi.rvapi_set_text("&nbsp;", fileSecId, frow + 1, 0, 1, 1)
                frow += 2

                #body.putSection ( symmTablesId,"Symmetry determination tables",True )
                table_list = datred_utils.parse_xmlout(pointless_xml())
                datred_utils.report(table_list, symmTablesId)

                # dump_keyargs = dict(sort_keys=True, indent=4, separators=(',', ': '))
                # print json.dumps(datred_utils.tabs_as_dict(tab_list), **dump_keyargs)

                if rc.msg:
                    # Pointless failed: log to both streams and mark the file
                    # as ignored in the import summary.
                    msg = "\n\n Pointless failed with message:\n\n" + \
                          rc.msg + \
                          "\n\n File " + f_orig + \
                          " cannot be processed.\n\n"
                    body.file_stdout.write(msg)
                    body.file_stderr.write(msg)
                    body.putSummaryLine_red(
                        f_orig, "UNMERGED",
                        "Failed to process/import, ignored")

                else:
                    mf = mtz.mtz_file(p_mtzin)

                    # Split the file into point-symmetry datasets; each one
                    # becomes a separate output MTZ.
                    dset_list = datred_utils.point_symm_datasets(
                        pointless_xml(), f_fmt)
                    body.summary_row_0 = -1  # to signal the beginning of summary row

                    for dataset in dset_list:

                        # make HKL dataset annotation
                        unmerged = dtype_unmerged.DType(body.job_id)
                        dataset["symm_summary"] = table_list
                        unmerged.importUnmergedData(mf, dataset)
                        body.dataSerialNo += 1
                        unmerged.makeDName(body.dataSerialNo)

                        # Copy only this dataset's batch runs into its own
                        # MTZ via another pointless pass.
                        outFileName = unmerged.dataId + ".mtz"
                        body.file_stdin = open(pointless_script(), 'w')
                        body.file_stdin.write (
                            "NAME PROJECT x CRYSTAL y DATASET z\n" + \
                            "HKLIN "  + p_mtzin       + "\n" + \
                            "HKLOUT " + os.path.join(body.outputDir(),outFileName) + "\n" + \
                            "COPY\n"  + \
                            "ORIGINALLATTICE\n"
                        )

                        for offset, first, last in unmerged.dataset.runs:
                            body.file_stdin.write("RUN 1 FILE 1 BATCH " +
                                                  str(first) + " to " +
                                                  str(last) + "\n")
                        body.file_stdin.write("END\n")

                        body.file_stdin.close()

                        rc = command.call("pointless", [], "./",
                                          pointless_script(), body.file_stdout,
                                          body.file_stderr, None)

                        if rc.msg:
                            msg = "\n\n Pointless failed with message:\n\n" + \
                                  rc.msg + \
                                  "\n\n File " + outFileName + \
                                  " cannot be processed.\n\n"
                            body.file_stdout.write(msg)
                            body.file_stderr.write(msg)
                            body.putSummaryLine_red(
                                outFileName, "UNMERGED",
                                "Failed to process/import, ignored")

                        else:
                            unmerged.files[0] = outFileName

                            # Sub-section per dataset when a file yields more
                            # than one.
                            subSecId = fileSecId
                            if len(dset_list) > 1:
                                subSecId = fileSecId + str(k)
                                pyrvapi.rvapi_add_section(
                                    subSecId,
                                    "Import " + unmerged.dataset.name,
                                    fileSecId, frow, 0, 1, 1, False)
                                frow += 1

                            mtzTableId = "unmerged_mtz_" + str(k) + "_table"

                            unmerged.makeUniqueFNames(body.outputDir())

                            body.outputDataBox.add_data(unmerged)
                            makeUnmergedTable(body, mtzTableId, subSecId,
                                              unmerged, 0)

                            pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (unmerged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + unmerged.dname + \
                                "<br>&nbsp;",subSecId,frow,0,1,1 )
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Unmerged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             unmerged.files[0]),
                                "hkl:hkl",
                                subSecId,
                                frow + 1,
                                0,
                                1,
                                1,
                                -1)
                            frow += 2

                            # First dataset of this file starts a new summary
                            # row; subsequent ones append to it.
                            if body.summary_row_0 < 0:
                                body.putSummaryLine(f_orig, "UNMERGED",
                                                    unmerged.dname)
                            else:
                                body.addSummaryLine("UNMERGED", unmerged.dname)
                            k += 1

                pyrvapi.rvapi_flush()

                # move imported file into output directory
                os.rename(
                    p_mtzin,
                    os.path.join(body.outputDir(), os.path.basename(p_mtzin)))

                body.file_stdout.write("... processed: " + f_orig + "\n    ")

            trace = ''

        except:
            # Capture the full traceback; the job is failed below, outside
            # the exception handler.
            trace = ''.join(traceback.format_exception(*sys.exc_info()))
            body.file_stdout.write(trace)

        if trace:
            body.fail(trace, 'import failed')

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 24
0
    def run(self):
        """Run the gesamt structure-alignment task.

        With a single input structure, gesamt scans the GESAMT archive and
        the resulting hit list is rendered as report tables and score
        plots; with two or more structures a pairwise/multiple
        superposition is performed and the superposed ensemble(s) are
        registered as output data.
        """

        # Prepare gesamt job

        # Just in case (of repeated run) remove the output xyz file. When gesamt
        # succeeds, this file is created.
        if os.path.isfile(self.gesamt_xyz()):
            os.remove(self.gesamt_xyz())

        if os.path.isfile(self.gesamt_json()):
            os.remove(self.gesamt_json())

        # Prepare gesamt input

        # fetch input data
        xyz = self.input_data.data.xyz
        nXYZ = len(xyz)

        # make command-line parameters: one (file, -s, selection) triple
        # per input structure
        cmd = []
        for i in range(nXYZ):
            cmd += [
                os.path.join(self.inputDir(), xyz[i].files[0]), "-s",
                xyz[i].chainSel
            ]

        if nXYZ < 2:
            # Archive-scan mode: requires a configured GESAMT archive.
            if not "GESAMT_ARCHIVE" in os.environ:
                # NOTE(review): self.fail() is expected to terminate the job
                # here, since os.environ["GESAMT_ARCHIVE"] is used just below.
                self.fail ( "<b> *** Error: jsCofe is not configured to work " + \
                            "with GESAMT Archive</b><br>" + \
                            "<i>     Please look for support</i><br>",
                            "No GESAMT Archive configured" )

            cmd += [
                "-archive", os.environ["GESAMT_ARCHIVE"], "-nthreads=auto",
                "-min1=" +
                self.getParameter(self.task.parameters.sec1.contains.MIN1),
                "-min2=" +
                self.getParameter(self.task.parameters.sec1.contains.MIN2),
                "-trim-size=1", "-trim-Q=" +
                self.getParameter(self.task.parameters.sec1.contains.QSCORE),
                "--json",
                self.gesamt_json()
            ]

            # Progress bar and estimated-time-remaining label for the scan.
            self.rvrow += 1
            pyrvapi.rvapi_add_grid(self.progress_grid_id(), False,
                                   self.report_page_id(), self.rvrow, 0, 1, 1)

            pyrvapi.rvapi_add_progress_bar(self.progress_bar_id(),
                                           self.progress_grid_id(), 0, 0, 1, 1)
            pyrvapi.rvapi_add_text("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ETR:&nbsp;",
                                   self.progress_grid_id(), 0, 1, 1, 1)
            pyrvapi.rvapi_add_label(self.etr_label_id(),
                                    self.progress_grid_id(), "--:--:--", 0, 2,
                                    1, 1)

            self.storeReportDocument(self.progress_bar_id() + ";" +
                                     self.etr_label_id())

        else:
            # Superposition mode: write the superposed coordinates out.
            cmd += [
                "-o",
                self.gesamt_xyz(), "-o-cs",
                self.task.parameters.sec1.contains.MODE.value
            ]
            if nXYZ == 2:
                cmd += ["-domains"]

            self.putPanel(self.gesamt_report_id())
            self.storeReportDocument(
                self.gesamt_report_id())  # self.job_id.zfill(4) )

        # Optional alignment parameters.
        r0 = self.getParameter(self.task.parameters.sec1.contains.R0)
        if r0:
            cmd += ["-r0=" + r0]
        sigma = self.getParameter(self.task.parameters.sec1.contains.SIGMA)
        if sigma:
            cmd += ["-sigma=" + sigma]

        cmd += ["--rvapi-rdoc", self.reportDocumentName()]

        # run gesamt
        self.runApp("gesamt", cmd)

        if nXYZ < 2:  # PDB scan

            # Scan finished: remove the progress widgets.
            pyrvapi.rvapi_remove_widget(self.progress_grid_id())
            pyrvapi.rvapi_reset_task()
            pyrvapi.rvapi_flush()

            if os.path.isfile(self.gesamt_json()):

                hitlist = jsonut.readjObject(self.gesamt_json())

                # Table describing the query structure itself.
                pyrvapi.rvapi_add_table(self.query_table_id(),
                                        "Query structure",
                                        self.report_page_id(), self.rvrow, 0,
                                        1, 1, 0)
                pyrvapi.rvapi_put_horz_theader(self.query_table_id(), "Name",
                                               "Structure name", 0)
                pyrvapi.rvapi_put_horz_theader(
                    self.query_table_id(), "Size",
                    "Structure size in number of residues", 1)
                pyrvapi.rvapi_put_table_string(
                    self.query_table_id(), hitlist.query.file + "&nbsp;(" +
                    hitlist.query.selection + ")", 0, 0)
                pyrvapi.rvapi_put_table_string(self.query_table_id(),
                                               hitlist.query.size, 0, 1)

                self.rvrow += 1
                self.putMessage("&nbsp;")

                querySize = float(hitlist.query.size)

                # Number of hits, capped by the MAXHITS parameter; a scalar
                # (non-list) column value means exactly one hit.
                nColumns = len(hitlist.columns)
                if nColumns < 1 or not hasattr(hitlist.columns[0], "value"):
                    nHits = 0
                elif type(hitlist.columns[0].value) is list:
                    nHits = min(
                        len(hitlist.columns[0].value),
                        self.task.parameters.sec1.contains.MAXHITS.value)
                else:
                    nHits = 1

                if nHits < 1:
                    self.putTitle("No PDB matches found")
                    self.putMessage(
                        "<i>Hint:</i> try to reduce report thresholds " +
                        "(ultimately down to 0) in order to see any hits;<br>"
                        +
                        "doing so will increase computation time and report " +
                        "lower-quality (less relevant) matches.")
                else:

                    self.putSection(self.hits_table_sec_id(), "PDB Hits Table",
                                    False)

                    # Hits table: one row per hit, one column per score.
                    pyrvapi.rvapi_add_table(self.hits_table_id(),
                                            "PDB hits found",
                                            self.hits_table_sec_id(), 0, 0, 1,
                                            1, 100)
                    pyrvapi.rvapi_set_table_type(self.hits_table_id(), True,
                                                 True)
                    pyrvapi.rvapi_set_table_style(self.hits_table_id(), "",
                                                  "text-align:center;")

                    for j in range(nHits):
                        pyrvapi.rvapi_put_vert_theader(self.hits_table_id(),
                                                       str(j + 1),
                                                       "Hit number", j)
                        pyrvapi.rvapi_shape_vert_theader(
                            self.hits_table_id(), j, "text-align:right;", "",
                            1, 1)

                    for i in range(nColumns):
                        column = hitlist.columns[i]
                        pyrvapi.rvapi_put_horz_theader(self.hits_table_id(),
                                                       column.title,
                                                       column.tooltip, i)
                        # First column: monospace ids; last column: smaller,
                        # left-aligned description text.
                        if i == 0:
                            td_css = "font-family:courier;"
                        elif i == nColumns - 1:
                            td_css = "text-align:left;font-size:80%;"
                            pyrvapi.rvapi_shape_horz_theader(
                                self.hits_table_id(), i, td_css, "", 1, 1)
                        else:
                            td_css = ""
                        for j in range(nHits):
                            if nHits == 1:
                                pyrvapi.rvapi_put_table_string(
                                    self.hits_table_id(), column.value, j, i)
                            else:
                                pyrvapi.rvapi_put_table_string(
                                    self.hits_table_id(), column.value[j], j,
                                    i)
                            if td_css:
                                pyrvapi.rvapi_shape_table_cell(
                                    self.hits_table_id(), j, i, "", td_css, "",
                                    1, 1)

                    pyrvapi.rvapi_add_button(
                        "hits_dnl_btn", "Export hit list", "{function}",
                        "window.parent.downloadJobFile(" +
                        self.job_id + ",'hits.txt')", False,
                        self.hits_table_sec_id(), 1, 0, 1, 1)

                    if nHits > 1:

                        # Score plots only make sense with several hits.
                        self.putSection(self.hits_graph_sec_id(),
                                        "Score Plots", False)

                        pyrvapi.rvapi_add_text("<h3>Alignment scores</h3>",
                                               self.hits_graph_sec_id(), 0, 0,
                                               1, 1)
                        pyrvapi.rvapi_add_graph(self.hits_graph_id(),
                                                self.hits_graph_sec_id(), 1, 0,
                                                1, 1)
                        pyrvapi.rvapi_set_graph_size(self.hits_graph_id(), 700,
                                                     400)

                        pyrvapi.rvapi_add_text("&nbsp;<p><hr/>",
                                               self.hits_graph_sec_id(), 2, 0,
                                               1, 1)
                        pyrvapi.rvapi_add_text("<h3>Correlation plots</h3>",
                                               self.hits_graph_sec_id(), 3, 0,
                                               1, 1)

                        pyrvapi.rvapi_add_loggraph(self.corr_graph_id(),
                                                   self.hits_graph_sec_id(), 4,
                                                   0, 1, 1)

                        pyrvapi.rvapi_add_graph_data("data",
                                                     self.hits_graph_id(),
                                                     "Scores")
                        pyrvapi.rvapi_add_graph_data("data",
                                                     self.corr_graph_id(),
                                                     "Score correlations")

                        # Helper: register a dataset in both graphs at once.
                        def addDatasets(ref, name):
                            pyrvapi.rvapi_add_graph_dataset(
                                ref, "data", self.hits_graph_id(), name, name)
                            pyrvapi.rvapi_add_graph_dataset(
                                ref, "data", self.corr_graph_id(), name, name)
                            return

                        addDatasets("hno", "Hit number")
                        addDatasets("qscore", "Q-score")
                        addDatasets("rmsd", "R.m.s.d.")
                        addDatasets("nalign", "Nalign/n0")
                        addDatasets("seqid", "Seq. Id.")

                        # Helper: push one real value into both graphs.
                        def addData(ref, value):
                            pyrvapi.rvapi_add_graph_real(
                                ref, "data", self.hits_graph_id(), value, "%g")
                            pyrvapi.rvapi_add_graph_real(
                                ref, "data", self.corr_graph_id(), value, "%g")
                            return

                        # Columns 2..5 hold Q-score, r.m.s.d., Nalign and
                        # sequence identity respectively — TODO confirm
                        # against the gesamt JSON layout.
                        for j in range(nHits):
                            pyrvapi.rvapi_add_graph_int(
                                "hno", "data", self.hits_graph_id(), j)
                            addData("qscore",
                                    float(hitlist.columns[2].value[j]))
                            addData("rmsd", float(hitlist.columns[3].value[j]))
                            addData(
                                "nalign",
                                float(hitlist.columns[4].value[j]) / querySize)
                            addData("seqid",
                                    float(hitlist.columns[5].value[j]))

                        pyrvapi.rvapi_add_graph_plot("plot",
                                                     self.hits_graph_id(),
                                                     "Score profiles",
                                                     "Hit number", "Scores")

                        # Helper: one line on the score-profile plot.
                        def addLine(xset, yset, color):
                            pyrvapi.rvapi_add_plot_line(
                                "plot", "data", self.hits_graph_id(), xset,
                                yset)
                            pyrvapi.rvapi_set_line_options(
                                yset, "plot", "data", self.hits_graph_id(),
                                color, "solid", "off", 2.5, True)
                            return

                        addLine("hno", "qscore", "#00008B")
                        addLine("hno", "rmsd", "#8B0000")
                        addLine("hno", "nalign", "#8B8B00")
                        addLine("hno", "seqid", "#008B00")

                        pyrvapi.rvapi_set_plot_legend("plot",
                                                      self.hits_graph_id(),
                                                      "e", "")

                        # Helper: one scatter plot on the correlation graph.
                        def addPlot(plotId, name, xname, yname, xset, yset,
                                    color):
                            pyrvapi.rvapi_add_graph_plot(
                                plotId, self.corr_graph_id(), name, xname,
                                yname)
                            pyrvapi.rvapi_add_plot_line(
                                plotId, "data", self.corr_graph_id(), xset,
                                yset)
                            pyrvapi.rvapi_set_line_options(
                                yset, plotId, "data", self.corr_graph_id(),
                                color, "off", "filledCircle", 2.5, True)
                            return

                        addPlot("p1", "R.m.s.d. vs Seq. Id", "Seq. Id",
                                "R.m.s.d.", "seqid", "rmsd", "#8B0000")
                        addPlot("p2", "R.m.s.d. vs Q-score", "Q-score",
                                "R.m.s.d.", "qscore", "rmsd", "#8B0000")
                        addPlot("p3", "R.m.s.d. vs Nalign",
                                "Normalised alignment length", "R.m.s.d.",
                                "nalign", "rmsd", "#8B0000")
                        addPlot("p4", "Seq. Id. vs Q-score", "Q-score",
                                "Seq. Id.", "qscore", "seqid", "#008B00")
                        addPlot("p5", "Seq. Id. vs Nalign",
                                "Normalised alignment length", "Seq. Id.",
                                "nalign", "seqid", "#008B00")
                        addPlot("p6", "Nalign vs. Q-score", "Q-score",
                                "Normalised alignment length", "qscore",
                                "nalign", "#8B8B00")

            else:
                # No JSON output means gesamt found nothing.
                self.putTitle("No PDB matches found")

        else:  # pairwise or multiple alignment

            self.rvrow += 1
            # Pairwise runs report output files through the rvapi document;
            # multiple alignment produces a single ensemble file.
            if nXYZ == 2:
                outFiles = self.restoreReportDocument().split("\n")
            elif nXYZ > 2:
                outFiles = [self.gesamt_xyz()]

            if len(outFiles) > 0:

                self.putTitle("Gesamt Output")

                # register output data from temporary location (files will be moved
                # to output directory by the registration procedure)
                ensemble = self.registerEnsemble(
                    dtype_template.subtypeProtein(), outFiles[0])
                if ensemble:
                    self.putEnsembleWidget("ensemble_btn",
                                           "Superposed ensemble&nbsp;&nbsp;",
                                           ensemble, -1)

                # Additional files (pairwise mode) are per-domain ensembles.
                for i in range(1, len(outFiles) - 1):
                    self.rvrow += 1
                    ensemble = self.registerEnsemble(
                        dtype_template.subtypeProtein(), outFiles[i])
                    if ensemble:
                        self.putEnsembleWidget("ensemble_" + str(i) + "_btn",
                                               "Superposed domain #" + str(i),
                                               ensemble, -1)

            else:
                self.putTitle("No Output Files Generated")

        # close execution logs and quit
        self.success()
        return
Ejemplo n.º 25
0
def run(body):  # body is reference to the main Import class
    """Import X-ray diffraction image links (``*.xray.link`` files).

    Each link file holds the path of a directory of diffraction images.
    A dataset entry is created for every directory that exists, and the
    link file is moved into the output directory; missing directories
    are reported in the summary and ignored.
    """

    files_xray = [name for name in body.files_all
                  if name.lower().endswith('.xray.link')]
    if not files_xray:
        return

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%%  IMPORT X-RAY DIFFRACTION IMAGES\n")
    body.file_stdout.write("%" * 80 + "\n")

    k = 0  # counts successfully imported datasets
    for link_name in files_xray:

        body.files_all.remove(link_name)

        # The link file's content is the path of the image directory.
        link_path = os.path.join(body.importDir(), link_name)
        with open(link_path, 'r') as link_file:
            dirpath = link_file.read()

        fname = os.path.basename(dirpath)

        if os.path.isdir(dirpath):

            if k == 0:
                # Create the report section lazily, on the first valid link.
                xraySecId = "xray_sec_" + str(body.widget_no)
                body.widget_no += 1
                pyrvapi.rvapi_add_section(xraySecId,
                                          "X-Ray Diffraction Images",
                                          body.report_page_id(), body.rvrow, 0,
                                          1, 1, False)

            # Use a sub-section per link when there are several.
            subSecId = xraySecId
            if len(files_xray) > 1:
                subSecId = xraySecId + str(k)
                pyrvapi.rvapi_add_section(subSecId, "Import " + fname,
                                          xraySecId, k, 0, 1, 1, False)

            xray = dtype_xrayimages.DType(body.job_id)
            xray.setFile(link_name)  # store link
            body.dataSerialNo += 1
            xray.makeDName(body.dataSerialNo)
            body.outputDataBox.add_data(xray)

            xrayTableId = "xray_" + str(k) + "_table"
            body.putTable(xrayTableId, "", subSecId, 0)
            jrow = 0
            if len(files_xray) <= 1:
                body.putTableLine(xrayTableId, "File name",
                                  "Imported file name", fname, jrow)
                jrow += 1

            body.putTableLine(xrayTableId, "Assigned name",
                              "Assigned data name", xray.dname, jrow)
            body.putTableLine(xrayTableId, "Contents", "File contents", "---",
                              jrow + 1)

            # Move the link file into the output directory.
            os.rename(link_path, os.path.join(body.outputDir(), link_name))

            body.putSummaryLine(fname, "X-Ray Images", xray.dname)
            k += 1

        else:
            body.putSummaryLine_red(fname, "X-Ray Images",
                                    "Directory not found -- ignored")

        body.file_stdout.write("... processed: " + fname + "\n")

    body.rvrow += 1
    pyrvapi.rvapi_flush()
Ejemplo n.º 26
0
def exit_error(*args, **kwargs):
    """Exit on error, collecting as much information as we can.

    Parameters
    ----------
    message : str, optional
      An error message to print

    Notes
    -----
    This previously accepted two arguments: a string to print as an error
    message and an exception traceback.
    We now just use sys.exc_info() so the message argument is no longer
    required, but optional.
    While we refactor the code, we'll use *args to get any positional
    parameters passed in as the first argument.

    """
    # Get the root logger
    logger = logging.getLogger()

    # An error may have occured before we started logging so we need to create one here
    if not logger.handlers:
        logging.basicConfig(format='%(message)s\n', level=logging.DEBUG)
        logger = logging.getLogger()

    exc_type, exc_value, exc_traceback = sys.exc_info()
    msg = kwargs.get('message')
    if msg is None:
        if len(args) >= 1:  # Fix for old cases
            msg = args[0]
        elif exc_type is not None:
            # BUG FIX: exceptions have no ``.message`` attribute on Python 3
            # (and not all did on Python 2); str() is the portable spelling.
            msg = "{0}: {1}".format(exc_type.__name__, str(exc_value))
        else:
            # Called with no message and outside any exception handler.
            msg = "Unknown error"

    # Build the banner framing the error message.
    # header="**** AMPLE ERROR ****\n\n"
    header = "*" * 70 + "\n"
    header += "*" * 20 + " " * 10 + "AMPLE ERROR" + " " * 10 + "*" * 19 + "\n"
    header += "*" * 70 + "\n\n"

    # Create the Footer
    footer = "\n\n" + "*" * 70 + "\n\n"

    # Get the name of the debug log file
    debug_log = _debug_logfile(logger)
    if debug_log:
        footer += "More information may be found in the debug log file: {0}\n".format(
            debug_log)

    footer += "\nIf you believe that this is an error with AMPLE, please email: [email protected]\n"
    footer += "providing as much information as you can about how you ran the program.\n"
    if debug_log:
        footer += "\nPlease include the debug logfile with your email: {0}\n".format(
            debug_log)

    # String it all together
    msg = header + msg + footer

    # Print out main message
    logger.critical(msg)

    # If we were called without an exception being raised, we just print the current stack
    if exc_traceback is None:
        traceback_str = "".join(traceback.format_stack())
    else:
        traceback_str = "".join(
            traceback.format_exception(exc_type, exc_value, exc_traceback))

    msg = "AMPLE EXITING AT2..." + os.linesep + traceback_str
    # The full traceback goes to the debug log when there is one, otherwise
    # to the console so the information is not lost.
    if debug_log:
        logger.debug(msg)
    else:
        # If we don't have a debug file we want to output the traceback to the console
        logger.info(msg)

    # Make sure the error widget is updated (pyrvapi may be None when the
    # GUI is not in use)
    if pyrvapi:
        pyrvapi.rvapi_flush()

    sys.exit(1)
Ejemplo n.º 27
0
    def __init__(self, rvapi_document, webserver_uri, display_gui, logfile, work_dir, ccp4i2_xml=None, tab_prefix=""):
        """Set up the pyrvapi report document for a SIMBAD run.

        Parameters
        ----------
        rvapi_document : str
           Path of an existing rvapi document to restore (jsCoFE mode);
           falsy to create a fresh document instead.
        webserver_uri : str
           Base URI when results are served via a web server (switches on
           CCP4-online mode).
        display_gui : bool
           Whether to build the interactive pyrvapi report at all.
        logfile : str
           Path of the log file shown in the log tab.
        work_dir : str
           Working directory of the run.
        ccp4i2_xml : str, optional
           CCP4i2 XML descriptor; its presence switches on i2 mode.
        tab_prefix : str, optional
           Prefix applied to generated tab identifiers.
        """
        self.rvapi_document = rvapi_document
        self.webserver_uri = webserver_uri
        self.display_gui = display_gui
        self.logfile = logfile
        self.work_dir = work_dir
        self.ccp4i2 = bool(ccp4i2_xml)  # i2 mode is implied by the XML file
        self.tab_prefix = tab_prefix

        # Tab/result bookkeeping, populated lazily as tabs are created.
        self.jsrview_dir = None
        self._webserver_start = None
        self.log_tab_id = None
        self.lattice_results_tab_id = None
        self.lattice_df = None
        self.contaminant_results_tab_id = None
        self.contaminant_df = None
        self.morda_db_results_tab_id = None
        self.morda_db_df = None
        self.summary_tab_id = None
        self.summary_tab_results_sec_id = None
        self.citation_tab_id = None

        # Flags so each result category is only rendered once.
        self.lattice_search_results_displayed = False
        self.contaminant_results_displayed = False
        self.morda_results_displayed = False

        self.jscofe_mode = False
        self.ccp4online_mode = False
        self.rhs_tab_id = None
        self.rvapi_meta = RvapiMetadata()

        if self.display_gui or self.ccp4i2:
            ccp4 = os.environ["CCP4"]
            share_jsrview = os.path.join(ccp4, "share", "jsrview")

            if self.rvapi_document:
                # jsCoFE mode: attach to the document supplied by the caller.
                pyrvapi.rvapi_restore_document2(rvapi_document)
                self.rhs_tab_id = pyrvapi.rvapi_get_meta()
                self.jscofe_mode = True
                self.jsrview_dir = os.path.dirname(rvapi_document)
            else:
                # Stand-alone mode: create a new document of our own.
                self.jsrview_dir = os.path.join(work_dir, SIMBAD_PYRVAPI_SHAREDIR)
                os.mkdir(self.jsrview_dir)
                wintitle = "SIMBAD Results"

                if ccp4i2_xml:
                    self.init_from_ccp4i2_xml(ccp4i2_xml, self.jsrview_dir, share_jsrview, wintitle)
                else:
                    pyrvapi.rvapi_init_document("SIMBAD_results", self.jsrview_dir, wintitle, 1, 7, share_jsrview, None,
                                                None, None, None)
                    self.rvapi_document = os.path.join(self.jsrview_dir, "index.html")

            if webserver_uri:
                # NOTE(review): -7 presumably strips a fixed-length trailing
                # path component when building server-relative URIs — confirm.
                self._webserver_start = len(self.jsrview_dir) - 7
                self.ccp4online_mode = True
            elif not ccp4i2_xml:
                # We start our own browser
                jsrview = os.path.join(ccp4, "libexec", "jsrview")
                subprocess.Popen([jsrview, os.path.join(self.jsrview_dir, "index.html")])

            pyrvapi.rvapi_add_header("SIMBAD Results")

            if os.path.isfile(logfile) and not self.ccp4i2:
                self.create_log_tab(logfile)

        pyrvapi.rvapi_flush()
Ejemplo n.º 28
0
 def flush(self):
     """Push any buffered report updates out through RVAPI."""
     pyrvapi.rvapi_flush()
Ejemplo n.º 29
0
 def unsetLogParser(self):
     """Detach the current log parser and flush pending report output.

     stdout is flushed first so any text still buffered while the old
     parser was active reaches the log before parsing stops.
     """
     self.file_stdout.flush()
     self.log_parser = None
     pyrvapi.rvapi_flush()
     return
Ejemplo n.º 30
0
def run(body,
        sectionTitle="Macromolecular sequences"
        ):  # body is reference to the main Import class
    """Import macromolecular sequence files found among the uploaded files.

    Scans ``body.files_all`` for ``.seq``/``.fasta``/``.pir`` files, converts
    each into a sequence data object, computes its length and approximate
    molecular weight, registers it in ``body.outputDataBox`` and renders a
    per-file table in a new report section.  Imported files are removed
    from ``body.files_all``.

    Args:
        body: reference to the main Import task object (provides report
            helpers, I/O streams, directories and counters).
        sectionTitle: title of the report section created for the imports.
    """

    files_seq = [f for f in body.files_all
                 if f.lower().endswith(('.seq', '.fasta', '.pir'))]

    if not files_seq:
        return

    # Annotation is produced by the upload stage; without it the polymer
    # type of each file cannot be determined.  Treat a missing or corrupt
    # file as "no annotation" and report below.
    annotation = None
    try:
        with open('annotation.json', 'r') as af:
            annotation = jsonut.jObject(af.read()).annotation
    except Exception:
        pass

    body.file_stdout.write("\n" + "%" * 80 + "\n")
    body.file_stdout.write("%%%%%  IMPORT OF SEQUENCES\n")
    body.file_stdout.write("%" * 80 + "\n")

    if not annotation:
        msg = "\n ******** Sequence annotation file NOT FOUND OR CORRUPT (error)\n"
        body.file_stdout.write(msg)
        body.file_stderr.write(msg)
        return

    seqSecId = "seq_sec_" + str(body.widget_no)
    body.widget_no += 1

    pyrvapi.rvapi_add_section(seqSecId, sectionTitle, body.report_page_id(),
                              body.rvrow, 0, 1, 1, False)

    for k, f in enumerate(files_seq):

        body.files_all.remove(f)

        # Locate the annotation entry matching this (renamed) upload.
        annot = None
        for a in annotation:
            for item in a.items:
                if item.rename == f:
                    annot = item
        if not annot:
            msg = "\n ******** Sequence annotation file DOES NOT MATCH UPLOAD (error)\n"
            body.file_stdout.write(msg)
            body.file_stderr.write(msg)
            return

        # With several files, each import goes into its own sub-section.
        subSecId = seqSecId
        if len(files_seq) > 1:
            subSecId = seqSecId + str(k)
            pyrvapi.rvapi_add_section(subSecId, "Import " + f, seqSecId, k, 0,
                                      1, 1, False)

        seq = dtype_sequence.DType(body.job_id)
        seq.addSubtype(annot.type)
        seq.setFile(f)
        seq.convert2Seq(body.importDir(), body.outputDir())
        body.dataSerialNo += 1
        seq.makeDName(body.dataSerialNo)

        os.rename(os.path.join(body.importDir(), f),
                  os.path.join(body.outputDir(), f))
        seq.makeUniqueFNames(body.outputDir())

        body.outputDataBox.add_data(seq)

        seqTableId = "seq_" + str(k) + "_table"
        body.putTable(seqTableId, "", subSecId, 0)
        body.putTableLine(seqTableId, "File name", "Imported file name", f, 0)
        body.putTableLine(seqTableId, "Assigned name", "Assigned data name",
                          seq.dname, 1)
        body.putTableLine(seqTableId, "Type", "Polymer type", seq.subtype[0],
                          2)

        # Read all non-empty lines.  Materialise a list (filter() is a
        # one-shot iterator on Python 3 and cannot be indexed or len()-ed)
        # and close the file promptly.
        with open(os.path.join(body.outputDir(), seq.files[0]), "r") as sf:
            lines = [line.rstrip() for line in sf if line.rstrip()]

        htmlLine = ""
        body.file_stdout.write("\n")

        # Residue weight table depends on polymer type.
        weights = aaWeight if annot.type == "protein" else naWeight

        for i, line in enumerate(lines):
            if i > 0:
                body.file_stdout.write("    ")
                htmlLine += "<br>"
                # The first line is the FASTA/PIR header; only the
                # remaining lines contribute to length and weight.
                seq.size += len(line.strip())
                for ch in line:
                    if ch in weights:
                        seq.weight += weights[ch]
            htmlLine += line

        body.putTableLine(seqTableId, "Contents", "Data contents", htmlLine, 3)
        body.putTableLine(seqTableId, "Length  ", "Sequence length",
                          str(seq.size), 4)
        body.putTableLine(seqTableId, "Weight  ", "Molecular weight",
                          str(seq.weight), 5)

        body.putSummaryLine(f, "SEQ", seq.dname)

        body.file_stdout.write("\n... processed: " + f + "\n    ")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
Ejemplo n.º 31
0
    def __init__(self, rvapi_document, webserver_uri, display_gui, logfile, work_dir, ccp4i2_xml=None, tab_prefix=""):
        """Prepare the pyrvapi report document used to display SIMBAD results.

        Args:
            rvapi_document: existing rvapi document to restore (jscofe mode),
                or a falsy value to create a fresh document.
            webserver_uri: base URI when the report is served over the web.
            display_gui: whether a GUI report should be produced at all.
            logfile: path to the SIMBAD log file shown in the log tab.
            work_dir: SIMBAD working directory.
            ccp4i2_xml: optional ccp4i2 XML file; its presence switches on
                ccp4i2 mode.
            tab_prefix: prefix applied to generated tab identifiers.
        """
        self.rvapi_document = rvapi_document
        self.webserver_uri = webserver_uri
        self.display_gui = display_gui
        self.logfile = logfile
        self.work_dir = work_dir
        self.ccp4i2 = bool(ccp4i2_xml)
        self.tab_prefix = tab_prefix

        # Tab identifiers and cached result tables; populated lazily as
        # the individual result tabs are created.
        self.jsrview_dir = None
        self._webserver_start = None
        self.log_tab_id = None
        self.lattice_results_tab_id = None
        self.lattice_df = None
        self.contaminant_results_tab_id = None
        self.contaminant_df = None
        self.morda_db_results_tab_id = None
        self.morda_db_df = None
        self.summary_tab_id = None
        self.summary_tab_results_sec_id = None

        self.lattice_search_results_displayed = False
        self.contaminant_results_displayed = False
        self.morda_results_displayed = False

        self.jscofe_mode = False
        self.rhs_tab_id = None
        self.rvapi_meta = RvapiMetadata()

        if self.display_gui or self.ccp4i2:
            ccp4_root = os.environ["CCP4"]
            jsrview_share = os.path.join(ccp4_root, "share", "jsrview")

            if self.rvapi_document:
                # jscofe mode: attach to the document handed to us.
                pyrvapi.rvapi_restore_document2(rvapi_document)
                self.rhs_tab_id = pyrvapi.rvapi_get_meta()
                self.jscofe_mode = True
                self.jsrview_dir = os.path.dirname(rvapi_document)
            else:
                self.jsrview_dir = os.path.join(work_dir, SIMBAD_PYRVAPI_SHAREDIR)
                os.mkdir(self.jsrview_dir)
                window_title = "SIMBAD Results"

                if ccp4i2_xml:
                    self.init_from_ccp4i2_xml(ccp4i2_xml, self.jsrview_dir, jsrview_share, window_title)
                else:
                    pyrvapi.rvapi_init_document("SIMBAD_results", self.jsrview_dir, window_title, 1, 7,
                                                jsrview_share, None, None, None, None)
                    self.rvapi_document = os.path.join(self.jsrview_dir, "index.html")

            if webserver_uri:
                self._webserver_start = len(self.jsrview_dir) + 1
            elif not ccp4i2_xml:
                # We start our own browser
                viewer = os.path.join(ccp4_root, "libexec", "jsrview")
                subprocess.Popen([viewer, os.path.join(self.jsrview_dir, "index.html")])

            pyrvapi.rvapi_add_header("SIMBAD Results")

            if os.path.isfile(logfile) and not self.ccp4i2:
                self.create_log_tab(logfile)

        pyrvapi.rvapi_flush()