Example #1
    def putWaitMessageLF(self, message_str, foregap=1):
        gridId = self.page_cursor[0] + str(self.page_cursor[1])
        pyrvapi.rvapi_add_grid(gridId, False, self.page_cursor[0],
                               self.page_cursor[1], 0, 1, 1)
        for i in range(foregap):
            pyrvapi.rvapi_set_text(" ", gridId, i, 0, 1, 1)
        pyrvapi.rvapi_set_text(
            "<font style='font-size:120%;'>" + message_str + "</font>", gridId,
            foregap, 0, 1, 1)
        pyrvapi.rvapi_set_text("<div class='activity_bar'/>", gridId, foregap,
                               1, 1, 1)
        self.page_cursor[1] += 1
        return
Example #2
def run(
    body,  # body is a reference to the main Import class
    sectionTitle="Reflection datasets created",
    sectionOpen=False,  # to keep result section closed if several datasets
    freeRflag=True  # will be run if necessary
):

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_merged', 'mtz_merged'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    mtzSecId = body.getWidgetId("mtz_sec") + "_"

    k = 0
    for f_orig, f_fmt in files_mtz:
        body.files_all.remove(f_orig)
        p_orig = os.path.join(body.importDir(), f_orig)
        p_mtzin = p_orig
        if not f_fmt.startswith('mtz_'):
            p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
            sp = subprocess.Popen('pointless',
                                  stdin=subprocess.PIPE,
                                  stdout=body.file_stdout,
                                  stderr=body.file_stderr)
            sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                           '\nCOPY\n')
            sp.stdin.close()
            if sp.wait():
                p_mtzin = None

        if p_mtzin:

            p_mtzout = p_mtzin
            rc = command.comrc()

            if freeRflag:

                p_mtzout = os.path.join(body.outputDir(),
                                        os.path.basename(f_orig))

                if k == 0:
                    scr_file = open(freerflag_script(), "w")
                    scr_file.write("UNIQUE\n")
                    scr_file.close()

                # run freerflag: generate FreeRFlag if it is absent, and expand
                # all reflections

                rc = command.call("freerflag",
                                  ["HKLIN", p_mtzin, "HKLOUT", p_mtzout],
                                  "./",
                                  freerflag_script(),
                                  body.file_stdout,
                                  body.file_stderr,
                                  log_parser=None)

            if rc.msg:
                msg = "\n\n Freerflag failed with message:\n\n" + \
                      rc.msg + \
                      "\n\n File " + f_orig + \
                      " cannot be processed.\n\n"
                body.file_stdout.write(msg)
                body.file_stderr.write(msg)
                body.putSummaryLine_red(f_orig, "MTZ",
                                        "Failed to process/import, ignored")

            else:

                mf = mtz.mtz_file(p_mtzout)
                body.summary_row_0 = -1  # to signal the beginning of summary row

                for ds in mf:

                    if k == 0:
                        body.file_stdout.write("\n" + "%" * 80 + "\n")
                        body.file_stdout.write(
                            "%%%%%  IMPORT REFLECTION DATA\n")
                        body.file_stdout.write("%" * 80 + "\n")

                    # make HKL dataset annotation
                    hkl = dtype_hkl.DType(body.job_id)
                    hkl.importMTZDataset(ds)
                    body.dataSerialNo += 1
                    hkl.makeDName(body.dataSerialNo)
                    datasetName = ""

                    if k == 0:
                        if sectionTitle:
                            pyrvapi.rvapi_add_section(mtzSecId, sectionTitle,
                                                      body.report_page_id(),
                                                      body.rvrow, 0, 1, 1,
                                                      sectionOpen)
                        else:
                            pyrvapi.rvapi_add_section(
                                mtzSecId,
                                "Reflection dataset created: " + hkl.dname,
                                body.report_page_id(), body.rvrow, 0, 1, 1,
                                sectionOpen)

                    subSecId = mtzSecId
                    if len(files_mtz) > 1 or len(mf) > 1:
                        subSecId = mtzSecId + str(k)
                        pyrvapi.rvapi_add_section(subSecId, hkl.dname,
                                                  mtzSecId, k, 0, 1, 1, False)
                        #pyrvapi.rvapi_add_section ( subSecId,
                        #            f_orig + " / " + hkl.getDataSetName(),
                        #            mtzSecId,k,0,1,1,False )

                    # run ctruncate
                    outFileName = os.path.join(body.outputDir(),
                                               hkl.dataId + ".mtz")
                    outXmlName = "ctruncate" + hkl.dataId + ".xml"
                    cmd = ["-hklin", p_mtzout, "-hklout", outFileName]
                    amplitudes = ""

                    meanCols = hkl.getMeanColumns()
                    if meanCols[2] != "X":
                        cols = "/*/*/["
                        if meanCols[1] != None:
                            cols = cols + meanCols[0] + "," + meanCols[1]
                        else:
                            cols = cols + meanCols[0]
                        if meanCols[2] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colin", cols + "]"]

                    anomCols = hkl.getAnomalousColumns()
                    anomalous = False
                    if anomCols[4] != "X":
                        anomalous = True
                        cols = "/*/*/["
                        for i in range(0, 4):
                            if anomCols[i] != None:
                                if i > 0:
                                    cols = cols + ","
                                cols = cols + anomCols[i]
                        if anomCols[4] == "F":
                            amplitudes = "-amplitudes"
                        cmd += ["-colano", cols + "]"]

                    if amplitudes:
                        cmd += [amplitudes]

                    cmd += ["-xmlout", outXmlName]
                    cmd += ["-freein"]

                    pyrvapi.rvapi_add_text(
                        "&nbsp;<p><h2>Data analysis (CTruncate)</h2>",
                        subSecId, 1, 0, 1, 1)
                    pyrvapi.rvapi_add_panel(mtzSecId + str(k), subSecId, 2, 0,
                                            1, 1)
                    """
                    log_parser = pyrvapi_ext.parsers.generic_parser ( mtzSecId+str(k),
                            False,body.generic_parser_summary,False )
                    rc = command.call ( "ctruncate",cmd,"./",None,
                                        body.file_stdout,body.file_stderr,log_parser )
                    """
                    # not clear why file_stdin is not None at this point and
                    # needs to be forced, or else runApp looks for an input script
                    body.file_stdin = None
                    body.setGenericLogParser(mtzSecId + str(k), False)
                    body.runApp("ctruncate", cmd)

                    body.file_stdout.flush()

                    mtzTableId = body.getWidgetId("mtz") + "_" + str(
                        k) + "_table"

                    if rc.msg:
                        msg = "\n\n CTruncate failed with message:\n\n" + \
                              rc.msg + \
                              "\n\n Dataset " + hkl.dname + \
                              " cannot be used.\n\n"
                        body.file_stdout.write(msg)
                        body.file_stderr.write(msg)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl, -1,
                                     msg, 0)
                        datasetName = hkl.dname

                    elif not os.path.exists(outFileName):
                        body.file_stdout.write ( "\n\n +++ Dataset " + hkl.dname + \
                            "\n was not truncated and will be used as is\n\n" )
                        hkl.makeUniqueFNames(body.outputDir())
                        body.outputDataBox.add_data(hkl)
                        makeHKLTable(body, mtzTableId, subSecId, hkl, hkl, 0,
                                     "", 0)
                        datasetName = hkl.dname

                        srf.putSRFDiagram(body, hkl, body.outputDir(),
                                          body.reportDir(), subSecId, 3, 0, 1,
                                          1, body.file_stdout,
                                          body.file_stderr, None)

                        pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (merged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + datasetName + "<br>&nbsp;",
                                subSecId,4,0,1,1 )
                        pyrvapi.rvapi_add_data(
                            "hkl_data_" + str(body.dataSerialNo),
                            "Merged reflections",
                            # always relative to job_dir from job_dir/html
                            os.path.join("..", body.outputDir(), hkl.files[0]),
                            "hkl:hkl",
                            subSecId,
                            5,
                            0,
                            1,
                            1,
                            -1)

                    else:
                        body.file_stdout.write ( "\n\n ... Dataset " + hkl.dname + \
                            "\n was truncated and will substitute the " + \
                            "original one\n\n" )
                        mtzf = mtz.mtz_file(outFileName)
                        # ctruncate should create a single dataset here
                        for dset in mtzf:
                            dset.MTZ = os.path.basename(outFileName)
                            hkl_data = dtype_hkl.DType(body.job_id)
                            hkl_data.importMTZDataset(dset)
                            hkl_data.dname = hkl.dname
                            hkl_data.dataId = hkl.dataId
                            hkl_data.makeUniqueFNames(body.outputDir())
                            body.outputDataBox.add_data(hkl_data)
                            makeHKLTable(body, mtzTableId, subSecId, hkl,
                                         hkl_data, 1, "", 0)
                            datasetName = hkl_data.dname

                            srf.putSRFDiagram(body, hkl_data, body.outputDir(),
                                              body.reportDir(), subSecId, 3, 0,
                                              1, 1, body.file_stdout,
                                              body.file_stderr, None)

                            pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (merged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + datasetName + "<br>&nbsp;",
                                subSecId,4,0,1,1 )
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Merged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             hkl_data.files[0]),
                                "hkl:hkl",
                                subSecId,
                                5,
                                0,
                                1,
                                1,
                                -1)

                    if body.summary_row_0 < 0:
                        body.putSummaryLine(f_orig, "HKL", datasetName)
                    else:
                        body.addSummaryLine("HKL", datasetName)
                    k += 1
                    pyrvapi.rvapi_flush()

                if len(mf) <= 0:
                    body.putSummaryLine_red(f_orig, "UNKNOWN", "-- ignored")

            body.file_stdout.write("... processed: " + f_orig + "\n    ")

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
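A note on the XDS-to-MTZ conversion step above (the same pattern appears in Example #5): sp.stdin.write() is handed a str, which only works while the pipe is in text mode, as it is by default on Python 2. Below is a minimal sketch of the same call written so it also runs under Python 3; the helper name and file-name arguments are placeholders, and the XDSIN/HKLOUT/COPY keywords are the ones used above.

import subprocess

def xds_to_mtz(p_orig, p_mtzin, file_stdout, file_stderr):
    # universal_newlines=True makes the child's stdin a text stream on Python 3
    sp = subprocess.Popen('pointless',
                          stdin=subprocess.PIPE,
                          stdout=file_stdout,
                          stderr=file_stderr,
                          universal_newlines=True)
    sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin + '\nCOPY\n')
    sp.stdin.close()
    return sp.wait() == 0  # pointless exits with 0 on success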
Example #3
    def putMessage(self, message_str):
        pyrvapi.rvapi_set_text(message_str, self.page_cursor[0],
                               self.page_cursor[1], 0, 1, 1)
        self.page_cursor[1] += 1
        return
Example #4
    def putMessageLF(self, message_str):
        pyrvapi.rvapi_set_text(
            "<font style='font-size:120%;'>" + message_str + "</font>",
            self.page_cursor[0], self.page_cursor[1], 0, 1, 1)
        self.page_cursor[1] += 1
        return
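Examples #1, #3 and #4 all write into a report through a shared page_cursor of the form [holder id, next row]. The following is a minimal sketch of setting up a pyrvapi document that such helpers could write to, assuming a CCP4 environment where the pyrvapi module is importable; the document id, title and tab id are placeholders, and the rvapi_init_document arguments mirror Example #6.

import os
import pyrvapi

jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
pyrvapi.rvapi_init_document('demo_doc', os.getcwd(), 'Demo report', 1, 7,
                            jsrview_dir, None, None, None, None)
pyrvapi.rvapi_add_tab('tab1', 'Report', True)

# page_cursor convention used by the helpers above: [holder id, next row]
page_cursor = ['tab1', 0]
pyrvapi.rvapi_set_text("<font style='font-size:120%;'>Importing data...</font>",
                       page_cursor[0], page_cursor[1], 0, 1, 1)
page_cursor[1] += 1
pyrvapi.rvapi_flush()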
Example #5
def run(body):  # body is a reference to the main Import class

    files_mtz = []
    for f_orig in body.files_all:
        f_base, f_ext = os.path.splitext(f_orig)
        if f_ext.lower() in ('.hkl', '.mtz'):
            p_orig = os.path.join(body.importDir(), f_orig)
            f_fmt = mtz.hkl_format(p_orig, body.file_stdout)
            if f_fmt in ('xds_integrated', 'xds_scaled', 'mtz_integrated'):
                files_mtz.append((f_orig, f_fmt))

    if not files_mtz:
        return

    unmergedSecId = "unmerged_mtz_sec_" + str(body.widget_no)
    body.widget_no += 1

    k = 0
    for f_orig, f_fmt in files_mtz:
        try:
            body.files_all.remove(f_orig)
            p_orig = os.path.join(body.importDir(), f_orig)
            p_mtzin = p_orig
            if not f_fmt.startswith('mtz_'):
                p_mtzin = os.path.splitext(f_orig)[0] + '.mtz'
                sp = subprocess.Popen('pointless',
                                      stdin=subprocess.PIPE,
                                      stdout=body.file_stdout,
                                      stderr=body.file_stderr)

                sp.stdin.write('XDSIN ' + p_orig + '\nHKLOUT ' + p_mtzin +
                               '\nCOPY\n')
                sp.stdin.close()
                if sp.wait():
                    p_mtzin = None

            if p_mtzin:
                if k == 0:
                    body.file_stdout.write("\n" + "%" * 80 + "\n")
                    body.file_stdout.write("%%%%%  UNMERGED DATA IMPORT\n")
                    body.file_stdout.write("%" * 80 + "\n")

                    pyrvapi.rvapi_add_section(unmergedSecId,
                                              "Unmerged datasets",
                                              body.report_page_id(),
                                              body.rvrow, 0, 1, 1, False)
                    urow = 0

                fileSecId = unmergedSecId
                frow = 0
                if len(files_mtz) > 1:
                    fileSecId = unmergedSecId + "_" + str(k)
                    pyrvapi.rvapi_add_section(fileSecId, "File " + f_orig,
                                              unmergedSecId, urow, 0, 1, 1,
                                              False)
                    urow += 1
                    pyrvapi.rvapi_set_text(
                        "<h2>Data analysis (Pointless)</h2>", fileSecId, frow,
                        0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text ( "<h2>Data analysis (Pointless)</h2>" + \
                                             "<h3>File: " + f_orig + "</h3>",
                                             fileSecId,frow,0,1,1 )
                reportPanelId = fileSecId + "_report"
                pyrvapi.rvapi_add_panel(reportPanelId, fileSecId, frow + 1, 0,
                                        1, 1)

                frow += 2

                log_parser = pyrvapi_ext.parsers.generic_parser(
                    reportPanelId, False)

                body.file_stdin = open(pointless_script(), 'w')
                body.file_stdin.write (
                    "HKLIN "  + p_mtzin + "\n" + \
                    "XMLOUT " + pointless_xml() + "\n"
                )
                body.file_stdin.close()

                rc = command.call("pointless", [], "./", pointless_script(),
                                  body.file_stdout, body.file_stderr,
                                  log_parser)

                body.unsetLogParser()

                symmTablesId = fileSecId + "_" + symm_det()
                pyrvapi.rvapi_add_section(symmTablesId,
                                          "Symmetry determination tables",
                                          fileSecId, frow, 0, 1, 1, True)
                pyrvapi.rvapi_set_text("&nbsp;", fileSecId, frow + 1, 0, 1, 1)
                frow += 2

                #body.putSection ( symmTablesId,"Symmetry determination tables",True )
                table_list = datred_utils.parse_xmlout(pointless_xml())
                datred_utils.report(table_list, symmTablesId)

                # dump_keyargs = dict(sort_keys=True, indent=4, separators=(',', ': '))
                # print json.dumps(datred_utils.tabs_as_dict(tab_list), **dump_keyargs)

                if rc.msg:
                    msg = "\n\n Pointless failed with message:\n\n" + \
                          rc.msg + \
                          "\n\n File " + f_orig + \
                          " cannot be processed.\n\n"
                    body.file_stdout.write(msg)
                    body.file_stderr.write(msg)
                    body.putSummaryLine_red(
                        f_orig, "UNMERGED",
                        "Failed to process/import, ignored")

                else:
                    mf = mtz.mtz_file(p_mtzin)

                    dset_list = datred_utils.point_symm_datasets(
                        pointless_xml(), f_fmt)
                    body.summary_row_0 = -1  # to signal the beginning of summary row

                    for dataset in dset_list:

                        # make HKL dataset annotation
                        unmerged = dtype_unmerged.DType(body.job_id)
                        dataset["symm_summary"] = table_list
                        unmerged.importUnmergedData(mf, dataset)
                        body.dataSerialNo += 1
                        unmerged.makeDName(body.dataSerialNo)

                        outFileName = unmerged.dataId + ".mtz"
                        body.file_stdin = open(pointless_script(), 'w')
                        body.file_stdin.write (
                            "NAME PROJECT x CRYSTAL y DATASET z\n" + \
                            "HKLIN "  + p_mtzin       + "\n" + \
                            "HKLOUT " + os.path.join(body.outputDir(),outFileName) + "\n" + \
                            "COPY\n"  + \
                            "ORIGINALLATTICE\n"
                        )

                        for offset, first, last in unmerged.dataset.runs:
                            body.file_stdin.write("RUN 1 FILE 1 BATCH " +
                                                  str(first) + " to " +
                                                  str(last) + "\n")
                        body.file_stdin.write("END\n")

                        body.file_stdin.close()

                        rc = command.call("pointless", [], "./",
                                          pointless_script(), body.file_stdout,
                                          body.file_stderr, None)

                        if rc.msg:
                            msg = "\n\n Pointless failed with message:\n\n" + \
                                  rc.msg + \
                                  "\n\n File " + outFileName + \
                                  " cannot be processed.\n\n"
                            body.file_stdout.write(msg)
                            body.file_stderr.write(msg)
                            body.putSummaryLine_red(
                                outFileName, "UNMERGED",
                                "Failed to process/import, ignored")

                        else:
                            unmerged.files[0] = outFileName

                            subSecId = fileSecId
                            if len(dset_list) > 1:
                                subSecId = fileSecId + str(k)
                                pyrvapi.rvapi_add_section(
                                    subSecId,
                                    "Import " + unmerged.dataset.name,
                                    fileSecId, frow, 0, 1, 1, False)
                                frow += 1

                            mtzTableId = "unmerged_mtz_" + str(k) + "_table"

                            unmerged.makeUniqueFNames(body.outputDir())

                            body.outputDataBox.add_data(unmerged)
                            makeUnmergedTable(body, mtzTableId, subSecId,
                                              unmerged, 0)

                            pyrvapi.rvapi_set_text (
                                "&nbsp;<br><hr/><h3>Created Reflection Data Set (unmerged)</h3>" + \
                                "<b>Assigned name:</b>&nbsp;&nbsp;" + unmerged.dname + \
                                "<br>&nbsp;",subSecId,frow,0,1,1 )
                            pyrvapi.rvapi_add_data(
                                "hkl_data_" + str(body.dataSerialNo),
                                "Unmerged reflections",
                                # always relative to job_dir from job_dir/html
                                os.path.join("..", body.outputDir(),
                                             unmerged.files[0]),
                                "hkl:hkl",
                                subSecId,
                                frow + 1,
                                0,
                                1,
                                1,
                                -1)
                            frow += 2

                            if body.summary_row_0 < 0:
                                body.putSummaryLine(f_orig, "UNMERGED",
                                                    unmerged.dname)
                            else:
                                body.addSummaryLine("UNMERGED", unmerged.dname)
                            k += 1

                pyrvapi.rvapi_flush()

                # move imported file into output directory
                os.rename(
                    p_mtzin,
                    os.path.join(body.outputDir(), os.path.basename(p_mtzin)))

                body.file_stdout.write("... processed: " + f_orig + "\n    ")

            trace = ''

        except:
            trace = ''.join(traceback.format_exception(*sys.exc_info()))
            body.file_stdout.write(trace)

        if trace:
            body.fail(trace, 'import failed')

    body.rvrow += 1
    pyrvapi.rvapi_flush()

    return
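The try/except block above funnels any Python error into the job's log and then fails the job cleanly. Below is a small sketch of that error-capture pattern in isolation; run_step and logfile are hypothetical names.

import sys
import traceback

def run_step(step, logfile):
    # returns '' on success, or the formatted traceback on failure
    trace = ''
    try:
        step()
    except Exception:
        trace = ''.join(traceback.format_exception(*sys.exc_info()))
        logfile.write(trace)
    return trace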
Example #6
def write_output(items,
                 json_file=None,
                 xml_file=None,
                 xmlroot=None,
                 docid=None,
                 output=None):
    # in non-i2 mode items are added to the output dictionary which is dumped to json
    if json_file is not None:
        if 'result' in items:
            result = items['result']
            for solution in output['solutions']:
                if solution['id'] == result['id']:
                    solution.update({'acornCC': result['acornCC']})
        else:
            output.update(items)
        temp_filename = json_file + '.tmp'
        with open(temp_filename, 'w') as jsonfile:
            print(json.dumps(output,
                             sort_keys=True,
                             indent=2,
                             separators=(',', ': ')),
                  file=jsonfile)
        if os.path.exists(json_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(json_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, json_file)
        return output
    elif xmlroot is None and xml_file is not None:
        xmlroot = etree.Element('Fragon')
        return xmlroot
    elif docid is None:
        jsrview_dir = os.path.join(os.environ['CCP4'], 'share', 'jsrview')
        pyrvapi.rvapi_init_document('fragon_results', os.getcwd(),
                                    'Fragon %s results' % items['Fragon'], 1,
                                    7, jsrview_dir, None, None, None, None)
        pyrvapi.rvapi_add_tab('tab1', 'Fragon results', True)
        pyrvapi.rvapi_add_section('status', 'Current status', 'tab1', 0, 0, 1,
                                  1, True)
        pyrvapi.rvapi_add_text(
            'The job is currently running. Updates will be shown here after fragment placement and density modification.',
            'status', 0, 0, 1, 1)
        pyrvapi.rvapi_flush()
        output.update(items)
        return 'tab1', output
    elif xml_file is not None:
        # in i2 mode new items are added to the etree as this preserves the order in the xml
        for key in items:
            if key == 'Fragon':
                version_node = etree.SubElement(xmlroot, 'Version')
                version_node.text = output['Fragon']
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    try:
                        progress_node = xmlroot.xpath(
                            '//Fragon/phaser_progress')[0]
                    except IndexError:
                        progress_node = etree.SubElement(
                            xmlroot, 'phaser_progress')
                    progress_node.text = callback[1]
                elif callback[0] == 'Best LLG/TFZ':
                    best_llg_node = etree.SubElement(xmlroot, 'best_llg')
                    best_llg_node.text = callback[1]['llg']
                    best_tfz_node = etree.SubElement(xmlroot, 'best_tfz')
                    best_tfz_node.text = callback[1]['tfz']
            elif key == 'solutions':
                solutions = items['solutions']
                try:
                    solutions_node = xmlroot.xpath('//Fragon/solutions')[0]
                except IndexError:
                    solutions_node = etree.SubElement(xmlroot, 'solutions')
                if len(solutions) > 0:
                    solutions_node.text = json.dumps(solutions)
            else:
                node = etree.SubElement(xmlroot, key)
                node.text = items[key].__str__()
        temp_filename = 'program.xml.tmp'
        with open(temp_filename, 'w') as xmlfile:
            xmlfile.write(etree.tostring(xmlroot, pretty_print=True))
        if os.path.exists(xml_file):
            import uuid
            tmpfile = str(uuid.uuid4())
            os.rename(xml_file, tmpfile)
            os.remove(tmpfile)
        os.rename(temp_filename, xml_file)
    elif docid is not None:
        for key in items:
            if key == 'copies':
                if items['copies'] > 1:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place %d fragments' %
                        items['copies'], 'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Running Phaser to place the fragment', 'status', 0, 0,
                        1, 1)
                pyrvapi.rvapi_add_tab('tab2', 'Phaser log file', False)
                pyrvapi.rvapi_append_content(output['root'] + '_Phaser.log',
                                             True, 'tab2')
                pyrvapi.rvapi_flush()
                output.update(items)
            elif key == 'callback':
                callback = items['callback']
                if callback[0] == 'progress':
                    pyrvapi.rvapi_set_text(
                        'Current Phaser stage: %s' % callback[1], 'status', 1,
                        0, 1, 1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best LLG':
                    pyrvapi.rvapi_set_text(
                        'Current best solution Log Likelihood Gain (LLG): %s Translation Function Z-score (TFZ): %s'
                        % (callback[1], output['best_tfz']), 'status', 2, 0, 1,
                        1)
                    pyrvapi.rvapi_flush()
                elif callback[0] == 'Best TFZ':
                    output.update({'best_tfz': callback[1]})
            elif key == 'solutions':
                solutions = items['solutions']
                top_llg = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted([
                    solution['acornCC'] if solution['acornCC']
                    not in ['Running', '-', None] else None
                    for solution in solutions
                ],
                                     reverse=True)[0]
                if len(solutions) == 1:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found a single solution with Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                        % (top_llg, top_tfz), 'status', 0, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Phaser has found %d solutions. The top solution has Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TF Z-score) of %0.2f'
                        % (output['num_phaser_solutions'], top_llg, top_tfz),
                        'status', 0, 0, 1, 1)
                if output['num_phaser_solutions'] > len(solutions):
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases for the top %d solutions by density modification with ACORN'
                        % len(solutions), 'status', 1, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text(
                        'Attempting to improve phases by density modification with ACORN',
                        'status', 1, 0, 1, 1)
                if top_acornCC is not None:
                    pyrvapi.rvapi_set_text(
                        'The best solution so far has a correlation coefficient from density modification of %0.3f'
                        % top_acornCC, 'status', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_set_text('', 'status', 2, 0, 1, 1)
                pyrvapi.rvapi_add_table('results_table', 'Phaser solutions',
                                        'tab1', 1, 0, 1, 1, 1)
                pyrvapi.rvapi_put_horz_theader('results_table',
                                               'Solution number', '', 0)
                pyrvapi.rvapi_put_horz_theader('results_table', 'Space group',
                                               '', 1)
                pyrvapi.rvapi_put_horz_theader('results_table', 'LLG',
                                               'Phaser Log Likelihood Gain', 2)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'TF Z-score',
                    'Phaser Translation Function Z-score', 3)
                pyrvapi.rvapi_put_horz_theader(
                    'results_table', 'CC',
                    'CC from ACORN density modification', 4)
                for solution in solutions:
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%d' % solution['number'],
                                                   solution['number'] - 1, 0)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   solution['sg'],
                                                   solution['number'] - 1, 1)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['llg'],
                                                   solution['number'] - 1, 2)
                    pyrvapi.rvapi_put_table_string('results_table',
                                                   '%0.2f' % solution['tfz'],
                                                   solution['number'] - 1, 3)
                    if solution['acornCC'] in ['Running', '-']:
                        pyrvapi.rvapi_put_table_string(
                            'results_table',
                            solution['acornCC'].replace('-', ''),
                            solution['number'] - 1, 4)
                    elif solution['acornCC'] is None:
                        pyrvapi.rvapi_put_table_string('results_table',
                                                       'Not tested',
                                                       solution['number'] - 1,
                                                       4)
                    else:
                        pyrvapi.rvapi_put_table_string(
                            'results_table', '%0.3f' % solution['acornCC'],
                            solution['number'] - 1, 4)
                output.update(items)
                pyrvapi.rvapi_flush()
            elif key == 'cc_best':
                solutions = output['solutions']
                top_llg = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['llg']
                top_tfz = sorted(solutions,
                                 key=lambda r: r['llg'],
                                 reverse=True)[0]['tfz']
                top_acornCC = sorted([
                    solution['acornCC'] if solution['acornCC']
                    not in ['Running', '-', None] else None
                    for solution in solutions
                ],
                                     reverse=True)[0]
                pyrvapi.rvapi_set_section_state('status', False)
                pyrvapi.rvapi_add_section('results', 'Results', 'tab1', 2, 0,
                                          1, 1, True)
                pyrvapi.rvapi_add_text(
                    'Phaser found %d solutions. The top solution had Log Likelihood Gain (LLG) of %0.2f and Translation Function Z-score (TFZ) of %0.2f'
                    % (output['num_phaser_solutions'], top_llg, top_tfz),
                    'results', 0, 0, 1, 1)
                pyrvapi.rvapi_add_text(
                    'The best solution has a correlation coefficient from density modification of %0.3f'
                    % top_acornCC, 'results', 1, 0, 1, 1)
                if top_acornCC > 0.15:
                    pyrvapi.rvapi_add_text(
                        'This suggests the structure has been solved and the phases from ACORN will enable automated model building',
                        'results', 2, 0, 1, 1)
                else:
                    pyrvapi.rvapi_add_text(
                        'Sorry this does not suggest a solution', 'results', 3,
                        0, 1, 1)
                pyrvapi.rvapi_flush()
            elif key == 'best_solution_id':
                pdbout = output['name'] + '_phaser_solution.pdb'
                mtzout = output['name'] + '_acorn_phases.mtz'
                pyrvapi.rvapi_add_data(
                    'best', 'Best fragment placement and electron density',
                    pdbout, 'xyz', 'tab1', 3, 0, 1, 1, True)
                pyrvapi.rvapi_append_to_data('best', mtzout, 'hkl:map')
            else:
                output.update(items)
        return output
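A minimal sketch of driving write_output() in its non-i2 (JSON) mode, as implied by the first branch above; the results.json file name and the starting keys in output are placeholders.

output = {'Fragon': '1.0', 'name': 'demo', 'solutions': []}

# each call merges the new items into output and rewrites results.json
# through a temporary file
output = write_output({'copies': 2}, json_file='results.json', output=output)
output = write_output({'best_solution_id': 1}, json_file='results.json',
                      output=output)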
Example #7
def putSRFDiagram(
        body,  # reference to the Basic class
        hkl,  # hkl data object
        dirPath,  # directory with hkl object files (outputDir)
        reportDir,  # directory with html report (reportDir)
        holderId,  # rvapi holder of SRF widget
        row,
        col,  # rvapi coordinates for SRF widget
        rowSpan,
        colSpan,  # coordinate spans for SRF widget
        file_stdout,  # standard output stream
        file_stderr,  # standard error stream
        log_parser=None  # log file parser
):

    fpath = hkl.getFilePath(dirPath, 0)
    Fmean = hkl.getMeta("Fmean.value", "")
    sigF = hkl.getMeta("Fmean.sigma", "")

    if Fmean == "" or sigF == "":
        file_stderr.write ( "Fmean and sigFmean columns not found in " +\
                            hkl.files[0] + " -- SRF not calculated\n" )
        return [-1, "Fmean and sigFmean columns not found"]

    scr_file = open("molrep_srf.script", "w")
    scr_file.write ( "file_f " + fpath +\
                     "\nlabin F=" + Fmean + " SIGF=" + sigF + "\n" )
    scr_file.close()
    """
    cols  = hkl.getMeanColumns()
    if cols[2]!="F":
        file_stderr.write ( "Fmean and sigFmean columns not found in " +\
                            hkl.files[0] + " -- SRF not calculated\n" )
        return [-1,"Fmean and sigFmean columns not found"]

    scr_file = open ( "molrep_srf.script","w" )
    scr_file.write ( "file_f " + fpath +\
                     "\nlabin F=" + cols[0] + " SIGF=" + cols[1] + "\n" )
    scr_file.close ()
    """

    # Start molrep
    rc = command.call("molrep", ["-i"], "./", "molrep_srf.script", file_stdout,
                      file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.ps"):
        file_stderr.write ( "\nSRF postscript was not generated for " +\
                            hkl.files[0] + "\n" )
        return [-2, rc.msg]

    rc = command.call("ps2pdf", ["molrep_rf.ps"], "./", None, file_stdout,
                      file_stderr, log_parser)

    if not os.path.isfile("molrep_rf.pdf"):
        file_stderr.write ( "\nSRF pdf was not generated for " +\
                            hkl.files[0] + "\n" )
        return [-3, rc.msg]

    pdfpath = os.path.splitext(hkl.files[0])[0] + ".pdf"
    os.rename("molrep_rf.pdf", os.path.join(reportDir, pdfpath))

    subsecId = body.getWidgetId(holderId) + "_srf"
    pyrvapi.rvapi_add_section(subsecId, "Self-Rotation Function", holderId,
                              row, col, rowSpan, colSpan, False)

    pyrvapi.rvapi_set_text ( "<object data=\"" + pdfpath +\
            "\" type=\"application/pdf\" " +\
            "style=\"border:none;width:100%;height:1000px;\"></object>",
            subsecId,0,0,1,1 )
    pyrvapi.rvapi_flush()

    return [0, "Ok"]
Example #8
    def importData(self):

        self.putWaitMessageLF("<b>1. Input Data Import</b>")
        #self.rvrow -= 1

        # -------------------------------------------------------------------
        # import uploaded data
        # make import tab and redirect output to it
        pyrvapi.rvapi_add_tab(self.import_page_id(), "1. Input Data Import",
                              False)
        self.setReportWidget(self.import_page_id())

        fstdout = self.file_stdout
        fstderr = self.file_stderr
        self.file_stdout = open(self.import_stdout_path(), 'w')
        self.file_stderr = open(self.import_stderr_path(), 'w')

        # create tabs for import standard outputs

        if self.navTreeId:
            pyrvapi.rvapi_set_tab_proxy(self.navTreeId, self.import_page_id())
        pyrvapi.rvapi_add_tab(self.import_log_page_id(), "Log file", False)
        pyrvapi.rvapi_append_content(
            os.path.join("..",
                         self.import_stdout_path() + '?capsize'), True,
            self.import_log_page_id())
        pyrvapi.rvapi_add_tab(self.import_err_page_id(), "Errors", False)
        pyrvapi.rvapi_append_content(
            os.path.join("..",
                         self.import_stderr_path() + '?capsize'), True,
            self.import_err_page_id())

        self.putTitle("CCP4go Automated Structure Solver: Data Import")
        super(CCP4go, self).import_all()

        # redirect everything back to report page and original standard streams
        self.file_stdout.close()
        self.file_stderr.close()
        self.file_stdout = fstdout
        self.file_stderr = fstderr
        self.resetReportPage()
        if self.navTreeId:
            pyrvapi.rvapi_set_tab_proxy(self.navTreeId, "")

        # -------------------------------------------------------------------
        # fetch data for CCP4go pipeline

        self.unm = None  # unmerged dataset
        self.hkl = None  # selected merged dataset
        self.seq = None  # list of sequence objects
        self.xyz = None  # coordinates (model/apo)
        self.hkl_alt = {}  # merged datasets in alternative space groups

        if "DataUnmerged" in self.outputDataBox.data:
            self.unm = self.outputDataBox.data["DataUnmerged"][0]

        if "DataHKL" in self.outputDataBox.data:
            maxres = 10000.0
            for i in range(len(self.outputDataBox.data["DataHKL"])):
                res = self.outputDataBox.data["DataHKL"][i].getHighResolution(
                    True)
                if res < maxres:
                    maxres = res
                    self.hkl = self.outputDataBox.data["DataHKL"][i]

        if "DataSequence" in self.outputDataBox.data:
            self.seq = self.outputDataBox.data["DataSequence"]

        if "DataXYZ" in self.outputDataBox.data:
            self.xyz = self.outputDataBox.data["DataXYZ"][0]

        # -------------------------------------------------------------------
        # make data summary table

        panelId = "summary_section"
        pyrvapi.rvapi_set_text("", self.report_page_id(), self.rvrow, 0, 1, 1)
        self.putSection(panelId, "<b>1. Input summary</b>")

        tableId = "ccp4go_summary_table"
        #self.putTable ( tableId,"<font style='font-style:normal;font-size:125%;'>" +
        #                        "1. Input Data</font>",self.report_page_id(),
        #                        self.rvrow,0 )
        #self.rvrow += 1
        self.putTable(tableId, "Input data", panelId, 0, 0)
        self.setTableHorzHeaders(
            tableId, ["Assigned Name", "View"],
            ["Name of the assocuated data object", "Data view and export"])

        def addDataLine(name, tooltip, object, nrow):
            if object:
                self.putTableLine(tableId, name, tooltip, object.dname,
                                  nrow[0])
                self.putInspectButton(object, "View", tableId, nrow[0] + 1, 2)
                nrow[0] += 1
            return

        nrow = [0]
        addDataLine("Unmerged Reflections", "Reflection data", self.unm, nrow)
        addDataLine("Merged Reflections", "Reflection data", self.hkl, nrow)
        if self.seq:
            if len(self.seq) < 2:
                addDataLine("Sequence", "Sequence data", self.seq[0], nrow)
            else:
                for i in range(len(self.seq)):
                    addDataLine("Sequence #" + str(i + 1), "Sequence data",
                                self.seq[i], nrow)
        addDataLine("Structure", "Homologue structure", self.xyz, nrow)

        if self.task.ha_type:
            self.putTableLine(tableId, "Anomalous scatterers",
                              "Chemical type of anomalous scatterers",
                              self.task.ha_type, nrow[0])
            nrow[0] += 1

        for i in range(len(self.task.ligands)):
            ligand = self.task.ligands[i]
            if ligand.source != "none":
                dline = "[" + ligand.code + "] "
                if ligand.source == "smiles":
                    m = 0
                    for j in range(len(ligand.smiles)):
                        if m > 40:
                            dline += "<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
                            m = 0
                        dline += ligand.smiles[j]
                        m += 1
                self.putTableLine(tableId, "Ligand #" + str(i + 1),
                                  "Ligand description", dline, nrow[0])
                nrow[0] += 1

        return
Example #9
    def makeOutputData(self, resdir, meta):

        if not "row" in meta or not "nResults" in meta:
            return

        #if meta["nResults"]<1:
        #    return

        row = meta["row"]
        panel_id = "output_panel_" + str(row)
        title = "Details"
        if "title" in meta:
            row -= 1
            pyrvapi.rvapi_set_text("", self.report_page_id(), row, 0, 1, 1)
            title = meta["title"]

        if "merged" in meta:  # output from the data reduction part
            # import reflection data and place HKL widgets
            pyrvapi.rvapi_add_section(
                panel_id, "<b>" + str(meta["stage_no"]) +
                ". Data processing results (<i>Spg=" + meta["spg"] +
                "</i>)</b>", self.report_page_id(), row, 0, 1, 1, False)
            self.setReportWidget(panel_id)
            self.import_dir = "./"
            self.import_table_id = None
            # clean upload directory
            #shutil.rmtree(dirpath)
            #os.mkdir(dirpath)
            self.files_all = [meta["mtz"]]
            import_merged.run(self, "Import merged HKL")
            # get reference to imported structure
            self.hkl = self.outputDataBox.data["DataHKL"][0]

        elif "ligands" in meta:
            # check in generated ligands
            pyrvapi.rvapi_add_section(panel_id, title, self.report_page_id(),
                                      row, 0, 1, 1, False)
            self.setReportWidget(panel_id)

            if meta["nResults"] > 0:
                #self.putMessage ( "<h3>Generated ligand structure(s)</h3>" )
                for code in meta["ligands"]:
                    self.putMessage("&nbsp;")
                    self.putMessageLF("<b>Code: " + code + "</b>")
                    self.finaliseLigand(code, meta["ligands"][code]["xyz"],
                                        meta["ligands"][code]["cif"], False,
                                        "")
            else:
                self.putMessage("<h3>No ligand structure(s) generated</h3>")
                self.putMessage("<i>This is likely to be a program error, " +
                                "please report</i>")

        else:  # look for structure

            pyrvapi.rvapi_add_section(panel_id, title, self.report_page_id(),
                                      row, 0, 1, 1, False)
            self.setReportWidget(panel_id)

            if meta["nResults"] > 0:

                hkl_sol = self.hkl

                # check if space group changed
                if "spg" in meta:
                    spgkey = meta["spg"].replace(" ", "")
                    if spgkey in self.hkl_alt:
                        hkl_sol = self.hkl_alt[spgkey]
                    elif "hkl" in meta:
                        self.putMessage("<h3>Space group changed to " +
                                        meta["spg"] + "</h3>")
                        self.import_dir = "./"
                        self.import_table_id = None
                        hkl0 = self.outputDataBox.data["DataHKL"]
                        id0 = []
                        for i in range(len(hkl0)):
                            id0 += [hkl0[i].dataId]
                        self.files_all = [meta["hkl"]]
                        import_merged.run(
                            self,
                            "Import merged HKL reindexed in " + meta["spg"])
                        self.rvrow += 10
                        # get reference to imported structure
                        hkl0 = self.outputDataBox.data["DataHKL"]
                        hkl_sol = None
                        for i in range(len(hkl0)):
                            if not hkl0[i].dataId in id0:
                                hkl_sol = hkl0[i]
                        self.hkl_alt[spgkey] = hkl_sol

                # register structure data
                libPath = None
                if "lib" in meta:
                    libPath = meta["lib"]
                structure = self.registerStructure(meta["pdb"], meta["mtz"],
                                                   meta["map"], meta["dmap"],
                                                   libPath, True)
                if structure:

                    structure.addDataAssociation(hkl_sol.dataId)
                    structure.setRefmacLabels(hkl_sol)
                    structure.addMRSubtype()
                    structure.addXYZSubtype()

                    if "libindex" in meta:
                        structure.addLigands(meta["libindex"])

                    self.putMessage("&nbsp;")  # just vertical spacer
                    self.putStructureWidget(
                        "structure_btn_" + str(row),
                        meta["name"] + " structure and electron density",
                        structure)

                    if resdir.lower().startswith("simbad"):
                        self.import_dir = resdir
                        self.import_table_id = None
                        asudef.revisionFromStructure(
                            self,
                            hkl_sol,
                            structure,
                            "simbad_" + meta["pdbcode"],
                            useSequences=self.seq,
                            make_revision=(self.seq == None))
                        self.id_modifier += 1
                        if not self.seq:  # sequence was imported in asudef
                            self.seq = self.outputDataBox.data["DataSequence"]

                    elif resdir == "dimple_mr":
                        self.import_dir = resdir
                        self.import_table_id = None
                        asudef.revisionFromStructure(
                            self,
                            hkl_sol,
                            structure,
                            "dimple",
                            useSequences=self.seq,
                            make_revision=(self.seq == None))
                        self.id_modifier += 1
                        if not self.seq:  # sequence was imported in asudef
                            self.seq = self.outputDataBox.data["DataSequence"]

                else:
                    self.putMessage("Structure Data cannot be formed " +
                                    "(probably a bug)")
            else:
                self.putMessageLF("No solution found.")

        self.putMessage("&nbsp;")  # just vertical spacer
        """
        {"results":
           { "simbad12_results":
              {"mtz": "output/simbad12_results/simbad.mtz",
               "map": "output/simbad12_results/simbad.map",
               "name": "Simbad-LC",
               "nResults": 1,
               "rfree": 0.347,
               "dmap": "output/simbad12_results/simbad_dmap.map",
               "rfactor": 0.3792,
               "pdb": "output/simbad12_results/simbad.pdb",
               "columns": {"PHI": "PHIC_ALL_LS", "SIGF": "SIGF", "DELFWT": "DELFWT", "F": "F", "FREE": "FreeR_flag", "FOM": "FOM", "PHDELWT": "PHDELWT"}, "row": 5}, "buccaneer": {"mtz": "output/buccaneer/buccaneer.mtz", "map": "output/buccaneer/buccaneer.map", "name": "Buccanneer", "nResults": 1, "rfree": 0.3671, "dmap": "output/buccaneer/buccaneer_dmap.map", "rfactor": 0.3151, "pdb": "output/buccaneer/buccaneer.pdb", "columns": {"PHI": "PHIC_ALL_LS", "SIGF": "SIGF", "DELFWT": "DELFWT", "F": "F", "FREE": "FreeR_flag", "FOM": "FOM", "PHDELWT": "PHDELWT"},
               "row": 8
              }
            },
          "retcode": "solved",
          "report_row": 9
        }
        """

        self.resetReportPage()
        return
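The docstring above shows the results metadata this method consumes, one entry per results sub-directory. A small sketch of reading such a file, where each (resdir, meta) pair corresponds to the arguments of makeOutputData(); the results.json file name is an assumption, and the keys follow the docstring and the branches handled above.

import json

with open('results.json') as f:
    meta_all = json.load(f)

if meta_all.get('retcode') == 'solved':
    for resdir, meta in meta_all['results'].items():
        # each entry carries "row", "nResults" and output file paths, plus
        # optional keys such as "spg" or "merged" for data-processing results
        print(resdir, meta['row'], meta['nResults'], meta['pdb'])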