Example #1
    def add_merge_result(self, workdir, clh, LCV, aLCV, xds_files, num_files, stats):
        axis_opts = "cls ClH   LCV aLCV ds.all ds.used  Cmpl Redun I/sigI Rmeas CC1/2 Cmpl.ou Red.ou I/sig.ou Rmeas.ou CC1/2.ou Cmpl.in Red.in I/sig.in Rmeas.in CC1/2.in SigAno.in CCano.in WilsonB Aniso".split()

        cls = os.path.relpath(workdir, self.params.workdir)
        tmps = "%12s %5.2f %4.1f %4.1f %6d %7d %5.1f %5.1f %6.2f %5.1f %5.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %9.1f %8.1f %7.2f %.1e"
        tmps = tmps % (cls, clh, LCV, aLCV,
                       len(xds_files), num_files,
                       stats["cmpl"][0],
                       stats["redundancy"][0],
                       stats["i_over_sigma"][0],
                       stats["r_meas"][0],
                       stats["cc_half"][0],
                       stats["cmpl"][2],
                       stats["redundancy"][2],
                       stats["i_over_sigma"][2],
                       stats["r_meas"][2],
                       stats["cc_half"][2],
                       stats["cmpl"][1],
                       stats["redundancy"][1],
                       stats["i_over_sigma"][1],
                       stats["r_meas"][1],
                       stats["cc_half"][1],
                       stats["sig_ano"][1],
                       stats["cc_ano"][1],
                       stats["xtriage_log"].wilson_b,
                       stats["xtriage_log"].anisotropy,
                       )

        tmptmp = tmps.replace("nan",'"nan"').split()
        tmptmp[0] = '"%s"' % tmptmp[0]
        self.html_merge_plot_data.append("{%s}"%",".join(map(lambda x: '"%s":%s'%tuple(x), zip(axis_opts, tmptmp))))

        tmps = "".join(map(lambda x: "<td>%s</td>"%x, tmps.split()))
        idno = len(self.html_merge_results)
        if self.params.program == "xscale":
            table_snip = xscalelp.snip_symm_and_cell(stats["lp"]) + "\n"
            table_snip += xscalelp.snip_stats_table(stats["lp"])
        else:
            table_snip = ""
        tmps2 = """ <tr><td onClick="toggle_show2(this, 'merge-td-%d');" id="merge-td-mark-%d"">&#x25bc;</td>%s</tr>\n""" %(idno,idno,tmps)
        tmps2 += """ <tr><td style="padding: 0px;"><td colspan="25" style="display:none;padding:0px;" id="merge-td-%d"><pre style="font-size: 1.1em;">%s</pre></td></tr>""" % (idno, table_snip)

        self.html_merge_results.append(tmps2)
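
The html_merge_plot_data line above builds one JavaScript-style object literal per merge result by zipping the column labels in axis_opts with the whitespace-split, printf-formatted stats string. Below is a minimal self-contained sketch of that idea; make_plot_row and the sample labels/values are illustrative stand-ins, not part of the original class.

# Standalone sketch of the plot-row construction used in add_merge_result().
# The helper name and the sample data are made up for illustration.
def make_plot_row(labels, formatted_stats):
    tokens = formatted_stats.replace("nan", '"nan"').split()
    tokens[0] = '"%s"' % tokens[0]  # first column (cluster name) is a string
    return "{%s}" % ",".join('"%s":%s' % kv for kv in zip(labels, tokens))

if __name__ == "__main__":
    labels = "cls ClH LCV aLCV".split()  # hypothetical subset of axis_opts
    stats = "%12s %5.2f %4.1f %4.1f" % ("cluster_0001", 1.23, 0.5, float("nan"))
    print(make_plot_row(labels, stats))
    # -> {"cls":"cluster_0001","ClH":1.23,"LCV":0.5,"aLCV":"nan"}
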
Example #3
def merge_datasets(params, workdir, xds_files, cells, space_group):
    if not os.path.exists(workdir): os.makedirs(workdir)
    out = open(os.path.join(workdir, "merge.log"), "w")

    if params.program == "xscale":
        cycles = multi_merging.xscale.XscaleCycles(
            workdir,
            anomalous_flag=params.anomalous,
            d_min=params.d_min,
            d_max=params.d_max,
            reject_method=params.reject_method,
            reject_params=params.rejection,
            xscale_params=params.xscale,
            res_params=params.resolution,
            reference_file=params.reference_file,
            space_group=space_group,
            ref_mtz=params.reference.data
            if params.reference.copy_test_flag else None,
            out=out,
            nproc=params.nproc,
            batch_params=params.batch)

        unused_files, reasons = cycles.run_cycles(xds_files)
        used_files = set(xds_files).difference(set(unused_files))

        print >> out
        print >> out, " SUMMARY "
        print >> out, "========================"
        for i, files in enumerate((used_files, unused_files)):
            print >> out, "\n%6s %4d files:\n" % (
                ("Used", "Unused")[i], len(files))
            if len(files) == 0:
                continue

            maxlen_f = max(
                map(lambda f: len(os.path.relpath(f, params.workdir)), files))

            for f in files:
                cell = cells[f]
                merge_log = os.path.join(os.path.dirname(f),
                                         "merging_stats.log")
                try:
                    lines = open(merge_log).readlines()
                    resn = float(
                        filter(lambda x: x.startswith("Resolution:"),
                               lines)[0].split()[-1])
                    cmpl = float(
                        filter(lambda x: x.startswith("Completeness:"),
                               lines)[0].split()[-1].replace("%", ""))
                except:
                    resn = float("nan")
                    cmpl = float("nan")

                if i == 1:  # print reason
                    print >> out, "%-15s" % reasons.get(f, "unknown"),
                print >> out, ("%-" + str(maxlen_f) + "s") % os.path.relpath(
                    f, params.workdir), cell,
                #print >>out, "ISa=%5.1f" % correctlp.get_ISa(os.path.join(os.path.dirname(f), "CORRECT.LP")),
                print >> out, "Cmpl=%3.0f%%, Resn= %.1f" % (cmpl, resn)

        ret = []
        tkvals = lambda x: (x[-1], x[0], x[-2])  # overall, inner, outer

        for i in xrange(1, cycles.get_last_cycle_number() + 1):
            wd = os.path.join(workdir, "run_%.2d" % i)
            xscale_lp = os.path.join(wd, "XSCALE.LP")
            table = xscalelp.read_stats_table(xscale_lp)
            num_files = len(xscalelp.get_read_data(xscale_lp))
            xtriage_logfile = os.path.join(wd, "ccp4", "logfile.log")
            aniso = xds_aniso_analysis.parse_logfile(
                os.path.join(wd, "aniso.log"))
            cellinfo = cycles.cell_info_at_cycles[i]
            ret.append([
                i, wd, num_files,
                dict(cmpl=tkvals(table["cmpl"]),
                     redundancy=tkvals(table["redundancy"]),
                     i_over_sigma=tkvals(table["i_over_sigma"]),
                     r_meas=tkvals(table["r_meas"]),
                     cc_half=tkvals(table["cc_half"]),
                     sig_ano=tkvals(table["sig_ano"]),
                     cc_ano=tkvals(table["cc_ano"]),
                     drange=tkvals(table["d_range"]),
                     lp=xscale_lp,
                     xtriage_log=xtriage.XtriageLogfile(xtriage_logfile),
                     aniso=aniso,
                     lcv=cellinfo[1],
                     alcv=cellinfo[2],
                     dmin_est=cycles.dmin_est_at_cycles.get(i, float("nan")))
            ])

        xscale_lp = os.path.join(cycles.current_working_dir(), "XSCALE.LP")
        print >> out, "\nFinal statistics:\n"
        print >> out, xscalelp.snip_stats_table(xscale_lp)

        return ret

    elif params.program == "aimless":
        worker = Pointless()
        print >> out, "\nRunning pointless"
        runinfo = worker.run_copy(hklout="pointless.mtz",
                                  wdir=workdir,
                                  xdsin=xds_files,
                                  logout=os.path.join(workdir,
                                                      "pointless.log"),
                                  tolerance=30)

        # Table of file name -> Batch range
        assert len(xds_files) == len(runinfo)
        batch_info = collections.OrderedDict(
            map(lambda x: (x[0], (x[1][1:3])), zip(xds_files, runinfo)))

        cycles = multi_merging.aimless.AimlessCycles(
            workdir,
            anomalous_flag=params.anomalous,
            d_min=params.d_min,
            d_max=params.d_max,
            reject_method=params.reject_method,
            cc_cutoff=params.rejection.lpstats.pwcc.abs_cutoff,
            delta_cchalf_bin=params.rejection.delta_cchalf.bin,
            mtzin=os.path.join(workdir, "pointless.mtz"),
            batch_info=batch_info,
            out=out,
            nproc=params.nproc,
            nproc_each=params.batch.nproc_each,
            batchjobs=None)  # FIXME batchjobs
        unused_files, reasons = cycles.run_cycles(xds_files)
        used_files = set(xds_files).difference(set(unused_files))

        print >> out
        print >> out, " SUMMARY "
        print >> out, "========================"
        for i, files in enumerate((used_files, unused_files)):
            print >> out, "\n%6s %4d files:\n" % (
                ("Used", "Unused")[i], len(files))
            if len(files) == 0:
                continue

            maxlen_f = max(
                map(lambda f: len(os.path.relpath(f, params.workdir)), files))

            for f in files:
                cell = cells[f]
                merge_log = os.path.join(os.path.dirname(f),
                                         "merging_stats.log")
                try:
                    lines = open(merge_log).readlines()
                    resn = float(
                        filter(lambda x: x.startswith("Resolution:"),
                               lines)[0].split()[-1])
                    cmpl = float(
                        filter(lambda x: x.startswith("Completeness:"),
                               lines)[0].split()[-1].replace("%", ""))
                except:
                    resn = float("nan")
                    cmpl = float("nan")

                if i == 1:  # print reason
                    print >> out, "%-15s" % reasons.get(f, "unknown"),
                print >> out, ("%-" + str(maxlen_f) + "s") % os.path.relpath(
                    f, params.workdir), cell,
                print >> out, "ISa=%5.1f" % correctlp.get_ISa(
                    os.path.join(os.path.dirname(f), "CORRECT.LP")),
                print >> out, "Cmpl=%3.0f%%, Resn= %.1f" % (cmpl, resn)

        aimless_log = os.path.join(cycles.current_working_dir(), "aimless.log")
        print >> out, "\nFinal statistics:\n"
        print >> out, aimless.snip_summary(aimless_log)

        # Write summary
        table = aimless.read_summary(aimless_log)

        tkvals = lambda x: (x[0], x[1], x[2])  # overall, inner, outer
        return [
            [
                cycles.get_last_cycle_number(),
                cycles.current_working_dir(),
                len(used_files),
                dict(cmpl=tkvals(table["cmpl"]),
                     redundancy=tkvals(table["redundancy"]),
                     i_over_sigma=tkvals(table["i_over_sigma"]),
                     r_meas=tkvals(table["r_meas"]),
                     cc_half=tkvals(table["cc_half"]),
                     sig_ano=(float("nan"), ) * 3,
                     cc_ano=tkvals(table["cc_ano"]))
            ],
        ]

        #print >>out, "\nRunning aimless"
        #aimless.run_aimless(mtzin="pointless.mtz",
        #                    wdir=workdir,
        #                    anomalous=params.anomalous, d_min=params.d_min, prefix=None)

    else:
        print >> out, "Unknown program:", params.program
        return []
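
Both branches of merge_datasets() recover per-dataset resolution and completeness by grepping merging_stats.log for lines starting with "Resolution:" and "Completeness:", falling back to NaN on any failure. A hedged standalone rewrite of just that parsing step follows; the helper name and the narrowed exception handling are illustrative choices, not taken from the original.

# Illustrative helper mirroring the merging_stats.log parsing in merge_datasets().
def read_merging_stats(merge_log):
    resn = cmpl = float("nan")
    try:
        for line in open(merge_log):
            if line.startswith("Resolution:"):
                resn = float(line.split()[-1])
            elif line.startswith("Completeness:"):
                cmpl = float(line.split()[-1].replace("%", ""))
    except (IOError, ValueError):
        pass  # missing or malformed log; keep NaN, like the original try/except
    return resn, cmpl

# Hypothetical usage, matching the call site above:
# resn, cmpl = read_merging_stats(os.path.join(os.path.dirname(f), "merging_stats.log"))
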
Example #4
def merge_datasets(params, workdir, xds_files, cells, batchjobs):
    if not os.path.exists(workdir): os.makedirs(workdir)
    out = open(os.path.join(workdir, "merge.log"), "w")

    if params.program == "xscale":
        cycles = multi_merging.xscale.XscaleCycles(workdir, 
                                                   anomalous_flag=params.anomalous,
                                                   d_min=params.d_min, d_max=params.d_max, 
                                                   reject_method=params.reject_method,
                                                   reject_params=params.rejection,
                                                   xscale_params=params.xscale,
                                                   reference_file=params.reference_file,
                                                   out=out, nproc=params.nproc,
                                                   nproc_each=params.batch.nproc_each,
                                                   batchjobs=batchjobs if "deltacchalf" in params.batch.par_run else None)
        unused_files, reasons = cycles.run_cycles(xds_files)
        used_files = set(xds_files).difference(set(unused_files))

        print >>out
        print >>out, " SUMMARY "
        print >>out, "========================"
        for i, files in enumerate((used_files, unused_files)):
            print >>out, "\n%6s %4d files:\n" % (("Used", "Unused")[i], len(files))
            if len(files) == 0:
                continue

            maxlen_f = max(map(lambda f: len(os.path.relpath(f, params.workdir)), files))

            for f in files:
                cell = cells[f]
                merge_log = os.path.join(os.path.dirname(f), "merging_stats.log")
                try:
                    lines = open(merge_log).readlines()
                    resn = float(filter(lambda x:x.startswith("Resolution:"), lines)[0].split()[-1])
                    cmpl = float(filter(lambda x:x.startswith("Completeness:"), lines)[0].split()[-1].replace("%",""))
                except:
                    resn = float("nan")
                    cmpl = float("nan")

                if i == 1: # print reason
                    print >>out, "%-15s"%reasons.get(f, "unknown"),
                print >>out, ("%-"+str(maxlen_f)+"s")%os.path.relpath(f, params.workdir), cell,
                #print >>out, "ISa=%5.1f" % correctlp.get_ISa(os.path.join(os.path.dirname(f), "CORRECT.LP")),
                print >>out, "Cmpl=%3.0f%%, Resn= %.1f" % (cmpl, resn)

        ret = []
        tkvals = lambda x: (x[-1], x[0], x[-2]) # overall, inner, outer

        for i in xrange(1, cycles.get_last_cycle_number()+1):
            wd = os.path.join(workdir, "run_%.2d"%i)
            xscale_lp = os.path.join(wd, "XSCALE.LP")
            table = xscalelp.read_stats_table(xscale_lp)
            num_files = len(xscalelp.get_read_data(xscale_lp))
            xtriage_logfile = os.path.join(wd, "ccp4", "logfile.log")
            ret.append([i, wd, num_files,
                        dict(cmpl=tkvals(table["cmpl"]),
                             redundancy=tkvals(table["redundancy"]),
                             i_over_sigma=tkvals(table["i_over_sigma"]),
                             r_meas=tkvals(table["r_meas"]),
                             cc_half=tkvals(table["cc_half"]),
                             sig_ano=tkvals(table["sig_ano"]),
                             cc_ano=tkvals(table["cc_ano"]),
                             drange=tkvals(table["d_range"]),
                             lp=xscale_lp,
                             xtriage_log=xtriage.XtriageLogfile(xtriage_logfile))
                        ])

        xscale_lp = os.path.join(cycles.current_working_dir(), "XSCALE.LP")
        print >>out, "\nFinal statistics:\n"
        print >>out, xscalelp.snip_stats_table(xscale_lp)

        return ret

    elif params.program == "aimless":
        worker = Pointless()
        print >>out, "\nRunning pointless"
        runinfo = worker.run_copy(hklout="pointless.mtz", wdir=workdir,
                                  xdsin=xds_files,
                                  logout=os.path.join(workdir, "pointless.log"),
                                  tolerance=30)

        # Table of file name -> Batch range
        assert len(xds_files) == len(runinfo)
        batch_info = collections.OrderedDict(map(lambda x: (x[0], (x[1][1:3])), zip(xds_files, runinfo)))

        cycles = multi_merging.aimless.AimlessCycles(workdir, 
                                                     anomalous_flag=params.anomalous,
                                                     d_min=params.d_min, d_max=params.d_max, 
                                                     reject_method=params.reject_method,
                                                     cc_cutoff=params.rejection.lpstats.pwcc.abs_cutoff,
                                                     delta_cchalf_bin=params.rejection.delta_cchalf.bin,
                                                     mtzin=os.path.join(workdir, "pointless.mtz"),
                                                     batch_info=batch_info,
                                                     out=out, nproc=params.nproc,
                                                     nproc_each=params.batch.nproc_each,
                                                     batchjobs=batchjobs if "deltacchalf" in params.batch.par_run else None)
        unused_files, reasons = cycles.run_cycles(xds_files)
        used_files = set(xds_files).difference(set(unused_files))

        print >>out
        print >>out, " SUMMARY "
        print >>out, "========================"
        for i, files in enumerate((used_files, unused_files)):
            print >>out, "\n%6s %4d files:\n" % (("Used", "Unused")[i], len(files))
            if len(files) == 0:
                continue

            maxlen_f = max(map(lambda f: len(os.path.relpath(f, params.workdir)), files))

            for f in files:
                cell = cells[f]
                merge_log = os.path.join(os.path.dirname(f), "merging_stats.log")
                try:
                    lines = open(merge_log).readlines()
                    resn = float(filter(lambda x:x.startswith("Resolution:"), lines)[0].split()[-1])
                    cmpl = float(filter(lambda x:x.startswith("Completeness:"), lines)[0].split()[-1].replace("%",""))
                except:
                    resn = float("nan")
                    cmpl = float("nan")

                if i == 1: # print reason
                    print >>out, "%-15s"%reasons.get(f, "unknown"),
                print >>out, ("%-"+str(maxlen_f)+"s")%os.path.relpath(f, params.workdir), cell,
                print >>out, "ISa=%5.1f" % correctlp.get_ISa(os.path.join(os.path.dirname(f), "CORRECT.LP")),
                print >>out, "Cmpl=%3.0f%%, Resn= %.1f" % (cmpl, resn)

        aimless_log = os.path.join(cycles.current_working_dir(), "aimless.log")
        print >>out, "\nFinal statistics:\n"
        print >>out, aimless.snip_summary(aimless_log)

        # Write summary
        table = aimless.read_summary(aimless_log)

        tkvals = lambda x: (x[0], x[1], x[2]) # overall, inner, outer
        return [[cycles.get_last_cycle_number(), cycles.current_working_dir(), len(used_files),
                dict(cmpl=tkvals(table["cmpl"]),
                     redundancy=tkvals(table["redundancy"]),
                     i_over_sigma=tkvals(table["i_over_sigma"]),
                     r_meas=tkvals(table["r_meas"]),
                     cc_half=tkvals(table["cc_half"]),
                     sig_ano=(float("nan"),)*3,
                     cc_ano=tkvals(table["cc_ano"]))], ]

        #print >>out, "\nRunning aimless"
        #aimless.run_aimless(mtzin="pointless.mtz",
        #                    wdir=workdir,
        #                    anomalous=params.anomalous, d_min=params.d_min, prefix=None)

    else:
        print >>out, "Unknown program:", params.program
        return []
Example #5
def run(csvin, prefix, rootdir, datadir=None):
    html_head = """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<script>
  var toggle_show = function(caller, obj_id) {
    var trg = document.getElementById(obj_id);
    if (trg.style.display === 'block' || trg.style.display === '') {
      trg.style.display = 'none';
      trg.style.padding = '0px';
      caller.innerHTML= '&#x25bc;';
    } else {
      trg.style.display = '';
      trg.style.padding = '7px';
      caller.innerHTML= '&#x25b2;';
    }
  }
</script>

<style>
pre {
    font-family: Consolas, 'Courier New', Courier, Monaco, monospace;
}

.cells td, .dataset_table th,
.merge td {
    font-size: 1em;
    /* border: 1px solid #98bf21; */
    padding: 4px 7px 4px 7px;
}

.cells th,
.merge th {
    font-size: 1.1em;
    text-align: center;
    padding: 4px;
    background-color: #A7C942;
    color: #ffffff;
}

/*
.cells tr.alt td {
    color: #000000;
    background-color: #EAF2D3;
}
*/

.merge tr:nth-child(4n+3),
.merge tr:nth-child(4n),
.cells tr:nth-child(odd) {
    color: #000000;
    background-color: #EAF2D3;
}
.merge tr:nth-child(4n+1),
.merge tr:nth-child(4n+2),
.cells tr:nth-child(even) {
    /* color: #f8fbf1; */
    background-color: #f8fbf1;
}

</style>

</head>

<body>
<h1>KAMO.AUTO_MULTI_MERGE report</h1>
<div align="right">
workdir: %(wd)s<br />
created on %(cdate)s
</div>
<hr>

<table class="merge">
<tr>
 <th colspan="2">Sample</th>
 <th>#Collected</th>
 <th>#Processed</th>
 <th>#Mergeable</th>
 <th>Symmetry</th>
 <th>Unit cell</th>
 <th>Cmpl(all)</th>
 <th>Mult(all)</th>
 <th><i>d</i><sub>min</sub></th>
</tr>
""" % dict(wd=rootdir, cdate=time.strftime("%Y-%m-%d %H:%M:%S"))

    ofs = open(os.path.join(rootdir, "report.html"), "w")
    ofs.write(html_head)

    samples = auto_multi_merge.read_sample_info(csvin, datadir)

    for name in samples:
        workdir_rel = "%s%s" % (prefix, replace_forbidden_chars(name).replace(
            " ", "_"))
        workdir = os.path.join(rootdir, workdir_rel)
        print workdir
        assert os.path.isdir(workdir)
        topdirs = samples[name][0]
        ncol, dcol, npro, dpro, nmrg, dmrg = 0, 0, 0, 0, 0, 0
        symm, cell = "?", "?"
        rephtml = "#"
        cmpl, redun, dmin = 0, 0, float("nan")
        symm_group_info = "n/a"  # now useless
        deg_dict = {}
        for topdir in topdirs:
            for root, dirnames, filenames in os.walk(topdir):
                if "XDS.INP" not in filenames: continue
                print "Checking", name, root
                deg = total_deg_from_xds_inp(os.path.join(root, "XDS.INP"))
                deg_dict[root] = deg
                ncol += 1
                dcol += deg
                if "XDS_ASCII.HKL_noscale" in filenames:
                    npro += 1
                    dpro += deg

        mrg_lst = os.path.join(workdir, "formerge.lst")
        if os.path.isfile(mrg_lst):
            mrg_dirs = map(lambda x: os.path.dirname(x.strip()),
                           open(mrg_lst).readlines())
            nmrg = len(mrg_dirs)
            dmrg = sum(map(lambda x: deg_dict[x], mrg_dirs))

            beam_plot_png = os.path.join(workdir, "beam_plot.png")
            #if not os.path.isfile(beam_plot_png):
            #    beam_direction_plot.run_from_args([mrg_lst, 'plot_out="%s"'%beam_plot_png])

        mrg_log = os.path.join(workdir, "multi_merge.log")
        if os.path.isfile(mrg_log):
            flag_read = False
            flag_first = True
            symm_group_info = ""
            for l in open(mrg_log):
                if flag_first and "members:" in l:
                    flag_read = True
                    flag_first = False
                elif flag_read and ("members:" in l or l.strip() == ""):
                    flag_read = False
                elif flag_read and "Members=" not in l:
                    symm_group_info += l

                if "group_choice=" in l:
                    symm, cell = re.search("symmetry= ([^\(]+) \((.+)\)",
                                           l).groups()

        mrg_dirs = glob.glob(os.path.join(workdir, "*final/"))
        best_result_loc, best_table_snip = "N/A", ""
        if mrg_dirs:
            mrg_dir = mrg_dirs[0]
            dmin = float(re.search("_([0-9\.]+)A_final/", mrg_dir).group(1))
            cls_dat = glob.glob(
                os.path.join(mrg_dir, "*", "*_cluster_summary.dat"))[0]
            tmp = open(cls_dat).readlines()[3].split()
            cmpl, redun = float(tmp[3]), float(tmp[4])
            rephtml = os.path.relpath(os.path.join(mrg_dir, "report.html"),
                                      rootdir)
            summary_dat = os.path.join(mrg_dir, "cluster_summary.dat")
            best_result = auto_multi_merge.choose_best_result(
                summary_dat, null_out())
            if best_result:
                best_result_loc = os.path.dirname(best_result)
                lp = os.path.join(best_result_loc, "XSCALE.LP")
                best_table_snip = xscalelp.snip_symm_and_cell(
                    lp) + "\n" + xscalelp.snip_stats_table(lp)
                best_result_loc = os.path.relpath(best_result_loc,
                                                  rootdir)  # to show in html

        html_tr = """\
<tr>
 <td onClick="toggle_show(this, 'sample-td-%(name)s');" id="sample-td-mark-%(name)s"">&#x25bc;</td>
 <td><a href="%(rephtml)s">%(name)s</a></td>
 <td>%(ncol)d (%(dcol).0f&deg;)</td>
 <td>%(npro)d (%(dpro).0f&deg;)</td>
 <td>%(nmrg)d (%(dmrg).0f&deg;)</td>
 <td>%(symm)s</td>
 <td>%(cell)s</td>
 <td>%(cmpl).1f</td>
 <td>%(redun).1f</td>
 <td>%(dmin).1f</td>
</tr>
<tr>
 <td style="padding: 0px;"></td>
  <td colspan="9" style="display:none;padding:0px;" id="sample-td-%(name)s">
  BEST RESULT: <a href="%(best_result_loc)s">%(best_result_loc)s</a> <br><br>
<pre>
 %(best_table_snip)s
</pre>
  
 </td>
</tr>

""" % locals()
        ofs.write(html_tr)
        ofs.flush()
        #break

    ofs.write("\n</table>\n")
    ofs.write("\n</body></html>\n")
    ofs.close()

    print "Done!"
    print "firefox", os.path.join(rootdir, "report.html")
Example #6
    def add_merge_result(self, workdir, clh, LCV, aLCV, xds_files, num_files,
                         stats):
        axis_opts = "cls ClH   LCV aLCV ds.all ds.used  Cmpl Redun I/sigI Rmeas CC1/2 Cmpl.ou Red.ou I/sig.ou Rmeas.ou CC1/2.ou Cmpl.in Red.in I/sig.in Rmeas.in CC1/2.in SigAno.in CCano.in WilsonB aniso.best aniso.worst dmin.est".split(
        )

        cls = os.path.relpath(workdir, self.params.workdir)
        tmps = "%12s %5.2f %4.1f %4.1f %6d %7d %5.1f %5.1f %6.2f %5.1f %5.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %7.1f %6.1f % 8.2f % 8.1f %8.1f %9.1f %8.1f %7.2f %.2f %.2f %.2f"
        tmps = tmps % (
            cls,
            clh,
            LCV,
            aLCV,
            len(xds_files),
            num_files,
            stats["cmpl"][0],
            stats["redundancy"][0],
            stats["i_over_sigma"][0],
            stats["r_meas"][0],
            stats["cc_half"][0],
            stats["cmpl"][2],
            stats["redundancy"][2],
            stats["i_over_sigma"][2],
            stats["r_meas"][2],
            stats["cc_half"][2],
            stats["cmpl"][1],
            stats["redundancy"][1],
            stats["i_over_sigma"][1],
            stats["r_meas"][1],
            stats["cc_half"][1],
            stats["sig_ano"][1],
            stats["cc_ano"][1],
            stats["xtriage_log"].wilson_b,
            #stats["xtriage_log"].anisotropy,
            stats["aniso"]["d_min_best"],
            stats["aniso"]["d_min_worst"],
            stats["dmin_est"],
        )

        tmptmp = tmps.replace("nan", '"nan"').split()
        tmptmp[0] = '"%s"' % tmptmp[0]
        self.html_merge_plot_data.append("{%s}" % ",".join(
            map(lambda x: '"%s":%s' % tuple(x), zip(axis_opts, tmptmp))))

        tmps = "".join(map(lambda x: "<td>%s</td>" % x, tmps.split()))
        idno = len(self.html_merge_results)
        if self.params.program == "xscale":
            table_snip = xscalelp.snip_symm_and_cell(stats["lp"]) + "\n"
            table_snip += xscalelp.snip_stats_table(stats["lp"])
            if stats["aniso"]:
                table_snip += "\nAnisotropy:\n"
                if stats["aniso"]["has_anisotropy"]:
                    if stats["aniso"]["aniso_cutoffs"]:
                        lab_maxlen = max(
                            len("direction"),
                            max(
                                map(lambda x: len(x[1]),
                                    stats["aniso"]["aniso_cutoffs"])))
                        table_snip += ("%" + str(lab_maxlen) +
                                       "s B_eigen Resol(CC1/2=0.5)\n"
                                       ) % "direction"  # XXX if not 0.5?
                        for _, lab, reso, eig in stats["aniso"][
                                "aniso_cutoffs"]:
                            table_snip += ("%" + str(lab_maxlen) +
                                           "s %7.2f %.2f\n") % (lab, eig, reso)
                    else:
                        table_snip += " Anisotropy analysis failed. Check the logfile.\n"
                else:
                    table_snip += " No anisotropy in this symmetry.\n"

        else:
            table_snip = ""
        tmps2 = """ <tr><td onClick="toggle_show2(this, 'merge-td-%d');" id="merge-td-mark-%d"">&#x25bc;</td>%s</tr>\n""" % (
            idno, idno, tmps)
        tmps2 += """ <tr><td style="padding: 0px;"><td colspan="27" style="display:none;padding:0px;" id="merge-td-%d"><pre>%s</pre></td></tr>""" % (
            idno, table_snip)

        self.html_merge_results.append(tmps2)
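
The anisotropy table in Example #6 sizes its first column at run time: it takes the longest direction label, splices that width into a "%<N>s" format, and reuses the format for the header and every row. A small self-contained sketch of the same trick with invented rows follows; the labels and numbers are placeholders, not real output.

# Dynamic-width column formatting as used for the anisotropy cutoff table above.
rows = [("a*", -12.30, 1.85), ("b*", 4.10, 2.10), ("c* (unique axis)", 8.20, 2.45)]  # made-up (label, B_eigen, resolution)

lab_maxlen = max(len("direction"), max(len(lab) for lab, _, _ in rows))
table = ("%" + str(lab_maxlen) + "s B_eigen Resol(CC1/2=0.5)\n") % "direction"
for lab, eig, reso in rows:
    table += ("%" + str(lab_maxlen) + "s %7.2f %.2f\n") % (lab, eig, reso)
print(table)
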