Example #1
    def __str__(self):
        """Return a string representation of the results.

        Returns:
          str: A multi-line summary of the scoring tables and the best solution.

        """
        output = []
        output.append("Scoring individual symmetry elements")
        d = self.as_dict()
        output.append(
            table_utils.format(self.sym_ops_table(d),
                               has_header=True,
                               delim="  "))

        output.append("Scoring all possible sub-groups")
        output.append(
            table_utils.format(self.subgroups_table(d),
                               has_header=True,
                               delim="  "))

        output.append(
            "Best solution: %s" %
            self.best_solution.subgroup["best_subsym"].space_group_info())
        output.append("Unit cell: %s" %
                      self.best_solution.subgroup["best_subsym"].unit_cell())
        output.append("Reindex operator: %s" %
                      (self.best_solution.subgroup["cb_op_inp_best"] *
                       self.cb_op_inp_min))
        output.append("Laue group probability: %.3f" %
                      self.best_solution.likelihood)
        output.append("Laue group confidence: %.3f" %
                      self.best_solution.confidence)
        return "\n".join(output)
Example #2
def show_times_vs_complexity(times, header):
    from libtbx import table_utils  # used for the final formatting below

    table_header = [
        "# HKL", "cpu-time", "simple-tm", "fft-time", "R(%)", "gpu_d-tm",
        "gpu_f-tm", "R(%)", "d-min(angstrom)"
    ]
    table_data = [table_header]
    for i, t, d, g, gf, gfR, s, f, fR in times:
        table_row = [
            "%.0f" % i,
            "%.2f" % t,
            "%.2f" % s,
            "%.2f" % f,
            "%.2f" % fR,
            "%.2f" % g,
            "%.2f" % gf,
            "%.5f" % gfR,
            "%5.2f" % d
        ]
        table_data.append(table_row)
    print(table_utils.format(table_data,
                             has_header=True,
                             justify="left",
                             prefix="| ",
                             postfix=" |"))
Example #3
 def print_table(self):
  from libtbx import table_utils
  from libtbx.str_utils import format_value

  table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg","Rsigma","Tsigma"]
  table_data = []
  table_data.append(table_header)
  sort_radii = flex.sort_permutation(flex.double(self.radii))
  tile_rmsds = flex.double()
  radial_sigmas = flex.double(len(self.tiles) // 4)
  tangen_sigmas = flex.double(len(self.tiles) // 4)
  for idx in range(len(self.tiles) // 4):
    x = sort_radii[idx]
    if self.tilecounts[x] < 3:
      wtaveg = 0.0
      radial = (0,0)
      tangential = (0,0)
      rmean,tmean,rsigma,tsigma=(0,0,1,1)
    else:
      wtaveg = self.weighted_average_angle_deg_from_tile(x)
      radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x)

    radial_sigmas[x]=rsigma
    tangen_sigmas[x]=tsigma
    table_data.append(  [
      format_value("%3d",   x),
      format_value("%7.2f", self.radii[x]),
      format_value("%6d",  self.tilecounts[x]),
      format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
      format_value("%5.2f", self.tile_rmsd[x]),
      format_value("%5.2f", self.mean_cv[x][0]),
      format_value("%5.2f", self.mean_cv[x][1]),
      format_value("%5.2f", matrix.col(self.mean_cv[x]).length()),
      format_value("%6.2f", wtaveg),
      format_value("%6.2f", rsigma),
      format_value("%6.2f", tsigma),
    ])
  table_data.append([""]*len(table_header))
  rstats = flex.mean_and_variance(radial_sigmas,self.tilecounts.as_double())
  tstats = flex.mean_and_variance(tangen_sigmas,self.tilecounts.as_double())
  table_data.append(  [
      format_value("%3s",   "ALL"),
      format_value("%s", ""),
      format_value("%6d",  self.overall_N),
      format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
      format_value("%5.2f", self.overall_rmsd),
      format_value("%5.2f", self.overall_cv[0]),
      format_value("%5.2f", self.overall_cv[1]),
      format_value("%5.2f", flex.mean(flex.double([matrix.col(cv).length() for cv in self.mean_cv]))),
      format_value("%s", ""),
      format_value("%6.2f", rstats.mean()),
      format_value("%6.2f", tstats.mean()),
    ])

  print()
  print(table_utils.format(table_data, has_header=1, justify='center', delim=" "))
Example #5
    def info(self):
        from libtbx import table_utils

        U = matrix.sqr(self.experiment.crystal.get_U())
        B = matrix.sqr(self.experiment.crystal.get_B())

        a_star_ = U * B * a_star
        b_star_ = U * B * b_star
        c_star_ = U * B * c_star

        Binvt = B.inverse().transpose()

        a_ = U * Binvt * a
        b_ = U * Binvt * b
        c_ = U * Binvt * c

        names = self.experiment.goniometer.get_names()
        axes = self.experiment.goniometer.get_axes()
        rows = [['Experimental axis', 'a*', 'b*', 'c*']]
        rows.append([names[0]] + [
            '%.3f' % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append(['Beam'] + [
            '%.3f' % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append([names[2]] + [
            '%.3f' % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        print(
            'Angles between reciprocal cell axes and principal experimental axes:'
        )
        print(table_utils.format(rows=rows, has_header=True))
        print()

        rows = [['Experimental axis', 'a', 'b', 'c']]
        rows.append([names[0]] + [
            '%.3f' % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append(['Beam'] + [
            '%.3f' % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append([names[2]] + [
            '%.3f' % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_, b_, c_)
        ])
        print('Angles between unit cell axes and principal experimental axes:')
        print(table_utils.format(rows=rows, has_header=True))
        print()
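Note: Examples #5 and #6 reference several free names (`a`, `b`, `c`, `a_star`, `b_star`, `c_star`, and in #5 also `smallest_angle`) that live at module level in the source file. `smallest_angle` appears verbatim in Example #6; the axis definitions below are a plausible reconstruction for running the snippet, not the confirmed originals.

# Assumed module-level helpers for Examples #5/#6 (the axis vectors are a
# guess; smallest_angle is copied from Example #6).
from scitbx import matrix

a = a_star = matrix.col((1, 0, 0))
b = b_star = matrix.col((0, 1, 0))
c = c_star = matrix.col((0, 0, 1))

def smallest_angle(angle):
    # Fold an angle in degrees onto [0, 90] so antiparallel axes compare equal.
    return min(angle, 180 - angle)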
Example #6
  def info(self):
    from libtbx import table_utils

    U = self.experiment.crystal.get_U()
    B = self.experiment.crystal.get_B()

    a_star_ = U * B * a_star
    b_star_ = U * B * b_star
    c_star_ = U * B * c_star

    Binvt = B.inverse().transpose()

    a_ = U * Binvt * a
    b_ = U * Binvt * b
    c_ = U * Binvt * c

    def smallest_angle(angle):
      return min(angle, 180-angle)

    names = self.experiment.goniometer.get_names()
    axes = self.experiment.goniometer.get_axes()
    rows = [['Experimental axis', 'a*', 'b*', 'c*']]
    rows.append([names[0]] + [
      '%.3f' %smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
      for axis in (a_star_, b_star_, c_star_)])
    rows.append(['Beam'] + [
      '%.3f' %smallest_angle(axis.angle(self.s0, deg=True))
      for axis in (a_star_, b_star_, c_star_)])
    rows.append([names[2]] + [
      '%.3f' %smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
      for axis in (a_star_, b_star_, c_star_)])
    print('Angles between reciprocal cell axes and principal experimental axes:')
    print(table_utils.format(rows=rows, has_header=True))
    print()

    rows = [['Experimental axis', 'a', 'b', 'c']]
    rows.append([names[0]] + [
      '%.3f' %smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
      for axis in (a_, b_, c_)])
    rows.append(['Beam'] + [
      '%.3f' %smallest_angle(axis.angle(self.s0, deg=True))
      for axis in (a_, b_, c_)])
    rows.append([names[2]] + [
      '%.3f' %smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
      for axis in (a_, b_, c_)])
    print('Angles between unit cell axes and principal experimental axes:')
    print(table_utils.format(rows=rows, has_header=True))
    print()
Example #7
def print_table(spotfinder_result, keys, out=None):
    if out is None:
        import sys
        out = sys.stdout
    from libtbx import table_utils
    rows = table(spotfinder_result, keys)
    print(table_utils.format(rows, has_header=True), file=out)
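Examples #7, #8, #10, #16 and #18 all share the same output-redirection idiom: default to `sys.stdout`, but let callers pass any writable stream. The `print >> out` form is Python 2 only; a minimal Python 3 sketch of the same pattern (with `rows` standing in for whatever `table(...)` returns):

import sys

def print_table(rows, out=None):
    # Default to stdout, but accept any writable stream (file, io.StringIO, ...).
    if out is None:
        out = sys.stdout
    from libtbx import table_utils
    print(table_utils.format(rows, has_header=True), file=out)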
Example #8
    def labelit_printout(self, out=None):
        from libtbx import table_utils
        if out is None:
            import sys
            out = sys.stdout

        table_data = [[
            "Solution", "Metric fit", "rmsd", "min/max cc", "#spots",
            "lattice", "unit_cell", "volume", "cb_op"
        ]]
        for item in self:
            uc = item.refined_crystal.get_unit_cell()
            P = uc.parameters()
            min_max_cc_str = "-/-"
            if item.min_cc is not None and item.max_cc is not None:
                min_max_cc_str = "%.3f/%.3f" % (item.min_cc, item.max_cc)
            if item.recommended: status = '*'
            else: status = ''
            table_data.append([
                "%1s%7d" % (status, item.setting_number),
                "%(max_angular_difference)6.4f" % item,
                "%5.3f" % item.rmsd, min_max_cc_str,
                "%d" % item.Nmatches,
                "%(bravais)s" % item,
                "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % P,
                "%.0f" % uc.volume(),
                "%s" % item['cb_op_inp_best'].as_abc()
            ])

        print(table_utils.format(table_data,
                                 has_header=1,
                                 justify='right',
                                 delim=' '),
              file=out)
        print("* = recommended solution", file=out)
Example #9
    def show_experiments(self, experiments, reflections, d_min=None):
        if d_min is not None:
            reciprocal_lattice_points = reflections["rlp"]
            d_spacings = 1 / reciprocal_lattice_points.norms()
            reflections = reflections.select(d_spacings > d_min)
        for i_expt, expt in enumerate(experiments):
            logger.info("model %i (%i reflections):" %
                        (i_expt + 1,
                         (reflections["id"] == i_expt).count(True)))
            logger.info(expt.crystal)

        indexed_flags = reflections.get_flags(reflections.flags.indexed)
        imageset_id = reflections["imageset_id"]
        rows = [["Imageset", "# indexed", "# unindexed", "% indexed"]]
        for i in range(flex.max(imageset_id) + 1):
            imageset_indexed_flags = indexed_flags.select(imageset_id == i)
            indexed_count = imageset_indexed_flags.count(True)
            unindexed_count = imageset_indexed_flags.count(False)
            rows.append([
                str(i),
                str(indexed_count),
                str(unindexed_count),
                "{:.1%}".format(indexed_count /
                                (indexed_count + unindexed_count)),
            ])
        from libtbx import table_utils

        logger.info(
            table_utils.format(rows,
                               has_header=True,
                               prefix="| ",
                               postfix=" |"))
Example #10
def print_table(stats, perm=None, n_rows=None, out=None):
    if out is None:
        out = sys.stdout

    rows = table(stats, perm=perm, n_rows=n_rows)
    print(table_utils.format(rows, has_header=True, prefix="| ", postfix=" |"),
          file=out)
Example #11
    def get_formatted_table(self, experiment_count_per_bin,
                            total_experiment_count):
        '''Produce a table with experiment count over resolution bins'''

        table_data = [["Bin", "Resolution Range", "Lattices", "Accepted (%)"]]

        for i_bin in self.resolution_binner.range_used():
            col_legend = '%-13s' % self.resolution_binner.bin_legend(
                i_bin=i_bin,
                show_bin_number=False,
                show_bin_range=False,
                show_d_range=True,
                show_counts=False)
            exp_count_abs = '%8d' % experiment_count_per_bin[i_bin]
            exp_count_percent = '%5.2f' % (100. *
                                           experiment_count_per_bin[i_bin] /
                                           total_experiment_count)
            table_data.append(
                ['%3d' % i_bin, col_legend, exp_count_abs, exp_count_percent])

        table_data.append([""] * len(table_data[0]))
        table_data.append(["All", "", '%8d' % total_experiment_count])

        return "\n          Image Statistics\n" + table_utils.format(
            table_data, has_header=1, justify='center', delim=' ')
Example #12
    def make_log_beam_stop(self, log_message, flags):
        self.norma_work = self.norma_work.map_to_asu()
        self.miller_obs = self.miller_obs.map_to_asu()
        flags = flags.map_to_asu()

        data = self.miller_obs.select(~flags.data())
        evals = self.norma_work.select(~flags.data())

        header = ("Index", "d-spacing", "F_obs", "E-value", "centric")
        table = "No outliers were found"
        rows = []
        if data.data().size() > 0:
            if data.data().size() < 500:
                for hkl, d, fobs, e, c in zip(
                    data.indices(), data.d_spacings().data(), data.data(), evals.data(), data.centric_flags().data()
                ):
                    this_row = [str(hkl), "%4.2f" % (d), "%6.1f" % (fobs), "%4.2f" % (math.sqrt(e)), str(c)]
                    rows.append(this_row)

                table = table_utils.format(
                    [header] + rows, comments=None, has_header=True, separate_rows=False, prefix="| ", postfix=" |"
                )
            else:
                table = """Over 500 outliers have been found."""

        final = log_message + "\n" + table + "\n \n"
        return final
Example #13
    def make_log_wilson(self, log_message, flags, p_values):
        """ produces a 'nice' table of outliers and their reason for
    being an outlier using basic or extreme wilson statistics """

        header = ("Index", "E_obs", "Centric", "p-value")
        flags = flags.common_set(self.norma_work)
        p_vals = p_values.common_set(self.norma_work)

        rogues = self.norma_work.select(~flags.data())
        p_vals = p_vals.select(~flags.data())

        rows = []
        table = "No outliers were found."

        for hkl, e, c, p in zip(rogues.indices(), rogues.data(), rogues.centric_flags().data(), p_vals.data()):
            if e > 0:
                this_row = [str(hkl), "%5.3f" % (math.sqrt(e)), str(c), "%5.3e" % (p)]
            else:
                this_row = [str(hkl), "%5.3f" % (0), str(c), " inf"]
            rows.append(this_row)
        if len(rows) > 0:
            table = table_utils.format(
                [header] + rows, comments=None, has_header=True, separate_rows=False, prefix="| ", postfix=" |"
            )
        final = log_message + "\n" + table
        return final
Example #14
    def make_log_wilson(self, log_message, flags, p_values):
        """ produces a 'nice' table of outliers and their reason for
    being an outlier using basic or extreme wilson statistics """

        header = ("Index", "E_obs", "Centric", "p-value")
        flags = flags.common_set(self.norma_work)
        p_vals = p_values.common_set(self.norma_work)

        rogues = self.norma_work.select(~flags.data())
        p_vals = p_vals.select(~flags.data())

        rows = []
        table = "No outliers were found."

        for hkl, e, c, p in zip(rogues.indices(), rogues.data(),
                                rogues.centric_flags().data(), p_vals.data()):
            if e > 0:
                this_row = [
                    str(hkl),
                    "%5.3f" % (math.sqrt(e)),
                    str(c),
                    "%5.3e" % (p)
                ]
            else:
                this_row = [str(hkl), "%5.3f" % (0), str(c), " inf"]
            rows.append(this_row)
        if len(rows) > 0:
            table = table_utils.format([header] + rows,
                                       comments=None,
                                       has_header=True,
                                       separate_rows=False,
                                       prefix='| ',
                                       postfix=' |')
        final = log_message + "\n" + table
        return final
Example #15
def show_times_vs_complexity(times, header):
  from libtbx import table_utils  # used for the final formatting below

  table_header = [ "# HKL",
           "cpu-time",
           "simple-tm",
           "fft-time","R(%)",
           "gpu_d-tm",
           "gpu_f-tm","R(%)",
           "d-min(angstrom)" ]
  table_data = [table_header]
  for i,t,d,g,gf,gfR,s,f,fR in times:
    table_row = ["%.0f"%i,"%.2f"%t,"%.2f"%s,"%.2f"%f,"%.2f"%fR,
                 "%.2f"%g,"%.2f"%gf,"%.5f"%gfR,"%5.2f"%d]
    table_data.append(table_row)
  print(table_utils.format(table_data, has_header=True, justify="left",
                           prefix="| ", postfix=" |"))
Example #16
  def labelit_printout(self,out=None):
    from libtbx import table_utils
    if out is None:
      import sys
      out = sys.stdout

    table_data = [["Solution","Metric fit","rmsd", "min/max cc", "#spots",
                   "lattice","unit_cell","volume", "cb_op"]]
    for item in self:
      uc = item.refined_crystal.get_unit_cell()
      P = uc.parameters()
      min_max_cc_str = "-/-"
      if item.min_cc is not None and item.max_cc is not None:
        min_max_cc_str = "%.3f/%.3f" %(item.min_cc, item.max_cc)
      table_data.append(['%6d'%item.setting_number,
                         "%(max_angular_difference)6.4f"%item,
                         "%5.3f"%item.rmsd,
                         min_max_cc_str,
                         "%d"%item.Nmatches,
                         "%(bravais)s"%item,
                         "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"%P,
                         "%.0f"%uc.volume(),
                         "%s"%item['cb_op_inp_best'].as_abc()])

    print(table_utils.format(
        table_data, has_header=1, justify='right', delim=' '), file=out)
Example #17
  def show(self):
    from libtbx import table_utils
    self.info()

    rows = []
    names = self.experiment.goniometer.get_names()

    for angles, solutions in self.unique_solutions.items():
      for (v1, v2) in solutions:
        rows.append(
          (self._vector_as_str(v1), self._vector_as_str(v2),
           '% 7.2f' %angles[0], '% 7.2f' %angles[1],
           ))
    rows = [('v1', 'v2', names[1], names[0])] + \
           sorted(rows)
    print('Independent solutions:')
    print(table_utils.format(rows=rows, has_header=True))
Example #18
def print_table(stats, perm=None, n_rows=None, out=None):
  if out is None:
    import sys
    out = sys.stdout
  from libtbx import table_utils

  rows = table(stats, perm=perm, n_rows=n_rows)
  print(table_utils.format(
    rows, has_header=True, prefix="| ", postfix=" |"), file=out)
Example #21
  def show(self):
    from libtbx import table_utils
    self.info()

    rows = []
    names = self.experiment.goniometer.get_names()

    for angles, vector_pairs in self.unique_solutions.items():
      settings_str = '[%s]' %(
        ', '.join(
          '(%s, %s)' %(self._vector_as_str(v1), self._vector_as_str(v2))
          for v1, v2 in vector_pairs))
      rows.append((
        settings_str,
        '% 7.2f' %angles[0], '% 7.2f' %angles[1],
      ))
    rows = [('Settings', names[1], names[0])] + \
           sorted(rows)
    print('Independent solutions:')
    print(table_utils.format(rows=rows, has_header=True))
Example #22
  def show(self):
    from libtbx import table_utils
    self.info()

    rows = []
    names = self.experiment.goniometer.get_names()

    space_group = self.experiment.crystal.get_space_group()
    reciprocal = self.frame == 'reciprocal'
    for angles, vector_pairs in self.unique_solutions.items():
      v1, v2 = list(vector_pairs)[0]
      rows.append((
        describe(v1, space_group, reciprocal=reciprocal),
        describe(v2, space_group, reciprocal=reciprocal),
        '% 7.3f' %angles[0], '% 7.3f' %angles[1],
      ))
    rows = [('Primary axis', 'Secondary axis', names[1], names[0])] + \
           sorted(rows)
    print('Independent solutions:')
    print(table_utils.format(rows=rows, has_header=True))
Example #23
  def write_output(self):

    rows = [["cluster_id", "# datasets", "height", "datasets"]]
    for cid in sorted(self._cluster_dict.keys()):
      cluster = self._cluster_dict[cid]
      datasets = cluster['datasets']
      rows.append([str(cid), str(len(datasets)),
                   '%.2f' %cluster['height'], ' '.join(['%s'] * len(datasets)) % tuple(datasets)])

    with open('intensity_clustering.txt', 'w') as f:
      from libtbx import table_utils
      print(table_utils.format(
        rows, has_header=True, prefix="|", postfix="|"), file=f)
Example #24
 def get_table(self):
   from libtbx import table_utils
   rows = [["dataset", "batches", "delta_cc_i", "sigma"]]
   labels = self._labels()
   normalised_score = self._normalised_delta_cc_i()
   perm = flex.sort_permutation(self.delta_cc)
   for i in perm:
     bmin = flex.min(self.batches[i].data())
     bmax = flex.max(self.batches[i].data())
     rows.append(
       [str(labels[i]), '%i to %i' %(bmin, bmax),
        '% .3f' %self.delta_cc[i], '% .2f' %normalised_score[i]])
   return table_utils.format(rows, has_header=True, prefix="|", postfix="|")
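Example #24 (like #3, #27 and #37) emits rows in sorted order via `flex.sort_permutation`, which returns the indices that would sort the array rather than sorting in place. A stripped-down sketch of that pattern, assuming cctbx's `scitbx.array_family.flex` and invented data:

from scitbx.array_family import flex

scores = flex.double([0.8, 0.1, 0.5])
labels = ["a", "b", "c"]
perm = flex.sort_permutation(scores)  # indices of `scores` in ascending order
rows = [["dataset", "score"]]
for i in perm:
    rows.append([labels[i], "%.2f" % scores[i]])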
Example #25
    def select_and_transform(self, matches_cut_off=0.75):
        ## hopsa
        max_cc = -1.0
        location = 0
        table_data = []
        for ii in range(len(self.nice_cb_ops)):
            table_data.append([
                self.nice_cb_ops[ii].as_hkl(),
                "%4.3f" % (self.cc_values[ii]),
                "%4.1f" % (self.matches[ii]), '   '
            ])

            if self.matches[ii] >= matches_cut_off:
                if max_cc < self.cc_values[ii]:
                    max_cc = self.cc_values[ii]
                    location = ii

        legend = ('Operator', 'Correlation', 'matches (%)', 'choice')
        table_data[location][3] = ' <--- '
        self.table = table_utils.format([legend] + table_data,
                                        comments=None,
                                        has_header=True,
                                        separate_rows=False,
                                        prefix='| ',
                                        postfix=' |')
        print(file=self.out)
        print("Reference analyses", file=self.out)
        print("   The following reindexing operators have been found:",
              file=self.out)
        print(file=self.out)
        print(self.table, file=self.out)
        print(file=self.out)
        if str(self.nice_cb_ops[location].as_hkl()) == "h,k,l":
            print("The data doesn't need to be reindexed! Indexing is consistent between these datasets.",
                  file=self.out)
        else:
            print("If the data is reindexed with operator (%s), the correlation of" %
                  (self.nice_cb_ops[location].as_hkl()), file=self.out)
            print("the intensities to the reference data is maximized. ", file=self.out)
            if self.file_name is not None:
                print(self.file_name, file=self.out)
                print("This can be done for instance with:", file=self.out)
                print("  phenix.reflection_file_converter %s --change_of_basis=\"%s\" <output_options> " %
                      (self.file_name, self.nice_cb_ops[location].as_hkl()), file=self.out)
        print("-------------------------------------------------------------------------------",
              file=self.out)
        ##  change things in primitive setting

        transform_b = self.set_b_ori.change_basis(
            self.set_b_to_niggli).change_basis(
                self.ops_in_niggli_setting[location]).change_basis(
                    self.set_b_to_niggli.inverse()).map_to_asu(
                    ).set_observation_type(self.set_b_ori)
        return (transform_b)
Example #26
  def select_and_transform(self,
                           matches_cut_off=0.75
                           ):
    ## hopsa
    max_cc=-1.0
    location = 0
    table_data=[]
    for ii in range(len(self.nice_cb_ops)):
      table_data.append(
        [self.nice_cb_ops[ii].as_hkl(),
         "%4.3f"%(self.cc_values[ii]),
         "%4.1f"%(self.matches[ii]),
         '   ']
        )

      if self.matches[ii]>=matches_cut_off:
        if max_cc<self.cc_values[ii]:
          max_cc = self.cc_values[ii]
          location = ii

    legend = ('Operator', 'Correlation', 'matches (%)', 'choice')
    table_data[location][3]=' <--- '
    self.table = table_utils.format([legend]+table_data,
                                       comments=None,
                                       has_header=True,
                                       separate_rows=False,
                                       prefix='| ',
                                       postfix=' |')
    print(file=self.out)
    print("Reference analyses", file=self.out)
    print("   The following reindexing operators have been found:", file=self.out)
    print(file=self.out)
    print(self.table, file=self.out)
    print(file=self.out)
    if str(self.nice_cb_ops[location].as_hkl()) == "h,k,l":
      print("The data doesn't need to be reindexed! Indexing is consistent between these datasets.",
            file=self.out)
    else:
      print("If the data is reindexed with operator (%s), the correlation of" % (self.nice_cb_ops[location].as_hkl()),
            file=self.out)
      print("the intensities to the reference data is maximized. ", file=self.out)
      if self.file_name is not None:
        print(self.file_name, file=self.out)
        print("This can be done for instance with:", file=self.out)
        print("  phenix.reflection_file_converter %s --change_of_basis=\"%s\" <output_options> " % (self.file_name, self.nice_cb_ops[location].as_hkl()),
              file=self.out)
    print("-------------------------------------------------------------------------------",
          file=self.out)
    ##  change things in primitive setting

    transform_b = self.set_b_ori.change_basis( self.set_b_to_niggli ).change_basis(
      self.ops_in_niggli_setting[location]  ).change_basis(
      self.set_b_to_niggli.inverse() ).map_to_asu().set_observation_type(
      self.set_b_ori)
    return ( transform_b )
Example #27
    def __str__(self):
        rows = []
        rows.append([
            "unit_cell",
            "volume",
            "volume score",
            "#indexed",
            "% indexed",
            "% indexed score",
            "rmsd_xy",
            "rmsd_xy score",
            "overall score",
        ])

        score_by_fraction_indexed = self.score_by_fraction_indexed()
        score_by_volume = self.score_by_volume()
        score_by_rmsd_xy = self.score_by_rmsd_xy()
        combined_scores = self.combined_scores()

        perm = flex.sort_permutation(combined_scores)

        rmsd_x, rmsd_y, rmsd_z = flex.vec3_double(
            s.rmsds for s in self.all_solutions).parts()
        rmsd_xy = flex.sqrt(flex.pow2(rmsd_x) + flex.pow2(rmsd_y))

        for i in perm:
            s = self.all_solutions[i]
            rows.append([
                format(
                    s.crystal.get_unit_cell(),
                    "{:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {:.1f}",
                ),
                "%.0f" % s.crystal.get_unit_cell().volume(),
                "%.2f" % score_by_volume[i],
                str(s.n_indexed),
                "%.0f" % (s.fraction_indexed * 100),
                "%.2f" % score_by_fraction_indexed[i],
                "%.2f" % rmsd_xy[i],
                "%.2f" % score_by_rmsd_xy[i],
                "%.2f" % combined_scores[i],
            ])

        from libtbx import table_utils

        return table_utils.format(rows=rows, has_header=True)
Example #28
def _create_flag_count_table(table):
    """Generate a summary table of flag values in a reflection table.

    :param table: A reflection table
    :returns:     A string of the formatted flags table
    """

    # Calculate the counts of entries that match each flag
    numpy_flags = table["flags"].as_numpy_array()
    flag_count = {
        flag: numpy.sum(numpy_flags & value != 0)
        for value, flag in table.flags.values.items()
    }

    # Work out the numeric-value order of the flags
    flag_order = sorted(table.flags.values.values(), key=lambda x: x.real)

    # Build the actual table
    flag_rows = [["Flag", "Count", "%"]]
    max_count_len = max(5, len(str(max(flag_count.values()))))
    last_flag = None
    for flag in flag_order:
        indent = ""
        # As a hint for reading, indent any 'summary' flags.
        # A summary flag is any flag which overlaps with the previous one.
        if last_flag and (last_flag.real & flag.real):
            indent = "  "
        last_flag = flag
        # Add the row to the table we're building
        flag_rows.append(
            [
                indent + flag.name,
                "{:{:d}d}".format(flag_count[flag], max_count_len),
                "{:5.01f}".format(100 * flag_count[flag] / len(table)),
            ]
        )

    # Build the array of output strings
    text = []
    text.append("Reflection flags:")
    text.append(
        table_utils.format(flag_rows, has_header=True, prefix="| ", postfix=" |")
    )
    return "\n".join(text)
Example #29
    def write_output(self):

        rows = [["cluster_id", "# datasets", "height", "datasets"]]
        for cid in sorted(self._cluster_dict.keys()):
            cluster = self._cluster_dict[cid]
            datasets = cluster['datasets']
            rows.append([
                str(cid),
                str(len(datasets)),
                '%.2f' % cluster['height'],
                ' '.join(['%s'] * len(datasets)) % tuple(datasets)
            ])

        with open('intensity_clustering.txt', 'w') as f:
            from libtbx import table_utils
            print(table_utils.format(rows,
                                     has_header=True,
                                     prefix="|",
                                     postfix="|"),
                  file=f)
Example #30
    def labelit_printout(self):
        from libtbx import table_utils

        table_data = [[
            "Solution",
            "Metric fit",
            "rmsd",
            "min/max cc",
            "#spots",
            "lattice",
            "unit_cell",
            "volume",
            "cb_op",
        ]]
        for item in self:
            uc = item.refined_crystal.get_unit_cell()
            P = uc.parameters()
            min_max_cc_str = "-/-"
            if item.min_cc is not None and item.max_cc is not None:
                min_max_cc_str = "%.3f/%.3f" % (item.min_cc, item.max_cc)
            if item.recommended:
                status = "*"
            else:
                status = ""
            table_data.append([
                "%1s%7d" % (status, item.setting_number),
                "%(max_angular_difference)6.4f" % item,
                "%5.3f" % item.rmsd,
                min_max_cc_str,
                "%d" % item.Nmatches,
                "%(bravais)s" % item,
                "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % P,
                "%.0f" % uc.volume(),
                "%s" % item["cb_op_inp_best"].as_abc(),
            ])

        output = table_utils.format(table_data,
                                    has_header=1,
                                    justify="right",
                                    delim=" ")
        output = output + "\n* = recommended solution\n"
        return output
Example #31
    def __str__(self):
        rows = []
        rows.append([
            "unit_cell", "volume", "n_indexed", "fraction_indexed",
            "likelihood"
        ])

        for i, s in enumerate(self.all_solutions):
            s = self.all_solutions[i]
            rows.append([
                format(
                    s.crystal.get_unit_cell(),
                    "{:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {:.1f}",
                ),
                "%.0f" % s.crystal.get_unit_cell().volume(),
                str(s.n_indexed),
                "%.0f" % (s.fraction_indexed * 100),
                "%.2f" % s.model_likelihood,
            ])

        from libtbx import table_utils

        return table_utils.format(rows=rows, has_header=True)
Example #32
    def make_log_beam_stop(self, log_message, flags):
        self.norma_work = self.norma_work.map_to_asu()
        self.miller_obs = self.miller_obs.map_to_asu()
        flags = flags.map_to_asu()

        data = self.miller_obs.select(~flags.data())
        evals = self.norma_work.select(~flags.data())

        header = ("Index", "d-spacing", "F_obs", "E-value", "centric")
        table = "No outliers were found"
        rows = []
        if data.data().size() > 0:
            if data.data().size() < 500:
                for hkl, d, fobs, e, c in zip(data.indices(),
                                              data.d_spacings().data(),
                                              data.data(), evals.data(),
                                              data.centric_flags().data()):
                    this_row = [
                        str(hkl),
                        "%4.2f" % (d),
                        "%6.1f" % (fobs),
                        "%4.2f" % (math.sqrt(e)),
                        str(c)
                    ]
                    rows.append(this_row)

                table = table_utils.format([header] + rows,
                                           comments=None,
                                           has_header=True,
                                           separate_rows=False,
                                           prefix='| ',
                                           postfix=' |')
            else:
                table = """Over 500 outliers have been found."""

        final = log_message + "\n" + table + "\n \n"
        return final
Example #33
    def make_log_model(self, log_message, flags, ll_gain, p_values, e_obs, e_calc, sigmaa, plot_out=None):
        header = ("Index", "d-spacing", "E_obs", "E_model", "Score", "p-value", "sigmaa", "centric")
        table = "No outliers were found"
        rows = []
        rogues = e_obs.select(~flags.data())
        p_array = p_values.select(~flags.data())
        ll_array = ll_gain.select(~flags.data())
        ec_array = e_calc.select(~flags.data())
        sa_array = sigmaa.select(~flags.data())

        centric_flags = self.miller_obs.centric_flags().select(~flags.data())
        if rogues.indices().size() > 0:
            if rogues.indices().size() < 500:
                sigmas = rogues.sigmas()
                if rogues.sigmas() is None:
                    sigmas = rogues.d_spacings().data() * 0 + 10.0

                for hkl, d, eo, ec, llg, p, sa, c, s, e in zip(
                    rogues.indices(),
                    rogues.d_spacings().data(),
                    rogues.data(),
                    ec_array.data(),
                    ll_array.data(),
                    p_array.data(),
                    sa_array.data(),
                    centric_flags.data(),
                    sigmas,
                    rogues.epsilons().data().as_double(),
                ):

                    this_row = [
                        str(hkl),
                        "%4.2f" % (d),
                        "%6.3f" % (eo),
                        "%6.3f" % (ec),
                        "%5.2f" % (llg),
                        "%5.3e" % (p),
                        "%4.3f" % (sa),
                        str(c),
                    ]
                    rows.append(this_row)

                    if plot_out is not None:
                        outlier_plots.plotit(
                            fobs=eo,
                            sigma=s,
                            fcalc=ec,
                            alpha=sa,
                            beta=1.0 - sa * sa,
                            epsilon=e,
                            centric=c,
                            out=plot_out,
                            plot_title=str(hkl),
                        )

                table = table_utils.format(
                    [header] + rows, comments=None, has_header=True, separate_rows=False, prefix="| ", postfix=" |"
                )

            else:
                table = "More then 500 outliers were found. This is very suspicious. Check data or limits."

        final = log_message + "\n" + table
        return final
Example #34
def run(args):
  import libtbx.load_env
  usage = "%s [options] datablock.json strong.pickle" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    read_reflections=True,
    read_datablocks=True,
    read_experiments=True,
    phil=phil_scope,
    check_format=False,
    epilog=help_message)
  from libtbx.utils import Sorry

  params, options = parser.parse_args(show_diff_phil=False)
  reflections = flatten_reflections(params.input.reflections)
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)

  if not any([reflections, experiments, datablocks]):
    parser.print_help()
    return

  if len(reflections) != 1:
    raise Sorry('exactly 1 reflection table must be specified')
  if len(datablocks) != 1:
    if experiments:
      if len(experiments.imagesets()) != 1:
        raise Sorry('exactly 1 datablock must be specified')
      imageset = experiments.imagesets()[0]
    else:
      raise Sorry('exactly 1 datablock must be specified')
  else:
    imageset = datablocks[0].extract_imagesets()[0]

  reflections = reflections[0]

  if params.id is not None:
    reflections = reflections.select(reflections['id'] == params.id)

  stats = per_image_analysis.stats_imageset(
    imageset, reflections, resolution_analysis=params.resolution_analysis,
    plot=params.individual_plots)
  per_image_analysis.print_table(stats)

  from libtbx import table_utils
  overall_stats = per_image_analysis.stats_single_image(
    imageset, reflections, resolution_analysis=params.resolution_analysis)
  rows = [
    ("Overall statistics", ""),
    ("#spots", "%i" %overall_stats.n_spots_total),
    ("#spots_no_ice", "%i" %overall_stats.n_spots_no_ice),
    #("total_intensity", "%.0f" %overall_stats.total_intensity),
    ("d_min", "%.2f" %overall_stats.estimated_d_min),
    ("d_min (distl method 1)", "%.2f (%.2f)" %(
      overall_stats.d_min_distl_method_1, overall_stats.noisiness_method_1)),
    ("d_min (distl method 2)", "%.2f (%.2f)" %(
      overall_stats.d_min_distl_method_1, overall_stats.noisiness_method_1)),
    ]
  print(table_utils.format(rows, has_header=True, prefix="| ", postfix=" |"))

  if params.json is not None:
    import json
    with open(params.json, 'w') as fp:
      json.dump(stats.__dict__, fp)
  if params.plot is not None:
    per_image_analysis.plot_stats(stats, filename=params.plot)
Example #35
  if col_count1==0: raise Exception("no reflections in any bins")
  for i_bin in miller_set_avg.binner().range_used():
    col_count = '%8d' % results.count_frames(
      appropriate_min_corr, miller_set_avg.binner().selection(i_bin))
    col_legend = '%-13s' % miller_set_avg.binner().bin_legend(
      i_bin=i_bin, show_bin_number=False, show_bin_range=False,
      show_d_range=True, show_counts=False)
    xpercent = results.count_frames(appropriate_min_corr, miller_set_avg.binner().selection(i_bin))/float(col_count1)
    percent = '%5.2f'% (100.*xpercent)
    table_data.append(['%3d' % i_bin, col_legend, col_count,percent])

  n_frames = (scaler.frames['cc'] > appropriate_min_corr).count(True)
  table_data.append([""] * len(table_data[0]))
  table_data.append(["All", "", '%8d' % n_frames])
  print(file=out)
  print(table_utils.format(
    table_data, has_header=1, justify='center', delim=' '), file=out)

  reindexing_ops = {"h,k,l":0} # get a list of all reindexing ops for this dataset
  if work_params.merging.reverse_lookup is not None:
    for key in scaler.reverse_lookup:
      if reindexing_ops.get(scaler.reverse_lookup[key], None) is None:
        reindexing_ops[scaler.reverse_lookup[key]]=0
      reindexing_ops[scaler.reverse_lookup[key]]+=1

  from xfel.cxi.cxi_cc import run_cc
  for key in reindexing_ops.keys():
    run_cc(work_params,reindexing_op=key,output=out)

  return result

if (__name__ == "__main__"):
Example #36
  def print_table_2(self):

    from libtbx import table_utils
    from libtbx.str_utils import format_value
    table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg",
                    "Rsigma","Tsigma","Transx","Transy","DelRot","Rotdeg"]
    table_data = []
    table_data.append(table_header)
    sort_radii = flex.sort_permutation(flex.double(self.radii))
    tile_rmsds = flex.double()
    radial_sigmas = flex.double(len(self.tiles) // 4)
    tangen_sigmas = flex.double(len(self.tiles) // 4)

    wtaveg = [0.]*(len(self.tiles) // 4)
    for x in range(len(self.tiles) // 4):
      if self.tilecounts[x] >= 3:
        wtaveg[x] = self.weighted_average_angle_deg_from_tile(x, self.post_mean_cv[x], self.correction_vector_x,
          self.correction_vector_y)

    def add_line_to_table(idx):
      x = sort_radii[idx]
      if self.tilecounts[x] < 3:
        radial = (0,0)
        tangential = (0,0)
        rmean,tmean,rsigma,tsigma=(0,0,1,1)
      else:
        radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x,
          self.post_mean_cv[x],
          self.correction_vector_x, self.correction_vector_y,
          self.model_calcx-self.refined_cntr_x,
          self.model_calcy-self.refined_cntr_y)

      table_data.append(  [
        format_value("%3d",   x),
        format_value("%7.2f", self.radii[x]),
        format_value("%6d",  self.tilecounts[x]),
        format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
        format_value("%5.2f", self.tile_rmsd[x]),
        format_value("%5.2f", self.post_mean_cv[x][0]),
        format_value("%5.2f", self.post_mean_cv[x][1]),
        format_value("%5.2f", matrix.col(self.post_mean_cv[x]).length()),
        format_value("%6.2f", wtaveg[x]),
        format_value("%6.2f", rsigma),
        format_value("%6.2f", tsigma),
        format_value("%5.2f", self.tile_translations.x[2*x]),
        format_value("%5.2f", self.tile_translations.x[2*x+1]),
        "",
        format_value("%5.2f", self.tile_rotations.x[x])
      ])

    # order the printout by sensor, starting from innermost
    new_order = []
    mutable = list(sort_radii)
    idx = 0
    unit_translation_increments = flex.double(len(mutable)*2)
    while 1:
      if idx >= len(mutable): break
      if self.radii[mutable[idx]]==0.0:
        idx+=1; continue
      tile_select = mutable[idx]
      if tile_select%2 == 0:
        # even
        sensor_tiles = (tile_select, tile_select+1)
        sensor_ptrs = (idx, mutable.index(tile_select+1))
      else:
        # odd
        sensor_tiles = (tile_select-1, tile_select)
        sensor_ptrs = ( mutable.index(tile_select-1), idx)

      if self.tilecounts[mutable[sensor_ptrs[0]]] + self.tilecounts[mutable[sensor_ptrs[1]]] < \
         self.params.min_count:
         idx+=1
         continue

      sum_weight = 0.0
      sum_wt_x = 0.0
      sum_wt_y = 0.0
      for iptr, ptr in enumerate(sensor_ptrs):
        if ptr in new_order: break
        if self.tilecounts[mutable[ptr]] > 0:
          #print mutable[ptr]
          add_line_to_table (ptr)
          sum_weight += self.tilecounts[mutable[ptr]]
          sum_wt_x += self.tilecounts[mutable[ptr]] * self.tile_translations.x[2*mutable[ptr]]
          sum_wt_y += self.tilecounts[mutable[ptr]] * self.tile_translations.x[2*mutable[ptr]+1]
        new_order.append(ptr)
        if iptr==1:
          #print
          sensor_line = [""]*len(table_header)
          sensor_line[2]="%6d"%sum_weight
          sensor_line[11]="%5.2f"%round(sum_wt_x/sum_weight,0)
          sensor_line[12]="%5.2f"%round(sum_wt_y/sum_weight,0)
          unit_translation_increments[2*mutable[ptr]-2] = round(sum_wt_x/sum_weight,0)
          unit_translation_increments[2*mutable[ptr]-1] = round(sum_wt_y/sum_weight,0)
          unit_translation_increments[2*mutable[ptr]] = round(sum_wt_x/sum_weight,0)
          unit_translation_increments[2*mutable[ptr]+1] = round(sum_wt_y/sum_weight,0)
          table_data.append(sensor_line)
          table_data.append([""]*len(table_header))
      idx+=1
      if idx>=len(mutable): break

    print "Grouped by sensor, listing lowest Q-angle first:"
    print table_utils.format(table_data,has_header=1,justify='center',delim=" ")
    return unit_translation_increments
Example #37
  def print_table(self):
    from libtbx import table_utils
    from libtbx.str_utils import format_value
    table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg",
                    "Rsigma","Tsigma","Transx","Transy","DelRot","Rotdeg"]
    table_data = []
    table_data.append(table_header)
    sort_radii = flex.sort_permutation(flex.double(self.radii))
    tile_rmsds = flex.double()
    radial_sigmas = flex.double(len(self.tiles) // 4)
    tangen_sigmas = flex.double(len(self.tiles) // 4)

    wtaveg = [0.]*(len(self.tiles) // 4)
    for x in range(len(self.tiles) // 4):
      if self.tilecounts[x] >= 3:
        wtaveg[x] = self.weighted_average_angle_deg_from_tile(x, self.post_mean_cv[x], self.correction_vector_x,
          self.correction_vector_y)

    for idx in range(len(self.tiles) // 4):
      x = sort_radii[idx]
      if self.tilecounts[x] < 3:
        radial = (0,0)
        tangential = (0,0)
        rmean,tmean,rsigma,tsigma=(0,0,1,1)
      else:
        radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x,
          self.post_mean_cv[x],
          self.correction_vector_x, self.correction_vector_y,
          self.model_calcx-self.refined_cntr_x,
          self.model_calcy-self.refined_cntr_y)

      # paired rotations of two ASICS on the same sensor
      if x%2==0:
        # previous method: delrot = "%5.2f"%(wtaveg[x]-wtaveg[x+1])
        delrot = "%5.2f"%(self.tile_rotations.x[x] - self.tile_rotations.x[1+x])
      else:
        delrot = ""

      radial_sigmas[x]=rsigma
      tangen_sigmas[x]=tsigma
      table_data.append(  [
        format_value("%3d",   x),
        format_value("%7.2f", self.radii[x]),
        format_value("%6d",  self.tilecounts[x]),
        format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
        format_value("%5.2f", self.tile_rmsd[x]),
        format_value("%5.2f", self.post_mean_cv[x][0]),
        format_value("%5.2f", self.post_mean_cv[x][1]),
        format_value("%5.2f", matrix.col(self.post_mean_cv[x]).length()),
        format_value("%6.2f", wtaveg[x]),
        format_value("%6.2f", rsigma),
        format_value("%6.2f", tsigma),
        format_value("%5.2f", self.tile_translations.x[2*x]),
        format_value("%5.2f", self.tile_translations.x[2*x+1]),
        copy.copy(delrot),
        format_value("%5.2f", self.tile_rotations.x[x])
      ])
    table_data.append([""]*len(table_header))
    rstats = flex.mean_and_variance(radial_sigmas,self.tilecounts.as_double())
    tstats = flex.mean_and_variance(tangen_sigmas,self.tilecounts.as_double())
    table_data.append(  [
        format_value("%3s",   "ALL"),
        format_value("%s", ""),
        format_value("%6d",  self.overall_N),
        format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
        format_value("%5.2f", self.overall_rmsd),
        format_value("%5.2f", self.overall_cv[0]),
        format_value("%5.2f", self.overall_cv[1]),
        format_value("%5.2f", flex.mean(flex.double([cv.length() for cv in self.post_mean_cv]))),
        format_value("%s", ""),
        format_value("%6.2f", rstats.mean()),
        format_value("%6.2f", tstats.mean()),
        format_value("%s", ""),
        format_value("%s", ""),
        #root mean squared difference in same-sensor (adjacent)-ASIC rotations, weighted by minimum # of observations on either ASIC of the sensor
        format_value("%5.2f", math.sqrt(
           flex.sum(
             flex.double([
               (min([self.tilecounts[2*isen],self.tilecounts[2*isen+1]])) *
                    (self.tile_rotations.x[2*isen] - self.tile_rotations.x[1+2*isen])**2
               for isen in range(len(self.tiles) // 8)]
             )
           )/
           flex.sum(
             flex.double(
                [(min([self.tilecounts[2*isen],self.tilecounts[2*isen+1]])) for isen in range(len(self.tiles) // 8)]
             )
           )
        )),
        format_value("%s", ""),
    ])

    print()
    print(table_utils.format(table_data, has_header=1, justify='center', delim=" "))
Example #38
def show_reflections(
    reflections,
    show_intensities=False,
    show_profile_fit=False,
    show_centroids=False,
    show_all_reflection_data=False,
    show_flags=False,
    max_reflections=None,
    show_identifiers=False,
):

    text = []

    import collections
    from orderedset import OrderedSet

    formats = collections.OrderedDict(
        (
            ("miller_index", "%i, %i, %i"),
            ("d", "%.2f"),
            ("qe", "%.3f"),
            ("dqe", "%.3f"),
            ("id", "%i"),
            ("imageset_id", "%i"),
            ("panel", "%i"),
            ("flags", "%i"),
            ("background.mean", "%.1f"),
            ("background.dispersion", "%.1f"),
            ("background.mse", "%.1f"),
            ("background.sum.value", "%.1f"),
            ("background.sum.variance", "%.1f"),
            ("intensity.prf.value", "%.1f"),
            ("intensity.prf.variance", "%.1f"),
            ("intensity.sum.value", "%.1f"),
            ("intensity.sum.variance", "%.1f"),
            ("intensity.cor.value", "%.1f"),
            ("intensity.cor.variance", "%.1f"),
            ("intensity.scale.value", "%.1f"),
            ("intensity.scale.variance", "%.1f"),
            ("Ih_values", "%.1f"),
            ("lp", "%.3f"),
            ("num_pixels.background", "%i"),
            ("num_pixels.background_used", "%i"),
            ("num_pixels.foreground", "%i"),
            ("num_pixels.valid", "%i"),
            ("partial_id", "%i"),
            ("partiality", "%.4f"),
            ("profile.correlation", "%.3f"),
            ("profile.rmsd", "%.3f"),
            ("xyzcal.mm", "%.2f, %.2f, %.2f"),
            ("xyzcal.px", "%.2f, %.2f, %.2f"),
            ("delpsical.rad", "%.3f"),
            ("delpsical2", "%.3f"),
            ("delpsical.weights", "%.3f"),
            ("xyzobs.mm.value", "%.2f, %.2f, %.2f"),
            ("xyzobs.mm.variance", "%.4e, %.4e, %.4e"),
            ("xyzobs.px.value", "%.2f, %.2f, %.2f"),
            ("xyzobs.px.variance", "%.4f, %.4f, %.4f"),
            ("s1", "%.4f, %.4f, %.4f"),
            ("s2", "%.4f, %.4f, %.4f"),
            ("shoebox", "%.1f"),
            ("rlp", "%.4f, %.4f, %.4f"),
            ("zeta", "%.3f"),
            ("x_resid", "%.3f"),
            ("x_resid2", "%.3f"),
            ("y_resid", "%.3f"),
            ("y_resid2", "%.3f"),
            ("kapton_absorption_correction", "%.3f"),
            ("kapton_absorption_correction_sigmas", "%.3f"),
            ("inverse_scale_factor", "%.3f"),
            ("inverse_scale_factor_variance", "%.3f"),
        )
    )

    for rlist in reflections:
        from dials.array_family import flex
        from dials.algorithms.shoebox import MaskCode

        foreground_valid = MaskCode.Valid | MaskCode.Foreground
        text.append("")
        text.append("Reflection list contains %i reflections" % (len(rlist)))

        if len(rlist) == 0:
            continue

        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if k in formats and "%" not in formats.get(k, "%s"):
                # Allow blanking out of entries that wouldn't make sense
                rows.append(
                    [
                        k,
                        formats.get(k, "%s"),
                        formats.get(k, "%s"),
                        formats.get(k, "%s"),
                    ]
                )
            elif type(col) in (flex.double, flex.int, flex.size_t):
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append(
                    [
                        k,
                        formats.get(k, "%s") % flex.min(col),
                        formats.get(k, "%s") % flex.max(col),
                        formats.get(k, "%s") % flex.mean(col),
                    ]
                )
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if isinstance(col, flex.miller_index):
                    col = col.as_vec3_double()
                rows.append(
                    [
                        k,
                        formats.get(k, "%s") % col.min(),
                        formats.get(k, "%s") % col.max(),
                        formats.get(k, "%s") % col.mean(),
                    ]
                )
            elif isinstance(col, flex.shoebox):
                rows.append([k, "", "", ""])
                si = col.summed_intensity().observed_value()
                rows.append(
                    [
                        "  summed I",
                        formats.get(k, "%s") % flex.min(si),
                        formats.get(k, "%s") % flex.max(si),
                        formats.get(k, "%s") % flex.mean(si),
                    ]
                )
                x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                rows.append(
                    [
                        "  N pix",
                        formats.get(k, "%s") % flex.min(bbox_sizes),
                        formats.get(k, "%s") % flex.max(bbox_sizes),
                        formats.get(k, "%s") % flex.mean(bbox_sizes),
                    ]
                )
                fore_valid = col.count_mask_values(foreground_valid).as_double()
                rows.append(
                    [
                        "  N valid foreground pix",
                        formats.get(k, "%s") % flex.min(fore_valid),
                        formats.get(k, "%s") % flex.max(fore_valid),
                        formats.get(k, "%s") % flex.mean(fore_valid),
                    ]
                )

        text.append(
            table_utils.format(rows, has_header=True, prefix="| ", postfix=" |")
        )

        if show_flags:
            text.append(_create_flag_count_table(rlist))

        if show_identifiers:
            if rlist.experiment_identifiers():
                text.append(
                    """Experiment identifiers id-map values:\n%s"""
                    % (
                        "\n".join(
                            "id:"
                            + str(k)
                            + " -> experiment identifier:"
                            + str(rlist.experiment_identifiers()[k])
                            for k in rlist.experiment_identifiers().keys()
                        )
                    )
                )

    intensity_keys = (
        "miller_index",
        "d",
        "intensity.prf.value",
        "intensity.prf.variance",
        "intensity.sum.value",
        "intensity.sum.variance",
        "background.mean",
        "profile.correlation",
        "profile.rmsd",
    )

    profile_fit_keys = ("miller_index", "d")

    centroid_keys = (
        "miller_index",
        "d",
        "xyzcal.mm",
        "xyzcal.px",
        "xyzobs.mm.value",
        "xyzobs.mm.variance",
        "xyzobs.px.value",
        "xyzobs.px.variance",
    )

    keys_to_print = OrderedSet()

    if show_intensities:
        for k in intensity_keys:
            keys_to_print.add(k)
    if show_profile_fit:
        for k in profile_fit_keys:
            keys_to_print.add(k)
    if show_centroids:
        for k in centroid_keys:
            keys_to_print.add(k)
    if show_all_reflection_data:
        for k in formats:
            keys_to_print.add(k)

    def format_column(key, data, format_strings=None):
        if isinstance(data, flex.vec3_double):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.parts())
            ]
        elif isinstance(data, flex.miller_index):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.as_vec3_double().parts())
            ]
        elif isinstance(data, flex.size_t):
            c_strings = [data.as_int().as_string(format_strings[0].strip())]
        elif isinstance(data, flex.shoebox):
            x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
            bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
            c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
            key += " (N pix)"
        else:
            c_strings = [data.as_string(format_strings[0].strip())]

        column = flex.std_string()
        max_element_lengths = [c.max_element_length() for c in c_strings]
        for i in range(len(c_strings[0])):

            column.append(
                ("%%%is" % len(key))
                % ", ".join(
                    ("%%%is" % max_element_lengths[j]) % c_strings[j][i]
                    for j in range(len(c_strings))
                )
            )
        return column

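    # Note: the summary block below reuses `rlist` from the preceding
    # `for rlist in reflections` loop, i.e. it prints detailed rows for the
    # last reflection list only.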
    if keys_to_print:
        keys = [k for k in keys_to_print if k in rlist]
        if max_reflections is not None:
            max_reflections = min(len(rlist), max_reflections)
        else:
            max_reflections = len(rlist)

        columns = []

        for k in keys:
            columns.append(
                format_column(k, rlist[k], format_strings=formats[k].split(","))
            )

        text.append("")
        text.append("Printing %i of %i reflections:" % (max_reflections, len(rlist)))
        line = []
        for j in range(len(columns)):
            key = keys[j]
            if key == "shoebox":
                key += " (N pix)"
            width = max(len(key), columns[j].max_element_length())
            line.append("%%%is" % width % key)
        text.append(" ".join(line))
        for i in range(max_reflections):
            line = []
            for j in range(len(columns)):
                line.append(columns[j][i])
            text.append(" ".join(line))

    return "\n".join(text)
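
A standalone sketch of the column-alignment idea used by format_column above: each vector component is formatted separately, every element is right-padded to the widest string in its component so the commas line up, and the finished cell is padded to at least the header width. Plain Python lists stand in for the flex arrays; format_vec3_column is a hypothetical name, not part of dials.

def format_vec3_column(key, rows, fmt="%.2f"):
    # Format each of the three components down the column.
    parts = [[fmt % r[i] for r in rows] for i in range(3)]
    # Right-justify every element to the widest string in its component.
    widths = [max(len(s) for s in p) for p in parts]
    lines = [
        ", ".join(s.rjust(w) for s, w in zip(row, widths))
        for row in zip(*parts)
    ]
    # Pad the whole cell to at least the header width, as format_column does.
    width = max(len(key), max(len(line) for line in lines))
    return [line.rjust(width) for line in lines]

print("\n".join(format_vec3_column("xyzcal.mm", [(1.0, 22.5, 3.125), (10.0, 2.0, 0.5)])))
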
Example #39
    def print_table(self):
        from libtbx import table_utils
        from libtbx.str_utils import format_value

        table_header = [
            "Tile",
            "Dist",
            "Nobs",
            "aRmsd",
            "Rmsd",
            "delx",
            "dely",
            "disp",
            "rotdeg",
            "Rsigma",
            "Tsigma",
            "Transx",
            "Transy",
            "DelRot",
        ]
        table_data = []
        table_data.append(table_header)
        sort_radii = flex.sort_permutation(flex.double(self.radii))
        tile_rmsds = flex.double()
        radial_sigmas = flex.double(64)
        tangen_sigmas = flex.double(64)

        wtaveg = [0.0] * 64
        for x in xrange(64):
            if self.tilecounts[x] >= 3:
                wtaveg[x] = self.weighted_average_angle_deg_from_tile(
                    x, self.post_mean_cv[x], self.correction_vector_x, self.correction_vector_y
                )

        for idx in xrange(64):
            x = sort_radii[idx]
            if self.tilecounts[x] < 3:
                radial = (0, 0)
                tangential = (0, 0)
                rmean, tmean, rsigma, tsigma = (0, 0, 1, 1)
            else:
                radial, tangential, rmean, tmean, rsigma, tsigma = get_radial_tangential_vectors(self, x)

            # paired rotations of two ASICS on the same sensor
            if x % 2 == 0:
                delrot = "%5.2f" % (wtaveg[x] - wtaveg[x + 1])
            else:
                delrot = ""

            radial_sigmas[x] = rsigma
            tangen_sigmas[x] = tsigma
            table_data.append(
                [
                    format_value("%3d", x),
                    format_value("%7.2f", self.radii[x]),
                    format_value("%6d", self.tilecounts[x]),
                    format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
                    format_value("%5.2f", self.tile_rmsd[x]),
                    format_value("%5.2f", self.post_mean_cv[x][0]),
                    format_value("%5.2f", self.post_mean_cv[x][1]),
                    format_value("%5.2f", matrix.col(self.post_mean_cv[x]).length()),
                    format_value("%6.2f", wtaveg[x]),
                    format_value("%6.2f", rsigma),
                    format_value("%6.2f", tsigma),
                    format_value("%5.2f", self.x[2 * x]),
                    format_value("%5.2f", self.x[2 * x + 1]),
                    copy.copy(delrot),
                ]
            )
        table_data.append([""] * len(table_header))
        rstats = flex.mean_and_variance(radial_sigmas, self.tilecounts.as_double())
        tstats = flex.mean_and_variance(tangen_sigmas, self.tilecounts.as_double())
        table_data.append(
            [
                format_value("%3s", "ALL"),
                format_value("%s", ""),
                format_value("%6d", self.overall_N),
                format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
                format_value("%5.2f", self.overall_rmsd),
                format_value("%5.2f", self.overall_cv[0]),
                format_value("%5.2f", self.overall_cv[1]),
                format_value("%5.2f", flex.mean(flex.double([cv.length() for cv in self.post_mean_cv]))),
                format_value("%s", ""),
                format_value("%6.2f", rstats.mean()),
                format_value("%6.2f", tstats.mean()),
                format_value("%s", ""),
                format_value("%s", ""),
                format_value("%s", ""),
            ]
        )

        print
        print table_utils.format(table_data, has_header=1, justify="center", delim=" ")
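
For reference, a minimal usage sketch of libtbx.table_utils.format, limited to the keyword arguments that actually appear in these examples (has_header, justify, delim; prefix/postfix work the same way). It assumes the cctbx/libtbx stack is installed; the row values are made up.

from libtbx import table_utils

rows = [
    ["Tile", "Nobs", "Rmsd"],  # header row
    ["0", "124", "0.43"],
    ["1", "98", "0.51"],
]
print(table_utils.format(rows, has_header=True, justify="center", delim=" "))
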
Example #40
            show_bin_number=False,
            show_bin_range=False,
            show_d_range=True,
            show_counts=False)
        xpercent = results.count_frames(
            appropriate_min_corr,
            miller_set_avg.binner().selection(i_bin)) / float(col_count1)
        percent = '%5.2f' % (100. * xpercent)
        table_data.append(['%3d' % i_bin, col_legend, col_count, percent])

    n_frames = (scaler.frames['cc'] > appropriate_min_corr).count(True)
    table_data.append([""] * len(table_data[0]))
    table_data.append(["All", "", '%8d' % n_frames])
    print >> out
    print >> out, table_utils.format(table_data,
                                     has_header=1,
                                     justify='center',
                                     delim=' ')

    reindexing_ops = {
        "h,k,l": 0
    }  # get a list of all reindexing ops for this dataset
    if work_params.merging.reverse_lookup is not None:
        for key in scaler.reverse_lookup:
            if reindexing_ops.get(scaler.reverse_lookup[key], None) is None:
                reindexing_ops[scaler.reverse_lookup[key]] = 0
            reindexing_ops[scaler.reverse_lookup[key]] += 1

    from xfel.cxi.cxi_cc import run_cc
    for key in reindexing_ops.keys():
        run_cc(work_params, reindexing_op=key, output=out)
Example #41
def run_cc(params, reindexing_op, output):
    uniform, selected_uniform, have_iso_ref = load_cc_data(
        params, reindexing_op, output)
    NBIN = params.output.n_bins

    if have_iso_ref:
        slope, offset, corr_iso, N_iso = correlation(selected_uniform[1],
                                                     selected_uniform[0],
                                                     params.include_negatives)
        print >> output, "C.C. iso is %.1f%% on %d indices" % (100 * corr_iso,
                                                               N_iso)

    slope, offset, corr_int, N_int = correlation(selected_uniform[2],
                                                 selected_uniform[3],
                                                 params.include_negatives)
    print >> output, "C.C. int is %.1f%% on %d indices" % (100. * corr_int,
                                                           N_int)

    if have_iso_ref:
        binned_cc_ref, binned_cc_ref_N = binned_correlation(
            selected_uniform[1], selected_uniform[0], params.include_negatives)
        #binned_cc_ref.show(f=output)

        ref_scale = scale_factor(selected_uniform[1],
                                 selected_uniform[0],
                                 weights=flex.pow(selected_uniform[1].sigmas(),
                                                  -2),
                                 use_binning=True)
        #ref_scale.show(f=output)

        ref_riso = r1_factor(selected_uniform[1],
                             selected_uniform[0],
                             scale_factor=ref_scale,
                             use_binning=True)
        #ref_riso.show(f=output)

        ref_scale_all = scale_factor(selected_uniform[1],
                                     selected_uniform[0],
                                     weights=flex.pow(
                                         selected_uniform[1].sigmas(), -2))

        ref_riso_all = r1_factor(selected_uniform[1],
                                 selected_uniform[0],
                                 scale_factor=ref_scale_all)

    binned_cc_int, binned_cc_int_N = binned_correlation(
        selected_uniform[2], selected_uniform[3], params.include_negatives)
    #binned_cc_int.show(f=output)

    oe_scale = scale_factor(
        selected_uniform[2],
        selected_uniform[3],
        weights=flex.pow(selected_uniform[2].sigmas(), -2) +
        flex.pow(selected_uniform[3].sigmas(), -2),
        use_binning=True)
    #oe_scale.show(f=output)

    oe_rint = r1_factor(selected_uniform[2],
                        selected_uniform[3],
                        scale_factor=oe_scale,
                        use_binning=True)
    #oe_rint.show(f=output)

    oe_rsplit = r_split(selected_uniform[2],
                        selected_uniform[3],
                        use_binning=True)

    oe_scale_all = scale_factor(
        selected_uniform[2],
        selected_uniform[3],
        weights=flex.pow(selected_uniform[2].sigmas(), -2) +
        flex.pow(selected_uniform[3].sigmas(), -2),
    )

    oe_rint_all = r1_factor(selected_uniform[2],
                            selected_uniform[3],
                            scale_factor=oe_scale_all)
    oe_rsplit_all = r_split(selected_uniform[2], selected_uniform[3])
    if have_iso_ref:
        print >> output, "R factors Riso = %.1f%%, Rint = %.1f%%" % (
            100. * ref_riso_all, 100. * oe_rint_all)
    else:
        print >> output, "R factor Rint = %.1f%%" % (100. * oe_rint_all)

    split_sigma_data = split_sigma_test(selected_uniform[2],
                                        selected_uniform[3],
                                        scale=oe_scale,
                                        use_binning=True,
                                        show_plot=False)
    split_sigma_data_all = split_sigma_test(selected_uniform[2],
                                            selected_uniform[3],
                                            scale=oe_scale_all,
                                            use_binning=False,
                                            show_plot=False)

    print >> output
    if reindexing_op == "h,k,l":
        print >> output, "Table of Scaling Results:"
    else:
        print >> output, "Table of Scaling Results Reindexing as %s:" % reindexing_op

    from libtbx import table_utils
    table_header = [
        "", "", "", "CC", " N", "CC", " N", "R", "R", "R", "Scale", "Scale",
        "SpSig"
    ]
    table_header2 = [
        "Bin", "Resolution Range", "Completeness", "int", "int", "iso", "iso",
        "int", "split", "iso", "int", "iso", "Test"
    ]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    items = binned_cc_int.binner.range_used()

    # XXX Make it clear what the completeness here actually is!
    cumulative_counts_given = 0
    cumulative_counts_complete = 0
    for bin in items:
        table_row = []
        table_row.append("%3d" % bin)
        table_row.append("%-13s" %
                         binned_cc_int.binner.bin_legend(i_bin=bin,
                                                         show_bin_number=False,
                                                         show_bin_range=False,
                                                         show_d_range=True,
                                                         show_counts=False))
        table_row.append("%13s" %
                         binned_cc_int.binner.bin_legend(i_bin=bin,
                                                         show_bin_number=False,
                                                         show_bin_range=False,
                                                         show_d_range=False,
                                                         show_counts=True))
        cumulative_counts_given += binned_cc_int.binner._counts_given[bin]
        cumulative_counts_complete += binned_cc_int.binner._counts_complete[
            bin]
        table_row.append("%.1f%%" % (100. * binned_cc_int.data[bin]))
        table_row.append("%7d" % (binned_cc_int_N.data[bin]))

        if have_iso_ref and binned_cc_ref.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * binned_cc_ref.data[bin]))
        else:
            table_row.append("--")

        if have_iso_ref and binned_cc_ref_N.data[bin] is not None:
            table_row.append("%6d" % (binned_cc_ref_N.data[bin]))
        else:
            table_row.append("--")

        if oe_rint.data[bin] is not None:
            table_row.append("%.1f%%" % (100. * oe_rint.data[bin]))
        else:
            table_row.append("--")

        if oe_rsplit.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * oe_rsplit.data[bin]))
        else:
            table_row.append("--")

        if have_iso_ref and ref_riso.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * ref_riso.data[bin]))
        else:
            table_row.append("--")

        if oe_scale.data[bin] is not None:
            table_row.append("%.3f" % oe_scale.data[bin])
        else:
            table_row.append("--")

        if have_iso_ref and ref_scale.data[bin] is not None:
            table_row.append("%.3f" % ref_scale.data[bin])
        else:
            table_row.append("--")

        if split_sigma_data.data[bin] is not None:
            table_row.append("%.4f" % split_sigma_data.data[bin])
        else:
            table_row.append("--")

        table_data.append(table_row)
    table_data.append([""] * len(table_header))

    table_row = [
        format_value("%3s", "All"),
        format_value("%-13s", "                 "),
        format_value(
            "%13s",
            "[%d/%d]" % (cumulative_counts_given, cumulative_counts_complete)),
        format_value("%.1f%%", 100 * corr_int),
        format_value("%7d", N_int)
    ]

    if have_iso_ref:
        table_row.extend(
            (format_value("%.1f%%",
                          100 * corr_iso), format_value("%6d", N_iso)))
    else:
        table_row.extend(("--", "--"))

    table_row.extend((format_value("%.1f%%", 100 * oe_rint_all),
                      format_value("%.1f%%", 100 * oe_rsplit_all)))
    if have_iso_ref:
        table_row.append(format_value("%.1f%%", 100 * ref_riso_all))
    else:
        table_row.append("--")

    table_row.append(format_value("%.3f", oe_scale_all))
    if have_iso_ref:
        table_row.append(format_value("%.3f", ref_scale_all))
    else:
        table_row.append("--")

    if split_sigma_data_all is not None:
        table_row.append("%.1f" % split_sigma_data_all)
    else:
        table_row.append("--")

    table_data.append(table_row)

    print >> output
    print >> output, table_utils.format(table_data,
                                        has_header=2,
                                        justify='center',
                                        delim=" ")
    print >> output, """CCint is the CC-1/2 defined by Diederichs; correlation between odd/even images.
  Similarly, Scale int and R int are the scaling factor and scaling R factor between odd/even images.
  "iso" columns compare the whole XFEL dataset to the isomorphous reference."""

    print >> output, """Niso: result vs. reference common set""",
    if params.include_negatives:
        print >> output, """including negative merged intensities (set by phil parameter)."""
    elif params.scaling.log_cutoff is None:
        print >> output
    else:
        print >> output, """with intensities < %7.2g filtered out (controlled by
    scaling.log_cutoff phil parameter set to %5.1f)""" % (math.exp(
            params.scaling.log_cutoff), params.scaling.log_cutoff)

    if have_iso_ref:
        assert N_iso == flex.sum(
            flex.double([x for x in binned_cc_ref_N.data if x is not None]))
    assert N_int == flex.sum(
        flex.double([x for x in binned_cc_int_N.data if x is not None]))

    if params.scaling.show_plots:
        from matplotlib import pyplot as plt
        plt.plot(flex.log(selected_uniform[-2].data()),
                 flex.log(selected_uniform[-1].data()), 'r.')
        plt.show()
        if have_iso_ref:
            plt.plot(flex.log(selected_uniform[0].data()),
                     flex.log(selected_uniform[1].data()), 'r.')
            plt.show()
    print >> output
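
A hedged sketch of the statistic that r_split computes above, using its usual definition from the serial-crystallography literature (the actual cctbx implementation may differ in detail); i_even and i_odd are merged intensities of the same reflections from the even/odd image half-sets, and r_split_sketch is a hypothetical name.

import math

def r_split_sketch(i_even, i_odd):
    # R_split = (1/sqrt(2)) * sum|I1 - I2| / (0.5 * sum(I1 + I2))
    num = sum(abs(a - b) for a, b in zip(i_even, i_odd))
    den = 0.5 * sum(a + b for a, b in zip(i_even, i_odd))
    return num / (math.sqrt(2.0) * den)

print("R_split = %.1f%%" % (100.0 * r_split_sketch([10.0, 4.0, 7.5], [9.0, 4.4, 7.1])))
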
Example #42
def run(args):
  sweep_directories = []
  templates = []
  n_strong_spots = flex.int()
  n_strong_spots_dmin_4 = flex.int()
  d_strong_spots_99th_percentile = flex.double()
  d_strong_spots_95th_percentile = flex.double()
  d_strong_spots_50th_percentile = flex.double()
  n_unindexed_spots = flex.int()
  n_indexed_lattices = flex.int()
  n_integrated_lattices = flex.int()
  sweep_dir_cryst = flex.std_string()

  orig_dir = os.path.abspath(os.curdir)

  rmsds = flex.vec3_double()
  cell_params = flex.sym_mat3_double()
  n_indexed = flex.double()
  d_min_indexed = flex.double()
  rmsds = flex.vec3_double()

  nproc = easy_mp.get_processes(libtbx.Auto)
  #nproc = 1
  results = easy_mp.parallel_map(
    func=run_once,
    iterable=args,
    processes=nproc,
    method="multiprocessing",
    preserve_order=True,
    asynchronous=True,
    preserve_exception_message=True,
  )

  for result in results:
    if result is None: continue
    sweep_directories.append(result.sweep_dir)
    templates.append(result.template)
    n_strong_spots.append(result.n_strong_spots)
    n_strong_spots_dmin_4.append(result.n_strong_spots_dmin_4)
    n_unindexed_spots.append(result.n_unindexed_spots)
    n_indexed_lattices.append(result.n_indexed_lattices)
    n_integrated_lattices.append(result.n_integrated_lattices)
    d_strong_spots_50th_percentile.append(result.d_strong_spots_50th_percentile)
    d_strong_spots_95th_percentile.append(result.d_strong_spots_95th_percentile)
    d_strong_spots_99th_percentile.append(result.d_strong_spots_99th_percentile)
    cell_params.extend(result.cell_params)
    n_indexed.extend(result.n_indexed)
    d_min_indexed.extend(result.d_min_indexed)
    rmsds.extend(result.rmsds)
    sweep_dir_cryst.extend(result.sweep_dir_cryst)

  table_data = [('sweep_dir', 'template', '#strong_spots', '#unindexed_spots', '#lattices',
                 'd_spacing_50th_percentile', 'd_spacing_95th_percentile',
                 'd_spacing_99th_percentile',)]
  for i in range(len(sweep_directories)):
    table_data.append((sweep_directories[i],
                       templates[i],
                       str(n_strong_spots[i]),
                       str(n_unindexed_spots[i]),
                       str(n_indexed_lattices[i]),
                       str(d_strong_spots_50th_percentile[i]),
                       str(d_strong_spots_95th_percentile[i]),
                       str(d_strong_spots_99th_percentile[i]),
                       ))

  with open('results.txt', 'wb') as f:
    print >> f, table_utils.format(
      table_data, has_header=True, justify='right')

  table_data = [('sweep_dir', 'cell_a', 'cell_b', 'cell_c', 'alpha', 'beta', 'gamma',
                 '#indexed_reflections', 'd_min_indexed',
                 'rmsd_x', 'rmsd_y', 'rmsd_phi')]
  for i in range(len(cell_params)):
    table_data.append((sweep_dir_cryst[i],
                       str(cell_params[i][0]),
                       str(cell_params[i][1]),
                       str(cell_params[i][2]),
                       str(cell_params[i][3]),
                       str(cell_params[i][4]),
                       str(cell_params[i][5]),
                       str(n_indexed[i]),
                       str(d_min_indexed[i]),
                       str(rmsds[i][0]),
                       str(rmsds[i][1]),
                       str(rmsds[i][2]),
                       ))

  with open('results_indexed.txt', 'wb') as f:
    print >> f, table_utils.format(
      table_data, has_header=True, justify='right')

  cell_a = flex.double([params[0] for params in cell_params])
  cell_b = flex.double([params[1] for params in cell_params])
  cell_c = flex.double([params[2] for params in cell_params])
  cell_alpha = flex.double([params[3] for params in cell_params])
  cell_beta = flex.double([params[4] for params in cell_params])
  cell_gamma = flex.double([params[5] for params in cell_params])

  from matplotlib import pyplot
  from matplotlib.backends.backend_pdf import PdfPages

  pyplot.rc('font', family='serif')
  pyplot.rc('font', serif='Times New Roman')

  red, blue = '#B2182B', '#2166AC'
  hist = flex.histogram(n_strong_spots_dmin_4.as_double(), n_slots=20)
  hist.show()
  fig = pyplot.figure()
  ax = fig.add_subplot(1,1,1)
  ax.bar(hist.slot_centers(), hist.slots(), width=0.75*hist.slot_width(),
         color=blue, edgecolor=blue)
  ax.set_xlabel('Spot count')
  ax.set_ylabel('Frequency')
  pdf = PdfPages("spot_count_histogram.pdf")
  pdf.savefig(fig)
  pdf.close()
  #pyplot.show()

  hist = flex.histogram(n_indexed_lattices.as_double(),
                        n_slots=flex.max(n_indexed_lattices))
  hist.show()
  fig = pyplot.figure()
  ax = fig.add_subplot(1,1,1)
  ax.bar(range(int(hist.data_max())), hist.slots(),
         width=0.75*hist.slot_width(), align='center',
         color=blue, edgecolor=blue)
  ax.set_xlim(-0.5, hist.data_max()-0.5)
  ax.set_xticks(range(0,int(hist.data_max())))
  ax.set_xlabel('Number of indexed lattices')
  ax.set_ylabel('Frequency')
  pdf = PdfPages("n_indexed_lattices_histogram.pdf")
  pdf.savefig(fig)
  pdf.close()
  #pyplot.show()

  if flex.max(n_integrated_lattices) > 0:
    hist = flex.histogram(n_integrated_lattices.as_double(),
                          n_slots=flex.max(n_integrated_lattices))
    hist.show()
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    ax.bar(range(int(hist.data_max())), hist.slots(),
           width=0.75*hist.slot_width(),
           align='center', color=blue, edgecolor=blue)
    ax.set_xlim(-0.5, hist.data_max()-0.5)
    ax.set_xticks(range(0,int(hist.data_max())))
    ax.set_xlabel('Number of integrated lattices')
    ax.set_ylabel('Frequency')
    pdf = PdfPages("n_integrated_lattices_histogram.pdf")
    pdf.savefig(fig)
    pdf.close()
    #pyplot.show()

  fig, axes = pyplot.subplots(nrows=2, ncols=3, squeeze=False)
  for i, cell_param in enumerate(
    (cell_a, cell_b, cell_c, cell_alpha, cell_beta, cell_gamma)):
    ax = axes.flat[i]
    flex.min_max_mean_double(cell_param).show()
    print flex.median(cell_param)
    hist = flex.histogram(cell_param, n_slots=20)
    hist.show()
    ax.bar(hist.slot_centers(), hist.slots(), width=0.75*hist.slot_width(),
           color=blue, edgecolor=blue)
    ax.set_xlabel('Cell parameter')
    ax.set_ylabel('Frequency')
  pyplot.tight_layout()
  pdf = PdfPages("cell_parameters.pdf")
  pdf.savefig(fig)
  pdf.close()
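
The histogram-to-PDF pattern above reduces to the following minimal matplotlib sketch; numpy stands in for the flex arrays, and the data and file name are illustrative only.

import numpy as np
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages

counts = np.random.poisson(40, size=200)  # stand-in for n_strong_spots_dmin_4
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(counts, bins=20, color="#2166AC", edgecolor="#2166AC")
ax.set_xlabel("Spot count")
ax.set_ylabel("Frequency")
with PdfPages("spot_count_histogram_demo.pdf") as pdf:  # closes the PDF on exit
    pdf.savefig(fig)
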
Example #43
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_reflections
  import libtbx.load_env

  usage = "%s [options] datablock.json | experiments.json | image_*.cbf" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_datablocks=True,
    read_datablocks_from_images=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  datablocks = flatten_datablocks(params.input.datablock)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0 and len(experiments) == 0 and len(reflections) == 0:
    parser.print_help()
    exit()

  for i_expt, expt in enumerate(experiments):
    print "Experiment %i:" %i_expt
    print str(expt.detector)
    print 'Max resolution (at corners): %f' % (
      expt.detector.get_max_resolution(expt.beam.get_s0()))
    print 'Max resolution (inscribed):  %f' % (
      expt.detector.get_max_inscribed_resolution(expt.beam.get_s0()))
    if params.show_panel_distance:
      for ipanel, panel in enumerate(expt.detector):
        from scitbx import matrix
        fast = matrix.col(panel.get_fast_axis())
        slow = matrix.col(panel.get_slow_axis())
        normal = fast.cross(slow)
        origin = matrix.col(panel.get_origin())
        distance = origin.dot(normal)
        fast_origin = - (origin - distance * normal).dot(fast)
        slow_origin = - (origin - distance * normal).dot(slow)
        print 'Panel %d: distance %.2f origin %.2f %.2f' % \
          (ipanel, distance, fast_origin, slow_origin)
      print ''
    print ''
    panel_id, (x, y) = beam_centre(expt.detector, expt.beam)
    if panel_id >= 0 and x is not None and y is not None:
      if len(expt.detector) > 1:
        beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" %(panel_id, x, y)
      else:
        beam_centre_str = "Beam centre: (%.2f,%.2f)" %(x, y)
    else:
      beam_centre_str = ""
    print str(expt.beam) + beam_centre_str + '\n'
    if expt.scan is not None:
      print expt.scan
    if expt.goniometer is not None:
      print expt.goniometer
    expt.crystal.show(show_scan_varying=params.show_scan_varying)
    if expt.crystal.num_scan_points:
      from scitbx.array_family import flex
      from cctbx import uctbx
      abc = flex.vec3_double()
      angles = flex.vec3_double()
      for n in range(expt.crystal.num_scan_points):
        a, b, c, alpha, beta, gamma = expt.crystal.get_unit_cell_at_scan_point(n).parameters()
        abc.append((a, b, c))
        angles.append((alpha, beta, gamma))
      a, b, c = abc.mean()
      alpha, beta, gamma = angles.mean()
      mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))
      print "  Average unit cell: %s" %mean_unit_cell
    print

  for datablock in datablocks:
    if datablock.format_class() is not None:
      print 'Format: %s' %datablock.format_class()
    imagesets = datablock.extract_imagesets()
    for imageset in imagesets:
      try: print imageset.get_template()
      except Exception: pass
      detector = imageset.get_detector()
      print str(detector) + 'Max resolution: %f\n' %(
        detector.get_max_resolution(imageset.get_beam().get_s0()))
      if params.show_panel_distance:
        for ipanel, panel in enumerate(detector):
          from scitbx import matrix
          fast = matrix.col(panel.get_fast_axis())
          slow = matrix.col(panel.get_slow_axis())
          normal = fast.cross(slow)
          origin = matrix.col(panel.get_origin())
          distance = origin.dot(normal)
          fast_origin = - (origin - distance * normal).dot(fast)
          slow_origin = - (origin - distance * normal).dot(slow)
          print 'Panel %d: distance %.2f origin %.2f %.2f' % \
            (ipanel, distance, fast_origin, slow_origin)
        print ''
      panel_id, (x, y) = beam_centre(detector, imageset.get_beam())
      if panel_id >= 0 and x is not None and y is not None:
        if len(detector) > 1:
          beam_centre_str = "Beam centre: panel %i, (%.2f,%.2f)" %(panel_id, x, y)
        else:
          beam_centre_str = "Beam centre: (%.2f,%.2f)" %(x, y)
      else:
        beam_centre_str = ""
      print str(imageset.get_beam()) + beam_centre_str + '\n'
      if imageset.get_scan() is not None:
        print imageset.get_scan()
      if imageset.get_goniometer() is not None:
        print imageset.get_goniometer()

  from libtbx.containers import OrderedDict, OrderedSet
  formats = OrderedDict([
    ('miller_index', '%i, %i, %i'),
    ('d','%.2f'),
    ('dqe','%.3f'),
    ('id','%i'),
    ('imageset_id','%i'),
    ('panel','%i'),
    ('flags', '%i'),
    ('background.mean', '%.1f'),
    ('background.dispersion','%.1f'),
    ('background.mse', '%.1f'),
    ('background.sum.value', '%.1f'),
    ('background.sum.variance', '%.1f'),
    ('intensity.prf.value','%.1f'),
    ('intensity.prf.variance','%.1f'),
    ('intensity.sum.value','%.1f'),
    ('intensity.sum.variance','%.1f'),
    ('intensity.cor.value','%.1f'),
    ('intensity.cor.variance','%.1f'),
    ('lp','%.3f'),
    ('num_pixels.background','%i'),
    ('num_pixels.background_used','%i'),
    ('num_pixels.foreground','%i'),
    ('num_pixels.valid','%i'),
    ('partial_id','%i'),
    ('partiality','%.4f'),
    ('profile.correlation','%.3f'),
    ('profile.rmsd','%.3f'),
    ('xyzcal.mm','%.2f, %.2f, %.2f'),
    ('xyzcal.px','%.2f, %.2f, %.2f'),
    ('delpsical.rad','%.3f'),
    ('delpsical2','%.3f'),
    ('xyzobs.mm.value','%.2f, %.2f, %.2f'),
    ('xyzobs.mm.variance','%.4e, %.4e, %.4e'),
    ('xyzobs.px.value','%.2f, %.2f, %.2f'),
    ('xyzobs.px.variance','%.4f, %.4f, %.4f'),
    ('s1','%.4f, %.4f, %.4f'),
    ('rlp','%.4f, %.4f, %.4f'),
    ('zeta','%.3f'),
    ('x_resid','%.3f'),
    ('x_resid2','%.3f'),
    ('y_resid','%.3f'),
    ('y_resid2','%.3f'),
    ])

  for rlist in reflections:
    from cctbx.array_family import flex
    print
    print "Reflection list contains %i reflections" %(len(rlist))
    rows = [["Column", "min", "max", "mean"]]
    for k, col in rlist.cols():
      if type(col) in (flex.double, flex.int, flex.size_t):
        if type(col) in (flex.int, flex.size_t):
          col = col.as_double()
        rows.append([k, formats[k] %flex.min(col), formats[k] %flex.max(col),
                     formats[k]%flex.mean(col)])
      elif type(col) in (flex.vec3_double, flex.miller_index):
        if type(col) == flex.miller_index:
          col = col.as_vec3_double()
        rows.append([k, formats[k] %col.min(), formats[k] %col.max(),
                     formats[k]%col.mean()])

    from libtbx import table_utils
    print table_utils.format(rows, has_header=True, prefix="| ", postfix=" |")

  intensity_keys = (
    'miller_index', 'd', 'intensity.prf.value', 'intensity.prf.variance',
    'intensity.sum.value', 'intensity.sum.variance', 'background.mean',
    'profile.correlation', 'profile.rmsd'
  )

  profile_fit_keys = ('miller_index', 'd',)

  centroid_keys = (
    'miller_index', 'd', 'xyzcal.mm', 'xyzcal.px', 'xyzobs.mm.value',
    'xyzobs.mm.variance', 'xyzobs.px.value', 'xyzobs.px.variance'
  )

  keys_to_print = OrderedSet()

  if params.show_intensities:
    for k in intensity_keys: keys_to_print.add(k)
  if params.show_profile_fit:
    for k in profile_fit_keys: keys_to_print.add(k)
  if params.show_centroids:
    for k in centroid_keys: keys_to_print.add(k)
  if params.show_all_reflection_data:
    for k in formats: keys_to_print.add(k)

  def format_column(key, data, format_strings=None):
    if isinstance(data, flex.vec3_double):
      c_strings = [c.as_string(format_strings[i].strip()) for i, c in enumerate(data.parts())]
    elif isinstance(data, flex.miller_index):
      c_strings = [c.as_string(format_strings[i].strip()) for i, c in enumerate(data.as_vec3_double().parts())]
    elif isinstance(data, flex.size_t):
      c_strings = [data.as_int().as_string(format_strings[0].strip())]
    else:
      c_strings = [data.as_string(format_strings[0].strip())]

    column = flex.std_string()
    max_element_lengths = [c.max_element_length() for c in c_strings]
    for i in range(len(c_strings[0])):

      column.append(('%%%is' %len(key)) %', '.join(
        ('%%%is' %max_element_lengths[j]) %c_strings[j][i]
        for j in range(len(c_strings))))
    return column


  if keys_to_print:
    keys = [k for k in keys_to_print if k in rlist]
    rows = [keys]
    max_reflections = len(rlist)
    if params.max_reflections is not None:
      max_reflections = min(len(rlist), params.max_reflections)

    columns = []

    for k in keys:
      columns.append(format_column(k, rlist[k], format_strings=formats[k].split(',')))

    print
    print "Printing %i of %i reflections:" %(max_reflections, len(rlist))
    for j in range(len(columns)):
      key = keys[j]
      width = max(len(key), columns[j].max_element_length())
      print ("%%%is" %width) %key,
    print
    for i in range(max_reflections):
      for j in range(len(columns)):
        print columns[j][i],
      print

  return
  def get_table_text(self):
    '''Produce formatted table text'''
    table_header = ["","","","","<asu","<obs","<pred","","","","",""]
    table_header2 = ["Bin","Resolution Range","Completeness","%","multi>","multi>","multi>",
                      "n_meas", "asu_m_meas", "n_pred","<I>","<I/sig(I)>"]

    use_preds = False # TODO?

    include_columns = [True, True, True, True, True, True, use_preds, True, True, use_preds, True, True]

    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    for bin in self.table:
      table_row = []
      table_row.append("%3d" % bin.i_bin)
      table_row.append("%-13s" % bin.d_range)
      table_row.append("%13s" % bin.complete_tag)
      table_row.append("%5.2f" % (100*bin.completeness))
      table_row.append("%6.2f" % bin.redundancy_asu)
      table_row.append("%6.2f" % bin.redundancy_obs)
      table_row.append("%6.2f" % (0)) # if redundancy_to_edge is None else bin.redundancy_to_edge))
      table_row.append("%6d" % bin.measurements)
      table_row.append("%6d" % bin.multiply_measured_asu)
      table_row.append("%6d" % (0)) # if redundancy_to_edge is None else flex.sum(bin.predictions)))
      table_row.append("%8.0f" % bin.mean_I)
      table_row.append("%8.3f" % bin.mean_I_sigI)
      table_data.append(table_row)

    if len(table_data) <= 2:
      return ("Intensity statistics table could not be constructed -- no bins accepted.")

    table_data.append([""] * len(table_header))

    total_completeness = 0
    total_asu_multiplicity = 0
    if self.cumulative_theor_asu_count > 0:
      total_completeness = 100 * (self.cumulative_observed_asu_count / self.cumulative_theor_asu_count)
      total_asu_multiplicity = self.cumulative_observed_count / self.cumulative_theor_asu_count

    total_obs_multiplicity = 0
    if self.cumulative_observed_asu_count:
      total_obs_multiplicity = self.cumulative_observed_count / self.cumulative_observed_asu_count

    total_mean_I = 0
    total_mean_Isigma = 0
    if self.cumulative_multiply_observed_asu_count > 0:
      total_mean_I        = self.cumulative_I / self.cumulative_multiply_observed_asu_count
      total_mean_Isigma   = self.cumulative_Isigma / self.cumulative_multiply_observed_asu_count

    table_data.append(  [
        format_value("%3s",   "All"),
        format_value("%-13s", "                 "),
        format_value("%13s",  "[%d/%d]"%(self.cumulative_observed_asu_count, self.cumulative_theor_asu_count)),
        format_value("%5.2f", total_completeness),
        format_value("%6.2f", total_asu_multiplicity),
        format_value("%6.2f", total_obs_multiplicity),
        format_value("%6.2f", (0)), # if redundancy_to_edge is None else cumulative_n_pred/cumulative_theor)),
        format_value("%6d",   self.cumulative_observed_count),
        format_value("%6d",   self.cumulative_multiply_observed_asu_count),
        format_value("%6d",   (0)), #if redundancy_to_edge is None else flex.sum(redundancy_to_edge))),
        format_value("%8.0f", total_mean_I),
        format_value("%8.3f", total_mean_Isigma),
    ])
    table_data = table_utils.manage_columns(table_data, include_columns)

    return table_utils.format(table_data, has_header = 2, justify ='center', delim = " ")
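
table_utils.manage_columns above keeps only the columns whose include flag is True. A plain-Python sketch of that idea (manage_columns_sketch is a hypothetical stand-in, not the libtbx implementation):

def manage_columns_sketch(table, include):
    # Keep, row by row, only the cells whose column flag is True.
    return [[cell for cell, keep in zip(row, include) if keep] for row in table]

table = [["Bin", "n_pred", "<I>"], ["1", "42", "103.5"]]
print(manage_columns_sketch(table, [True, False, True]))  # drops the n_pred column
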
Example #45
def run(args):
    sweep_directories = []
    templates = []
    n_strong_spots = flex.int()
    n_strong_spots_dmin_4 = flex.int()
    d_strong_spots_99th_percentile = flex.double()
    d_strong_spots_95th_percentile = flex.double()
    d_strong_spots_50th_percentile = flex.double()
    n_unindexed_spots = flex.int()
    n_indexed_lattices = flex.int()
    n_integrated_lattices = flex.int()
    sweep_dir_cryst = flex.std_string()

    orig_dir = os.path.abspath(os.curdir)

    rmsds = flex.vec3_double()
    cell_params = flex.sym_mat3_double()
    n_indexed = flex.double()
    d_min_indexed = flex.double()
    rmsds = flex.vec3_double()

    nproc = easy_mp.get_processes(libtbx.Auto)
    # nproc = 1
    results = easy_mp.parallel_map(
        func=run_once,
        iterable=args,
        processes=nproc,
        method="multiprocessing",
        preserve_order=True,
        asynchronous=True,
        preserve_exception_message=True,
    )

    for result in results:
        if result is None:
            continue
        sweep_directories.append(result.sweep_dir)
        templates.append(result.template)
        n_strong_spots.append(result.n_strong_spots)
        n_strong_spots_dmin_4.append(result.n_strong_spots_dmin_4)
        n_unindexed_spots.append(result.n_unindexed_spots)
        n_indexed_lattices.append(result.n_indexed_lattices)
        n_integrated_lattices.append(result.n_integrated_lattices)
        d_strong_spots_50th_percentile.append(
            result.d_strong_spots_50th_percentile)
        d_strong_spots_95th_percentile.append(
            result.d_strong_spots_95th_percentile)
        d_strong_spots_99th_percentile.append(
            result.d_strong_spots_99th_percentile)
        cell_params.extend(result.cell_params)
        n_indexed.extend(result.n_indexed)
        d_min_indexed.extend(result.d_min_indexed)
        rmsds.extend(result.rmsds)
        sweep_dir_cryst.extend(result.sweep_dir_cryst)

    table_data = [(
        "sweep_dir",
        "template",
        "#strong_spots",
        "#unindexed_spots",
        "#lattices",
        "d_spacing_50th_percentile",
        "d_spacing_95th_percentile",
        "d_spacing_99th_percentile",
    )]
    for i in range(len(sweep_directories)):
        table_data.append((
            sweep_directories[i],
            templates[i],
            str(n_strong_spots[i]),
            str(n_unindexed_spots[i]),
            str(n_indexed_lattices[i]),
            str(d_strong_spots_50th_percentile[i]),
            str(d_strong_spots_95th_percentile[i]),
            str(d_strong_spots_99th_percentile[i]),
        ))

    with open("results.txt", "w") as f:  # "w": print() writes str, not bytes
        print(table_utils.format(table_data, has_header=True, justify="right"),
              file=f)

    table_data = [(
        "sweep_dir",
        "cell_a",
        "cell_b",
        "cell_c",
        "alpha",
        "beta",
        "gamma",
        "#indexed_reflections",
        "d_min_indexed",
        "rmsd_x",
        "rmsd_y",
        "rmsd_phi",
    )]
    for i in range(len(cell_params)):
        table_data.append((
            sweep_dir_cryst[i],
            str(cell_params[i][0]),
            str(cell_params[i][1]),
            str(cell_params[i][2]),
            str(cell_params[i][3]),
            str(cell_params[i][4]),
            str(cell_params[i][5]),
            str(n_indexed[i]),
            str(d_min_indexed[i]),
            str(rmsds[i][0]),
            str(rmsds[i][1]),
            str(rmsds[i][2]),
        ))

    with open("results_indexed.txt", "w") as f:  # "w": print() writes str, not bytes
        print(table_utils.format(table_data, has_header=True, justify="right"),
              file=f)

    cell_a = flex.double([params[0] for params in cell_params])
    cell_b = flex.double([params[1] for params in cell_params])
    cell_c = flex.double([params[2] for params in cell_params])
    cell_alpha = flex.double([params[3] for params in cell_params])
    cell_beta = flex.double([params[4] for params in cell_params])
    cell_gamma = flex.double([params[5] for params in cell_params])

    from matplotlib import pyplot
    from matplotlib.backends.backend_pdf import PdfPages

    pyplot.rc("font", family="serif")
    pyplot.rc("font", serif="Times New Roman")

    red, blue = "#B2182B", "#2166AC"
    hist = flex.histogram(n_strong_spots_dmin_4.as_double(), n_slots=20)
    hist.show()
    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.bar(
        hist.slot_centers(),
        hist.slots(),
        width=0.75 * hist.slot_width(),
        color=blue,
        edgecolor=blue,
    )
    ax.set_xlabel("Spot count")
    ax.set_ylabel("Frequency")
    pdf = PdfPages("spot_count_histogram.pdf")
    pdf.savefig(fig)
    pdf.close()
    # pyplot.show()

    hist = flex.histogram(n_indexed_lattices.as_double(),
                          n_slots=flex.max(n_indexed_lattices))
    hist.show()
    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.bar(
        range(int(hist.data_max())),
        hist.slots(),
        width=0.75 * hist.slot_width(),
        align="center",
        color=blue,
        edgecolor=blue,
    )
    ax.set_xlim(-0.5, hist.data_max() - 0.5)
    ax.set_xticks(range(0, int(hist.data_max())))
    ax.set_xlabel("Number of indexed lattices")
    ax.set_ylabel("Frequency")
    pdf = PdfPages("n_indexed_lattices_histogram.pdf")
    pdf.savefig(fig)
    pdf.close()
    # pyplot.show()

    if flex.max(n_integrated_lattices) > 0:
        hist = flex.histogram(n_integrated_lattices.as_double(),
                              n_slots=flex.max(n_integrated_lattices))
        hist.show()
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.bar(
            range(int(hist.data_max())),
            hist.slots(),
            width=0.75 * hist.slot_width(),
            align="center",
            color=blue,
            edgecolor=blue,
        )
        ax.set_xlim(-0.5, hist.data_max() - 0.5)
        ax.set_xticks(range(0, int(hist.data_max())))
        ax.set_xlabel("Number of integrated lattices")
        ax.set_ylabel("Frequency")
        pdf = PdfPages("n_integrated_lattices_histogram.pdf")
        pdf.savefig(fig)
        pdf.close()
        # pyplot.show()

    fig, axes = pyplot.subplots(nrows=2, ncols=3, squeeze=False)
    for i, cell_param in enumerate(
        (cell_a, cell_b, cell_c, cell_alpha, cell_beta, cell_gamma)):
        ax = axes.flat[i]
        flex.min_max_mean_double(cell_param).show()
        print(flex.median(cell_param))
        hist = flex.histogram(cell_param, n_slots=20)
        hist.show()
        ax.bar(
            hist.slot_centers(),
            hist.slots(),
            width=0.75 * hist.slot_width(),
            color=blue,
            edgecolor=blue,
        )
        ax.set_xlabel("Cell parameter")
        ax.set_ylabel("Frequency")
    pyplot.tight_layout()
    pdf = PdfPages("cell_parameters.pdf")
    pdf.savefig(fig)
    pdf.close()
Example #46
def show_overall_observations(Fit_I,Fit_I_stddev,I_visited,ordered,sim,
  out = None,title = None,work_params = None):

  # at minimum we need the Miller set of merged intensities and sigmas
  # assert len(ordered.indices())==len(Fit_I)
  # have to assert this because the "ordered" indices may not in fact be ordered.
  # but can not assert this in the test1.py test suite.
  model_subset = ordered[0:len(Fit_I)]
  uc = ordered.unit_cell()
  model_subset.crystal_symmetry().show_summary()

  if work_params is not None:
    d_min = work_params.d_min
    d_max = work_params.d_max
    n_bins = work_params.output.n_bins
  else:
    d_min = flex.min(uc.d(model_subset.indices())) # extent of data
    d_max = None
    n_bins = min( len(Fit_I)//20, 15 )

  #obs, redundancy, summed_wt_I, summed_weight, ISIGI, n_bins=15, out=None, title=None, work_params=None):
  if out is None:
    out = sys.stdout
  model_subset.setup_binner(d_max=(d_max or 100000), d_min=d_min, n_bins=n_bins)
  result = []
  multiplicity = flex.int(len(Fit_I))
  for iraw in xrange(len(sim.miller)):
    multiplicity[sim.miller[iraw]] += 1
  cumulative_unique = 0
  cumulative_meas   = 0
  cumulative_theor  = 0
  cumulative_In     = 0
  cumulative_I      = 0.0
  cumulative_Isigma = 0.0
  frame_worker = n_frames_worker(obs_arrays=sim, unique_set=model_subset,
                                 d_max=(d_max or 100000),d_min=d_min,n_bins=n_bins)

  for i_bin in model_subset.binner().range_used():
    sel_w = model_subset.binner().selection(i_bin)
    sel_fo_all = model_subset.select(sel_w)
    d_range = model_subset.binner().bin_legend(
      i_bin=i_bin, show_bin_number=False, show_counts=False)

    sel_multiplicity = multiplicity.select(sel_w)
    sel_absent = sel_multiplicity.count(0)
    n_present = sel_multiplicity.size() - sel_absent
    sel_complete_tag = "[%d/%d]" % (n_present, sel_multiplicity.size())
    sel_measurements = flex.sum(sel_multiplicity)

    # Alternatively, redundancy (or multiplicity) is calculated as the
    # average number of observations for the observed
    # reflections--missing reflections do not affect the redundancy
    # adversely, and the reported value becomes
    # completeness-independent.
    val_multiplicity_obs = 0
    if n_present > 0:
      val_multiplicity_obs = flex.sum(sel_multiplicity) / n_present
    sel_frames = frame_worker.per_bin_frames(i_bin)

    # Per-bin sum of I and I/sig(I).  For any reflection, the weight
    # of the merged intensity must be positive for this to make sense.
    sel_o = (sel_w & (Fit_I_stddev > 0.))
    selected_intensity = Fit_I.select(sel_o)
    selected_stddev    = Fit_I_stddev.select(sel_o)
    I_sum = flex.sum(selected_intensity)
    assert selected_stddev.count(0.) == 0
    I_sigI_sum = flex.sum(selected_intensity / selected_stddev)
    I_n = sel_o.count(True)

    assert sel_measurements == frame_worker.per_bin_meas(i_bin)

    if sel_measurements > 0:
      mean_I = mean_I_sigI = 0
      if I_n > 0:
        mean_I = I_sum / I_n
        mean_I_sigI = I_sigI_sum / I_n
      bin = resolution_bin(
        i_bin=i_bin,
        d_range=d_range,
        d_min=model_subset.binner().bin_d_min(i_bin),
        redundancy_asu=flex.mean(sel_multiplicity.as_double()),
        redundancy_obs=val_multiplicity_obs,
        frames=sel_frames,
        complete_tag=sel_complete_tag,
        completeness=n_present / sel_multiplicity.size(),
        measurements=sel_measurements,
        negative=frame_worker.per_bin_neg(i_bin),
        percent_neg=100.*frame_worker.per_bin_neg(i_bin)/frame_worker.per_bin_meas(i_bin),
        mean_I=mean_I,
        mean_I_sigI=mean_I_sigI
        )
      result.append(bin)
    cumulative_unique += n_present
    cumulative_meas   += sel_measurements
    cumulative_theor  += sel_multiplicity.size()
    cumulative_In     += I_n
    cumulative_I      += I_sum
    cumulative_Isigma += I_sigI_sum

  if (title is not None) :
    print >> out, title
  from libtbx import table_utils
  table_header = ["","","","<asu","<obs",""," #"," %","","",""]
  table_header2 = ["Bin","Resolution Range","Completeness","multi>","multi>","n_meas"," neg"," neg","n_xtal","<I>","<I/sig(I)>"]
  table_data = []
  table_data.append(table_header)
  table_data.append(table_header2)
  for bin in result:
    table_row = []
    table_row.append("%3d" % bin.i_bin)
    table_row.append("%-13s" % bin.d_range)
    table_row.append("%13s" % bin.complete_tag)
    table_row.append("%6.2f" % bin.redundancy_asu)
    table_row.append("%6.2f" % bin.redundancy_obs)
    table_row.append("%6d" % bin.measurements)
    table_row.append("%4d" % bin.negative)
    table_row.append("%5.2f"%bin.percent_neg)
    table_row.append("%6d" % bin.frames)
    table_row.append("%8.3f" % bin.mean_I)
    table_row.append("%8.3f" % bin.mean_I_sigI)
    table_data.append(table_row)
  table_data.append([""]*len(table_header))
  table_data.append(  [
      format_value("%3s",   "All"),
      format_value("%-13s", "                 "),
      format_value("%13s",  "[%d/%d]"%(cumulative_unique,cumulative_theor)),
      format_value("%6.2f", cumulative_meas/cumulative_theor),
      format_value("%6.2f", cumulative_meas/cumulative_unique),
      format_value("%6d",   cumulative_meas),
      format_value("%4d",   frame_worker.all_neg()),
      format_value("%5.2f", 100.*frame_worker.all_neg()/frame_worker.all_meas()),
      format_value("%6d",   frame_worker.all_frames()),
      format_value("%8.3f", cumulative_I/cumulative_In),
      format_value("%8.3f", cumulative_Isigma/cumulative_In),
  ])

  print >> out
  print >> out, table_utils.format(table_data, has_header=2, justify='center', delim=" ")

  # XXX generate table object for displaying plots
  if (title is None) :
    title = "Data statistics by resolution"
  table = data_plots.table_data(
    title=title,
    x_is_inverse_d_min=True,
    force_exact_x_labels=True)
  table.add_column(
    column=[1 / bin.d_min**2 for bin in result],
    column_name="d_min",
    column_label="Resolution")
  table.add_column(
    column=[bin.redundancy_asu for bin in result],
    column_name="redundancy",
    column_label="Redundancy")
  table.add_column(
    column=[bin.completeness for bin in result],
    column_name="completeness",
    column_label="Completeness")
  table.add_column(
    column=[bin.mean_I_sigI for bin in result],
    column_name="mean_i_over_sigI",
    column_label="<I/sig(I)>")
  table.add_graph(
    name="Redundancy vs. resolution",
    type="GRAPH",
    columns=[0,1])
  table.add_graph(
    name="Completeness vs. resolution",
    type="GRAPH",
    columns=[0,2])
  table.add_graph(
    name="<I/sig(I)> vs. resolution",
    type="GRAPH",
    columns=[0,3])
  return table,n_bins,d_min
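
The per-bin bookkeeping in show_overall_observations boils down to three ratios; a small sketch, assuming a plain list holding the number of observations of each unique reflection in one resolution bin (bin_stats is a hypothetical name):

def bin_stats(multiplicity):
    n_total = len(multiplicity)                      # theoretical asu count
    n_present = sum(1 for m in multiplicity if m > 0)
    n_meas = sum(multiplicity)                       # total measurements
    completeness = n_present / float(n_total)
    multi_asu = n_meas / float(n_total)              # averaged over all asu reflections
    multi_obs = n_meas / float(n_present) if n_present else 0.0  # observed reflections only
    return completeness, multi_asu, multi_obs

print(bin_stats([3, 0, 2, 5, 0]))  # -> (0.6, 2.0, 3.33...)
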
Example #47
  def run(self):
    '''Execute the script.'''
    import os, math
    from cctbx.crystal import symmetry
    from scitbx.array_family import flex
    from libtbx import table_utils, easy_pickle
    from xfel.command_line.cspad_cbf_metrology import find_files
    from dxtbx.model.experiment.experiment_list import ExperimentListFactory
    table_header = ["","","","I","IsigI","N >","RMSD","Cutoff"]
    table_header2 = ["Bin","Resolution Range","Completeness","","","cutoff","(um)",""]

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)
    exp_paths = []
    refl_paths = []
    for path in all_paths:
      exps, refs = find_files(path, "integrated")
      exp_paths.extend(exps)
      refl_paths.extend(refs)
    assert len(exp_paths) == len(refl_paths)

    best_data = {}
    best_limits = flex.double()
    for exp_path, refl_path in zip(exp_paths, refl_paths):
      experiments = ExperimentListFactory.from_json_file(exp_path)
      reflections = easy_pickle.load(refl_path)
      exp_name = os.path.basename(exp_path)
      if exp_name.startswith("idx-") and exp_name.endswith("_refined_experiments.json"):
        # str.lstrip()/rstrip() strip *character sets*, not substrings; slice
        # off the known prefix and suffix instead.
        tag = exp_name[len("idx-"):-len("_refined_experiments.json")]
      else:
        tag = "%s, %s"%(exp_path, refl_path)

      for exp_id, experiment in enumerate(experiments):
        print "*"*80
        print "Data table for", tag
        table_data = []
        table_data.append(table_header)
        table_data.append(table_header2)

        crystal = experiment.crystal
        refls = reflections.select(reflections['id'] == exp_id)
        sym = symmetry(unit_cell = crystal.get_unit_cell(), space_group = crystal.get_space_group())
        d = crystal.get_unit_cell().d(refls['miller_index'])
        mset = sym.miller_set(indices = refls['miller_index'].select(d>=params.d_min), anomalous_flag=False)
        binner = mset.setup_binner(n_bins=params.n_bins)
        acceptable_resolution_bins = []
        for i in binner.range_used():
          d_max, d_min = binner.bin_d_range(i)
          sel = (d <= d_max) & (d > d_min)
          sel &= refls['intensity.sum.value'] > 0
          bin_refls = refls.select(sel)
          n_refls = len(bin_refls)
          avg_i = flex.mean(bin_refls['intensity.sum.value']) if n_refls > 0 else 0
          avg_i_sigi = flex.mean(bin_refls['intensity.sum.value'] /
                                 flex.sqrt(bin_refls['intensity.sum.variance'])) if n_refls > 0 else 0
          acceptable_resolution_bins.append(avg_i_sigi >= params.sig_filter_sigma)

          bright_refls = bin_refls.select((bin_refls['intensity.sum.value']/flex.sqrt(bin_refls['intensity.sum.variance'])) >= params.sig_filter_sigma)
          n_bright = len(bright_refls)

          rmsd_obs = 1000*math.sqrt((bright_refls['xyzcal.mm']-bright_refls['xyzobs.mm.value']).sum_sq()/n_bright) if n_bright > 0 else 0

          table_row = []
          table_row.append("%3d"%i)
          table_row.append("%-13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                     show_d_range=True, show_counts=False))
          table_row.append("%13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                    show_d_range=False, show_counts=True))

          table_row.append("%.1f"%(avg_i))
          table_row.append("%.1f"%(avg_i_sigi))
          table_row.append("%3d"%n_bright)
          table_row.append("%.1f"%(rmsd_obs))
          table_data.append(table_row)

        acceptable_resolution_bins = [acceptable_resolution_bins[i] for i in xrange(len(acceptable_resolution_bins))
                                      if False not in acceptable_resolution_bins[:i+1]]

        for b, row in zip(acceptable_resolution_bins, table_data[2:]):
          if b:
            row.append("X")
        print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

        if any(acceptable_resolution_bins):
          best_index = acceptable_resolution_bins.count(True)-1
          best_row = table_data[best_index+2]
          d_min = binner.bin_d_range(binner.range_used()[best_index])[1]
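          # Keep the params.best_count lowest resolution limits seen so far,
          # evicting the current worst (largest d_min) when a better one appears.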
          if len(best_limits) < params.best_count:
            best_limits.append(d_min)
            best_data[tag] = d_min, best_row
          elif (d_min < best_limits).count(True) > 0:
            worst_d_min = flex.max(best_limits)
            for tag, data in best_data.iteritems():
              if worst_d_min == data[0]:
                best_data[tag] = d_min, best_row
                best_limits[flex.first_index(best_limits, worst_d_min)] = d_min
                break
          print tag, "best row:", " ".join(best_row)
        else:
          print "Data didn't pass cutoff"
    if len(best_limits) > 0:
      print "*"*80
      print "Top", len(best_limits)
      for tag, data in best_data.iteritems():
        print tag, " ".join(data[1])
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_datablocks, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)

    # Find all detector objects
    detectors = []
    detectors.extend(experiments.detectors())
    dbs = []
    for datablock in datablocks:
      dbs.extend(datablock.unique_detectors())
    detectors.extend(dbs)

    # Verify inputs
    if len(detectors) != 2:
      raise Sorry("Please provide a reference and a moving set of experiments and/or datablocks")

    reflections = reflections[1]
    detector = detectors[1]

    if not hasattr(detector, 'hierarchy'):
      raise Sorry("Script intended for hierarchical detectors")

    if params.max_hierarchy_level is None or str(params.max_hierarchy_level).lower() == 'auto':
      params.max_hierarchy_level = 0
      root = detector.hierarchy()
      while root.is_group():
        root = root[0]
        params.max_hierarchy_level += 1
      print "Found", params.max_hierarchy_level+1, "hierarchy levels"

    reference_root = detectors[0].hierarchy()
    moving_root = detector.hierarchy()
    rori = get_center(reference_root)
    rf = col(reference_root.get_fast_axis())
    rs = col(reference_root.get_slow_axis())
    r_norm = col(reference_root.get_normal())
    s0 = col(flex.vec3_double([col(b.get_s0()) for b in experiments.beams()]).mean())

    summary_table_header = ["Hierarchy","Delta XY","Delta XY","R Offsets","R Offsets","T Offsets","T Offsets","Z Offsets","Z Offsets","dR Norm","dR Norm","dT Norm","dT Norm","Local dNorm", "Local dNorm", "Rot Z","Rot Z"]
    summary_table_header2 = ["Level","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma"]
    summary_table_header3 = ["","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)"]
    summary_table_data = []
    summary_table_data.append(summary_table_header)
    summary_table_data.append(summary_table_header2)
    summary_table_data.append(summary_table_header3)

    table_header = ["PanelG","BC dist","Delta XY","R Offsets","T Offsets","Z Offsets","dR Norm","dT Norm","Local dNorm","Rot Z","N Refls"]
    table_header2 = ["ID","(mm)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)",""]

    from xfel.cftbx.detector.cspad_cbf_tbx import basis
    def get_full_basis_shift(pg):
      """Compute basis shift from pg to lab space"""
      shift = basis(panelgroup=pg)
      while True:
        parent = pg.parent()
        if parent is None:
          break
        shift = basis(panelgroup=parent) * shift
        pg = parent
      return shift
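    # Note: get_full_basis_shift composes local->parent transforms from the
    # given panel group up to the detector root, yielding a single local->lab
    # transform (the outermost parent is applied last).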

    # Iterate through the hierarchy levels
    for level in xrange(params.max_hierarchy_level+1):
      delta_xy = flex.double()
      r_offsets = flex.double()
      t_offsets = flex.double()
      z_offsets = flex.double()
      rot_z = flex.double()
      delta_r_norm = flex.double()
      delta_t_norm = flex.double()
      local_dnorm = flex.double()
      bc_dists = flex.double()
      weights = flex.double()

      rows = []

      for pg_id, (pg1, pg2) in enumerate(zip(iterate_detector_at_level(reference_root, 0, level),
                                             iterate_detector_at_level(moving_root, 0, level))):
        weight = 0
        for panel_id, p in enumerate(iterate_panels(pg2)):
          weight += len(reflections.select(reflections['panel'] == id_from_name(detector, p.get_name())))
        weights.append(weight)

        bc = col(pg1.get_beam_centre_lab(s0))
        ori = get_center(pg1)
        bc_dist = (ori-bc).length()
        bc_dists.append(bc_dist)

        z_dists = []
        ori_xy = []
        for pg in [pg1,pg2]:
          ori = pg.get_local_origin()
          ori_xy.append(col((ori[0], ori[1])))
          z_dists.append(ori[2]*1000)
        dxy = (ori_xy[1]-ori_xy[0]).length()*1000
        delta_xy.append(dxy)

        z_off = z_dists[1]-z_dists[0]
        z_offsets.append(z_off)

        pgo1 = col(pg1.get_origin())
        ro_pgo = pgo1 - rori # vector from the detector origin to the panel group origin
        if ro_pgo.length() == 0:
          radial = col((0,0,0))
          transverse = col((0,0,0))
        else:
          radial = ((rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)).normalize() # component of ro_pgo in rf rs plane
          transverse = r_norm.cross(radial).normalize()
        # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
        # radial points at the panel group origin

        # compute shift in local frame, then convert that shift to lab space, then make it relative to the reference's origin, in lab space
        lpgo1 = col(pg1.get_local_origin())
        lpgo2 = col(pg2.get_local_origin())
        delta_pgo = (get_full_basis_shift(pg1) * (lpgo2-lpgo1)) - pgo1

        # v is the component of delta_pgo along the radial vector
        v = (radial.dot(delta_pgo) * radial)
        r_offset = v.length() * 1000
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(transverse) < 0:
          r_offset = -r_offset
        r_offsets.append(r_offset)
        # v is the component of delta_pgo along the transverse vector
        v = (transverse.dot(delta_pgo) * transverse)
        t_offset = v.length() * 1000
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(radial) < 0:
          t_offset = -t_offset
        t_offsets.append(t_offset)

        pgn1 = col(pg1.get_normal())
        pgf1 = col(pg1.get_fast_axis())
        pgs1 = col(pg1.get_slow_axis())
        pgn2 = col(pg2.get_normal())
        pgf2 = col(pg2.get_fast_axis())

        # v1 and v2 are the components of pgf1 and pgf2 in the rf rs plane
        v1 = (rf.dot(pgf1) * rf) + (rs.dot(pgf1) * rs)
        v2 = (rf.dot(pgf2) * rf) + (rs.dot(pgf2) * rs)
        rz = v1.angle(v2, deg=True)
        rot_z.append(rz)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm radial plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (radial.dot(pgn1) * radial)
        v2 = (r_norm.dot(pgn2) * r_norm) + (radial.dot(pgn2) * radial)
        drn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(transverse) < 0:
          drn = -drn
        delta_r_norm.append(drn)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm transverse plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (transverse.dot(pgn1) * transverse)
        v2 = (r_norm.dot(pgn2) * r_norm) + (transverse.dot(pgn2) * transverse)
        dtn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(radial) < 0:
          dtn = -dtn
        delta_t_norm.append(dtn)

        # Determine angle between normals in local space
        lpgf1 = col(pg1.get_local_fast_axis())
        lpgs1 = col(pg1.get_local_slow_axis())
        lpgn1 = lpgf1.cross(lpgs1)
        lpgf2 = col(pg2.get_local_fast_axis())
        lpgs2 = col(pg2.get_local_slow_axis())
        lpgn2 = lpgf2.cross(lpgs2)
        ldn = lpgn1.angle(lpgn2, deg=True)
        local_dnorm.append(ldn)

        row = ["%3d"%pg_id, "%6.1f"%bc_dist, "%6.1f"%dxy,
               "%6.1f"%r_offset, "%6.1f"%t_offset, "%6.1f"%z_off,
               "%.4f"%drn, "%.4f"%dtn, "%.4f"%ldn, "%.4f"%rz, "%8d"%weight]
        rows.append(row)

      wm_row = ["Weighted mean", ""]
      ws_row = ["Weighted stddev", ""]
      s_row = ["%d"%level]
      iterable = zip([delta_xy, r_offsets, t_offsets, z_offsets, delta_r_norm, delta_t_norm, local_dnorm, rot_z],
                     ["%6.1f","%6.1f","%6.1f","%6.1f","%.4f","%.4f","%.4f","%.4f"])
      if len(z_offsets) == 0:
        wm_row.extend(["%6.1f"%0]*8)
        ws_row.extend(["%6.1f"%0]*8)
        # the summary table carries a value column and a sigma column per statistic
        s_row.extend(["%6.1f"%0]*16)
      elif len(z_offsets) == 1:
        for data, fmt in iterable:
          wm_row.append(fmt%data[0])
          ws_row.append(fmt%0)
          s_row.append(fmt%data[0])
          s_row.append(fmt%0)
      else:
        for data, fmt in iterable:
          stats = flex.mean_and_variance(data, weights)
          wm_row.append(fmt%stats.mean())
          ws_row.append(fmt%stats.gsl_stats_wsd())
          s_row.append(fmt%stats.mean())
          s_row.append(fmt%stats.gsl_stats_wsd())
      wm_row.append("")
      ws_row.append("")
      summary_table_data.append(s_row)

      table_data = [table_header, table_header2]
      table_d = {d:row for d, row in zip(bc_dists, rows)}
      table_data.extend([table_d[key] for key in sorted(table_d)])
      table_data.append(wm_row)
      table_data.append(ws_row)

      from libtbx import table_utils
      print "Hierarchy level %d Detector shifts"%level
      print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    print "Detector shifts summary"
    print table_utils.format(summary_table_data,has_header=3,justify='center',delim=" ")

    print
    print """
Example #49
    def __str__(self):
        from libtbx import table_utils
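        # Assumed module-level names from the surrounding script (not shown in
        # this listing): a, b, c and a_star, b_star, c_star are matrix.col
        # basis vectors, and smallest_angle()/describe() are small helpers.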

        U = matrix.sqr(self.experiment.crystal.get_U())
        B = matrix.sqr(self.experiment.crystal.get_B())

        a_star_ = U * B * a_star
        b_star_ = U * B * b_star
        c_star_ = U * B * c_star

        Binvt = B.inverse().transpose()

        a_ = U * Binvt * a
        b_ = U * Binvt * b
        c_ = U * Binvt * c

        names = self.experiment.goniometer.get_names()
        axes = self.experiment.goniometer.get_axes()
        rows = [["Experimental axis", "a*", "b*", "c*"]]
        rows.append([names[0]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append(["Beam"] + [
            "%.3f" % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append([names[2]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])

        output = []
        output.append(
            "Angles between reciprocal cell axes and principal experimental axes:"
        )
        output.append(table_utils.format(rows=rows, has_header=True))
        output.append("")

        rows = [["Experimental axis", "a", "b", "c"]]
        rows.append([names[0]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append(["Beam"] + [
            "%.3f" % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append([names[2]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_, b_, c_)
        ])
        output.append(
            "Angles between unit cell axes and principal experimental axes:")
        output.append(table_utils.format(rows=rows, has_header=True))
        output.append("")

        space_group = self.experiment.crystal.get_space_group()
        reciprocal = self.frame == "reciprocal"
        rows = []
        for angles, vector_pairs in self.unique_solutions.items():
            v1, v2 = list(vector_pairs)[0]
            rows.append((
                describe(v1, space_group, reciprocal=reciprocal),
                describe(v2, space_group, reciprocal=reciprocal),
                "% 7.3f" % angles[0],
                "% 7.3f" % angles[1],
            ))
        rows = [("Primary axis", "Secondary axis", names[1], names[0])
                ] + sorted(rows)
        output.append("Independent solutions:")
        output.append(table_utils.format(rows=rows, has_header=True))

        return "\n".join(output)
Example #50
def run_cc(params, reindexing_op, output):
    uniform, selected_uniform, have_iso_ref = load_cc_data(params, reindexing_op, output)
    NBIN = params.output.n_bins

    if have_iso_ref:
        slope, offset, corr_iso, N_iso = correlation(selected_uniform[1], selected_uniform[0], params.include_negatives)
        print >> output, "C.C. iso is %.1f%% on %d indices" % (100 * corr_iso, N_iso)

    slope, offset, corr_int, N_int = correlation(selected_uniform[2], selected_uniform[3], params.include_negatives)
    print >> output, "C.C. int is %.1f%% on %d indices" % (100.0 * corr_int, N_int)

    if have_iso_ref:
        binned_cc_ref, binned_cc_ref_N = binned_correlation(
            selected_uniform[1], selected_uniform[0], params.include_negatives
        )
        # binned_cc_ref.show(f=output)

        ref_scale = scale_factor(
            selected_uniform[1],
            selected_uniform[0],
            weights=flex.pow(selected_uniform[1].sigmas(), -2),
            use_binning=True,
        )
        # ref_scale.show(f=output)

        ref_riso = r1_factor(selected_uniform[1], selected_uniform[0], scale_factor=ref_scale, use_binning=True)
        # ref_riso.show(f=output)

        ref_scale_all = scale_factor(
            selected_uniform[1], selected_uniform[0], weights=flex.pow(selected_uniform[1].sigmas(), -2)
        )

        ref_riso_all = r1_factor(selected_uniform[1], selected_uniform[0], scale_factor=ref_scale_all)

    binned_cc_int, binned_cc_int_N = binned_correlation(
        selected_uniform[2], selected_uniform[3], params.include_negatives
    )
    # binned_cc_int.show(f=output)

    oe_scale = scale_factor(
        selected_uniform[2],
        selected_uniform[3],
        weights=flex.pow(selected_uniform[2].sigmas(), -2) + flex.pow(selected_uniform[3].sigmas(), -2),
        use_binning=True,
    )
    # oe_scale.show(f=output)

    oe_rint = r1_factor(selected_uniform[2], selected_uniform[3], scale_factor=oe_scale, use_binning=True)
    # oe_rint.show(f=output)

    oe_rsplit = r_split(selected_uniform[2], selected_uniform[3], use_binning=True)

    oe_scale_all = scale_factor(
        selected_uniform[2],
        selected_uniform[3],
        weights=flex.pow(selected_uniform[2].sigmas(), -2) + flex.pow(selected_uniform[3].sigmas(), -2),
    )

    oe_rint_all = r1_factor(selected_uniform[2], selected_uniform[3], scale_factor=oe_scale_all)
    oe_rsplit_all = r_split(selected_uniform[2], selected_uniform[3])
    if have_iso_ref:
        print >> output, "R factors Riso = %.1f%%, Rint = %.1f%%" % (100.0 * ref_riso_all, 100.0 * oe_rint_all)
    else:
        print >> output, "R factor Rint = %.1f%%" % (100.0 * oe_rint_all)

    split_sigma_data = split_sigma_test(
        selected_uniform[2], selected_uniform[3], scale=oe_scale, use_binning=True, show_plot=False
    )
    split_sigma_data_all = split_sigma_test(
        selected_uniform[2], selected_uniform[3], scale=oe_scale_all, use_binning=False, show_plot=False
    )

    print >> output
    if reindexing_op == "h,k,l":
        print >> output, "Table of Scaling Results:"
    else:
        print >> output, "Table of Scaling Results Reindexing as %s:" % reindexing_op

    from libtbx import table_utils

    table_header = ["", "", "", "CC", " N", "CC", " N", "R", "R", "R", "Scale", "Scale", "SpSig"]
    table_header2 = [
        "Bin",
        "Resolution Range",
        "Completeness",
        "int",
        "int",
        "iso",
        "iso",
        "int",
        "split",
        "iso",
        "int",
        "iso",
        "Test",
    ]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    items = binned_cc_int.binner.range_used()

    # XXX Make it clear what the completeness here actually is!
    cumulative_counts_given = 0
    cumulative_counts_complete = 0
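    # The "Completeness" column below is the binner's [given/complete] count
    # legend, i.e. observed vs. theoretically possible reflections per bin.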
    for bin in items:
        table_row = []
        table_row.append("%3d" % bin)
        table_row.append(
            "%-13s"
            % binned_cc_int.binner.bin_legend(
                i_bin=bin, show_bin_number=False, show_bin_range=False, show_d_range=True, show_counts=False
            )
        )
        table_row.append(
            "%13s"
            % binned_cc_int.binner.bin_legend(
                i_bin=bin, show_bin_number=False, show_bin_range=False, show_d_range=False, show_counts=True
            )
        )
        cumulative_counts_given += binned_cc_int.binner._counts_given[bin]
        cumulative_counts_complete += binned_cc_int.binner._counts_complete[bin]
        table_row.append("%.1f%%" % (100.0 * binned_cc_int.data[bin]))
        table_row.append("%7d" % (binned_cc_int_N.data[bin]))

        if have_iso_ref and binned_cc_ref.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * binned_cc_ref.data[bin]))
        else:
            table_row.append("--")

        if have_iso_ref and binned_cc_ref_N.data[bin] is not None:
            table_row.append("%6d" % (binned_cc_ref_N.data[bin]))
        else:
            table_row.append("--")

        if oe_rint.data[bin] is not None:
            table_row.append("%.1f%%" % (100.0 * oe_rint.data[bin]))
        else:
            table_row.append("--")

        if oe_rsplit.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * oe_rsplit.data[bin]))
        else:
            table_row.append("--")

        if have_iso_ref and ref_riso.data[bin] is not None:
            table_row.append("%.1f%%" % (100 * ref_riso.data[bin]))
        else:
            table_row.append("--")

        if oe_scale.data[bin] is not None:
            table_row.append("%.3f" % oe_scale.data[bin])
        else:
            table_row.append("--")

        if have_iso_ref and ref_scale.data[bin] is not None:
            table_row.append("%.3f" % ref_scale.data[bin])
        else:
            table_row.append("--")

        if split_sigma_data.data[bin] is not None:
            table_row.append("%.4f" % split_sigma_data.data[bin])
        else:
            table_row.append("--")

        table_data.append(table_row)
    table_data.append([""] * len(table_header))

    table_row = [
        format_value("%3s", "All"),
        format_value("%-13s", "                 "),
        format_value("%13s", "[%d/%d]" % (cumulative_counts_given, cumulative_counts_complete)),
        format_value("%.1f%%", 100 * corr_int),
        format_value("%7d", N_int),
    ]

    if have_iso_ref:
        table_row.extend((format_value("%.1f%%", 100 * corr_iso), format_value("%6d", N_iso)))
    else:
        table_row.extend(("--", "--"))

    table_row.extend((format_value("%.1f%%", 100 * oe_rint_all), format_value("%.1f%%", 100 * oe_rsplit_all)))
    if have_iso_ref:
        table_row.append(format_value("%.1f%%", 100 * ref_riso_all))
    else:
        table_row.append("--")

    table_row.append(format_value("%.3f", oe_scale_all))
    if have_iso_ref:
        table_row.append(format_value("%.3f", ref_scale_all))
    else:
        table_row.append("--")

    if split_sigma_data_all is not None:
        table_row.append("%.1f" % split_sigma_data_all)
    else:
        table_row.append("--")

    table_data.append(table_row)

    print >> output
    print >> output, table_utils.format(table_data, has_header=2, justify="center", delim=" ")
    print >> output, """CCint is the CC-1/2 defined by Diederichs; correlation between odd/even images.
  Similarly, Scale int and R int are the scaling factor and scaling R factor between odd/even images.
  "iso" columns compare the whole XFEL dataset to the isomorphous reference."""

    print >> output, """Niso: result vs. reference common set""",
    if params.include_negatives:
        print >> output, """including negative merged intensities (set by phil parameter)."""
    elif params.scaling.log_cutoff is None:
        print >> output
    else:
        print >> output, """with intensites < %7.2g filtered out (controlled by
    scaling.log_cutoff phil parameter set to %5.1f)""" % (
            math.exp(params.scaling.log_cutoff),
            params.scaling.log_cutoff,
        )

    if have_iso_ref:
        assert N_iso == flex.sum(flex.double([x for x in binned_cc_ref_N.data if x is not None]))
    assert N_int == flex.sum(flex.double([x for x in binned_cc_int_N.data if x is not None]))

    if params.scaling.show_plots:
        from matplotlib import pyplot as plt

        plt.plot(flex.log(selected_uniform[-2].data()), flex.log(selected_uniform[-1].data()), "r.")
        plt.show()
        if have_iso_ref:
            plt.plot(flex.log(selected_uniform[0].data()), flex.log(selected_uniform[1].data()), "r.")
            plt.show()
    print >> output
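
run_cc reports Rsplit between odd- and even-numbered half-datasets. The conventional definition divides the summed absolute half-set differences by half the summed intensities, with a 1/sqrt(2) prefactor; a self-contained sketch on plain lists follows (the sample intensities are made up, and the r_split used above may differ in detail, e.g. in how the half-sets are scaled):

import math

def r_split(half1, half2):
    """Rsplit between two half-dataset intensity lists."""
    num = sum(abs(a - b) for a, b in zip(half1, half2))
    den = 0.5 * sum(a + b for a, b in zip(half1, half2))
    return num / (math.sqrt(2.0) * den)

print("Rsplit = %.1f%%" % (100.0 * r_split([100.0, 250.0, 40.0],
                                           [90.0, 260.0, 44.0])))  # -> 4.3%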
Example #51
  def __call__(self, experiments, reflections):
    results = flex.reflection_table()
    table_header = ["","","","I","IsigI","N >","RMSD","Cutoff"]
    table_header2 = ["Bin","Resolution Range","Completeness","","","cutoff","(um)",""]

    for exp_id in xrange(len(experiments)):
      print("*"*80)
      print("Significance filtering experiment", exp_id)
      table_data = []
      table_data.append(table_header)
      table_data.append(table_header2)
      experiment = experiments[exp_id]

      # Find the bins for this experiment
      crystal = experiment.crystal
      refls = reflections.select(reflections['id'] == exp_id)
      sym = symmetry(unit_cell = crystal.get_unit_cell(), space_group = crystal.get_space_group())
      d = crystal.get_unit_cell().d(refls['miller_index'])
      mset = sym.miller_set(indices = refls['miller_index'], anomalous_flag=False)
      binner = mset.setup_binner(n_bins=self.params.n_bins)
      acceptable_resolution_bins = []

      # Iterate through the bins, examining I/sigI at each bin
      for i in binner.range_used():
        d_max, d_min = binner.bin_d_range(i)
        sel = (d <= d_max) & (d > d_min)
        sel &= refls['intensity.sum.value'] > 0
        bin_refls = refls.select(sel)
        n_refls = len(bin_refls)
        avg_i = flex.mean(bin_refls['intensity.sum.value']) if n_refls > 0 else 0
        avg_i_sigi = flex.mean(bin_refls['intensity.sum.value'] /
                               flex.sqrt(bin_refls['intensity.sum.variance'])) if n_refls > 0 else 0
        acceptable_resolution_bins.append(avg_i_sigi >= self.params.isigi_cutoff)

        bright_refls = bin_refls.select((bin_refls['intensity.sum.value']/flex.sqrt(bin_refls['intensity.sum.variance'])) >= self.params.isigi_cutoff)
        n_bright = len(bright_refls)

        rmsd_obs = 1000*math.sqrt((bright_refls['xyzcal.mm']-bright_refls['xyzobs.mm.value']).sum_sq()/n_bright) if n_bright > 0 else 0

        table_row = []
        table_row.append("%3d"%i)
        table_row.append("%-13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                   show_d_range=True, show_counts=False))
        table_row.append("%13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                  show_d_range=False, show_counts=True))

        table_row.append("%.1f"%(avg_i))
        table_row.append("%.1f"%(avg_i_sigi))
        table_row.append("%3d"%n_bright)
        table_row.append("%.1f"%(rmsd_obs))
        table_data.append(table_row)

      # Throw out bins that go back above the cutoff after the first non-passing bin is found
      acceptable_resolution_bins = [acceptable_resolution_bins[i] for i in xrange(len(acceptable_resolution_bins))
                                    if False not in acceptable_resolution_bins[:i+1]]

      for b, row in zip(acceptable_resolution_bins, table_data[2:]):
        if b:
          row.append("X")
      print(table_utils.format(table_data,has_header=2,justify='center',delim=" "))

      # Save the results
      if any(acceptable_resolution_bins):
        best_index = acceptable_resolution_bins.count(True)-1
        best_row = table_data[best_index+2]
        d_min = binner.bin_d_range(binner.range_used()[best_index])[1]
        print("best row:", " ".join(best_row))
        if self.params.enable:
          results.extend(refls.select(d >= d_min))
      else:
        print("Data didn't pass cutoff")
    if self.params.enable:
      return results
    else:
      return reflections
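
The core of Example #51 does not depend on cctbx at all: walk the resolution bins from low to high resolution, compute the mean I/sigI per bin, and cut at the last bin of the leading run that stays above the threshold. A self-contained sketch with made-up bin values:

def resolution_cutoff(bins, isigi_cutoff):
    """bins: (d_min, mean I/sigI) tuples ordered from low to high resolution.

    Returns the d_min of the last bin in the leading run that passes the
    cutoff, or None if even the lowest-resolution bin fails."""
    d_min = None
    for bin_d_min, mean_isigi in bins:
        if mean_isigi < isigi_cutoff:
            break  # every higher-resolution bin is rejected too
        d_min = bin_d_min
    return d_min

bins = [(4.0, 25.1), (3.2, 12.4), (2.8, 5.3), (2.5, 1.2), (2.3, 2.1)]
print(resolution_cutoff(bins, isigi_cutoff=2.0))  # -> 2.8 (2.3 passes but is ignored)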
Example #52
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detectors, computing detector statistics at the per-panel level (i.e. one statistic per panel)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the beam center out to each reflection
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the radial and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

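      # NB: s0 here is whatever beam vector was bound last in the panel loop
      # above; this is only meaningful because all experiments in a run share
      # (nearly) the same beam.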
      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[pg.get_name()] = -1
        radial_rmsds[pg.get_name()] = -1
        transverse_rmsds[pg.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [v for v in sorted_values if vdict[v] in rmsds]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()
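
The radial and transverse RMSDs in Example #52 reduce to projecting each lab-frame deltaXY onto two unit vectors: radial (from the beam center toward the observation) and transverse (perpendicular to both). A dependency-free sketch of that projection, mirroring the vec3_double arithmetic above (function names are illustrative; like the snippet, it uses the beam-center vector as a stand-in for the beam direction when forming the transverse axis):

import math

def unit(v):
    n = math.sqrt(sum(c * c for c in v))
    return tuple(c / n for c in v)

def dot(u, v):
    return sum(a * b for a, b in zip(u, v))

def cross(u, v):
    return (u[1] * v[2] - u[2] * v[1],
            u[2] * v[0] - u[0] * v[2],
            u[0] * v[1] - u[1] * v[0])

def radial_transverse(delta, obs, beam_centre):
    """Split a lab-frame displacement into radial and transverse components."""
    radial = unit(tuple(o - b for o, b in zip(obs, beam_centre)))
    transverse = unit(cross(radial, beam_centre))
    return dot(delta, radial), dot(delta, transverse)

# hypothetical spot 10 mm above the beam center, displaced 3 um in x, 4 um in y
print(radial_transverse((0.003, 0.004, 0.0),
                        (0.0, 10.0, 100.0),
                        (0.0, 0.0, 100.0)))  # -> (0.004, 0.003)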