Code Example #1
  def intensities(self):
    ''' Compare the intensities. '''
    from dials.array_family import flex

    # Sort by resolution
    d = self.refl1['d']
    index = flex.size_t(reversed(sorted(range(len(d)), key=lambda x: d[x])))
    self.refl1.reorder(index)
    self.refl2.reorder(index)

    # Get the intensities
    I1 = self.refl1['intensity.sum.value']
    I2 = self.refl2['intensity.sum.value']
    S1 = flex.sqrt(self.refl1['intensity.sum.variance'])
    S2 = flex.sqrt(self.refl2['intensity.sum.variance'])
    xyz1 = self.refl1['xyzcal.px']
    xyz2 = self.refl2['xyzcal.px']

    # Compute chunked statistics
    corr = []
    R = []
    scale = []
    res = []
    for i in range(len(self.refl1) // 1000):

      # Get the chunks of data
      a = i * 1000
      b = (i+1) * 1000
      II1 = I1[a:b]
      II2 = I2[a:b]
      res.append(d[a])

      # Compute the mean and standard deviation per chunk
      mv1 = flex.mean_and_variance(II1)
      mv2 = flex.mean_and_variance(II2)
      m1 = mv1.mean()
      m2 = mv2.mean()
      s1 = mv1.unweighted_sample_standard_deviation()
      s2 = mv2.unweighted_sample_standard_deviation()

      # Compute the correlation coefficient
      r = (1.0/(len(II1) - 1))*sum(((II1[j] - m1) / s1) * ((II2[j] - m2) / s2)
          for j in range(len(II1)))
      corr.append(r)

      # Compute the scale between the chunks
      s = sum(II1) / sum(II2)
      scale.append(s)

      # Compute R between the chunks
      r = sum(abs(abs(II1[j]) - abs(s * II2[j])) for j in range(len(II1))) \
        / sum(abs(II1[j]) for j in range(len(II1)))
      R.append(r)

    from matplotlib import pylab
    pylab.plot(corr, label="CC")
    pylab.plot(R, label="R")
    pylab.plot(scale, label="K")
    pylab.legend()
    pylab.show()
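
The per-chunk correlation coefficient above is computed with a hand-rolled Python loop; flex.linear_correlation (used in Code Example #8 below) computes the same Pearson coefficient directly. A minimal sketch with made-up chunks:

# Minimal sketch: Pearson correlation of two intensity chunks via
# flex.linear_correlation instead of the explicit loop; data are made-up.
from dials.array_family import flex

II1 = flex.double([100.0, 220.0, 310.0, 390.0])
II2 = flex.double([110.0, 200.0, 330.0, 380.0])
lc = flex.linear_correlation(II1, II2)
if lc.is_well_defined():
  print("CC: %f" % lc.coefficient())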
Code Example #2
  def plot_unitcells(self, experiments):
    if len(experiments) == 1:
      return
    all_a = flex.double()
    all_b = flex.double()
    all_c = flex.double()
    for crystal in experiments.crystals():
      a, b, c = crystal.get_unit_cell().parameters()[0:3]
      all_a.append(a); all_b.append(b); all_c.append(c)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    for ax, axis, data in zip(axes, ['A', 'B', 'C'], [all_a, all_b, all_c]):
      stats = flex.mean_and_variance(data)
      cutoff = 4*stats.unweighted_sample_standard_deviation()
      if cutoff < 0.5:
        cutoff = 0.5
      limits = stats.mean()-cutoff, stats.mean()+cutoff
      sel = (data >= limits[0]) & (data <= limits[1])
      subset = data.select(sel)
      h = flex.histogram(subset,n_slots=50)
      ax.plot(h.slot_centers().as_numpy_array(),h.slots().as_numpy_array(),'-')
      ax.set_title("%s axis histogram (showing %d of %d xtals). Mean: %7.2f Stddev: %7.2f"%(
        axis, len(subset), len(data), stats.mean(),
        stats.unweighted_sample_standard_deviation()))
      ax.set_ylabel("N lattices")
      ax.set_xlabel(r"$\AA$")
      ax.set_xlim(limits)
    plt.tight_layout()
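
The mean +/- 4 sigma window used before histogramming is a pattern the other examples reuse; a minimal standalone sketch of it, built only from the flex calls already shown:

# Minimal sketch: clip a flex.double to mean +/- n_sigma * stddev
# before plotting, as in plot_unitcells above; data are made-up.
from scitbx.array_family import flex

data = flex.double([9.8, 10.1, 10.0, 9.9, 14.0])
stats = flex.mean_and_variance(data)
m = stats.mean()
s = stats.unweighted_sample_standard_deviation()
sel = (data >= m - 4 * s) & (data <= m + 4 * s)
subset = data.select(sel)
print("showing %d of %d values" % (len(subset), len(data)))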
Code Example #3
File: tst_creator.py Project: biochem-fan/dials
 def tst_for_dataset(self, creator, filename):
   from dials.array_family import flex
   from dials.algorithms.shoebox import MaskCode
   print filename
   rlist = flex.reflection_table.from_pickle(filename)
   shoebox = rlist['shoebox']
   background = [sb.background.deep_copy() for sb in shoebox]
   success = creator(shoebox)
   assert(success.count(True) == len(success))
   diff = []
   for i in range(len(rlist)):
     mask = flex.bool([(m & MaskCode.Foreground) != 0 for m in shoebox[i].mask])
     px1 = background[i].select(mask)
     px2 = shoebox[i].background.select(mask)
     den = max([flex.mean(px1), 1.0])
     diff.append(flex.mean(px2 - px1) / den)
   diff = flex.double(diff)
   mv = flex.mean_and_variance(diff)
   mean = mv.mean()
   sdev = mv.unweighted_sample_standard_deviation()
   try:
     assert(abs(mean) < 0.01)
   except Exception:
     print "Mean: %f, Sdev: %f", mean, sdev
     from matplotlib import pylab
     pylab.hist(diff)
     pylab.show()
     raise
Code Example #4
 def assert_is_correct(self, data, mask):
   from scitbx.array_family import flex
   invalid, foreground, background, background_used, background_valid = \
     assert_basic_mask_is_correct(mask, self.ninvalid, self.nforeground)
   subdata = data.select(background_valid)
   mv = flex.mean_and_variance(subdata)
   m = mv.mean()
   s = mv.unweighted_sample_standard_deviation()
   p0 = m - self.lower * s
   p1 = m + self.upper * s
   mask = (subdata >= p0) & (subdata <= p1)
   exp = background_valid.select(mask)
   assert(len(exp) == len(background_used))
   assert(all(ii == jj for ii, jj in zip(exp, background_used)))
Code Example #5
  def check_reference(self, reference):
    ''' Check the reference spots. '''
    from dials.array_family import flex
    from dials.algorithms.image.centroid import centroid_image
    from math import sqrt

    # Get a load of stuff
    I_sim = reference['intensity.sim']
    I_exp = reference['intensity.exp']
    I_cal = reference['intensity.prf.value']
    I_var = reference['intensity.prf.variance']

    # Get the transformed shoeboxes
    profiles = reference['rs_shoebox']
    n_sigma = 3
    n_sigma2 = 5
    grid_size = 4
    step_size = n_sigma2 / (grid_size + 0.5)
    eps = 1e-7
    for i in range(len(profiles)):
      data = profiles[i].data
      #dmax = flex.max(data)
      #data = 100 * data / dmax
      #p = data.as_numpy_array()
      #p = p.astype(numpy.int)
      #print p
      print flex.sum(data), I_exp[i], I_cal[i]
      #assert(abs(flex.sum(data) - I_exp[i]) < eps)
      centroid = centroid_image(data)
      m = centroid.mean()
      v = centroid.variance()
      s1 = tuple(sqrt(vv) for vv in v)
      s2 = tuple(ss * step_size for ss in s1)
      assert(all(abs(mm - (grid_size + 0.5)) < 0.25 for mm in m))
      assert(all(abs(ss2 - n_sigma / float(n_sigma2)) < 0.25 for ss2 in s2))

    # Calculate Z
    Z = (I_cal - I_exp) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

    from matplotlib import pylab
    pylab.hist((I_cal - I_exp) / I_exp)
    pylab.show()
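
check_reference ends with the Z-score check that recurs in Code Examples #7, #9 and #11 to #13: if the variance estimates are right, Z = (I_cal - I_exp) / sqrt(I_var) should have mean near 0 and variance near 1. A minimal sketch with made-up intensities:

# Minimal sketch of the shared Z-score normality check; values are made-up.
from dials.array_family import flex

I_cal = flex.double([105.0, 198.0, 310.0, 640.0])
I_exp = flex.double([100.0, 200.0, 300.0, 650.0])
I_var = flex.double([100.0, 200.0, 300.0, 650.0])

Z = (I_cal - I_exp) / flex.sqrt(I_var)
mv = flex.mean_and_variance(Z)
print("Z: mean: %f, var: %f" % (mv.mean(), mv.unweighted_sample_variance()))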
Code Example #6
  def run(self):
    '''Execute the script.'''

    from dials.util.options import flatten_experiments
    from libtbx.utils import Sorry
    from dials.array_family import flex

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)

    # Try to load the experiments
    if not params.input.experiments:
      print "No Experiments found in the input"
      self.parser.print_help()
      return

    experiments = flatten_experiments(params.input.experiments)
    print "{0} experiments loaded".format(len(experiments))

    us0_vecs = self.extract_us0_vecs(experiments)
    e_vecs = self.extract_rotation_axes(experiments)

    angles = [us0.angle(e, deg=True) for us0, e in zip(us0_vecs, e_vecs)]

    fmt = "{:." + str(params.print_precision) + "f}"
    header = ['Exp\nid','Beam direction', 'Rotation axis', 'Angle (deg)']
    rows = []
    for iexp, (us0, e, ang) in enumerate(zip(us0_vecs, e_vecs, angles)):
      beam_str = " ".join([fmt] * 3).format(*us0.elems)
      e_str = " ".join([fmt] * 3).format(*e.elems)
      rows.append([str(iexp), beam_str, e_str, fmt.format(ang)])
    if len(rows) > 0:
      st = simple_table(rows, header)
      print st.format()

    # mean and sd
    if len(rows) > 1:
      angles = flex.double(angles)
      mv = flex.mean_and_variance(angles)

      print "Mean and standard deviation of the angle"
      print (fmt.format(mv.mean()) + " +/- " + fmt.format(
        mv.unweighted_sample_standard_deviation()))
      print

    return
Code Example #7
    def test_for_reflections(self, refl, filename):
        from dials.algorithms.integration import ProfileFittingReciprocalSpace
        from dials.array_family import flex
        from dials.algorithms.shoebox import MaskCode
        from dials.algorithms.statistics import kolmogorov_smirnov_test_standard_normal
        from math import erf, sqrt, pi
        from copy import deepcopy
        from dials.algorithms.simulation.reciprocal_space import Simulator
        from os.path import basename

        print(basename(filename))

        # refl = self.reference

        # Get the calculated background and simulated background
        B_sim = refl["background.sim"].as_double()
        I_sim = refl["intensity.sim"].as_double()
        I_exp = refl["intensity.exp"]

        # Set the background as simulated
        shoebox = refl["shoebox"]
        for i in range(len(shoebox)):
            bg = shoebox[i].background
            ms = shoebox[i].mask
            for j in range(len(bg)):
                bg[j] = B_sim[i]

        # Integrate
        integration = ProfileFittingReciprocalSpace(
            grid_size=4,
            threshold=0.02,
            frame_interval=0,
            n_sigma=4,
            sigma_b=0.024 * pi / 180.0,
            sigma_m=0.044 * pi / 180.0,
        )

        old_size = len(refl)
        refl.extend(self.reference)
        integration(self.experiment, refl)
        reference = refl[-len(self.reference):]
        refl = refl[:len(self.reference)]
        assert len(refl) == old_size
        assert len(reference) == len(self.reference)
        I_cal = refl["intensity.sum.value"]
        I_var = refl["intensity.sum.variance"]

        # Check the reference profiles and spots are ok
        self.check_profiles(integration.learner)
        self.check_reference(reference)

        # reference = integration.learner

        # np = reference.locate().size()
        # for i in range(np):
        # profile = reference.locate().profile(i)
        # print "Profile: %d" % i
        # p = (profile.as_numpy_array() * 1000)
        # import numpy as np
        # p = p.astype(np.int)
        # print p

        # Only select variances greater than zero
        mask = refl.get_flags(refl.flags.integrated)
        I_cal = I_cal.select(mask)
        I_var = I_var.select(mask)
        I_sim = I_sim.select(mask)
        I_exp = I_exp.select(mask)

        # Calculate the z score
        perc = self.mv3n_tolerance_interval(3 * 3)
        Z = (I_cal - I_exp) / flex.sqrt(I_var)
        mv = flex.mean_and_variance(Z)
        Z_mean = mv.mean()
        Z_var = mv.unweighted_sample_variance()
        print("Z: mean: %f, var: %f" % (Z_mean, Z_var))

        # Do the kolmogorov smirnov test
        D, p = kolmogorov_smirnov_test_standard_normal(Z)
        print("KS: D: %f, p-value: %f" % (D, p))

        # FIXME Z score should be a standard normal distribution. When background is
        # the main component, we do indeed see that the z score is in a standard
        # normal distribution. When the intensity dominates, the variance of the Z
        # scores decreases indicating that for increasing intensity of the signal,
        # the variance is over estimated.
        # assert(abs(Z_mean) <= 3 * Z_var)

        # from matplotlib import pylab
        # pylab.hist(Z, 20)
        # pylab.show()

        # Z_I = sorted(Z)
        ##n = int(0.05 * len(Z_I))
        ##Z_I = Z_I[n:-n]
        ##mv = flex.mean_and_variance(flex.double(Z_I))
        ##print "Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation())
        # edf = [float(i+1) / len(Z_I) for i in range(len(Z_I))]
        # cdf = [0.5 * (1.0 + erf(z / sqrt(2.0))) for z in Z_I]

        print("OK")
Code Example #8
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detectors, computing detector statistics at the per-panel level (i.e. one statistic per panel)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the center of the reflection to the beam center
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the radial and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[pg.get_name()] = -1
        radial_rmsds[pg.get_name()] = -1
        transverse_rmsds[pg.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      bin_low = []
      bin_high = []
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']#[:71610]
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [sorted_values[i] for i in xrange(len(sorted_values)) if pg_bc_dists.keys()[i] in rmsds]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()
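
This example is the only one that uses the weighted, two-argument form of flex.mean_and_variance; a minimal sketch of that form, assuming the same gsl_stats_wsd accessor used in the table code above, with made-up numbers:

# Minimal sketch: weighted mean and weighted standard deviation, as in the
# per-panel-group RMSD summary above; values and weights are made-up.
from dials.array_family import flex

values = flex.double([10.0, 12.0, 20.0])   # e.g. per-group RMSDs (microns)
weights = flex.double([100.0, 80.0, 5.0])  # e.g. reflection counts

stats = flex.mean_and_variance(values, weights)
print("%.1f +/- %.1f" % (stats.mean(), stats.gsl_stats_wsd()))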
Code Example #9
    def test_for_reference(self):
        from dials.algorithms.integration import ProfileFittingReciprocalSpace
        from dials.array_family import flex
        from dials.algorithms.shoebox import MaskCode
        from dials.algorithms.statistics import kolmogorov_smirnov_test_standard_normal
        from math import erf, sqrt, pi
        from copy import deepcopy
        from dials.algorithms.simulation.reciprocal_space import Simulator
        from os.path import basename

        # Integrate
        integration = ProfileFittingReciprocalSpace(
            grid_size=4,
            threshold=0.00,
            frame_interval=100,
            n_sigma=5,
            mask_n_sigma=3,
            sigma_b=0.024 * pi / 180.0,
            sigma_m=0.044 * pi / 180.0,
        )

        # Integrate the reference profiles
        integration(self.experiment, self.reference)

        p = integration.learner.locate().profile(0)
        m = integration.learner.locate().mask(0)

        locator = integration.learner.locate()

        cor = locator.correlations()
        for j in range(cor.all()[0]):
            print(" ".join([str(cor[j, i]) for i in range(cor.all()[1])]))
        # exit(0)
        # from matplotlib import pylab
        # pylab.imshow(cor.as_numpy_array(), interpolation='none', vmin=-1, vmax=1)
        # pylab.show()

        # n = locator.size()
        # for i in range(n):
        # c = locator.coord(i)
        # p = locator.profile(i)
        # vmax = flex.max(p)
        # from matplotlib import pylab
        # for j in range(9):
        # pylab.subplot(3, 3, j+1)
        # pylab.imshow(p.as_numpy_array()[j], vmin=0, vmax=vmax,
        # interpolation='none')
        # pylab.show()

        # print "NRef: ", n
        # x = []
        # y = []
        # for i in range(n):
        # c = locator.coord(i)
        # x.append(c[0])
        # y.append(c[1])
        # from matplotlib import pylab
        # pylab.scatter(x,y)
        # pylab.show()

        # exit(0)
        import numpy

        # pmax = flex.max(p)
        # scale = 100 / pmax
        # print "Scale: ", 100 / pmax
        # p = p.as_numpy_array() *100 / pmax
        # p = p.astype(numpy.int)
        # print p
        # print m.as_numpy_array()

        # Check the reference profiles and spots are ok
        # self.check_profiles(integration.learner)

        # Make sure background is zero
        profiles = self.reference["rs_shoebox"]
        eps = 1e-7
        for p in profiles:
            assert abs(flex.sum(p.background) - 0) < eps
        print("OK")

        # Only select variances greater than zero
        mask = self.reference.get_flags(self.reference.flags.integrated)
        I_cal = self.reference["intensity.sum.value"]
        I_var = self.reference["intensity.sum.variance"]
        B_sim = self.reference["background.sim"].as_double()
        I_sim = self.reference["intensity.sim"].as_double()
        I_exp = self.reference["intensity.exp"]
        P_cor = self.reference["profile.correlation"]
        X_pos, Y_pos, Z_pos = self.reference["xyzcal.px"].parts()
        I_cal = I_cal.select(mask)
        I_var = I_var.select(mask)
        I_sim = I_sim.select(mask)
        I_exp = I_exp.select(mask)
        P_cor = P_cor.select(mask)

        max_ind = flex.max_index(I_cal)
        max_I = I_cal[max_ind]
        max_P = self.reference[max_ind]["rs_shoebox"].data
        max_C = self.reference[max_ind]["xyzcal.px"]
        max_S = self.reference[max_ind]["shoebox"].data

        min_ind = flex.min_index(P_cor)
        min_I = I_cal[min_ind]
        min_P = self.reference[min_ind]["rs_shoebox"].data
        min_C = self.reference[min_ind]["xyzcal.px"]
        min_S = self.reference[min_ind]["shoebox"].data

        ##for k in range(max_S.all()[0]):
        # if False:
        # for j in range(max_S.all()[1]):
        # for i in range(max_S.all()[2]):
        # max_S[k,j,i] = 0
        # if (abs(i - max_S.all()[2] // 2) < 2 and
        # abs(j - max_S.all()[1] // 2) < 2 and
        # abs(k - max_S.all()[0] // 2) < 2):
        # max_S[k,j,i] = 100

        # p = max_P.as_numpy_array() * 100 / flex.max(max_P)
        # p = p.astype(numpy.int)
        # print p

        # from dials.scratch.jmp.misc.test_transform import test_transform
        # grid_size = 4
        # ndiv = 5
        # sigma_b = 0.024 * pi / 180.0
        # sigma_m = 0.044 * pi / 180.0
        # n_sigma = 4.0
        # max_P2 = test_transform(
        # self.experiment,
        # self.reference[max_ind]['shoebox'],
        # self.reference[max_ind]['s1'],
        # self.reference[max_ind]['xyzcal.mm'][2],
        # grid_size,
        # sigma_m,
        # sigma_b,
        # n_sigma,
        # ndiv)
        # max_P = max_P2

        ref_ind = locator.index(max_C)
        ref_P = locator.profile(ref_ind)
        ref_C = locator.coord(ref_ind)

        print("Max Index: ", max_ind, max_I, flex.sum(max_P), flex.sum(max_S))
        print("Coord: ", max_C, "Ref Coord: ", ref_C)

        print("Min Index: ", min_ind, min_I, flex.sum(min_P), flex.sum(min_S))
        print("Coord: ", min_C, "Ref Coord: ", ref_C)

        # vmax = flex.max(max_P)
        # print sum(max_S)
        # print sum(max_P)
        # from matplotlib import pylab, cm
        # for j in range(9):
        # pylab.subplot(3, 3, j+1)
        # pylab.imshow(max_P.as_numpy_array()[j], vmin=0, vmax=vmax,
        # interpolation='none', cmap=cm.Greys_r)
        # pylab.show()

        # vmax = flex.max(min_P)
        # print sum(min_S)
        # print sum(min_P)
        # from matplotlib import pylab, cm
        # for j in range(9):
        # pylab.subplot(3, 3, j+1)
        # pylab.imshow(min_P.as_numpy_array()[j], vmin=0, vmax=vmax,
        # interpolation='none', cmap=cm.Greys_r)
        # pylab.show()

        # for k in range(max_S.all()[0]):
        # print ''
        # print 'Slice %d' % k
        # for j in range(max_S.all()[1]):
        # print ' '.join(["%-4d" % int(max_S[k,j,i]) for i in range(max_S.all()[2])])

        print("Testing")

        def f(I):
            mask = flex.bool(flex.grid(9, 9, 9), False)
            for k in range(9):
                for j in range(9):
                    for i in range(9):
                        dx = 5 * (i - 4.5) / 4.5
                        dy = 5 * (j - 4.5) / 4.5
                        dz = 5 * (k - 4.5) / 4.5
                        dd = sqrt(dx**2 + dy**2 + dz**2)
                        if dd <= 3:
                            mask[k, j, i] = True

            mask = mask.as_1d() & (ref_P.as_1d() > 0)
            p = ref_P.as_1d().select(mask)
            c = max_P.as_1d().select(mask)
            return flex.sum((c - I * p)**2 / (I * p))

        def df(I):
            mask = flex.bool(flex.grid(9, 9, 9), False)
            for k in range(9):
                for j in range(9):
                    for i in range(9):
                        dx = 5 * (i - 4.5) / 4.5
                        dy = 5 * (j - 4.5) / 4.5
                        dz = 5 * (k - 4.5) / 4.5
                        dd = sqrt(dx**2 + dy**2 + dz**2)
                        if dd <= 3:
                            mask[k, j, i] = True
            mask = mask.as_1d() & (ref_P.as_1d() > 0)
            p = ref_P.as_1d().select(mask)
            c = max_P.as_1d().select(mask)
            b = 0
            return flex.sum(p) - flex.sum(c * c / (I * I * p))
            # return flex.sum(p - p*c*c / ((b + I*p)**2))
            # return flex.sum(3*p*p + (c*c*p*p - 4*b*p*p) / ((b + I*p)**2))
            # return flex.sum(p - c*c / (I*I*p))
            # return flex.sum(p * (-c+p*I)*(c+p*I)/((p*I)**2))

        def d2f(I):
            mask = flex.bool(flex.grid(9, 9, 9), False)
            for k in range(9):
                for j in range(9):
                    for i in range(9):
                        dx = 5 * (i - 4.5) / 4.5
                        dy = 5 * (j - 4.5) / 4.5
                        dz = 5 * (k - 4.5) / 4.5
                        dd = sqrt(dx**2 + dy**2 + dz**2)
                        if dd <= 3:
                            mask[k, j, i] = True

            mask = mask.as_1d() & (ref_P.as_1d() > 0)
            p = ref_P.as_1d().select(mask)
            c = max_P.as_1d().select(mask)
            return flex.sum(2 * c * c * p * p / (p * I)**3)

        I = 10703  # flex.sum(max_P)
        mask = ref_P.as_1d() > 0
        p = ref_P.as_1d().select(mask)
        c = max_P.as_1d().select(mask)
        for i in range(10):
            I = I - df(I) / d2f(I)
            # v = I*p
            # I = flex.sum(c * p / v) / flex.sum(p*p / v)
            print(I)

        from math import log

        ff = []
        for I in range(9500, 11500):
            ff.append(f(I))
        print(sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500)
        from matplotlib import pylab

        pylab.plot(range(9500, 11500), ff)
        pylab.show()
        # exit(0)

        # I = 10000
        # print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)

        # I = 10100
        # print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
        # exit(0)

        print(flex.sum(self.reference[0]["rs_shoebox"].data))
        print(I_cal[0])

        # Calculate the z score
        perc = self.mv3n_tolerance_interval(3 * 3)
        Z = (I_cal - I_sim) / flex.sqrt(I_var)
        mv = flex.mean_and_variance(Z)
        Z_mean = mv.mean()
        Z_var = mv.unweighted_sample_variance()
        print("Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var)))

        print(len(I_cal))

        from matplotlib import pylab
        from mpl_toolkits.mplot3d import Axes3D

        # fig = pylab.figure()
        # ax = fig.add_subplot(111, projection='3d')
        # ax.scatter(X_pos, Y_pos, P_cor)

        # pylab.scatter(X_pos, P_cor)
        # pylab.scatter(Y_pos, P_cor)
        # pylab.scatter(Z_pos, P_cor)
        # pylab.hist(P_cor,100)
        # pylab.scatter(P_cor, (I_cal - I_exp) / I_exp)
        pylab.hist(Z, 100)
        # pylab.hist(I_cal,100)
        # pylab.hist(I_cal - I_sim, 100)
        pylab.show()
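
The Newton iteration driven by df and d2f above is solving df(I) = 0, which for this df has a closed form: sum(p) = sum(c*c / (I*I*p)) rearranges to I = sqrt(sum(c*c/p) / sum(p)). A minimal sketch, with made-up p and c standing in for the masked ref_P and max_P arrays:

# Minimal sketch: closed-form solution of df(I) = 0 from the code above;
# p (reference profile) and c (observed counts) are made-up.
from math import sqrt
from dials.array_family import flex

p = flex.double([0.1, 0.3, 0.4, 0.2])
c = flex.double([1000.0, 3200.0, 4100.0, 1900.0])

I = sqrt(flex.sum(c * c / p) / flex.sum(p))
print(I)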
Code Example #10
    def __init__(self, Ibase, Gbase, FSIM, curvatures=False, **kwargs):
        # For backward compatibility handle the case where phil is undefined
        if "params" in kwargs.keys():
            self.params = kwargs["params"]
        else:
            from xfel.command_line.cxi_merge import master_phil
            import iotbx.phil
            phil = iotbx.phil.process_command_line(
                args=[], master_string=master_phil).show()
            self.params = phil.work.extract()
            self.params.levmar.parameter_flags.append(
                "Bfactor")  # default example refines Bfactor

        self.counter = 0

        self.x = flex.double(list(Ibase) + list(Gbase))
        self.N_I = len(Ibase)
        self.N_G = len(Gbase)
        self.N_raw_obs = FSIM.raw_obs.size()
        print "# structure factors:", self.N_I, "# frames:", self.N_G, "(Visited set; refined parameters)"

        step_threshold = self.params.levmar.termination.step_threshold
        objective_decrease_threshold = self.params.levmar.termination.objective_decrease_threshold
        if "Bfactor" in self.params.levmar.parameter_flags:
            self.x = self.x.concatenate(flex.double(len(Gbase), 0.0))
        if "Deff" in self.params.levmar.parameter_flags:
            D_values = flex.double(
                [2. * e.crystal.domain_size for e in kwargs["experiments"]])
            self.x = self.x.concatenate(D_values)
        if "Rxy" in self.params.levmar.parameter_flags:
            self.x = self.x.concatenate(flex.double(2 * len(Gbase), 0.0))

        levenberg_helper = choice_as_helper_base(
            self.params.levmar.parameter_flags)
        self.helper = levenberg_helper(initial_estimates=self.x)
        self.helper.set_cpp_data(FSIM, self.N_I, self.N_G)
        if kwargs.has_key("experiments"):
            self.helper.set_wavelength(
                [e.beam.get_wavelength() for e in kwargs["experiments"]])
            self.helper.set_domain_size([
                2. * e.crystal.domain_size for e in kwargs["experiments"]
            ])  #ad hoc factor of 2
            self.helper.set_Astar_matrix(
                [e.crystal.get_A() for e in kwargs["experiments"]])

        bitflags = choice_as_bitflag(self.params.levmar.parameter_flags)
        self.helper.set_parameter_flags(bitflags)
        self.helper.restart()

        iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
            non_linear_ls=self.helper,
            n_max_iterations=5000,
            track_all=True,
            step_threshold=step_threshold,
            objective_decrease_threshold=objective_decrease_threshold,
            verbose_iterations=True,
        )
        if "Deff" in self.params.levmar.parameter_flags:
            newDeff = self.helper.x[self.N_I + self.N_G:]  # XXX specific
            Dstats = flex.mean_and_variance(newDeff)
            print "Refined Deff mean & standard deviation:",
            print Dstats.mean(), Dstats.unweighted_sample_standard_deviation()
        if "Rxy" in self.params.levmar.parameter_flags:
            AX = self.helper.x[self.N_I + self.N_G:self.N_I +
                               2 * self.N_G]  # XXX specific
            AY = self.helper.x[self.N_I + 2 * self.N_G:self.N_I +
                               3 * self.N_G]  # XXX specific
            stats = flex.mean_and_variance(AX)
            print "Rx rotational increments in degrees: %8.6f +/- %8.6f" % (
                stats.mean(), stats.unweighted_sample_standard_deviation())
            stats = flex.mean_and_variance(AY)
            print "Ry rotational increments in degrees: %8.6f +/- %8.6f" % (
                stats.mean(), stats.unweighted_sample_standard_deviation())

        print "End of minimisation: Converged", self.helper.counter, "cycles"
        chi_squared = self.helper.objective() * 2.
        print "obj", chi_squared
        print "# of obs:", FSIM.raw_obs.size()
        dof = FSIM.raw_obs.size() - (len(self.x))
        print "degrees of freedom =", dof
        print "chisq/dof: %7.3f" % (chi_squared / dof)
        print
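
The closing report relies on helper.objective() returning half the chi-squared, so chi-squared is twice the objective and the degrees of freedom are the observation count minus the number of refined parameters. A minimal worked sketch with made-up counts:

# Minimal sketch of the reduced chi-squared bookkeeping above; numbers are made-up.
objective = 51234.5   # helper.objective() after convergence
n_obs = 120000        # FSIM.raw_obs.size()
n_params = 5000       # len(self.x): structure factors plus per-frame terms
chi_squared = objective * 2.0
dof = n_obs - n_params
print("chisq/dof: %7.3f" % (chi_squared / dof))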
Code Example #11
  def test_for_reference(self):
    from dials.algorithms.integration import ProfileFittingReciprocalSpace
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    from dials.algorithms.statistics import \
      kolmogorov_smirnov_test_standard_normal
    from math import erf, sqrt, pi
    from copy import deepcopy
    from dials.algorithms.simulation.reciprocal_space import Simulator
    from os.path import basename

    # Integrate
    integration = ProfileFittingReciprocalSpace(
      grid_size=4,
      threshold=0.00,
      frame_interval=100,
      n_sigma=5,
      mask_n_sigma=3,
      sigma_b=0.024 * pi / 180.0,
      sigma_m=0.044 * pi / 180.0
    )

    # Integrate the reference profiles
    integration(self.experiment, self.reference)

    p = integration.learner.locate().profile(0)
    m = integration.learner.locate().mask(0)

    locator = integration.learner.locate()

    cor = locator.correlations()
    for j in range(cor.all()[0]):
      print ' '.join([str(cor[j,i]) for i in range(cor.all()[1])])
    #exit(0)
    #from matplotlib import pylab
    #pylab.imshow(cor.as_numpy_array(), interpolation='none', vmin=-1, vmax=1)
    #pylab.show()


    #n = locator.size()
    #for i in range(n):
      #c = locator.coord(i)
      #p = locator.profile(i)
      #vmax = flex.max(p)
      #from matplotlib import pylab
      #for j in range(9):
        #pylab.subplot(3, 3, j+1)
        #pylab.imshow(p.as_numpy_array()[j], vmin=0, vmax=vmax,
        #interpolation='none')
      #pylab.show()

    #print "NRef: ", n
    #x = []
    #y = []
    #for i in range(n):
      #c = locator.coord(i)
      #x.append(c[0])
      #y.append(c[1])
    #from matplotlib import pylab
    #pylab.scatter(x,y)
    #pylab.show()

    #exit(0)
    import numpy
    #pmax = flex.max(p)
    #scale = 100 / pmax
    #print "Scale: ", 100 / pmax
    #p = p.as_numpy_array() *100 / pmax
    #p = p.astype(numpy.int)
    #print p
    #print m.as_numpy_array()

    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference['rs_shoebox']
    eps = 1e-7
    for p in profiles:
      assert(abs(flex.sum(p.background) - 0) < eps)
    print 'OK'

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated)
    I_cal = self.reference['intensity.sum.value']
    I_var = self.reference['intensity.sum.variance']
    B_sim = self.reference['background.sim'].as_double()
    I_sim = self.reference['intensity.sim'].as_double()
    I_exp = self.reference['intensity.exp']
    P_cor = self.reference['profile.correlation']
    X_pos, Y_pos, Z_pos = self.reference['xyzcal.px'].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    max_ind = flex.max_index(I_cal)
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]['rs_shoebox'].data
    max_C = self.reference[max_ind]['xyzcal.px']
    max_S = self.reference[max_ind]['shoebox'].data


    min_ind = flex.min_index(P_cor)
    min_I = I_cal[min_ind]
    min_P = self.reference[min_ind]['rs_shoebox'].data
    min_C = self.reference[min_ind]['xyzcal.px']
    min_S = self.reference[min_ind]['shoebox'].data

    ##for k in range(max_S.all()[0]):
    #if False:
      #for j in range(max_S.all()[1]):
        #for i in range(max_S.all()[2]):
          #max_S[k,j,i] = 0
          #if (abs(i - max_S.all()[2] // 2) < 2 and
              #abs(j - max_S.all()[1] // 2) < 2 and
              #abs(k - max_S.all()[0] // 2) < 2):
            #max_S[k,j,i] = 100

    #p = max_P.as_numpy_array() * 100 / flex.max(max_P)
    #p = p.astype(numpy.int)
    #print p

    #from dials.scratch.jmp.misc.test_transform import test_transform
    #grid_size = 4
    #ndiv = 5
    #sigma_b = 0.024 * pi / 180.0
    #sigma_m = 0.044 * pi / 180.0
    #n_sigma = 4.0
    #max_P2 = test_transform(
      #self.experiment,
      #self.reference[max_ind]['shoebox'],
      #self.reference[max_ind]['s1'],
      #self.reference[max_ind]['xyzcal.mm'][2],
      #grid_size,
      #sigma_m,
      #sigma_b,
      #n_sigma,
      #ndiv)
    #max_P = max_P2

    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    print "Max Index: ", max_ind, max_I, flex.sum(max_P), flex.sum(max_S)
    print "Coord: ", max_C, "Ref Coord: ", ref_C

    print "Min Index: ", min_ind, min_I, flex.sum(min_P), flex.sum(min_S)
    print "Coord: ", min_C, "Ref Coord: ", ref_C

    #vmax = flex.max(max_P)
    #print sum(max_S)
    #print sum(max_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
      #pylab.subplot(3, 3, j+1)
      #pylab.imshow(max_P.as_numpy_array()[j], vmin=0, vmax=vmax,
      #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #vmax = flex.max(min_P)
    #print sum(min_S)
    #print sum(min_P)
    #from matplotlib import pylab, cm
    #for j in range(9):
      #pylab.subplot(3, 3, j+1)
      #pylab.imshow(min_P.as_numpy_array()[j], vmin=0, vmax=vmax,
      #interpolation='none', cmap=cm.Greys_r)
    #pylab.show()

    #for k in range(max_S.all()[0]):
      #print ''
      #print 'Slice %d' % k
      #for j in range(max_S.all()[1]):
        #print ' '.join(["%-4d" % int(max_S[k,j,i]) for i in range(max_S.all()[2])])

    print "Testing"

    def f(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True

      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      return flex.sum((c - I * p)**2 / (I * p))

    def df(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True
      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      b = 0
      return flex.sum(p) - flex.sum(c*c / (I*I*p))
      #return flex.sum(p - p*c*c / ((b + I*p)**2))
      #return flex.sum(3*p*p + (c*c*p*p - 4*b*p*p) / ((b + I*p)**2))
      #return flex.sum(p - c*c / (I*I*p))
      #return flex.sum(p * (-c+p*I)*(c+p*I)/((p*I)**2))

    def d2f(I):
      mask = flex.bool(flex.grid(9,9,9), False)
      for k in range(9):
        for j in range(9):
          for i in range(9):
            dx = 5 * (i - 4.5) / 4.5
            dy = 5 * (j - 4.5) / 4.5
            dz = 5 * (k - 4.5) / 4.5
            dd = sqrt(dx**2 + dy**2 + dz**2)
            if dd <= 3:
              mask[k,j,i] = True

      mask = mask.as_1d() & (ref_P.as_1d() > 0)
      p = ref_P.as_1d().select(mask)
      c = max_P.as_1d().select(mask)
      return flex.sum(2*c*c*p*p / (p*I)**3)

    I = 10703#flex.sum(max_P)
    mask = ref_P.as_1d() > 0
    p = ref_P.as_1d().select(mask)
    c = max_P.as_1d().select(mask)
    for i in range(10):
      I = I - df(I) / d2f(I)
      #v = I*p
      #I = flex.sum(c * p / v) / flex.sum(p*p / v)
      print I


    from math import log
    ff = []
    for I in range(9500, 11500):
      ff.append(f(I))
    print sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500
    from matplotlib import pylab
    pylab.plot(range(9500,11500), ff)
    pylab.show()
    #exit(0)

    #I = 10000
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)


    #I = 10100
    #print flex.sum((max_P - I * ref_P)**2) / flex.sum(I * ref_P)
    #exit(0)


    print flex.sum(self.reference[0]['rs_shoebox'].data)
    print I_cal[0]

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var))

    print len(I_cal)

    from matplotlib import pylab
    from mpl_toolkits.mplot3d import Axes3D
    #fig = pylab.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.scatter(X_pos, Y_pos, P_cor)

    #pylab.scatter(X_pos, P_cor)
    #pylab.scatter(Y_pos, P_cor)
    #pylab.scatter(Z_pos, P_cor)
    #pylab.hist(P_cor,100)
    #pylab.scatter(P_cor, (I_cal - I_exp) / I_exp)
    pylab.hist(Z, 100)
    #pylab.hist(I_cal,100)
    #pylab.hist(I_cal - I_sim, 100)
    pylab.show()
Code Example #12
    def test_for_reflections(self, refl, filename):
        from dials.array_family import flex
        from dials.algorithms.statistics import \
          kolmogorov_smirnov_test_standard_normal
        from os.path import basename
        print basename(filename)

        #refl = self.reference

        # Get the calculated background and simulated background
        B_sim = refl['background.sim.a'].as_double()
        I_sim = refl['intensity.sim'].as_double()
        I_exp = refl['intensity.exp']

        # Set the background as simulated
        shoebox = refl['shoebox']
        for i in range(len(shoebox)):
            bg = shoebox[i].background
            ms = shoebox[i].mask
            for j in range(len(bg)):
                bg[j] = B_sim[i]

        # Integrate
        integration = self.experiments[0].profile.fitting_class()(
            self.experiments[0])
        old_size = len(refl)
        refl.extend(self.reference)
        integration.model(self.reference)
        integration.fit(refl)
        reference = refl[-len(self.reference):]
        refl = refl[:len(self.reference)]
        assert (len(refl) == old_size)
        assert (len(reference) == len(self.reference))
        I_cal = refl['intensity.prf.value']
        I_var = refl['intensity.prf.variance']

        # Check the reference profiles and spots are ok
        #self.check_profiles(integration.learner)
        #self.check_reference(reference)

        #np = reference.locate().size()
        #for i in range(np):
        #profile = reference.locate().profile(i)
        #print "Profile: %d" % i
        #p = (profile.as_numpy_array() * 1000)
        #import numpy as np
        #p = p.astype(np.int)
        #print p

        # Only select variances greater than zero
        mask = refl.get_flags(refl.flags.integrated_prf)
        assert (mask.count(True) > 0)
        I_cal = I_cal.select(mask)
        I_var = I_var.select(mask)
        I_sim = I_sim.select(mask)
        I_exp = I_exp.select(mask)

        mask = I_var > 0
        I_cal = I_cal.select(mask)
        I_var = I_var.select(mask)
        I_sim = I_sim.select(mask)
        I_exp = I_exp.select(mask)

        # Calculate the z score
        perc = self.mv3n_tolerance_interval(3 * 3)
        Z = (I_cal - I_sim) / flex.sqrt(I_var)
        mv = flex.mean_and_variance(Z)
        Z_mean = mv.mean()
        Z_var = mv.unweighted_sample_variance()
        print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

        # Do the kolmogorov smirnov test
        D, p = kolmogorov_smirnov_test_standard_normal(Z)
        print "KS: D: %f, p-value: %f" % (D, p)

        # FIXME Z score should be a standard normal distribution. When background is
        # the main component, we do indeed see that the z score is in a standard
        # normal distribution. When the intensity dominates, the variance of the Z
        # scores decreases indicating that for increasing intensity of the signal,
        # the variance is over estimated.
        #assert(abs(Z_mean) <= 3 * Z_var)

        #from matplotlib import pylab
        #pylab.hist(Z, 20)
        #pylab.show()

        #Z_I = sorted(Z)
        ##n = int(0.05 * len(Z_I))
        ##Z_I = Z_I[n:-n]
        ##mv = flex.mean_and_variance(flex.double(Z_I))
        ##print "Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation())
        #edf = [float(i+1) / len(Z_I) for i in range(len(Z_I))]
        #cdf = [0.5 * (1.0 + erf(z / sqrt(2.0))) for z in Z_I]

        print 'OK'
Code Example #13
  def test_for_reflections(self, refl, filename):
    from dials.array_family import flex
    from dials.algorithms.statistics import \
      kolmogorov_smirnov_test_standard_normal
    from os.path import basename
    print basename(filename)

    #refl = self.reference

    # Get the calculated background and simulated background
    B_sim = refl['background.sim.a'].as_double()
    I_sim = refl['intensity.sim'].as_double()
    I_exp = refl['intensity.exp']

    # Set the background as simulated
    shoebox = refl['shoebox']
    for i in range(len(shoebox)):
      bg = shoebox[i].background
      ms = shoebox[i].mask
      for j in range(len(bg)):
        bg[j] = B_sim[i]

    # Integrate
    integration = self.experiments[0].profile.fitting_class()(self.experiments[0])
    old_size = len(refl)
    refl.extend(self.reference)
    integration.model(self.reference)
    integration.fit(refl)
    reference = refl[-len(self.reference):]
    refl = refl[:len(self.reference)]
    assert(len(refl) == old_size)
    assert(len(reference) == len(self.reference))
    I_cal = refl['intensity.prf.value']
    I_var = refl['intensity.prf.variance']

    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)
    #self.check_reference(reference)

    #np = reference.locate().size()
    #for i in range(np):
      #profile = reference.locate().profile(i)
      #print "Profile: %d" % i
      #p = (profile.as_numpy_array() * 1000)
      #import numpy as np
      #p = p.astype(np.int)
      #print p

    # Only select variances greater than zero
    mask = refl.get_flags(refl.flags.integrated_prf)
    assert(mask.count(True) > 0)
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)

    mask = I_var > 0
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

    # Do the kolmogorov smirnov test
    D, p  = kolmogorov_smirnov_test_standard_normal(Z)
    print "KS: D: %f, p-value: %f" % (D, p)

    # FIXME The Z scores should follow a standard normal distribution. When the
    # background is the main component, they do. When the intensity dominates,
    # the variance of the Z scores decreases, indicating that the variance is
    # overestimated as the signal intensity increases.
    #assert(abs(Z_mean) <= 3 * Z_var)


    #from matplotlib import pylab
    #pylab.hist(Z, 20)
    #pylab.show()

    #Z_I = sorted(Z)
    ##n = int(0.05 * len(Z_I))
    ##Z_I = Z_I[n:-n]
    ##mv = flex.mean_and_variance(flex.double(Z_I))
    ##print "Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation())
    #edf = [float(i+1) / len(Z_I) for i in range(len(Z_I))]
    #cdf = [0.5 * (1.0 + erf(z / sqrt(2.0))) for z in Z_I]

    print('OK')
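A minimal sketch, with hypothetical values, of the selection-then-summarise pattern used above: keep only entries with positive variance, form the Z scores, and summarise them with flex.mean_and_variance:

from dials.array_family import flex

I_cal = flex.double([10.0, 12.0, 9.0, 11.0])
I_sim = flex.double([10.5, 11.0, 9.5, 10.0])
I_var = flex.double([1.0, 0.0, 2.0, 1.5])

mask = I_var > 0  # drop entries with non-positive variance estimates
Z = (I_cal.select(mask) - I_sim.select(mask)) / flex.sqrt(I_var.select(mask))
mv = flex.mean_and_variance(Z)
print("Z: mean: %f, var: %f" % (mv.mean(), mv.unweighted_sample_variance()))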
Code example #14
    def from_reflections(self, experiment, reflections):
        """
        Generate the required data from the reflections

        """

        # Get the beam vector
        s0 = matrix.col(experiment.beam.get_s0())

        # Get the reciprocal lattice vector
        s2_list = reflections["s2"]

        # Initialise the list of observed intensities and covariances
        ctot_list = flex.double(len(s2_list))
        mobs_list = flex.vec2_double(len(s2_list))
        Sobs_list = flex.double(flex.grid(len(s2_list), 4))
        Bmean = matrix.sqr((0, 0, 0, 0))

        logger.info("Computing observed covariance for %d reflections" %
                    len(reflections))
        s0_length = s0.length()
        assert len(experiment.detector) == 1
        panel = experiment.detector[0]
        sbox = reflections["shoebox"]
        for r in range(len(reflections)):

            # Create the coordinate system
            cs = CoordinateSystem2d(s0, s2_list[r])

            # Get data and compute total counts
            data = sbox[r].data
            mask = sbox[r].mask
            bgrd = sbox[r].background

            # Get array of vectors
            i0 = sbox[r].bbox[0]
            j0 = sbox[r].bbox[2]
            assert data.all()[0] == 1
            X = flex.vec2_double(flex.grid(data.all()[1], data.all()[2]))
            ctot = 0
            C = flex.double(X.accessor())

            for j in range(data.all()[1]):
                for i in range(data.all()[2]):
                    c = data[0, j, i] - bgrd[0, j, i]
                    if mask[0, j, i] == 5 and c > 0:
                        ctot += c
                        ii = i + i0
                        jj = j + j0
                        s = panel.get_pixel_lab_coord((ii + 0.5, jj + 0.5))
                        s = matrix.col(s).normalize() * s0_length
                        X[j, i] = cs.from_beam_vector(s)
                        C[j, i] = c

            # Check we have a sensible number of counts
            assert ctot > 0, "BUG: strong spots should have more than 0 counts!"

            # Compute the mean vector
            xbar = matrix.col((0, 0))
            for j in range(X.all()[0]):
                for i in range(X.all()[1]):
                    x = matrix.col(X[j, i])
                    xbar += C[j, i] * x
            xbar /= ctot

            # Compute the covariance matrix
            Sobs = matrix.sqr((0, 0, 0, 0))
            for j in range(X.all()[0]):
                for i in range(X.all()[1]):
                    x = matrix.col(X[j, i])
                    Sobs += (x - xbar) * (x - xbar).transpose() * C[j, i]
            Sobs /= ctot

            # Compute the bias
            zero = matrix.col((0, 0))
            Bias_sq = (xbar - zero) * (xbar - zero).transpose()
            Bmean += Bias_sq

            # Add to the lists
            ctot_list[r] = ctot
            mobs_list[r] = xbar
            Sobs_list[r, 0] = Sobs[0]
            Sobs_list[r, 1] = Sobs[1]
            Sobs_list[r, 2] = Sobs[2]
            Sobs_list[r, 3] = Sobs[3]

        # Print some information
        logger.info("I_min = %.2f, I_max = %.2f" %
                    (flex.min(ctot_list), flex.max(ctot_list)))

        # Print the mean covariance
        Smean = matrix.sqr((0, 0, 0, 0))
        for r in range(Sobs_list.all()[0]):
            Smean += matrix.sqr(tuple(Sobs_list[r:r + 1, :]))
        Smean /= Sobs_list.all()[0]
        Bmean /= len(reflections)

        logger.info("")
        logger.info("Mean observed covariance:")
        print_matrix(Smean)
        print_eigen_values_and_vectors_of_observed_covariance(Smean)
        logger.info("")
        logger.info("Mean observed bias^2:")
        print_matrix(Bmean)

        # Compute the distance from the Ewald sphere
        epsilon = flex.double(s0.length() - matrix.col(s).length()
                              for s in reflections["s2"])
        mv = flex.mean_and_variance(epsilon)
        logger.info("")
        logger.info("Mean distance from Ewald sphere: %.3g" % mv.mean())
        logger.info("Variance in distance from Ewald sphere: %.3g" %
                    mv.unweighted_sample_variance())

        # Return the profile refiner data
        return ProfileRefinerData(s0, s2_list, ctot_list, mobs_list, Sobs_list)
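For reference, a standalone numpy sketch (not part of the code above) of the count-weighted mean vector and covariance matrix computed per reflection, xbar = sum_i c_i x_i / sum_i c_i and Sobs = sum_i c_i (x_i - xbar)(x_i - xbar)^T / sum_i c_i:

import numpy as np

X = np.array([[0.1, 0.2], [-0.1, 0.0], [0.0, 0.1]])  # 2D coords in the local frame
C = np.array([5.0, 3.0, 2.0])                         # background-subtracted counts

ctot = C.sum()
xbar = (C[:, None] * X).sum(axis=0) / ctot            # weighted mean vector
d = X - xbar
# weighted sum of outer products (x - xbar)(x - xbar)^T
Sobs = (C[:, None, None] * d[:, :, None] * d[:, None, :]).sum(axis=0) / ctot
print("xbar =", xbar)
print("Sobs =\n", Sobs)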
Code example #15
  def __init__(self,Ibase,Gbase,FSIM,curvatures=False,**kwargs):
    # For backward compatibility handle the case where phil is undefined
    if "params" in kwargs.keys():
      self.params = kwargs["params"]
    else:
      from xfel.command_line.cxi_merge import master_phil
      phil = iotbx.phil.process_command_line(args=[], master_string=master_phil).show()
      self.params = phil.work.extract()
      self.params.levmar.parameter_flags.append("Bfactor") # default example refines Bfactor

    self.counter = 0

    self.x = flex.double(list(Ibase) + list(Gbase))
    self.N_I = len(Ibase)
    self.N_G = len(Gbase)
    self.N_raw_obs = FSIM.raw_obs.size()
    print "# structure factors:",self.N_I, "# frames:",self.N_G, "(Visited set; refined parameters)"

    step_threshold = self.params.levmar.termination.step_threshold
    objective_decrease_threshold = self.params.levmar.termination.objective_decrease_threshold
    if "Bfactor" in self.params.levmar.parameter_flags:
        self.x = self.x.concatenate(flex.double(len(Gbase),0.0))
    if "Deff" in self.params.levmar.parameter_flags:
        D_values = flex.double([2.*e.crystal.domain_size for e in kwargs["experiments"]])
        self.x = self.x.concatenate(D_values)
    if "Rxy" in self.params.levmar.parameter_flags:
        self.x = self.x.concatenate(flex.double(2*len(Gbase),0.0))

    levenberg_helper = choice_as_helper_base(self.params.levmar.parameter_flags)
    self.helper = levenberg_helper(initial_estimates = self.x)
    self.helper.set_cpp_data(FSIM, self.N_I, self.N_G)
    if "experiments" in kwargs:
      self.helper.set_wavelength([e.beam.get_wavelength() for e in kwargs["experiments"]])
      self.helper.set_domain_size([2.*e.crystal.domain_size for e in kwargs["experiments"]])#ad hoc factor of 2
      self.helper.set_Astar_matrix([e.crystal.get_A() for e in kwargs["experiments"]])

    bitflags = choice_as_bitflag(self.params.levmar.parameter_flags)
    self.helper.set_parameter_flags(bitflags)
    self.helper.restart()

    iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
               non_linear_ls = self.helper,
               n_max_iterations = 5000,
               track_all=True,
               step_threshold = step_threshold,
               objective_decrease_threshold = objective_decrease_threshold
    )
    if "Deff" in self.params.levmar.parameter_flags:
      newDeff = self.helper.x[self.N_I+self.N_G:] # XXX specific
      Dstats=flex.mean_and_variance(newDeff)
      print "Refined Deff mean & standard deviation:",
      print Dstats.mean(),Dstats.unweighted_sample_standard_deviation()
    if "Rxy" in self.params.levmar.parameter_flags:
      AX = self.helper.x[self.N_I+self.N_G:self.N_I+2*self.N_G] # XXX specific
      AY = self.helper.x[self.N_I+2*self.N_G:self.N_I+3*self.N_G] # XXX specific
      stats=flex.mean_and_variance(AX)
      print "Rx rotational increments in degrees: %8.6f +/- %8.6f"%(
           stats.mean(),stats.unweighted_sample_standard_deviation())
      stats=flex.mean_and_variance(AY)
      print "Ry rotational increments in degrees: %8.6f +/- %8.6f"%(
           stats.mean(),stats.unweighted_sample_standard_deviation())

    print "End of minimisation: Converged", self.helper.counter,"cycles"
    chi_squared = self.helper.objective() * 2.
    print "obj",chi_squared
    print "# of obs:",FSIM.raw_obs.size()
    dof = FSIM.raw_obs.size() - ( len(self.x) )
    print "degrees of freedom =",dof
    print "chisq/dof: %7.3f"%(chi_squared / dof)
    print
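A minimal sketch, with hypothetical values, of the reporting pattern used above: slice a block of refined parameters out of the solution vector and summarise it with flex.mean_and_variance:

from dials.array_family import flex

# hypothetical solution vector: N_I structure factors followed by per-frame Deff
x = flex.double([1.0, 2.0, 0.52, 0.48, 0.50])
N_I = 2
Deff = x[N_I:]  # slicing a flex.double returns a flex.double
stats = flex.mean_and_variance(Deff)
print("Refined Deff mean & standard deviation:",
      stats.mean(), stats.unweighted_sample_standard_deviation())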
Code example #16
  def dI_derrorterms(self):
    refls = self.scaler.ISIGI
    ct = self.scaler.crystal_table

    # notation: dP1_dP2 is derivative of parameter 1 with respect to parameter 2. Here,
    # for example, is the derivative of rx wrt thetax
    drx_dthetax = flex.mat3_double()
    dry_dthetay = flex.mat3_double()
    s0hat = flex.vec3_double(len(refls), (0,0,-1))

    ex = col((1,0,0))
    ey = col((0,1,0))

    # Compute derivatives
    sre = symmetrize_reduce_enlarge(self.scaler.params.target_space_group.group())
    gstar_params = None
    gstar_derivatives = None

    for i in range(len(ct)):
      n_refl = ct['n_refl'][i]

      # Derivatives of rx/y wrt thetax/y come from cctbx
      drx_dthetax.extend(flex.mat3_double(n_refl, ex.axis_and_angle_as_r3_derivative_wrt_angle(ct['thetax'][i])))
      dry_dthetay.extend(flex.mat3_double(n_refl, ey.axis_and_angle_as_r3_derivative_wrt_angle(ct['thetay'][i])))

      # Derivatives of the B matrix wrt to the unit cell parameters also come from cctbx
      sre.set_orientation(orientation=ct['b_matrix'][i])
      p = sre.forward_independent_parameters()
      dB_dp = sre.forward_gradients()
      if gstar_params is None:
        assert gstar_derivatives is None
        gstar_params = [flex.double() for j in range(len(p))]
        gstar_derivatives = [flex.mat3_double() for j in range(len(p))]
      assert len(p) == len(dB_dp) == len(gstar_params) == len(gstar_derivatives)
      for j in range(len(p)):
        gstar_params[j].extend(flex.double(n_refl, p[j]))
        gstar_derivatives[j].extend(flex.mat3_double(n_refl, tuple(dB_dp[j])))

    # Compute the scalar terms used while computing derivatives
    self.r = r = self.compute_intensity_parameters()

    # Begin computing derivatives
    sigma_Iobs = refls['scaled_intensity']/refls['isigi']
    dI_dIobs = 1/r['D']

    def compute_dI_dp(dq_dp):
      """ Deriviatives of the scaled intensity I wrt to thetax, thetay and the unit cell parameters
      are computed the same, starting with the deriviatives of those parameters wrt to q """
      dqlen_dp = r['q'].dot(dq_dp)/r['qlen']
      dd_dp    = -(1/(r['qlen']**2)) * dqlen_dp
      drs_dp   = -(r['eta']/(2 * r['d']**2)) * dd_dp
      dslen_dp = r['s'].dot(dq_dp)/r['slen']
      drhsq_dp = 2 * (r['slen'] - (1/r['wavelength'])) * dslen_dp
      dPn_dp   = 2 * r['rs'] * drs_dp
      dPd_dp   = 2 * ((r['rs'] * drs_dp) + drhsq_dp)
      dP_dp    = ((r['p_d'] * dPn_dp)-(r['p_n'] * dPd_dp))/(r['p_d']**2)
      dI_dp    = -(refls['iobs']/(r['partiality']**2 * r['G'] * r['eepsilon'])) * dP_dp
      return dI_dp

    # Derivatives wrt the unit cell parameters
    dI_dgstar = []
    for j in range(len(gstar_params)):
      dI_dgstar.append(compute_dI_dp(r['ry'] * r['rx'] * r['u'] * gstar_derivatives[j] * r['h']))

    # Derivatives wrt the crystal orientation
    dI_dthetax = compute_dI_dp(r['ry'] * drx_dthetax * r['u'] * r['b'] * r['h'])
    dI_dthetay = compute_dI_dp(dry_dthetay * r['rx'] * r['u'] * r['b'] * r['h'])

    # Derivatives wrt to the wavelength
    dthetah_dlambda  = 1/(flex.sqrt(1 - ((r['wavelength']/(2 * r['d']))**2)) * 2 * r['d'])
    den_dlambda      = flex.cos(r['thetah']) * dthetah_dlambda
    der_dlambda      = ((r['wavelength'] * den_dlambda) - r['sinthetah'])/r['wavelength']**2
    depsilon_dlambda = -16 * r['B'] * r['er'] * der_dlambda
    ds0_dlambda      = s0hat*(-1/r['wavelength']**2)
    dslen_dlambda    = r['s'].dot(ds0_dlambda)/r['slen']
    drhsq_dlambda    = 2*(r['slen']-(1/r['wavelength']))*(dslen_dlambda+(1/r['wavelength']**2))
    dP_dlambda       = -2*(r['p_n']/r['p_d']**2) * drhsq_dlambda
    dD_dlambda       = (r['G'] * r['eepsilon'] * dP_dlambda) + (r['partiality'] * r['G'] * r['eepsilon'] * depsilon_dlambda)
    dI_dlambda       = -(refls['iobs']/r['D']**2) * dD_dlambda

    # Derivatives wrt to the deff
    drs_deff = -1/(r['deff']**2)
    dPn_deff = 2 * r['rs'] * drs_deff
    dPd_deff = 2 * r['rs'] * drs_deff
    dP_deff  = ((r['p_d'] * dPn_deff)-(r['p_n'] * dPd_deff))/(r['p_d']**2)
    dI_deff  = -(refls['iobs']/(r['partiality']**2 * r['G'] * r['eepsilon'])) * dP_deff

    # Derivatives wrt to eta (unused for RS refinement)
    # drs_deta = 1/(2*r['d'])
    # dPn_deta = 2 * r['rs'] * drs_deta
    # dPd_deta = 2 * r['rs'] * drs_deta
    # dP_deta  = ((r['p_d']*dPn_deta)-(r['p_n']*dPd_deta))/(r['p_d']**2)
    # dI_deta  = -(refls['iobs']/(r['partiality']**2 * r['G'] * r['eepsilon'])) * dP_deta

    if self.verbose:
      # Show comparisons to finite differences
      n_cryst_params = sre.constraints.n_independent_params()
      print "Showing finite differences and derivatives for each parameter (first few reflections only)"
      for parameter_name, table, derivatives, delta, in zip(['iobs', 'thetax', 'thetay', 'wavelength', 'deff'] + ['c%d'%cp for cp in range(n_cryst_params)],
                                                    [refls, ct, ct, ct, ct] + [ct]*n_cryst_params,
                                                    [dI_dIobs, dI_dthetax, dI_dthetay, dI_dlambda, dI_deff] + dI_dgstar,
                                                    [1e-7]*5 + [1e-11]*n_cryst_params):
        finite_g = self.finite_difference(parameter_name, table, delta)
        print(parameter_name)
        for refl_id in range(min(10, len(refls))):
          print "%d % 21.1f % 21.1f"%(refl_id, finite_g[refl_id], derivatives[refl_id])
        stats = flex.mean_and_variance(finite_g-derivatives)
        stats_finite = flex.mean_and_variance(finite_g)
        percent = 0 if stats_finite.mean() == 0 else 100*stats.mean()/stats_finite.mean()
        print "Mean difference between finite and analytical: % 24.4f +/- % 24.4f (%8.3f%% of finite d.)"%( \
            stats.mean(), stats.unweighted_sample_standard_deviation(), percent)
        print

    return [dI_dIobs, dI_dthetax, dI_dthetay, dI_dlambda, dI_deff] + dI_dgstar
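The verbose branch above checks analytical derivatives against finite differences. A standalone numpy sketch of that kind of check, using a toy function rather than the scaler's parameters:

import numpy as np

def f(p):
    return p ** 3

def df_dp(p):  # analytical derivative to be verified
    return 3 * p ** 2

p = np.linspace(0.5, 2.0, 10)
delta = 1e-7
finite_g = (f(p + delta) - f(p - delta)) / (2 * delta)  # central difference
resid = finite_g - df_dp(p)
print("Mean difference between finite and analytical: %.3e +/- %.3e"
      % (resid.mean(), resid.std(ddof=1)))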
Code example #17
    def run(self):
        ''' Parse the options. '''
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = [wrapper.data for wrapper in params.input.experiments]
        reflections = [wrapper.data for wrapper in params.input.reflections]

        # Find all detector objects
        detectors = []
        beams = []
        for expts in experiments:
            detectors.extend(expts.detectors())
            beams.extend(expts.beams())

        # Verify inputs
        if len(detectors) != 2:
            print("Please provide two experiments for comparison")
            return

        # These lines exercise the iterate_detector_at_level and iterate_panels functions
        # for a detector with 4 hierarchy levels
        """
    print "Testing iterate_detector_at_level"
    for level in range(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        print panelg.get_name()

    print "Testing iterate_panels"
    for level in range(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        for panel in iterate_panels(panelg):
          print panel.get_name()
    """
        tmp = []
        for refls in reflections:
            print("N reflections total:", len(refls))
            sel = refls.get_flags(refls.flags.used_in_refinement)
            if sel.count(True) > 0:
                refls = refls.select(sel)
                print("N reflections used in refinement", len(refls))
                print("Reporting only on those reflections used in refinement")

            refls['difference_vector_norms'] = (
                refls['xyzcal.mm'] - refls['xyzobs.mm.value']).norms()
            tmp.append(refls)
        reflections = tmp

        s0 = col(flex.vec3_double([col(b.get_s0()) for b in beams]).mean())

        # Compute a set of radial and transverse displacements for each reflection
        print("Setting up stats...")
        tmp_refls = []
        for refls, expts in zip(reflections, experiments):
            tmp = flex.reflection_table()
            assert len(expts.detectors()) == 1
            dect = expts.detectors()[0]
            # Need to construct a variety of vectors
            for panel_id, panel in enumerate(dect):
                panel_refls = refls.select(refls['panel'] == panel_id)
                bcl = flex.vec3_double()
                # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
                # the panel, if it did intersect the panel)
                for expt_id in set(panel_refls['id']):
                    beam = expts[expt_id].beam
                    s0_ = beam.get_s0()
                    expt_refls = panel_refls.select(
                        panel_refls['id'] == expt_id)
                    beam_centre = panel.get_beam_centre_lab(s0_)
                    bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
                panel_refls['beam_centre_lab'] = bcl

                # Compute obs in lab space
                x, y, _ = panel_refls['xyzobs.mm.value'].parts()
                c = flex.vec2_double(x, y)
                panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
                # Compute deltaXY in panel space. This vector is relative to the panel origin
                x, y, _ = (panel_refls['xyzcal.mm'] -
                           panel_refls['xyzobs.mm.value']).parts()
                # Convert deltaXY to lab space, subtracting off the panel origin
                panel_refls['delta_lab_coords'] = panel.get_lab_coord(
                    flex.vec2_double(x, y)) - panel.get_origin()
                tmp.extend(panel_refls)
            refls = tmp
            # The radial vector points from the center of the reflection to the beam center
            radial_vectors = (refls['obs_lab_coords'] -
                              refls['beam_centre_lab']).each_normalize()
            # The transverse vector is orthogonal to the radial vector and the beam vector
            transverse_vectors = radial_vectors.cross(
                refls['beam_centre_lab']).each_normalize()
            # Compute the radial and transverse components of each deltaXY
            refls['radial_displacements'] = refls['delta_lab_coords'].dot(
                radial_vectors)
            refls['transverse_displacements'] = refls['delta_lab_coords'].dot(
                transverse_vectors)

            tmp_refls.append(refls)
        reflections = tmp_refls

        # storage for plots
        refl_counts = {}

        # Data for all tables
        pg_bc_dists = flex.double()
        root1 = detectors[0].hierarchy()
        root2 = detectors[1].hierarchy()
        all_weights = flex.double()
        all_refls_count = flex.int()

        # Data for lab space table
        lab_table_data = []
        lab_delta_table_data = []
        all_lab_x = flex.double()
        all_lab_y = flex.double()
        all_lab_z = flex.double()
        pg_lab_x_sigmas = flex.double()
        pg_lab_y_sigmas = flex.double()
        pg_lab_z_sigmas = flex.double()
        all_rotX = flex.double()
        all_rotY = flex.double()
        all_rotZ = flex.double()
        pg_rotX_sigmas = flex.double()
        pg_rotY_sigmas = flex.double()
        pg_rotZ_sigmas = flex.double()
        all_delta_x = flex.double()
        all_delta_y = flex.double()
        all_delta_z = flex.double()
        all_delta_xy = flex.double()
        all_delta_xyz = flex.double()
        all_delta_r = flex.double()
        all_delta_t = flex.double()
        all_delta_norm = flex.double()

        if params.hierarchy_level > 0:
            # Data for local table
            local_table_data = []
            local_delta_table_data = []
            all_local_x = flex.double()
            all_local_y = flex.double()
            all_local_z = flex.double()
            pg_local_x_sigmas = flex.double()
            pg_local_y_sigmas = flex.double()
            pg_local_z_sigmas = flex.double()
            all_local_rotX = flex.double()
            all_local_rotY = flex.double()
            all_local_rotZ = flex.double()
            pg_local_rotX_sigmas = flex.double()
            pg_local_rotY_sigmas = flex.double()
            pg_local_rotZ_sigmas = flex.double()
            all_local_delta_x = flex.double()
            all_local_delta_y = flex.double()
            all_local_delta_z = flex.double()
            all_local_delta_xy = flex.double()
            all_local_delta_xyz = flex.double()

        # Data for RMSD table
        rmsds_table_data = []

        for pg_id, (pg1, pg2) in enumerate(
                zip(
                    iterate_detector_at_level(root1, 0,
                                              params.hierarchy_level),
                    iterate_detector_at_level(root2, 0,
                                              params.hierarchy_level))):
            # Count up the number of reflections in this panel group pair for use as a weighting scheme
            total_refls = 0
            pg1_refls = 0
            pg2_refls = 0
            for p1, p2 in zip(iterate_panels(pg1), iterate_panels(pg2)):
                r1 = len(reflections[0].select(
                    reflections[0]['panel'] == id_from_name(
                        detectors[0], p1.get_name())))
                r2 = len(reflections[1].select(
                    reflections[1]['panel'] == id_from_name(
                        detectors[1], p2.get_name())))
                total_refls += r1 + r2
                pg1_refls += r1
                pg2_refls += r2
            if pg1_refls == 0 and pg2_refls == 0:
                print("No reflections on panel group", pg_id)
                continue
            all_refls_count.append(total_refls)
            all_weights.append(pg1_refls)
            all_weights.append(pg2_refls)

            assert pg1.get_name() == pg2.get_name()
            refl_counts[pg1.get_name()] = total_refls

            # Compute RMSDs
            row = ["%d" % pg_id]
            for pg, refls, det in zip([pg1, pg2], reflections, detectors):
                pg_refls = flex.reflection_table()
                for p in iterate_panels(pg):
                    pg_refls.extend(
                        refls.select(
                            refls['panel'] == id_from_name(det, p.get_name())))
                if len(pg_refls) == 0:
                    rmsd = r_rmsd = t_rmsd = 0
                else:
                    rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['difference_vector_norms']) /
                        len(pg_refls)) * 1000
                    r_rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['radial_displacements']) /
                        len(pg_refls)) * 1000
                    t_rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['transverse_displacements']) /
                        len(pg_refls)) * 1000

                row.extend([
                    "%6.1f" % rmsd,
                    "%6.1f" % r_rmsd,
                    "%6.1f" % t_rmsd,
                    "%8d" % len(pg_refls)
                ])
            rmsds_table_data.append(row)

            dists = flex.double()
            lab_x = flex.double()
            lab_y = flex.double()
            lab_z = flex.double()
            rot_X = flex.double()
            rot_Y = flex.double()
            rot_Z = flex.double()

            for pg in [pg1, pg2]:
                bc = col(pg.get_beam_centre_lab(s0))
                ori = get_center(pg)

                dists.append((ori - bc).length())

                ori_lab = pg.get_origin()
                lab_x.append(ori_lab[0])
                lab_y.append(ori_lab[1])
                lab_z.append(ori_lab[2])

                f = col(pg.get_fast_axis())
                s = col(pg.get_slow_axis())
                n = col(pg.get_normal())
                basis = sqr(
                    [f[0], s[0], n[0], f[1], s[1], n[1], f[2], s[2], n[2]])
                rotX, rotY, rotZ = basis.r3_rotation_matrix_as_x_y_z_angles(
                    deg=True)
                rot_X.append(rotX)
                rot_Y.append(rotY)
                rot_Z.append(rotZ)

            all_lab_x.extend(lab_x)
            all_lab_y.extend(lab_y)
            all_lab_z.extend(lab_z)
            all_rotX.extend(rot_X)
            all_rotY.extend(rot_Y)
            all_rotZ.extend(rot_Z)

            pg_weights = flex.double([pg1_refls, pg2_refls])
            if 0 in pg_weights:
                dist_m = dist_s = 0
                lx_m = lx_s = ly_m = ly_s = lz_m = lz_s = 0
                lrx_m = lrx_s = lry_m = lry_s = lrz_m = lrz_s = 0
                dx = dy = dz = dxy = dxyz = dr = dt = dnorm = 0
            else:
                stats = flex.mean_and_variance(dists, pg_weights)
                dist_m = stats.mean()
                dist_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_x, pg_weights)
                lx_m = stats.mean()
                lx_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_y, pg_weights)
                ly_m = stats.mean()
                ly_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_z, pg_weights)
                lz_m = stats.mean()
                lz_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_X, pg_weights)
                lrx_m = stats.mean()
                lrx_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_Y, pg_weights)
                lry_m = stats.mean()
                lry_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_Z, pg_weights)
                lrz_m = stats.mean()
                lrz_s = stats.gsl_stats_wsd()

                dx = lab_x[0] - lab_x[1]
                dy = lab_y[0] - lab_y[1]
                dz = lab_z[0] - lab_z[1]
                dxy = math.sqrt(dx**2 + dy**2)
                dxyz = math.sqrt(dx**2 + dy**2 + dz**2)

                delta = col([lab_x[0], lab_y[0], lab_z[0]]) - col(
                    [lab_x[1], lab_y[1], lab_z[1]])
                pg1_center = get_center_lab(pg1).normalize()
                transverse = s0.cross(pg1_center).normalize()
                radial = transverse.cross(s0).normalize()
                dr = delta.dot(radial)
                dt = delta.dot(transverse)
                dnorm = col(pg1.get_normal()).angle(col(pg2.get_normal()),
                                                    deg=True)

            pg_bc_dists.append(dist_m)
            pg_lab_x_sigmas.append(lx_s)
            pg_lab_y_sigmas.append(ly_s)
            pg_lab_z_sigmas.append(lz_s)
            pg_rotX_sigmas.append(lrx_s)
            pg_rotY_sigmas.append(lry_s)
            pg_rotZ_sigmas.append(lrz_s)
            all_delta_x.append(dx)
            all_delta_y.append(dy)
            all_delta_z.append(dz)
            all_delta_xy.append(dxy)
            all_delta_xyz.append(dxyz)
            all_delta_r.append(dr)
            all_delta_t.append(dt)
            all_delta_norm.append(dnorm)

            lab_table_data.append([
                "%d" % pg_id,
                "%5.1f" % dist_m,
                "%9.3f" % lx_m,
                "%9.3f" % lx_s,
                "%9.3f" % ly_m,
                "%9.3f" % ly_s,
                "%9.3f" % lz_m,
                "%9.3f" % lz_s,
                "%9.3f" % lrx_m,
                "%9.3f" % lrx_s,
                "%9.3f" % lry_m,
                "%9.3f" % lry_s,
                "%9.3f" % lrz_m,
                "%9.3f" % lrz_s,
                "%6d" % total_refls
            ])

            lab_delta_table_data.append([
                "%d" % pg_id,
                "%5.1f" % dist_m,
                "%9.1f" % (dx * 1000),
                "%9.1f" % (dy * 1000),
                "%9.3f" % dz,
                "%9.1f" % (dxy * 1000),
                "%9.3f" % dxyz,
                "%9.1f" % (dr * 1000),
                "%9.1f" % (dt * 1000),
                "%9.3f" % dnorm,
                "%6d" % total_refls
            ])

            if params.hierarchy_level > 0:
                local_x = flex.double()
                local_y = flex.double()
                local_z = flex.double()
                l_rot_X = flex.double()
                l_rot_Y = flex.double()
                l_rot_Z = flex.double()
                l_dx = flex.double()
                l_dy = flex.double()
                l_dz = flex.double()
                l_dxy = flex.double()
                l_dxyz = flex.double()

                for pg in [pg1, pg2]:

                    l_ori = pg.get_local_origin()
                    local_x.append(l_ori[0])
                    local_y.append(l_ori[1])
                    local_z.append(l_ori[2])

                    f = col(pg.get_local_fast_axis())
                    s = col(pg.get_local_slow_axis())
                    n = f.cross(s)
                    basis = sqr(
                        [f[0], s[0], n[0], f[1], s[1], n[1], f[2], s[2], n[2]])
                    rotX, rotY, rotZ = basis.r3_rotation_matrix_as_x_y_z_angles(
                        deg=True)
                    l_rot_X.append(rotX)
                    l_rot_Y.append(rotY)
                    l_rot_Z.append(rotZ)

                all_local_x.extend(local_x)
                all_local_y.extend(local_y)
                all_local_z.extend(local_z)
                all_local_rotX.extend(l_rot_X)
                all_local_rotY.extend(l_rot_Y)
                all_local_rotZ.extend(l_rot_Z)

                pg_weights = flex.double([pg1_refls, pg2_refls])
                if 0 in pg_weights:
                    lx_m = lx_s = ly_m = ly_s = lz_m = lz_s = 0
                    lrx_m = lrx_s = lry_m = lry_s = lrz_m = lrz_s = 0
                    ldx = ldy = ldz = ldxy = ldxyz = 0
                else:
                    stats = flex.mean_and_variance(local_x, pg_weights)
                    lx_m = stats.mean()
                    lx_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(local_y, pg_weights)
                    ly_m = stats.mean()
                    ly_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(local_z, pg_weights)
                    lz_m = stats.mean()
                    lz_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_X, pg_weights)
                    lrx_m = stats.mean()
                    lrx_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_Y, pg_weights)
                    lry_m = stats.mean()
                    lry_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_Z, pg_weights)
                    lrz_m = stats.mean()
                    lrz_s = stats.gsl_stats_wsd()

                    ldx = local_x[0] - local_x[1]
                    ldy = local_y[0] - local_y[1]
                    ldz = local_z[0] - local_z[1]
                    ldxy = math.sqrt(ldx**2 + ldy**2)
                    ldxyz = math.sqrt(ldx**2 + ldy**2 + ldz**2)

                pg_local_x_sigmas.append(lx_s)
                pg_local_y_sigmas.append(ly_s)
                pg_local_z_sigmas.append(lz_s)
                pg_local_rotX_sigmas.append(lrx_s)
                pg_local_rotY_sigmas.append(lry_s)
                pg_local_rotZ_sigmas.append(lrz_s)
                all_local_delta_x.append(ldx)
                all_local_delta_y.append(ldy)
                all_local_delta_z.append(ldz)
                all_local_delta_xy.append(ldxy)
                all_local_delta_xyz.append(ldxyz)

                local_table_data.append([
                    "%d" % pg_id,
                    "%5.1f" % dist_m,
                    "%9.3f" % lx_m,
                    "%9.3f" % lx_s,
                    "%9.3f" % ly_m,
                    "%9.3f" % ly_s,
                    "%9.3f" % lz_m,
                    "%9.3f" % lz_s,
                    "%9.3f" % lrx_m,
                    "%9.3f" % lrx_s,
                    "%9.3f" % lry_m,
                    "%9.3f" % lry_s,
                    "%9.3f" % lrz_m,
                    "%9.3f" % lrz_s,
                    "%6d" % total_refls
                ])

                local_delta_table_data.append([
                    "%d" % pg_id,
                    "%5.1f" % dist_m,
                    "%9.1f" % (ldx * 1000),
                    "%9.1f" % (ldy * 1000),
                    "%9.3f" % ldz,
                    "%9.1f" % (ldxy * 1000),
                    "%9.3f" % ldxyz,
                    "%6d" % total_refls
                ])

        # Set up table output, starting with lab table
        table_d = {d: row for d, row in zip(pg_bc_dists, lab_table_data)}
        table_header = [
            "PanelG", "Radial", "Lab X", "Lab X", "Lab Y", "Lab Y", "Lab Z",
            "Lab Z", "Rot X", "Rot X", "Rot Y", "Rot Y", "Rot Z", "Rot Z", "N"
        ]
        table_header2 = [
            "Id", "Dist", "", "Sigma", "", "Sigma", "", "Sigma", "", "Sigma",
            "", "Sigma", "", "Sigma", "Refls"
        ]
        table_header3 = [
            "", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)",
            "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", ""
        ]
        lab_table_data = [table_header, table_header2, table_header3]
        lab_table_data.extend([table_d[key] for key in sorted(table_d)])

        if len(all_weights) > 1:
            r1 = ["All"]
            r2 = ["Mean"]
            for data, weights, fmt in [
                [None, None, None],
                [all_lab_x, all_weights.as_double(), "%9.3f"],
                [pg_lab_x_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_lab_y, all_weights.as_double(), "%9.3f"],
                [pg_lab_y_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_lab_z, all_weights.as_double(), "%9.3f"],
                [pg_lab_z_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotX, all_weights.as_double(), "%9.3f"],
                [pg_rotX_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotY, all_weights.as_double(), "%9.3f"],
                [pg_rotY_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotZ, all_weights.as_double(), "%9.3f"],
                [pg_rotZ_sigmas,
                 all_refls_count.as_double(), "%9.3f"]
            ]:
                r2.append("")
                if data is None and weights is None:
                    r1.append("")
                    continue
                stats = flex.mean_and_variance(data, weights)
                r1.append(fmt % stats.mean())

            r1.append("")
            r2.append("%6.1f" % flex.mean(all_refls_count.as_double()))
            lab_table_data.append(r1)
            lab_table_data.append(r2)

        from libtbx import table_utils
        print("Detector statistics relative to lab origin")
        print(
            table_utils.format(lab_table_data,
                               has_header=3,
                               justify='center',
                               delim=" "))
        print(
            "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
        )
        print(
            "Radial dist: distance from center of panel group to the beam center"
        )
        print("Lab X, Y and Z: mean coordinate in lab space")
        print(
            "Rot X, Y and Z: rotation of panel group around lab X, Y and Z axes"
        )
        print(
            "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
        )
        print("All: weighted mean of the values shown")
        print()

        # Next, deltas in lab space
        table_d = {d: row for d, row in zip(pg_bc_dists, lab_delta_table_data)}
        table_header = [
            "PanelG", "Radial", "Lab dX", "Lab dY", "Lab dZ", "Lab dXY",
            "Lab dXYZ", "Lab dR", "Lab dT", "Lab dNorm", "N"
        ]
        table_header2 = ["Id", "Dist", "", "", "", "", "", "", "", "", "Refls"]
        table_header3 = [
            "", "(mm)", "(microns)", "(microns)", "(mm)", "(microns)", "(mm)",
            "(microns)", "(microns)", "(deg)", ""
        ]
        lab_delta_table_data = [table_header, table_header2, table_header3]
        lab_delta_table_data.extend([table_d[key] for key in sorted(table_d)])

        if len(all_weights) > 1:
            r1 = ["WMean"]
            r2 = ["WStddev"]
            r3 = ["Mean"]
            for data, weights, fmt in [
                [None, None, None],
                [all_delta_x * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_y * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_z,
                 all_refls_count.as_double(), "%9.3f"],
                [all_delta_xy * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_xyz,
                 all_refls_count.as_double(), "%9.3f"],
                [all_delta_r * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_t * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_norm,
                 all_refls_count.as_double(), "%9.3f"]
            ]:
                r3.append("")
                if data is None and weights is None:
                    r1.append("")
                    r2.append("")
                    continue
                stats = flex.mean_and_variance(data, weights)
                r1.append(fmt % stats.mean())
                if len(data) > 1:
                    r2.append(fmt % stats.gsl_stats_wsd())
                else:
                    r2.append("-")

            r1.append("")
            r2.append("")
            r3.append("%6.1f" % flex.mean(all_refls_count.as_double()))
            lab_delta_table_data.append(r1)
            lab_delta_table_data.append(r2)
            lab_delta_table_data.append(r3)

        print("Detector deltas in lab space")
        print(
            table_utils.format(lab_delta_table_data,
                               has_header=3,
                               justify='center',
                               delim=" "))
        print(
            "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
        )
        print(
            "Radial dist: distance from center of panel group to the beam center"
        )
        print(
            "Lab dX, dY and dZ: delta between X, Y and Z coordinates in lab space"
        )
        print(
            "Lab dR, dT and dZ: radial and transverse components of dXY in lab space"
        )
        print("Lab dNorm: angle between normal vectors in lab space")
        print(
            "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
        )
        print("WMean: weighted mean of the values shown")
        print("WStddev: weighted standard deviation of the values shown")
        print("Mean: mean of the values shown")
        print()

        if params.hierarchy_level > 0:
            # Local table
            table_d = {d: row for d, row in zip(pg_bc_dists, local_table_data)}
            table_header = [
                "PanelG", "Radial", "Local X", "Local X", "Local Y", "Local Y",
                "Local Z", "Local Z", "Rot X", "Rot X", "Rot Y", "Rot Y",
                "Rot Z", "Rot Z", "N"
            ]
            table_header2 = [
                "Id", "Dist", "", "Sigma", "", "Sigma", "", "Sigma", "",
                "Sigma", "", "Sigma", "", "Sigma", "Refls"
            ]
            table_header3 = [
                "", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)",
                "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", ""
            ]
            local_table_data = [table_header, table_header2, table_header3]
            local_table_data.extend([table_d[key] for key in sorted(table_d)])

            if len(all_weights) > 1:
                r1 = ["All"]
                r2 = ["Mean"]
                for data, weights, fmt in [
                    [None, None, None],
                    [all_local_x,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_x_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_y,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_y_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_z,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_z_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_rotX,
                     all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotX_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ], [all_local_rotY,
                        all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotY_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ], [all_local_rotZ,
                        all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotZ_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ]
                ]:
                    r2.append("")
                    if data is None and weights is None:
                        r1.append("")
                        continue
                    stats = flex.mean_and_variance(data, weights)
                    r1.append(fmt % stats.mean())

                r1.append("")
                r2.append("%6.1f" % flex.mean(all_refls_count.as_double()))
                local_table_data.append(r1)
                local_table_data.append(r2)

            print("Detector statistics in local frame of each panel group")
            print(
                table_utils.format(local_table_data,
                                   has_header=3,
                                   justify='center',
                                   delim=" "))
            print(
                "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
            )
            print(
                "Radial dist: distance from center of panel group to the beam center"
            )
            print(
                "Lab X, Y and Z: mean coordinate in relative to parent panel group"
            )
            print(
                "Rot X, Y and Z: rotation of panel group around parent panel group X, Y and Z axes"
            )
            print(
                "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
            )
            print("All: weighted mean of the values shown")
            print()

            # Next, deltas in local space
            table_d = {
                d: row
                for d, row in zip(pg_bc_dists, local_delta_table_data)
            }
            table_header = [
                "PanelG", "Radial", "Local dX", "Local dY", "Local dZ",
                "Local dXY", "Local dXYZ", "N"
            ]
            table_header2 = ["Id", "Dist", "", "", "", "", "", "Refls"]
            table_header3 = [
                "", "(mm)", "(microns)", "(microns)", "(mm)", "(microns)",
                "(mm)", ""
            ]
            local_delta_table_data = [
                table_header, table_header2, table_header3
            ]
            local_delta_table_data.extend(
                [table_d[key] for key in sorted(table_d)])

            if len(all_weights) > 1:
                r1 = ["WMean"]
                r2 = ["WStddev"]
                r3 = ["Mean"]
                for data, weights, fmt in [
                    [None, None, None],
                    [
                        all_local_delta_x * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [
                        all_local_delta_y * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [all_local_delta_z,
                     all_refls_count.as_double(), "%9.3f"],
                    [
                        all_local_delta_xy * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [
                        all_local_delta_xyz,
                        all_refls_count.as_double(), "%9.3f"
                    ]
                ]:
                    r3.append("")
                    if data is None and weights is None:
                        r1.append("")
                        r2.append("")
                        continue
                    stats = flex.mean_and_variance(data, weights)
                    r1.append(fmt % stats.mean())
                    r2.append(fmt % stats.gsl_stats_wsd())

                r1.append("")
                r2.append("")
                r3.append("%6.1f" % flex.mean(all_refls_count.as_double()))
                local_delta_table_data.append(r1)
                local_delta_table_data.append(r2)
                local_delta_table_data.append(r3)

            print("Detector deltas relative to panel group origin")
            print(
                table_utils.format(local_delta_table_data,
                                   has_header=3,
                                   justify='center',
                                   delim=" "))
            print(
                "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
            )
            print(
                "Radial dist: distance from center of panel group to the beam center"
            )
            print(
                "Local dX, dY and dZ: delta between X, Y and Z coordinates in the local frame of the panel group"
            )
            print(
                "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
            )
            print("All: weighted mean of the values shown")
            print()

        #RMSD table
        table_d = {d: row for d, row in zip(pg_bc_dists, rmsds_table_data)}
        table_header = ["PanelG"]
        table_header2 = ["Id"]
        table_header3 = [""]
        for i in range(len(detectors)):
            table_header.extend(["D%d" % i] * 4)
            table_header2.extend(["RMSD", "rRMSD", "tRMSD", "N refls"])
            table_header3.extend(["(microns)"] * 3)
            table_header3.append("")
        rmsds_table_data = [table_header, table_header2, table_header3]
        rmsds_table_data.extend([table_d[key] for key in sorted(table_d)])

        row = ["Overall"]
        for refls in reflections:
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['difference_vector_norms']) / len(refls)) *
                                  1000))
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['radial_displacements']) / len(refls)) *
                                  1000))
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['transverse_displacements']) / len(refls)) *
                                  1000))
            row.append("%8d" % len(refls))
        rmsds_table_data.append(row)

        print("RMSDs by detector number")
        print(
            table_utils.format(rmsds_table_data,
                               has_header=3,
                               justify='center',
                               delim=" "))
        print(
            "PanelG Id: panel group id or panel id, depending on hierarchy_level"
        )
        print(
            "RMSD: root mean squared deviation between observed and predicted spot locations"
        )
        print(
            "rRMSD: RMSD of radial components of the observed-predicted vectors"
        )
        print(
            "tRMSD: RMSD of transverse components of the observed-predicted vectors"
        )
        print("N refls: number of reflections")

        if params.tag is None:
            tag = ""
        else:
            tag = "%s " % params.tag

        if params.show_plots:
            # Plot the results
            self.detector_plot_dict(detectors[0],
                                    refl_counts,
                                    u"%sN reflections" % tag,
                                    u"%6d",
                                    show=False)
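The tables above lean on the two-argument, weighted form of flex.mean_and_variance. A minimal sketch with hypothetical values: the weighted mean comes from mean() and the weighted standard deviation from gsl_stats_wsd():

from dials.array_family import flex

values = flex.double([1.20, 1.35])       # e.g. one value per input detector
weights = flex.double([5000.0, 3000.0])  # reflection counts used as weights

stats = flex.mean_and_variance(values, weights)
print("weighted mean: %.3f, weighted stddev: %.3f"
      % (stats.mean(), stats.gsl_stats_wsd()))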
Code example #18
    def intensities(self):
        """Compare the intensities."""
        from dials.array_family import flex

        # Sort by resolution
        d = self.refl1["d"]
        index = flex.size_t(reversed(sorted(range(len(d)),
                                            key=lambda x: d[x])))
        self.refl1.reorder(index)
        self.refl2.reorder(index)

        # Get the intensities
        I1 = self.refl1["intensity.sum.value"]
        I2 = self.refl2["intensity.sum.value"]

        # Compute chunked statistics
        corr = []
        R = []
        scale = []
        res = []
        for i in range(len(self.refl1) // 1000):

            # Get the chunks of data
            a = i * 1000
            b = (i + 1) * 1000
            II1 = I1[a:b]
            II2 = I2[a:b]
            res.append(d[a])

            # Compute the mean and standard deviation per chunk
            mv1 = flex.mean_and_variance(II1)
            mv2 = flex.mean_and_variance(II2)
            m1 = mv1.mean()
            m2 = mv2.mean()
            s1 = mv1.unweighted_sample_standard_deviation()
            s2 = mv2.unweighted_sample_standard_deviation()

            # compute the correlation coefficient
            r = (1 / (len(II1) - 1)) * sum(
                ((II1[j] - m1) / s1) * ((II2[j] - m2) / s2)
                for j in range(len(II1)))
            corr.append(r)

            # Compute the scale between the chunks
            s = sum(II1) / sum(II2)
            scale.append(s)

            # Compute R between the chunks
            r = sum(
                abs(abs(II1[j]) - abs(s * II2[j]))
                for j in range(len(II1))) / sum(
                    abs(II1[j]) for j in range(len(II1)))
            R.append(r)

        from matplotlib import pylab

        pylab.plot(corr, label="CC")
        pylab.plot(R, label="R")
        pylab.plot(scale, label="K")
        pylab.legend()
        pylab.show()
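A standalone numpy sketch of the per-chunk statistics above, on synthetic intensities: Pearson correlation CC, scale K = sum(I1)/sum(I2), and R = sum|I1 - K*I2| / sum|I1|:

import numpy as np

rng = np.random.default_rng(1)
I1 = rng.gamma(2.0, 50.0, size=1000)             # synthetic chunk of intensities
I2 = 0.9 * I1 + rng.normal(0.0, 5.0, size=1000)  # correlated, differently scaled

cc = np.corrcoef(I1, I2)[0, 1]                   # correlation coefficient
K = I1.sum() / I2.sum()                          # scale between the chunks
R = np.abs(np.abs(I1) - np.abs(K * I2)).sum() / np.abs(I1).sum()  # R factor
print("CC %.3f  K %.3f  R %.3f" % (cc, K, R))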
Code example #19
  def per_sensor_analysis(self): # hardcoded Jungfrau 16M geometry
    for isensor in range(32):
      print ("Panel Sensor  <Δx>(μm)     <Δy>(μm)      Nrefl  RMS Δx(μm)  RMS Δy(μm) ")

      if len(self.cumCALC[isensor]) <= 3: continue

      for ipanel in range(8*isensor, 8*(1+isensor)):
        if len(self.panel_deltax[ipanel])<2: continue
        Sx = flex.mean_and_variance(1000.*self.panel_deltax[ipanel])
        Sy = flex.mean_and_variance(1000.*self.panel_deltay[ipanel])
        RMSDx = 1000.*math.sqrt(flex.mean(self.panel_deltax[ipanel]*self.panel_deltax[ipanel]))
        RMSDy = 1000.*math.sqrt(flex.mean(self.panel_deltay[ipanel]*self.panel_deltay[ipanel]))
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(self.panel_deltax[ipanel])),
            "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )
      print("")
      cumD = (self.cumCALC[isensor]-self.cumOBS[isensor]).parts()
      print ( "All  %3d %7.2f        %7.2f        %6d"%(isensor,1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("")

      # Now we'll do a linear least squares refinement over sensors.
      # Method 1: simple rectilinear translation.
      if self.params.verbose:
        veclength = len(self.cumCALC[isensor])
        correction = flex.vec3_double( veclength, (flex.mean(cumD[0]), flex.mean(cumD[1]), flex.mean(cumD[2])) )

        new_delta = (self.cumCALC[isensor]-correction ) -self.cumOBS[isensor]
        for ipanel in range(8*isensor, 8*(1+isensor)):
          panel_delta = new_delta.select(self.cumPANNO[isensor]==ipanel)
          if len(panel_delta)<2: continue
          deltax_part, deltay_part = panel_delta.parts()[0:2]
          RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
          RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
          Sx = flex.mean_and_variance(1000.*deltax_part)
          Sy = flex.mean_and_variance(1000.*deltay_part)
          print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
          "    %5.1f   %5.1f"%(RMSDx,RMSDy),
          )
        print()
      # Method 2: translation + rotation.
      src = []
      dst = []
      for icoord in range(len(self.cumCALC[isensor])):
        src.append(self.cumCALC[isensor][icoord][0:2])
        dst.append(self.cumOBS[isensor][icoord][0:2])
      src = np.array(src)
      dst = np.array(dst)

      # estimate similarity transform model using all coordinates
      model = SimilarityTransform()
      model.estimate(src, dst)

      # robustly estimate similarity transform model with RANSAC
      model_robust, inliers = ransac((src, dst), SimilarityTransform, min_samples=3,
                               residual_threshold=2, max_trials=10)
      outliers = flex.bool(inliers == False)

      # compare "true" and estimated transform parameters
      if self.params.verbose:
        print("Similarity transform:")
        print("%2d"%isensor, "Scale: %.5f,"%(model.scale),
        "Translation(μm): (%7.2f,"%(1000.*model.translation[0]),
        "%7.2f),"%(1000.*model.translation[1]),
        "Rotation (°): %7.4f"%((180./math.pi)*model.rotation))
      print("RANSAC:")
      print("%2d"%isensor, "Scale: %.5f,"%(model_robust.scale),
      "Translation(μm): (%7.2f,"%(1000.*model_robust.translation[0]),
      "%7.2f),"%(1000.*model_robust.translation[1]),
      "Rotation (°): %7.4f,"%((180./math.pi)*model_robust.rotation),
      "Outliers:",outliers.count(True)
      )
      """from documentation:
      X = a0 * x - b0 * y + a1 = s * x * cos(rotation) - s * y * sin(rotation) + a1
      Y = b0 * x + a0 * y + b1 = s * x * sin(rotation) + s * y * cos(rotation) + b1"""

      oldCALC = self.cumCALC[isensor].parts()

      ransacCALC = flex.vec3_double(
               (float(model_robust.scale) * oldCALC[0] * math.cos(model_robust.rotation) -
               float(model_robust.scale) * oldCALC[1] * math.sin(model_robust.rotation) +
               float(model_robust.translation[0])),
               (float(model_robust.scale) * oldCALC[0] * math.sin(model_robust.rotation) +
               float(model_robust.scale) * oldCALC[1] * math.cos(model_robust.rotation) +
               float(model_robust.translation[1])),
               oldCALC[2]
               )
      new_delta = ransacCALC - self.cumOBS[isensor]
      inlier_delta = new_delta.select(~outliers)
      inlier_panno = self.cumPANNO[isensor].select(~outliers)

      for ipanel in range(8*isensor, 8*(1+isensor)):
        panel_delta = inlier_delta.select(inlier_panno==ipanel)
        if len(panel_delta)<2: continue
        deltax_part, deltay_part = panel_delta.parts()[0:2]
        RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
        RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
        Sx = flex.mean_and_variance(1000.*deltax_part)
        Sy = flex.mean_and_variance(1000.*deltay_part)
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
        "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )

      if self.params.verbose:
        print("")
        cumD = (inlier_delta).parts()
        print ( "     %3d %7.2f        %7.2f        %6d\n"%(isensor,1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("----\n")
コード例 #21
0
    def run(self, experiments, reflections):
        self.logger.log_step_time("SCALE_FRAMES")
        if self.params.scaling.algorithm != "mark0":  # mark1 implies no scaling/post-refinement
            self.logger.log("No scaling was done")
            if self.mpi_helper.rank == 0:
                self.logger.main_log("No scaling was done")
            return experiments, reflections

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        # scale experiments, one at a time. Reject experiments that do not correlate with the reference or fail to scale.
        results = []
        slopes = []
        correlations = []
        high_res_experiments = 0
        experiments_rejected_because_of_low_signal = 0
        experiments_rejected_because_of_low_correlation_with_reference = 0

        target_symm = symmetry(
            unit_cell=self.params.scaling.unit_cell,
            space_group_info=self.params.scaling.space_group)
        for experiment in experiments:
            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)

            # Build a miller array for the experiment reflections
            exp_miller_indices = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            exp_intensities = miller.array(
                exp_miller_indices, exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            model_intensities = self.params.scaling.i_model

            # Extract an array of HKLs from the model to match the experiment HKLs
            matching_indices = miller.match_multi_indices(
                miller_indices_unique=model_intensities.indices(),
                miller_indices=exp_intensities.indices())

            # Least squares
            if self.params.scaling.mark0.fit_reference_to_experiment:  # RB: in cxi-merge we fit reference to experiment, but we should really do it the other way
                result = self.fit_reference_to_experiment(
                    model_intensities, exp_intensities, matching_indices)
            else:
                result = self.fit_experiment_to_reference(
                    model_intensities, exp_intensities, matching_indices)

            if result.error == scaling_result.err_low_signal:
                experiments_rejected_because_of_low_signal += 1
                continue
            elif result.error == scaling_result.err_low_correlation:
                experiments_rejected_because_of_low_correlation_with_reference += 1
                continue

            slopes.append(result.slope)
            correlations.append(result.correlation)

            if self.params.output.log_level == 0:
                self.logger.log(
                    "Experiment ID: %s; Slope: %f; Correlation %f" %
                    (experiment.identifier, result.slope, result.correlation))

            # count high resolution experiments
            if exp_intensities.d_min() <= self.params.merging.d_min:
                high_res_experiments += 1

            # apply scale factors
            if not self.params.postrefinement.enable:
                if self.params.scaling.mark0.fit_reference_to_experiment:
                    exp_reflections['intensity.sum.value'] /= result.slope
                    exp_reflections['intensity.sum.variance'] /= (
                        result.slope**2)
                else:
                    exp_reflections['intensity.sum.value'] *= result.slope
                    exp_reflections['intensity.sum.variance'] *= (
                        result.slope**2)

            new_experiments.append(experiment)
            new_reflections.extend(exp_reflections)

        rejected_experiments = len(experiments) - len(new_experiments)
        assert rejected_experiments == experiments_rejected_because_of_low_signal + \
                                        experiments_rejected_because_of_low_correlation_with_reference

        reflections_removed_because_of_rejected_experiments = reflections.size() - new_reflections.size()

        self.logger.log("Experiments rejected because of low signal: %d" %
                        experiments_rejected_because_of_low_signal)
        self.logger.log(
            "Experiments rejected because of low correlation with reference: %d"
            % experiments_rejected_because_of_low_correlation_with_reference)
        self.logger.log(
            "Reflections rejected because of rejected experiments: %d" %
            reflections_removed_because_of_rejected_experiments)
        self.logger.log("High resolution experiments: %d" %
                        high_res_experiments)
        if self.params.postrefinement.enable:
            self.logger.log(
                "Note: scale factors were not applied, because postrefinement is enabled"
            )

        # MPI-reduce all counts
        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI
        total_experiments_rejected_because_of_low_signal = comm.reduce(
            experiments_rejected_because_of_low_signal, MPI.SUM, 0)
        total_experiments_rejected_because_of_low_correlation_with_reference = comm.reduce(
            experiments_rejected_because_of_low_correlation_with_reference,
            MPI.SUM, 0)
        total_reflections_removed_because_of_rejected_experiments = comm.reduce(
            reflections_removed_because_of_rejected_experiments, MPI.SUM, 0)
        total_high_res_experiments = comm.reduce(high_res_experiments, MPI.SUM,
                                                 0)
        all_slopes = comm.reduce(slopes, MPI.SUM, 0)
        all_correlations = comm.reduce(correlations, MPI.SUM, 0)

        # rank 0: log data statistics
        if self.mpi_helper.rank == 0:
            self.logger.main_log(
                'Experiments rejected because of low signal: %d' %
                total_experiments_rejected_because_of_low_signal)
            self.logger.main_log(
                'Experiments rejected because of low correlation with reference: %d'
                %
                total_experiments_rejected_because_of_low_correlation_with_reference
            )
            self.logger.main_log(
                'Reflections rejected because of rejected experiments: %d' %
                total_reflections_removed_because_of_rejected_experiments)
            self.logger.main_log(
                'Experiments with high resolution of %5.2f Angstrom or better: %d'
                % (self.params.merging.d_min, total_high_res_experiments))

            if len(all_slopes) > 0:
                stats_slope = flex.mean_and_variance(flex.double(all_slopes))
                self.logger.main_log(
                    'Average experiment scale factor wrt reference: %f' %
                    (stats_slope.mean()))
            if len(all_correlations) > 0:
                stats_correlation = flex.mean_and_variance(
                    flex.double(all_correlations))
                self.logger.main_log(
                    'Average experiment correlation with reference: %f +/- %f'
                    %
                    (stats_correlation.mean(),
                     stats_correlation.unweighted_sample_standard_deviation()))

            if self.params.postrefinement.enable:
                self.logger.main_log(
                    "Note: scale factors were not applied, because postrefinement is enabled"
                )

        self.logger.log_step_time("SCALE_FRAMES", True)

        return new_experiments, new_reflections
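
The fit_reference_to_experiment and fit_experiment_to_reference helpers are not shown above. For orientation only, a zero-intercept least-squares slope plus the correlation used for the accept/reject decision can be sketched as follows; the function name is hypothetical and the real module also handles low-signal rejection and resolution cutoffs.

import numpy as np

def fit_slope_and_correlation(i_ref, i_exp):
    # Least-squares slope through the origin mapping experiment onto reference:
    # minimising sum (i_ref - s * i_exp)**2 gives s = sum(i_ref * i_exp) / sum(i_exp**2)
    i_ref = np.asarray(i_ref, dtype=float)
    i_exp = np.asarray(i_exp, dtype=float)
    slope = np.dot(i_ref, i_exp) / np.dot(i_exp, i_exp)
    correlation = np.corrcoef(i_ref, i_exp)[0, 1]
    return slope, correlation

# Applying the scale as in the loop above: intensity *= slope, variance *= slope**2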
コード例 #22
0
  def test_for_reference(self):
    from dials.array_family import flex
    from math import sqrt, pi

    # Integrate
    integration = self.experiments[0].profile.fitting_class()(self.experiments[0])

    # Integrate the reference profiles
    integration(self.experiments, self.reference)
    locator = integration.learner.locate()
    # Check the reference profiles and spots are ok
    #self.check_profiles(integration.learner)

    # Make sure background is zero
    profiles = self.reference['rs_shoebox']
    eps = 1e-7
    for p in profiles:
      assert(abs(flex.sum(p.background) - 0) < eps)
    print('OK')

    # Only select variances greater than zero
    mask = self.reference.get_flags(self.reference.flags.integrated, all=False)
    assert(mask.count(True) > 0)
    I_cal = self.reference['intensity.prf.value']
    I_var = self.reference['intensity.prf.variance']
    B_sim = self.reference['background.sim.a'].as_double()
    I_sim = self.reference['intensity.sim'].as_double()
    I_exp = self.reference['intensity.exp']
    P_cor = self.reference['profile.correlation']
    X_pos, Y_pos, Z_pos = self.reference['xyzcal.px'].parts()
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)
    P_cor = P_cor.select(mask)

    max_ind = flex.max_index(flex.abs(I_cal-I_sim))
    max_I = I_cal[max_ind]
    max_P = self.reference[max_ind]['rs_shoebox'].data
    max_C = self.reference[max_ind]['xyzcal.px']
    max_S = self.reference[max_ind]['shoebox'].data

    ref_ind = locator.index(max_C)
    ref_P = locator.profile(ref_ind)
    ref_C = locator.coord(ref_ind)

    #def f(I):
      #mask = flex.bool(flex.grid(9,9,9), False)
      #for k in range(9):
        #for j in range(9):
          #for i in range(9):
            #dx = 5 * (i - 4.5) / 4.5
            #dy = 5 * (j - 4.5) / 4.5
            #dz = 5 * (k - 4.5) / 4.5
            #dd = sqrt(dx**2 + dy**2 + dz**2)
            #if dd <= 3:
              #mask[k,j,i] = True

      #mask = mask.as_1d() & (ref_P.as_1d() > 0)
      #p = ref_P.as_1d().select(mask)
      #c = max_P.as_1d().select(mask)
      #return flex.sum((c - I * p)**2 / (I * p))

    #ff = []
    #for I in range(9500, 11500):
      #ff.append(f(I))
    #print 'Old I: ', sorted(range(len(ff)), key=lambda x: ff[x])[0] + 9500

    #from matplotlib import pylab
    #pylab.plot(range(9500, 11500), ff)
    #pylab.show()

    #def estI(I):
      #mask = flex.bool(flex.grid(9,9,9), False)
      #for k in range(9):
        #for j in range(9):
          #for i in range(9):
            #dx = 5 * (i - 4.5) / 4.5
            #dy = 5 * (j - 4.5) / 4.5
            #dz = 5 * (k - 4.5) / 4.5
            #dd = sqrt(dx**2 + dy**2 + dz**2)
            #if dd <= 3:
              #mask[k,j,i] = True

      #mask = mask.as_1d() & (ref_P.as_1d() > 0)
      #p = ref_P.as_1d().select(mask)
      #c = max_P.as_1d().select(mask)
      #v = I * p
      #return flex.sum(c * p / v) / flex.sum(p*p/v)

    #def iterI(I0):
      #I = estI(I0)
      #print I
      #if abs(I - I0) < 1e-3:
        #return I
      #return iterI(I)

    #newI = iterI(10703)#flex.sum(max_P))
    #print "New I: ", newI

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_sim) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f, sig: %f" % (Z_mean, Z_var, sqrt(Z_var))
コード例 #23
0
ファイル: tst_summation.py プロジェクト: biochem-fan/dials
  def test_for_reflections(self, refl):
    from dials.algorithms.integration.sum import IntegrationAlgorithm
    from dials.array_family import flex
    from dials.algorithms.statistics import \
      kolmogorov_smirnov_test_standard_normal

    # Get the calculated background and simulated background
    B_sim = refl['background.sim.a'].as_double()
    I_sim = refl['intensity.sim'].as_double()
    I_exp = refl['intensity.exp']

    # Set the background as simulated
    shoebox = refl['shoebox']
    for i in range(len(shoebox)):
      bg = shoebox[i].background
      ms = shoebox[i].mask
      for j in range(len(bg)):
        bg[j] = B_sim[i]


    # Integrate
    integration = IntegrationAlgorithm()
    integration(refl)
    I_cal = refl['intensity.sum.value']
    I_var = refl['intensity.sum.variance']

    # Only select variances greater than zero
    mask = I_var > 0
    I_cal = I_cal.select(mask)
    I_var = I_var.select(mask)
    I_sim = I_sim.select(mask)
    I_exp = I_exp.select(mask)

    # Calculate the z score
    perc = self.mv3n_tolerance_interval(3*3)
    Z = (I_cal - I_exp) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

    # Do the kolmogorov smirnov test
    D, p = kolmogorov_smirnov_test_standard_normal(Z)
    print("KS: D: %f, p-value: %f" % (D, p))

    # FIXME Z score should be a standard normal distribution. When background is
    # the main component, we do indeed see that the z score is in a standard
    # normal distribution. When the intensity dominates, the variance of the Z
    # scores decreases indicating that for increasing intensity of the signal,
    # the variance is overestimated.
    assert(abs(Z_mean) <= 3 * Z_var)


    #from matplotlib import pylab
    #pylab.hist(Z, 20)
    #pylab.show()

    #Z_I = sorted(Z)
    ##n = int(0.05 * len(Z_I))
    ##Z_I = Z_I[n:-n]
    ##mv = flex.mean_and_variance(flex.double(Z_I))
    ##print "Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation())
    #edf = [float(i+1) / len(Z_I) for i in range(len(Z_I))]
    #cdf = [0.5 * (1.0 + erf(z / sqrt(2.0))) for z in Z_I]

    print('OK')
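
kolmogorov_smirnov_test_standard_normal comes from dials.algorithms.statistics. Outside of DIALS, an equivalent check (not the DIALS implementation) is available through scipy:

import numpy as np
from scipy import stats

# Stand-in for the Z scores computed above
z = np.random.default_rng(0).standard_normal(1000)

# Two-sided KS test against the standard normal distribution
d_stat, p_value = stats.kstest(z, "norm")
print("KS: D: %f, p-value: %f" % (d_stat, p_value))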
コード例 #25
0
def estimate_resolution_limit_distl_method1(
  reflections, imageset, ice_sel=None, plot_filename=None):

  # Implementation of Method 1 (section 2.4.4) of:
  # Z. Zhang, N. K. Sauter, H. van den Bedem, G. Snell and A. M. Deacon
  # J. Appl. Cryst. (2006). 39, 112-119
  # http://dx.doi.org/10.1107/S0021889805040677

  if ice_sel is None:
    ice_sel = flex.bool(len(reflections), False)

  variances = reflections['intensity.sum.variance']

  sel = variances > 0
  intensities = reflections['intensity.sum.value']
  variances = variances.select(sel)
  ice_sel = ice_sel.select(sel)
  reflections = reflections.select(sel)
  intensities = reflections['intensity.sum.value']
  d_star_sq = flex.pow2(reflections['rlp'].norms())
  d_spacings = uctbx.d_star_sq_as_d(d_star_sq)
  d_star_cubed = flex.pow(reflections['rlp'].norms(), 3)

  step = 2
  while len(reflections)/step > 40:
    step += 1

  order = flex.sort_permutation(d_spacings, reverse=True)

  ds3_subset = flex.double()
  d_subset = flex.double()
  for i in range(len(reflections)//step):
    ds3_subset.append(d_star_cubed[order[i*step]])
    d_subset.append(d_spacings[order[i*step]])

  x = flex.double(range(len(ds3_subset)))

  # (i)
  # Usually, Pm is the last point, that is, m = n. But m could be smaller than
  # n if an unusually high number of spots are detected around a certain
  # intermediate resolution. In that case, our search for the image resolution
  # does not go outside the spot 'bump'. This is particularly useful when
  # ice-rings are present.

  slopes = (ds3_subset[1:] - ds3_subset[0])/(x[1:]-x[0])
  skip_first = 3
  p_m = flex.max_index(slopes[skip_first:]) + 1 + skip_first

  # (ii)

  from scitbx import matrix
  x1 = matrix.col((0, ds3_subset[0]))
  x2 = matrix.col((p_m, ds3_subset[p_m]))

  gaps = flex.double([0])
  v = matrix.col(((x2[1] - x1[1]), -(x2[0] - x1[0]))).normalize()

  for i in range(1, p_m):
    x0 = matrix.col((i, ds3_subset[i]))
    r = x1 - x0
    g = abs(v.dot(r))
    gaps.append(g)

  mv = flex.mean_and_variance(gaps)
  s = mv.unweighted_sample_standard_deviation()

  # (iii)

  p_k = flex.max_index(gaps)
  g_k = gaps[p_k]
  p_g = p_k
  for i in range(p_k+1, len(gaps)):
    g_i = gaps[i]
    if g_i > (g_k - 0.5 * s):
      p_g = i

  ds3_g = ds3_subset[p_g]
  d_g = d_subset[p_g]

  noisiness = 0
  n = len(ds3_subset)
  for i in range(n-1):
    for j in range(i+1, n-1):
      if slopes[i] >= slopes[j]:
        noisiness += 1
  noisiness /= ((n-1)*(n-2)/2)

  if plot_filename is not None:
    if pyplot is None:
      raise Sorry("matplotlib must be installed to generate a plot.")
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    ax.scatter(range(len(ds3_subset)), ds3_subset)
    #ax.set_xlabel('')
    ax.set_ylabel('D^-3')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    ax.vlines(p_g, ylim[0], ylim[1], colors='red')
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.savefig(plot_filename)
    pyplot.close()

  return d_g, noisiness
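
Two pieces of this method are easy to misread in flex form: the "gap" of step (ii) is the perpendicular distance of each point from the chord joining the first point and Pm, and the final "noisiness" score is simply the fraction of slope pairs that are out of order. A plain-numpy restatement under the same definitions (a sketch, not a replacement for the function above):

import numpy as np

def perpendicular_gaps(ds3, p_m):
    # Distance of points 1..p_m-1 from the line through (0, ds3[0]) and (p_m, ds3[p_m])
    x1 = np.array([0.0, ds3[0]])
    x2 = np.array([float(p_m), ds3[p_m]])
    v = np.array([x2[1] - x1[1], -(x2[0] - x1[0])])
    v /= np.linalg.norm(v)  # unit normal to the chord
    return np.array([0.0] + [abs(v @ (x1 - np.array([float(i), ds3[i]])))
                             for i in range(1, p_m)])

def noisiness(slopes):
    # Fraction of index pairs (i < j) with slopes[i] >= slopes[j]
    n = len(slopes)
    pairs = [(i, j) for i in range(n) for j in range(i + 1, n)]
    return sum(slopes[i] >= slopes[j] for i, j in pairs) / len(pairs)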
コード例 #26
0
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    # Find all detector objects
    detectors = []
    detectors.extend(experiments.detectors())

    # Verify inputs
    if len(detectors) != 2:
      print("Please provide two experiments for comparison")
      return

    # These lines exercise the iterate_detector_at_level and iterate_panels functions
    # for a detector with 4 hierarchy levels
    """
    print "Testing iterate_detector_at_level"
    for level in range(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        print panelg.get_name()

    print "Testing iterate_panels"
    for level in range(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        for panel in iterate_panels(panelg):
          print panel.get_name()
    """
    tmp = []
    for refls in reflections:
      print("N reflections total:", len(refls))
      refls = refls.select(refls.get_flags(refls.flags.used_in_refinement))
      print("N reflections used in refinement", len(refls))
      print("Reporting only on those reflections used in refinement")

      refls['difference_vector_norms'] = (refls['xyzcal.mm']-refls['xyzobs.mm.value']).norms()
      tmp.append(refls)
    reflections = tmp

    # Iterate through the detectors, computing the congruence statistics
    delta_normals = {}
    z_angles = {}
    f_deltas = {}
    s_deltas = {}
    z_deltas = {}
    o_deltas = {} # overall
    z_offsets_d = {}
    refl_counts = {}
    all_delta_normals = flex.double()
    all_rdelta_normals = flex.double()
    all_tdelta_normals = flex.double()
    all_z_angles = flex.double()
    all_f_deltas = flex.double()
    all_s_deltas = flex.double()
    all_z_deltas = flex.double()
    all_deltas = flex.double()
    all_refls_count = flex.int()

    all_normal_angles = flex.double()
    all_rnormal_angles = flex.double()
    all_tnormal_angles = flex.double()
    pg_normal_angle_sigmas = flex.double()
    pg_rnormal_angle_sigmas = flex.double()
    pg_tnormal_angle_sigmas = flex.double()
    all_rot_z = flex.double()
    pg_rot_z_sigmas = flex.double()
    pg_bc_dists = flex.double()
    all_bc_dist = flex.double()
    all_f_offsets = flex.double()
    all_s_offsets = flex.double()
    all_z_offsets = flex.double()
    pg_f_offset_sigmas = flex.double()
    pg_s_offset_sigmas = flex.double()
    pg_z_offset_sigmas = flex.double()
    pg_offset_sigmas = flex.double()
    all_weights = flex.double()

    congruence_table_data = []
    detector_table_data = []
    rmsds_table_data = []
    root1 = detectors[0].hierarchy()
    root2 = detectors[1].hierarchy()

    s0 = col(flex.vec3_double([col(b.get_s0()) for b in experiments.beams()]).mean())

    # Compute a set of radial and transverse displacements for each reflection
    print("Setting up stats...")
    tmp_refls = []
    for refls, expts in zip(reflections, [wrapper.data for wrapper in params.input.experiments]):
      tmp = flex.reflection_table()
      assert len(expts.detectors()) == 1
      dect = expts.detectors()[0]
      # Need to construct a variety of vectors
      for panel_id, panel in enumerate(dect):
        panel_refls = refls.select(refls['panel'] == panel_id)
        bcl = flex.vec3_double()
        # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
        # the panel, if it did intersect the panel)
        for expt_id in set(panel_refls['id']):
          beam = expts[expt_id].beam
          s0 = beam.get_s0()
          expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
          beam_centre = panel.get_beam_centre_lab(s0)
          bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        panel_refls['beam_centre_lab'] = bcl

        # Compute obs in lab space
        x, y, _ = panel_refls['xyzobs.mm.value'].parts()
        c = flex.vec2_double(x, y)
        panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
        # Compute deltaXY in panel space. This vector is relative to the panel origin
        x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
        # Convert deltaXY to lab space, subtracting off of the panel origin
        panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
        tmp.extend(panel_refls)
      refls = tmp
      # The radial vector points from the center of the reflection to the beam center
      radial_vectors = (refls['obs_lab_coords'] - refls['beam_centre_lab']).each_normalize()
      # The transverse vector is orthogonal to the radial vector and the beam vector
      transverse_vectors = radial_vectors.cross(refls['beam_centre_lab']).each_normalize()
      # Compute the radial and transverse components of each deltaXY
      refls['radial_displacements']     = refls['delta_lab_coords'].dot(radial_vectors)
      refls['transverse_displacements'] = refls['delta_lab_coords'].dot(transverse_vectors)

      tmp_refls.append(refls)
    reflections = tmp_refls

    for pg_id, (pg1, pg2) in enumerate(zip(iterate_detector_at_level(root1, 0, params.hierarchy_level),
                                           iterate_detector_at_level(root2, 0, params.hierarchy_level))):
      """ First compute statistics for detector congruence """
      # Count up the number of reflections in this panel group pair for use as a weighting scheme
      total_refls = 0
      pg1_refls = 0
      pg2_refls = 0
      for p1, p2 in zip(iterate_panels(pg1), iterate_panels(pg2)):
        r1 = len(reflections[0].select(reflections[0]['panel'] == id_from_name(detectors[0], p1.get_name())))
        r2 = len(reflections[1].select(reflections[1]['panel'] == id_from_name(detectors[1], p2.get_name())))
        total_refls += r1 + r2
        pg1_refls += r1
        pg2_refls += r2
      if pg1_refls == 0 and pg2_refls == 0:
        print("No reflections on panel group", pg_id)
        continue

      assert pg1.get_name() == pg2.get_name()
      refl_counts[pg1.get_name()] = total_refls

      row = ["%d"%pg_id]
      for pg, refls, det in zip([pg1, pg2], reflections, detectors):
        pg_refls = flex.reflection_table()
        for p in iterate_panels(pg):
          pg_refls.extend(refls.select(refls['panel'] == id_from_name(det, p.get_name())))
        if len(pg_refls) == 0:
          rmsd = r_rmsd = t_rmsd = 0
        else:
          rmsd = math.sqrt(flex.sum_sq(pg_refls['difference_vector_norms'])/len(pg_refls))*1000
          r_rmsd = math.sqrt(flex.sum_sq(pg_refls['radial_displacements'])/len(pg_refls))*1000
          t_rmsd = math.sqrt(flex.sum_sq(pg_refls['transverse_displacements'])/len(pg_refls))*1000

        row.extend(["%6.1f"%rmsd, "%6.1f"%r_rmsd, "%6.1f"%t_rmsd, "%8d"%len(pg_refls)])
      rmsds_table_data.append(row)

      # Angle between normals of pg1 and pg2
      delta_norm_angle = col(pg1.get_normal()).angle(col(pg2.get_normal()), deg=True)
      all_delta_normals.append(delta_norm_angle)

      # compute radial and transverse components of the delta between normal angles
      pgo = (get_center(pg1)+get_center(pg2))/2
      ro = (get_center(root1)+get_center(root2))/2
      rn = (col(root1.get_normal())+col(root2.get_normal()))/2
      rf = (col(root1.get_fast_axis())+col(root2.get_fast_axis()))/2
      rs = (col(root1.get_slow_axis())+col(root2.get_slow_axis()))/2

      ro_pgo = pgo - ro # vector from the detector origin to the average panel group origin
      if ro_pgo.length() == 0:
        radial = col((0,0,0))
        transverse = col((0,0,0))
      else:
        radial = ((rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)).normalize() # component of ro_pgo in rf rs plane
        transverse = rn.cross(radial).normalize()
      # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
      # radial points at the panel group origin
      # v1 and v2 are the components of pg 1 and 2 normals in the rn radial plane
      v1 = (radial.dot(col(pg1.get_normal())) * radial) + (rn.dot(col(pg1.get_normal())) * rn)
      v2 = (radial.dot(col(pg2.get_normal())) * radial) + (rn.dot(col(pg2.get_normal())) * rn)
      rdelta_norm_angle = v1.angle(v2, deg=True)
      if v1.cross(v2).dot(transverse) < 0:
        rdelta_norm_angle = -rdelta_norm_angle
      all_rdelta_normals.append(rdelta_norm_angle)
      # v1 and v2 are the components of pg 1 and 2 normals in the rn transverse plane
      v1 = (transverse.dot(col(pg1.get_normal())) * transverse) + (rn.dot(col(pg1.get_normal())) * rn)
      v2 = (transverse.dot(col(pg2.get_normal())) * transverse) + (rn.dot(col(pg2.get_normal())) * rn)
      tdelta_norm_angle = v1.angle(v2, deg=True)
      if v1.cross(v2).dot(radial) < 0:
        tdelta_norm_angle = -tdelta_norm_angle
      all_tdelta_normals.append(tdelta_norm_angle)

      # compute the angle between fast axes of these panel groups
      z_angle = col(pg1.get_fast_axis()[0:2]).angle(col(pg2.get_fast_axis()[0:2]), deg=True)
      all_z_angles.append(z_angle)
      z_angles[pg1.get_name()] = z_angle

      all_refls_count.append(total_refls)
      all_weights.append(pg1_refls)
      all_weights.append(pg2_refls)


      """ Now compute statistics measuring the reality of the detector. For example, instead of the distance between two things,
      we are concerned with the location of those things relative to laboratory space """
      # Compute distances between panel groups and beam center
      # Also compute offset along Z axis
      dists = flex.double()
      f_offsets = flex.double()
      s_offsets = flex.double()
      z_offsets = flex.double()
      for pg, r in zip([pg1, pg2], [root1, root2]):
        bc = col(pg.get_beam_centre_lab(s0))
        ori = get_center(pg)

        dists.append((ori-bc).length())

        rori = col(r.get_origin())
        delta_ori = ori-rori
        r_norm = col(r.get_normal())
        r_fast = col(r.get_fast_axis())
        r_slow = col(r.get_slow_axis())
        f_offsets.append(r_fast.dot(delta_ori)*1000)
        s_offsets.append(r_slow.dot(delta_ori)*1000)
        z_offsets.append(r_norm.dot(delta_ori)*1000)

      fd = abs(f_offsets[0]-f_offsets[1])
      sd = abs(s_offsets[0]-s_offsets[1])
      zd = abs(z_offsets[0]-z_offsets[1])
      od = math.sqrt(fd**2+sd**2+zd**2)
      f_deltas[pg1.get_name()] = fd
      s_deltas[pg1.get_name()] = sd
      z_deltas[pg1.get_name()] = zd
      o_deltas[pg1.get_name()] = od
      all_f_deltas.append(fd)
      all_s_deltas.append(sd)
      all_z_deltas.append(zd)
      all_deltas.append(od)

      all_f_offsets.extend(f_offsets)
      all_s_offsets.extend(s_offsets)
      all_z_offsets.extend(z_offsets)

      # Compute angle between detector normal and panel group normal
      # Compute rotation of panel group around detector normal
      pg_rotz = flex.double()
      norm_angles = flex.double()
      rnorm_angles = flex.double()
      tnorm_angles = flex.double()
      for pg, r in zip([pg1, pg2], [root1, root2]):

        pgo = get_center(pg)
        pgn = col(pg.get_normal())
        pgf = col(pg.get_fast_axis())

        ro = get_center(r)
        rn = col(r.get_normal())
        rf = col(r.get_fast_axis())
        rs = col(r.get_slow_axis())

        norm_angle = rn.angle(pgn, deg=True)
        norm_angles.append(norm_angle)
        all_normal_angles.append(norm_angle)

        ro_pgo = pgo - ro # vector from the detector origin to the panel group origin
        if ro_pgo.length() == 0:
          radial = col((0,0,0))
          transverse = col((0,0,0))
        else:
          radial = ((rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)).normalize() # component of ro_pgo in rf rs plane
          transverse = rn.cross(radial).normalize()
        # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
        # radial points at the panel group origin
        # v is the component of pgn in the rn radial plane
        v = (radial.dot(pgn) * radial) + (rn.dot(pgn) * rn)
        angle = rn.angle(v, deg=True)
        if rn.cross(v).dot(transverse) < 0:
          angle = -angle
        rnorm_angles.append(angle)
        all_rnormal_angles.append(angle)
        # v is the component of pgn in the rn transverse plane
        v = (transverse.dot(pgn) * transverse) + (rn.dot(pgn) * rn)
        angle = rn.angle(v, deg=True)
        if rn.cross(v).dot(radial) < 0:
          angle = -angle
        tnorm_angles.append(angle)
        all_tnormal_angles.append(angle)

        # v is the component of pgf in the rf rs plane
        v = (rf.dot(pgf) * rf) + (rs.dot(pgf) * rs)
        angle = rf.angle(v, deg=True)
        angle = angle-(round(angle/90)*90) # deviation from 90 degrees
        pg_rotz.append(angle)
        all_rot_z.append(angle)

      # Set up table rows using stats aggregated from above
      pg_weights = flex.double([pg1_refls, pg2_refls])
      if 0 in pg_weights:
        dist_m = dist_s = norm_angle_m = norm_angle_s = rnorm_angle_m = rnorm_angle_s = 0
        tnorm_angle_m = tnorm_angle_s = rotz_m = rotz_s = 0
        fo_m = fo_s = so_m = so_s = zo_m = zo_s = o_s = 0

      else:
        stats = flex.mean_and_variance(dists, pg_weights)
        dist_m = stats.mean()
        dist_s = stats.gsl_stats_wsd()

        stats = flex.mean_and_variance(norm_angles, pg_weights)
        norm_angle_m = stats.mean()
        norm_angle_s = stats.gsl_stats_wsd()

        stats = flex.mean_and_variance(rnorm_angles, pg_weights)
        rnorm_angle_m = stats.mean()
        rnorm_angle_s = stats.gsl_stats_wsd()

        stats = flex.mean_and_variance(tnorm_angles, pg_weights)
        tnorm_angle_m = stats.mean()
        tnorm_angle_s = stats.gsl_stats_wsd()

        stats = flex.mean_and_variance(pg_rotz, pg_weights)
        rotz_m = stats.mean()
        rotz_s = stats.gsl_stats_wsd()

        stats = flex.mean_and_variance(f_offsets, pg_weights)
        fo_m = stats.mean()
        fo_s = stats.gsl_stats_wsd()
        stats = flex.mean_and_variance(s_offsets, pg_weights)
        so_m = stats.mean()
        so_s = stats.gsl_stats_wsd()
        stats = flex.mean_and_variance(z_offsets, pg_weights)
        zo_m = stats.mean()
        zo_s = stats.gsl_stats_wsd()

        o_s = math.sqrt(fo_s**2+so_s**2+zo_s**2)

      pg_bc_dists.append(dist_m)
      all_bc_dist.extend(dists)
      pg_normal_angle_sigmas.append(norm_angle_s)
      pg_rnormal_angle_sigmas.append(rnorm_angle_s)
      pg_tnormal_angle_sigmas.append(tnorm_angle_s)
      pg_rot_z_sigmas.append(rotz_s)
      pg_f_offset_sigmas.append(fo_s)
      pg_s_offset_sigmas.append(so_s)
      pg_z_offset_sigmas.append(zo_s)
      pg_offset_sigmas.append(o_s)
      z_offsets_d[pg1.get_name()] = zo_m

      congruence_table_data.append(["%d"%pg_id, "%5.1f"%dist_m, #"%.4f"%dist_s,
                                    "%.4f"%delta_norm_angle, "%.4f"%rdelta_norm_angle,
                                    "%.4f"%tdelta_norm_angle, "%.4f"%z_angle,
                                    "%4.1f"%fd, "%4.1f"%sd, "%4.1f"%zd, "%4.1f"%od, "%6d"%total_refls])
      detector_table_data.append(["%d"%pg_id, "%5.1f"%dist_m, #"%.4f"%dist_s,
                                  "%.4f"%norm_angle_m, "%.4f"%norm_angle_s,
                                  "%.4f"%rnorm_angle_m, "%.4f"%rnorm_angle_s,
                                  "%.4f"%tnorm_angle_m, "%.4f"%tnorm_angle_s,
                                  "%10.6f"%rotz_m, "%.6f"%rotz_s,
                                  #"%9.1f"%fo_m, "%5.3f"%fo_s,
                                  #"%9.1f"%so_m, "%5.3f"%so_s,
                                  "%9.3f"%fo_s,
                                  "%9.3f"%so_s,
                                  "%9.1f"%zo_m, "%9.1f"%zo_s, "%9.3f"%o_s, "%6d"%total_refls])

    # Set up table output
    table_d = {d:row for d, row in zip(pg_bc_dists, congruence_table_data)}
    table_header = ["PanelG","Dist","Normal","RNormal","TNormal","Z rot","Delta","Delta","Delta","Delta","N"]
    table_header2 = ["Id","","Angle","Angle","Angle","Angle","F","S","Z","O","Refls"]
    table_header3 = ["", "(mm)","(mm)","(deg)","(deg)","(microns)","(microns)","(microns)","(microns)","(microns)",""]
    congruence_table_data = [table_header, table_header2, table_header3]
    congruence_table_data.extend([table_d[key] for key in sorted(table_d)])

    table_d = {d:row for d, row in zip(pg_bc_dists, detector_table_data)}
    table_header = ["PanelG","Dist","Normal","Normal","RNormal","RNormal","TNormal","TNormal","RotZ", "RotZ","F Offset","S Offset","Z Offset","Z Offset","Offset","N"]
    table_header2 = ["Id","","","Sigma","","Sigma","","Sigma","","Sigma","Sigma","Sigma","","Sigma","Sigma","Refls"]
    table_header3 = ["", "(mm)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(microns)","(microns)","(microns)","(microns)","(microns)",""]
    detector_table_data = [table_header, table_header2, table_header3]
    detector_table_data.extend([table_d[key] for key in sorted(table_d)])

    table_d = {d:row for d, row in zip(pg_bc_dists, rmsds_table_data)}
    table_header = ["PanelG"]
    table_header2 = ["Id"]
    table_header3 = [""]
    for i in range(len(detectors)):
      table_header.extend(["D%d"%i]*4)
      table_header2.extend(["RMSD", "rRMSD", "tRMSD", "N refls"])
      table_header3.extend(["(microns)"]*3)
      table_header3.append("")
    rmsds_table_data = [table_header, table_header2, table_header3]
    rmsds_table_data.extend([table_d[key] for key in sorted(table_d)])

    if len(all_refls_count) > 1:
      r1 = ["Weighted mean"]
      r2 = ["Weighted stddev"]
      r1.append("")
      r2.append("")
      #r1.append("")
      #r2.append("")
      stats = flex.mean_and_variance(all_delta_normals, all_refls_count.as_double())
      r1.append("%.4f"%stats.mean())
      r2.append("%.4f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_rdelta_normals, all_refls_count.as_double())
      r1.append("%.4f"%stats.mean())
      r2.append("%.4f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_tdelta_normals, all_refls_count.as_double())
      r1.append("%.4f"%stats.mean())
      r2.append("%.4f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_z_angles, all_refls_count.as_double())
      r1.append("%.4f"%stats.mean())
      r2.append("%.4f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_f_deltas, all_refls_count.as_double())
      r1.append("%4.1f"%stats.mean())
      r2.append("%4.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_s_deltas, all_refls_count.as_double())
      r1.append("%4.1f"%stats.mean())
      r2.append("%4.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_z_deltas, all_refls_count.as_double())
      r1.append("%4.1f"%stats.mean())
      r2.append("%4.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(all_deltas, all_refls_count.as_double())
      r1.append("%4.1f"%stats.mean())
      r2.append("%4.1f"%stats.gsl_stats_wsd())
      r1.append("")
      r2.append("")
      congruence_table_data.append(r1)
      congruence_table_data.append(r2)
      congruence_table_data.append(["Mean", "", "", "","","", "", "", "", "", "", "%6.1f"%flex.mean(all_refls_count.as_double())])

    from libtbx import table_utils
    print("Congruence statistics, I.E. the differences between the input detectors:")
    print(table_utils.format(congruence_table_data,has_header=3,justify='center',delim=" "))

    print("PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, statistics are computed between the matching panel groups between the two input experiments.")
    print("Dist: distance from center of panel group to the beam center")
    print("Dist Sigma: weighted standard deviation of the measurements used to compute Dist")
    print("Normal angle: angle between the normal vectors of matching panel groups.")
    print("RNormal angle: radial component of the angle between the normal vectors of matching panel groups")
    print("TNormal angle: transverse component of the angle between the normal vectors of matching panel groups")
    print("Z rot: angle between the XY components of the fast axes of the panel groups.")
    print("Delta F: shift between matching panel groups along the detector fast axis.")
    print("Delta S: shift between matching panel groups along the detector slow axis.")
    print("Delta Z: Z shift between matching panel groups along the detector normal.")
    print("Delta O: Overall shift between matching panel groups along the detector normal.")
    print("N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations.")
    print()
    print()


    if len(all_weights) > 1:
      r1 = ["All"]
      r2 = ["Mean"]
      for data, weights, fmt in [[None,None,None],
                                 #[None,None,None],
                                 [all_normal_angles,       all_weights.as_double(),     "%.4f"],
                                 [pg_normal_angle_sigmas,  all_refls_count.as_double(), "%.4f"],
                                 [all_rnormal_angles,      all_weights.as_double(),     "%.4f"],
                                 [pg_rnormal_angle_sigmas, all_refls_count.as_double(), "%.4f"],
                                 [all_tnormal_angles,      all_weights.as_double(),     "%.4f"],
                                 [pg_tnormal_angle_sigmas, all_refls_count.as_double(), "%.4f"],
                                 [all_rot_z,               all_weights.as_double(),     "%10.6f"],
                                 [pg_rot_z_sigmas,         all_refls_count.as_double(), "%.6f"],
                                 #[all_f_offsets,           all_weights.as_double(),     "%9.1f"],
                                 [pg_f_offset_sigmas,      all_refls_count.as_double(), "%9.3f"],
                                 #[all_s_offsets,           all_weights.as_double(),     "%9.1f"],
                                 [pg_s_offset_sigmas,      all_refls_count.as_double(), "%9.3f"],
                                 [all_z_offsets,           all_weights.as_double(),     "%9.1f"],
                                 [pg_z_offset_sigmas,      all_refls_count.as_double(), "%9.1f"],
                                 [pg_offset_sigmas,        all_refls_count.as_double(), "%9.1f"]]:

        r2.append("")
        if data is None and weights is None:
          r1.append("")
          continue
        stats = flex.mean_and_variance(data, weights)
        r1.append(fmt%stats.mean())

      r1.append("")
      r2.append("%6.1f"%flex.mean(all_refls_count.as_double()))
      detector_table_data.append(r1)
      detector_table_data.append(r2)

    print("Detector statistics, I.E. measurements of parameters relative to the detector plane:")
    print(table_utils.format(detector_table_data,has_header=3,justify='center',delim=" "))

    print("PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments.")
    print("Dist: distance from center of panel group to the beam center")
    print("Dist Sigma: weighted standard deviation of the measurements used to compute Dist")
    print("Normal Angle: angle between the normal vector of the detector at its root hierarchy level and the normal of the panel group")
    print("RNormal Angle: radial component of Normal Angle")
    print("TNormal Angle: transverse component of Normal Angle")
    print("RotZ: deviation from 90 degrees of the rotation of each panel group around the detector normal")
    print("F Offset: offset of panel group along the detector's fast axis")
    print("S Offset: offset of panel group along the detector's slow axis")
    print("Z Offset: offset of panel group along the detector normal")
    print("Offset: offset of panel group in F,S,Z space. Sigma is F, S, Z offset sigmas summed in quadrature.")
    print("N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations.")
    print("All: weighted mean of the values shown")
    print()
    print("Sigmas in this table are computed using the standard deviation of 2 measurements (I.E. a panel's Z Offset is measured twice, once in each input dataset). This is related by a factor of sqrt(2)/2 to the mean of the Delta Z parameter in the congruence statistics table above, which is the difference between Z parameters.")
    print()

    row = ["Overall"]
    for refls in reflections:
      row.append("%6.1f"%(math.sqrt(flex.sum_sq(refls['difference_vector_norms'])/len(refls))*1000))
      row.append("%6.1f"%(math.sqrt(flex.sum_sq(refls['radial_displacements'])/len(refls))*1000))
      row.append("%6.1f"%(math.sqrt(flex.sum_sq(refls['transverse_displacements'])/len(refls))*1000))
      row.append("%8d"%len(refls))
    rmsds_table_data.append(row)

    print("RMSDs by detector number")
    print(table_utils.format(rmsds_table_data,has_header=3,justify='center',delim=" "))
    print("PanelG Id: panel group id or panel id, depending on hierarchy_level")
    print("RMSD: root mean squared deviation between observed and predicted spot locations")
    print("rRMSD: RMSD of radial components of the observed-predicted vectors")
    print("tRMSD: RMSD of transverse components of the observed-predicted vectors")
    print("N refls: number of reflections")

    # Show stats for detector hierarchy root
    def _print_vector(v):
      for i in v:
        print("%10.5f"%i, end=' ')
      print()
    for d_id, d in enumerate(detectors):
      ori = d.hierarchy().get_origin()
      norm = d.hierarchy().get_normal()
      fast = d.hierarchy().get_fast_axis()
      slow = d.hierarchy().get_slow_axis()
      print("Detector", d_id, "origin:   ", end=' '); _print_vector(ori)
      print("Detector", d_id, "normal:   ", end=' '); _print_vector(norm)
      print("Detector", d_id, "fast axis:", end=' '); _print_vector(fast)
      print("Detector", d_id, "slow axis:", end=' '); _print_vector(slow)

    # Unit cell statistics
    lengths = flex.vec3_double()
    angles = flex.vec3_double()
    weights = flex.double()
    for refls, expts in zip(reflections, [d.data for d in params.input.experiments]):
      for crystal_id, crystal in enumerate(expts.crystals()):
        lengths.append(crystal.get_unit_cell().parameters()[0:3])
        angles.append(crystal.get_unit_cell().parameters()[3:6])
        weights.append(len(refls.select(refls['id'] == crystal_id)))

    print("Unit cell stats (angstroms and degrees), weighted means and standard deviations")
    for subset, tags in zip([lengths, angles], [["Cell a", "Cell b", "Cell c"],["Cell alpha", "Cell beta", "Cell gamma"]]):
      for data, tag in zip(subset.parts(), tags):
        stats = flex.mean_and_variance(data, weights)
        print("%s %5.1f +/- %6.3f"%(tag, stats.mean(), stats.gsl_stats_wsd()))

    if params.tag is None:
      tag = ""
    else:
      tag = "%s "%params.tag

    if params.show_plots:
      # Plot the results
      detector_plot_dict(self.params, detectors[0], refl_counts, u"%sN reflections"%tag, u"%6d", show=False)
      #detector_plot_dict(self.params, detectors[0], delta_normals, u"%sAngle between normal vectors (\N{DEGREE SIGN})"%tag, u"%.2f\N{DEGREE SIGN}", show=False)
      detector_plot_dict(self.params, detectors[0], z_angles, u"%sZ rotation angle between panels (\N{DEGREE SIGN})"%tag, u"%.2f\N{DEGREE SIGN}", show=False)
      detector_plot_dict(self.params, detectors[0], f_deltas, u"%sFast displacements between panels (microns)"%tag, u"%4.1f", show=False)
      detector_plot_dict(self.params, detectors[0], s_deltas, u"%sSlow displacements between panels (microns)"%tag, u"%4.1f", show=False)
      detector_plot_dict(self.params, detectors[0], z_offsets_d, u"%sZ offsets along detector normal (microns)"%tag, u"%4.1f", show=False)
      detector_plot_dict(self.params, detectors[0], z_deltas, u"%sZ displacements between panels (microns)"%tag, u"%4.1f", show=False)
      detector_plot_dict(self.params, detectors[0], o_deltas, u"%sOverall displacements between panels (microns)"%tag, u"%4.1f", show=False)
      plt.show()
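
flex.mean_and_variance(data, weights) with gsl_stats_wsd() is used throughout this script for weighted means and weighted standard deviations. A numpy equivalent, assuming GSL's unbiased weighted-variance normalisation, looks roughly like this:

import numpy as np

def weighted_mean_and_sd(data, weights):
    # GSL-style weighted sample variance:
    # var = (sum w / ((sum w)**2 - sum w**2)) * sum w * (x - mean)**2
    data = np.asarray(data, dtype=float)
    w = np.asarray(weights, dtype=float)
    mean = np.average(data, weights=w)
    factor = w.sum() / (w.sum() ** 2 - (w ** 2).sum())
    return mean, np.sqrt(factor * np.sum(w * (data - mean) ** 2))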
コード例 #27
0
    def run(self):
        """Parse the options."""
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        assert (len(params.input.experiments) == len(params.input.reflections)
                == 1), "Provide one experiment list and one reflection table"
        assert params.method == "summed_intensity"

        experiments = params.input.experiments[0].data
        reflections = params.input.reflections[0].data

        # Find the average unit cell for the crystals in the experiments provided
        weighted_relfs = flex.reflection_table()
        all_uc = [flex.double() for i in range(6)]
        space_group = None
        for expt_id, experiment in enumerate(experiments):
            refls = reflections.select(reflections["id"] == expt_id)
            unit_cell = experiment.crystal.get_unit_cell()
            for i in range(6):
                all_uc[i].append(unit_cell.parameters()[i])

            if space_group is None:
                space_group = experiment.crystal.get_space_group()
            else:
                assert (space_group.type().lookup_symbol() == experiment.
                        crystal.get_space_group().type().lookup_symbol())

        # Compute the average unit cell and build a miller array with it
        unit_cell = uctbx.unit_cell([flex.mean(data) for data in all_uc])
        cs = crystal_symmetry(unit_cell, space_group.type().lookup_symbol())
        ms = miller_set(cs, reflections["miller_index"], anomalous_flag=False)
        ma = ms.array(reflections["intensity.sum.value"] /
                      flex.sqrt(reflections["intensity.sum.variance"]))

        ma.setup_binner(n_bins=10)
        binner = ma.binner()
        mean_i = flex.double()
        reflections["delpsical.weights"] = flex.double(len(reflections), 0)

        # Iterate through the bins and compute the Wilson plot, then use it to compute the weights
        for i in binner.range_all():
            sel = binner.selection(i)
            if sel.count(True) == 0:
                mean_i.append(0)
                continue
            mean_i.append(
                flex.mean(reflections["intensity.sum.value"].select(sel)))
            reflections["delpsical.weights"].set_selected(
                sel,
                reflections["intensity.sum.value"].select(sel) *
                (params.summed_intensity.scale_factor / mean_i[i]),
            )

            if params.show_weight_plots:
                fig = plt.figure()
                plt.title(str(i))
                plt.hist(reflections["delpsical.weights"].select(sel))

        # Show unit cell distribution and mean I
        print("Average uc +/- std. deviation")
        labels = [
            "% 6s" % l for l in ["a", "b", "c", "alpha", "beta", "gamma"]
        ]
        for label, data in zip(labels, all_uc):
            stats = flex.mean_and_variance(data)
            print("%s % 6.1f +/- %6.1f" %
                  (label, stats.mean(),
                   stats.unweighted_sample_standard_deviation()))

        print("Mean I over all data")
        binner.show_data(mean_i, data_fmt="%.1f", show_unused=False)

        easy_pickle.dump(params.output.reflections, reflections)

        if params.show_weight_plots:
            plt.show()
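
Stripped of the cctbx binner, the weighting scheme above amounts to: bin reflections into resolution shells, then give each reflection a weight proportional to its intensity divided by the shell mean. A rough numpy sketch (the quantile-based shell edges and the default scale factor are placeholders):

import numpy as np

def summed_intensity_weights(intensity, d_spacing, n_bins=10, scale_factor=1.0):
    # Weight = scale * I / <I>_shell, computed per resolution shell
    intensity = np.asarray(intensity, dtype=float)
    d_spacing = np.asarray(d_spacing, dtype=float)
    edges = np.quantile(d_spacing, np.linspace(0.0, 1.0, n_bins + 1))
    bin_idx = np.clip(np.digitize(d_spacing, edges) - 1, 0, n_bins - 1)
    weights = np.zeros_like(intensity)
    for b in range(n_bins):
        sel = bin_idx == b
        if sel.any():
            weights[sel] = scale_factor * intensity[sel] / intensity[sel].mean()
    return weights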