예제 #1
0
def refine_expanding(params, merged_scope, combine_phil):
  """Iteratively refine CSPAD detector metrology, expanding outward from the
  center of the detector.

  Runs dials.refine in 8 steps.  Step 0 refines the innermost panels of each
  quadrant; every later step adds the next pair of panels outward, carrying
  the previously refined experiments forward.  Step 0 refines at hierarchy
  levels 0 and 1 (whole detector, then quadrants); all later steps refine at
  level 2 (individual panel groups).  Intermediate results are written to
  disk as <tag>_refined_experiments_step<j>_level<i>.json and matching
  .pickle reflection files; the per-step phil files are also written out.

  Args:
    params: phil extract.  Reads tag, start_at_hierarchy_level, rmsd_filter,
      panel_filter, refine_distance, refine_energy, flat_refinement and
      flat_refinement_with_distance.
    merged_scope: full phil scope; each step's diff phil is fetched into it
      to produce the phil file passed to dials.refine.
    combine_phil: not referenced in this function -- TODO confirm whether it
      is needed for interface compatibility with callers.

  Side effects: runs external programs via easy_run, reads and writes
  experiment/reflection/phil files in the working directory.
  """
  assert params.start_at_hierarchy_level == 0
  if params.rmsd_filter.enable:
    # Optional pre-filter: remove outlier reflections by positional RMSD
    # (IQR-based) before any refinement; later steps read the *_filtered files.
    input_name = "filtered"
    command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
    command = command%("%s_combined_experiments.json"%params.tag, "%s_combined_reflections.pickle"%params.tag,
                       "%s_filtered_experiments.json"%params.tag, "%s_filtered_reflections.pickle"%params.tag)
    command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
    print command
    result = easy_run.fully_buffered(command=command).raise_if_errors()
    result.show_stdout()
  else:
    input_name = "combined"
  # --------------------------
  if params.panel_filter is not None:
    # Keep only reflections recorded on the requested panels, rewriting the
    # combined reflections file in place.  NOTE(review): this always edits the
    # *_combined_* file even when the RMSD filter above switched input_name to
    # "filtered" -- confirm that is intended.
    from libtbx import easy_pickle
    print "Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter]))
    combined_path = "%s_combined_reflections.pickle"%params.tag
    data = easy_pickle.load(combined_path)
    sel = None
    for panel_id in params.panel_filter:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print "Retaining", len(data.select(sel)), "out of", len(data), "reflections"
    easy_pickle.dump(combined_path, data.select(sel))
  # ----------------------------------
  # this is the order to refine the CSPAD in
  # Each step's panel list is the previous step's list plus the next pair of
  # panels outward in the first quadrant (panel ids 0-15).
  steps = {}
  steps[0] = [2, 3]
  steps[1] = steps[0] + [0, 1]
  steps[2] = steps[1] + [14, 15]
  steps[3] = steps[2] + [6, 7]
  steps[4] = steps[3] + [4, 5]
  steps[5] = steps[4] + [12, 13]
  steps[6] = steps[5] + [8, 9]
  steps[7] = steps[6] + [10, 11]

  # Add the corresponding panels in the other three quadrants (panel ids are
  # offset by 16, 32 and 48 per quadrant).
  for s, panels in steps.iteritems():
    rest = []
    for p in panels:
      rest.append(p+16)
      rest.append(p+32)
      rest.append(p+48)
    panels.extend(rest)

  # Hierarchy levels refined at each step: step 0 does levels 0 and 1,
  # all later steps only level 2.
  levels = {0: (0,1)} # levels 0 and 1
  for i in range(7):
    levels[i+1] = (2,) # level 2

  previous_step_and_level = None
  for j in range(8):
    # Write out a reflection file restricted to the panels active in step j.
    from libtbx import easy_pickle
    print "Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in steps[j]]))
    combined_path = "%s_%s_reflections.pickle"%(params.tag, input_name)
    output_path = "%s_reflections_step%d.pickle"%(params.tag, j)
    data = easy_pickle.load(combined_path)
    sel = None
    for panel_id in steps[j]:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print "Retaining", len(data.select(sel)), "out of", len(data), "reflections"
    easy_pickle.dump(output_path, data.select(sel))

    for i in levels[j]:
      print "Step", j , "refining at hierarchy level", i
      refine_phil_file = "%s_refine_step%d_level%d.phil"%(params.tag, j, i)
      if i == 0:
        # Level 0: whole-detector refinement.  Always fix detector rot Z.
        if params.refine_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Tau1" # fix detector rotz
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1" # fix detector rotz, distance
        if params.flat_refinement:
          diff_phil += ",Tau2,Tau3" # Also fix x and y rotations
        diff_phil += "\n"
        if params.refine_energy:
          diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
      else:
        # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
        if params.flat_refinement and params.flat_refinement_with_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
          diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
        elif params.flat_refinement:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything

      # Choose the input experiments/reflections: the original combined set on
      # the very first refinement, otherwise the output of the previous
      # step/level (reusing its reflections only if we are still in the same
      # step).
      if previous_step_and_level is None:
        command = "dials.refine %s %s_%s_experiments.json %s_reflections_step%d.pickle"%( \
          refine_phil_file, params.tag, input_name, params.tag, j)
      else:
        p_step, p_level = previous_step_and_level
        if p_step == j:
          command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_refined_reflections_step%d_level%d.pickle"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
        else:
          command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_reflections_step%d.pickle"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, j)


      diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i

      output_experiments = "%s_refined_experiments_step%d_level%d.json"%(params.tag, j, i)
      command += " output.experiments=%s output.reflections=%s_refined_reflections_step%d_level%d.pickle"%( \
        output_experiments, params.tag, j, i)

      # Write the per-step phil file as a diff against the full refine scope.
      # NOTE(review): refine_scope is not defined in this function --
      # presumably a module-level global; confirm.
      scope = merged_scope.fetch(parse(diff_phil))
      f = open(refine_phil_file, 'w')
      f.write(refine_scope.fetch_diff(scope).as_str())
      f.close()

      print command
      result = easy_run.fully_buffered(command=command).raise_if_errors()
      result.show_stdout()

      # In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
      # panels will have been left behind.  Read back the new metrology, compute the shift applied to the panels refined
      # in this step,and apply that shift to the unrefined panels in this step
      if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
        from dxtbx.model.experiment_list import ExperimentListFactory, ExperimentListDumper
        from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
        from scitbx.array_family import flex
        from scitbx.matrix import col
        from libtbx.test_utils import approx_equal
        experiments = ExperimentListFactory.from_json_file(output_experiments, check_format=False)
        assert len(experiments.detectors()) == 1
        detector = experiments.detectors()[0]
        # Displacements: deltas along the vector normal to the detector
        displacements = flex.double()
        # Iterate through the panel groups at this level
        for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
          # Were there panels refined in this step in this panel group?
          if params.panel_filter:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group) if list(detector).index(panel) in params.panel_filter]
          else:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
          if not any(test): continue
          # Compute the translation along the normal of this panel group.  This is defined as distance in dials.refine
          displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))

        # Even though the panels are constrained to move the same amount, there is a bit a variation.
        stats = flex.mean_and_variance(displacements)
        displacement = stats.mean()
        print "Average displacement along normals: %f +/- %f"%(stats.mean(), stats.unweighted_sample_standard_deviation())

        # Verify the variation isn't significant
        for k in range(1, len(displacements)):
          assert approx_equal(displacements[0], displacements[k])
        # If all of the panel groups in this level moved, no need to do anything.
        if len(displacements) != len(list(iterate_detector_at_level(detector.hierarchy(), 0, i))):
          for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
            if params.panel_filter:
              test = [list(detector).index(panel) in steps[j] and list(detector).index(panel) in params.panel_filter for panel in iterate_panels(panel_group)]
            else:
              test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
            # If any of the panels in this panel group moved, no need to do anything
            if any(test): continue

            # None of the panels in this panel group moved in this step, so need to apply displacement from other panel
            # groups at this level
            fast = col(panel_group.get_local_fast_axis())
            slow = col(panel_group.get_local_slow_axis())
            ori = col(panel_group.get_local_origin())
            normal = fast.cross(slow)
            # Keep the in-plane components of the origin, replace the normal
            # component with the average displacement of the refined groups.
            panel_group.set_local_frame(fast, slow, (ori.dot(fast)*fast) + (ori.dot(slow)*slow) + (normal*displacement))

        # Check the new displacements. Should be the same across all panels.
        displacements = []
        for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
          displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))

        for k in range(1, len(displacements)):
          assert approx_equal(displacements[0], displacements[k])

        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments)

      previous_step_and_level = j,i

  output_geometry(params)
예제 #2
0
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detectors, computing detector statistics at the per-panel level (IE one statistic per panel)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the center of the reflection to the beam center
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the raidal and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[p.get_name()] = -1
        radial_rmsds[p.get_name()] = -1
        transverse_rmsds[p.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      bin_low = []
      bin_high = []
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']#[:71610]
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [sorted_values[i] for i in xrange(len(sorted_values)) if pg_bc_dists.keys()[i] in rmsds]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()
예제 #3
0
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        # Find all detector objects
        detectors = []
        detectors.extend(experiments.detectors())

        # Verify inputs
        if len(detectors) != 2:
            raise Sorry(
                "Please provide a reference and a moving set of experiments")

        reflections = reflections[1]
        detector = detectors[1]

        if not hasattr(detector, 'hierarchy'):
            raise Sorry("Script intended for hierarchical detectors")

        if params.max_hierarchy_level is None or str(
                params.max_hierarchy_level).lower() == 'auto':
            params.max_hierarchy_level = 0
            root = detector.hierarchy()
            while root.is_group():
                root = root[0]
                params.max_hierarchy_level += 1
            print("Found", params.max_hierarchy_level + 1, "hierarchy levels")

        reference_root = detectors[0].hierarchy()
        moving_root = detector.hierarchy()
        rori = get_center(reference_root)
        rf = col(reference_root.get_fast_axis())
        rs = col(reference_root.get_slow_axis())
        r_norm = col(reference_root.get_normal())
        s0 = col(
            flex.vec3_double([col(b.get_s0())
                              for b in experiments.beams()]).mean())

        summary_table_header = [
            "Hierarchy", "Delta XY", "Delta XY", "R Offsets", "R Offsets",
            "T Offsets", "T Offsets", "Z Offsets", "Z Offsets", "dR Norm",
            "dR Norm", "dT Norm", "dT Norm", "Local dNorm", "Local dNorm",
            "Rot Z", "Rot Z"
        ]
        summary_table_header2 = [
            "Level", "", "Sigma", "", "Sigma", "", "Sigma", "", "Sigma", "",
            "Sigma", "", "Sigma", "", "Sigma", "", "Sigma"
        ]
        summary_table_header3 = [
            "", "(microns)", "(microns)", "(microns)", "(microns)",
            "(microns)", "(microns)", "(microns)", "(microns)", "(deg)",
            "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)"
        ]
        summary_table_data = []
        summary_table_data.append(summary_table_header)
        summary_table_data.append(summary_table_header2)
        summary_table_data.append(summary_table_header3)

        table_header = [
            "PanelG", "BC dist", "Delta XY", "R Offsets", "T Offsets",
            "Z Offsets", "dR Norm", "dT Norm", "Local dNorm", "Rot Z",
            "N Refls"
        ]
        table_header2 = [
            "ID", "(mm)", "(microns)", "(microns)", "(microns)", "(microns)",
            "(deg)", "(deg)", "(deg)", "(deg)", ""
        ]

        from xfel.cftbx.detector.cspad_cbf_tbx import basis

        def get_full_basis_shift(pg):
            """Accumulate the basis transforms from panel group *pg* up
            through its ancestors, yielding the shift into lab space."""
            shift = basis(panelgroup=pg)
            ancestor = pg.parent()
            # Walk up the hierarchy, left-composing each ancestor's basis.
            while ancestor is not None:
                shift = basis(panelgroup=ancestor) * shift
                ancestor = ancestor.parent()
            return shift

        # Iterate through the hierarchy levels
        for level in range(params.max_hierarchy_level + 1):
            delta_xy = flex.double()
            r_offsets = flex.double()
            t_offsets = flex.double()
            z_offsets = flex.double()
            rot_z = flex.double()
            delta_r_norm = flex.double()
            delta_t_norm = flex.double()
            local_dnorm = flex.double()
            bc_dists = flex.double()
            weights = flex.double()

            rows = []

            for pg_id, (pg1, pg2) in enumerate(
                    zip(iterate_detector_at_level(reference_root, 0, level),
                        iterate_detector_at_level(moving_root, 0, level))):
                weight = 0
                # Sum the reflections recorded on every panel of the moving
                # group; used below to weight the per-group statistics.
                for panel_id, p in enumerate(iterate_panels(pg2)):
                    weight += len(
                        reflections.select(
                            reflections['panel'] == id_from_name(
                                detector, p.get_name())))
                weights.append(weight)

                # Distance of the reference group's center from its beam
                # centre, in lab space.
                bc = col(pg1.get_beam_centre_lab(s0))
                ori = get_center(pg1)
                bc_dist = (ori - bc).length()
                bc_dists.append(bc_dist)

                # Local-frame origins of the reference (pg1) and moving (pg2)
                # groups: XY shift magnitude and Z offset between the two.
                # The *1000 assumes origins are in mm, giving microns --
                # TODO confirm units.
                z_dists = []
                ori_xy = []
                for pg in [pg1, pg2]:
                    ori = pg.get_local_origin()
                    ori_xy.append(col((ori[0], ori[1])))
                    z_dists.append(ori[2] * 1000)
                dxy = (ori_xy[1] - ori_xy[0]).length() * 1000
                delta_xy.append(dxy)

                z_off = z_dists[1] - z_dists[0]
                z_offsets.append(z_off)

                pgo1 = col(pg1.get_origin())
                ro_pgo = pgo1 - rori  # vector from the detector origin to the panel group origin
                if ro_pgo.length() == 0:
                    # Group origin coincides with the detector origin; the
                    # radial direction is undefined, so use null vectors.
                    radial = col((0, 0, 0))
                    transverse = col((0, 0, 0))
                else:
                    radial = (
                        (rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)
                    ).normalize()  # component of ro_pgo in rf rs plane
                    transverse = r_norm.cross(radial).normalize()
                # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
                # radial points at the panel group origin

                # compute shift in local frame, then convert that shift to lab space, then make it relative to the reference's origin, in lab space
                lpgo1 = col(pg1.get_local_origin())
                lpgo2 = col(pg2.get_local_origin())
                delta_pgo = (get_full_basis_shift(pg1) *
                             (lpgo2 - lpgo1)) - pgo1

                # v is the component of delta_pgo along the radial vector
                v = (radial.dot(delta_pgo) * radial)
                r_offset = v.length() * 1000
                # NOTE(review): 'angle' is assigned here and again below but
                # never read -- confirm it can be removed.
                angle = r_norm.angle(v, deg=True)
                # Sign convention: negative when the shift points against the
                # transverse handedness.
                if r_norm.cross(v).dot(transverse) < 0:
                    r_offset = -r_offset
                r_offsets.append(r_offset)
                # v is the component of delta_pgo along the transverse vector
                v = (transverse.dot(delta_pgo) * transverse)
                t_offset = v.length() * 1000
                angle = r_norm.angle(v, deg=True)
                if r_norm.cross(v).dot(radial) < 0:
                    t_offset = -t_offset
                t_offsets.append(t_offset)

                # Lab-space axes of both groups.
                # NOTE(review): pgs1 is never used below -- confirm removable.
                pgn1 = col(pg1.get_normal())
                pgf1 = col(pg1.get_fast_axis())
                pgs1 = col(pg1.get_slow_axis())
                pgn2 = col(pg2.get_normal())
                pgf2 = col(pg2.get_fast_axis())

                # v1 and v2 are the component of pgf1 and pgf2 in the rf rs plane
                v1 = (rf.dot(pgf1) * rf) + (rs.dot(pgf1) * rs)
                v2 = (rf.dot(pgf2) * rf) + (rs.dot(pgf2) * rs)
                rz = v1.angle(v2, deg=True)  # rotation about the detector normal
                rot_z.append(rz)

                # v1 and v2 are the components of pgn1 and pgn2 in the r_norm radial plane
                v1 = (r_norm.dot(pgn1) * r_norm) + (radial.dot(pgn1) * radial)
                v2 = (r_norm.dot(pgn2) * r_norm) + (radial.dot(pgn2) * radial)
                drn = v1.angle(v2, deg=True)
                if v2.cross(v1).dot(transverse) < 0:
                    drn = -drn
                delta_r_norm.append(drn)

                # v1 and v2 are the components of pgn1 and pgn2 in the r_norm transverse plane
                v1 = (r_norm.dot(pgn1) * r_norm) + (transverse.dot(pgn1) *
                                                    transverse)
                v2 = (r_norm.dot(pgn2) * r_norm) + (transverse.dot(pgn2) *
                                                    transverse)
                dtn = v1.angle(v2, deg=True)
                if v2.cross(v1).dot(radial) < 0:
                    dtn = -dtn
                delta_t_norm.append(dtn)

                # Determine angle between normals in local space
                lpgf1 = col(pg1.get_local_fast_axis())
                lpgs1 = col(pg1.get_local_slow_axis())
                lpgn1 = lpgf1.cross(lpgs1)
                lpgf2 = col(pg2.get_local_fast_axis())
                lpgs2 = col(pg2.get_local_slow_axis())
                lpgn2 = lpgf2.cross(lpgs2)
                ldn = lpgn1.angle(lpgn2, deg=True)
                local_dnorm.append(ldn)

                # One detail-table row per panel group at this level.
                row = [
                    "%3d" % pg_id,
                    "%6.1f" % bc_dist,
                    "%6.1f" % dxy,
                    "%6.1f" % r_offset,
                    "%6.1f" % t_offset,
                    "%6.1f" % z_off,
                    "%.4f" % drn,
                    "%.4f" % dtn,
                    "%.4f" % ldn,
                    "%.4f" % rz,
                    "%8d" % weight
                ]
                rows.append(row)

            # Per-level summary: weighted mean / stddev over all groups,
            # weighted by the reflection count collected above.
            wm_row = ["Weighted mean", ""]
            ws_row = ["Weighted stddev", ""]
            s_row = ["%d" % level]
            iterable = zip([
                delta_xy, r_offsets, t_offsets, z_offsets, delta_r_norm,
                delta_t_norm, local_dnorm, rot_z
            ], [
                "%6.1f", "%6.1f", "%6.1f", "%6.1f", "%.4f", "%.4f", "%.4f",
                "%.4f"
            ])
            if len(z_offsets) == 0:
                # No panel groups at this level: emit zero-filled rows.
                wm_row.extend(["%6.1f" % 0] * 8)
                ws_row.extend(["%6.1f" % 0] * 8)
                s_row.extend(["%6.1f" % 0] * 8)
            elif len(z_offsets) == 1:
                # Single group: the mean is the value itself, stddev is zero.
                for data, fmt in iterable:
                    wm_row.append(fmt % data[0])
                    ws_row.append(fmt % 0)
                    s_row.append(fmt % data[0])
                    s_row.append(fmt % 0)
            else:
                for data, fmt in iterable:
                    stats = flex.mean_and_variance(data, weights)
                    wm_row.append(fmt % stats.mean())
                    ws_row.append(fmt % stats.gsl_stats_wsd())
                    s_row.append(fmt % stats.mean())
                    s_row.append(fmt % stats.gsl_stats_wsd())
            wm_row.append("")
            ws_row.append("")
            summary_table_data.append(s_row)

            # Detail rows sorted by distance from the beam centre.
            # NOTE(review): groups with identical bc_dist keys would collide
            # in table_d and drop rows -- confirm distances are unique.
            table_data = [table_header, table_header2]
            table_d = {d: row for d, row in zip(bc_dists, rows)}
            table_data.extend([table_d[key] for key in sorted(table_d)])
            table_data.append(wm_row)
            table_data.append(ws_row)

            from libtbx import table_utils
            print("Hierarchy level %d Detector shifts" % level)
            print(
                table_utils.format(table_data,
                                   has_header=2,
                                   justify='center',
                                   delim=" "))

        print("Detector shifts summary")
        print(
            table_utils.format(summary_table_data,
                               has_header=3,
                               justify='center',
                               delim=" "))

        print()
        print("""
For each hierarchy level, the average shifts in are computed among objects at that level, weighted by the number of reflections recorded on each object. For example, for a four quadrant detector, the average Z shift will be the average of the four quadrant Z values, each weighted by the number of reflections on that quadrant.

-------------------
Column descriptions
-------------------

Individual hierarchy level tables only:
PanelG id: ID of the panel group.
BC dist: distance of the panel group from the beam center.
N Refls: number of reflections on this panel group

All tables:
Delta XY: magnitude of the shift in the local XY frame.
R, T offsets: shifts relative to the parent object's location in the radial and transverse directions (relative to the detector center).
Z offsets: relative shifts in the local frame in the local Z direction.
R, T Norm: angle between normal vectors in lab space, projected onto the radial or transverse plane.
Local dNorm: local relative angle between normal vectors.
Rot Z: rotation around detector normal in lab space
""")
  def run(self):
    '''Compare a moving detector against a reference detector.

    Parses the command line for exactly two experiment lists and/or
    datablocks -- the reference first, the moving detector second -- then,
    for each detector hierarchy level, tabulates the shifts of the moving
    detector's panel groups relative to the reference: local XY and Z
    offsets, radial/transverse offsets, normal-vector tilts and in-plane
    rotations, each weighted by the number of reflections recorded on the
    group. Results are printed as formatted tables.
    '''
    from dials.util.options import flatten_experiments, flatten_datablocks, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)

    # Find all detector objects
    detectors = []
    detectors.extend(experiments.detectors())
    dbs = []
    for datablock in datablocks:
      dbs.extend(datablock.unique_detectors())
    detectors.extend(dbs)

    # Verify inputs
    if len(detectors) != 2:
      raise Sorry("Please provide a reference and a moving set of experiments and or datablocks")

    # Index 0 is the reference detector; index 1 is the moving detector
    # whose reflections are used for weighting.
    reflections = reflections[1]
    detector = detectors[1]

    if not hasattr(detector, 'hierarchy'):
      raise Sorry("Script intended for hierarchical detectors")

    # 'auto' (or unset): count hierarchy levels by walking the first-child
    # chain down to a leaf of the moving detector.
    if params.max_hierarchy_level is None or str(params.max_hierarchy_level).lower() == 'auto':
      params.max_hierarchy_level = 0
      root = detector.hierarchy()
      while root.is_group():
        root = root[0]
        params.max_hierarchy_level += 1
      print "Found", params.max_hierarchy_level+1, "hierarchy levels"

    # Reference detector frame: origin, fast/slow axes and normal, plus the
    # mean incident beam vector over all experiments.
    reference_root = detectors[0].hierarchy()
    moving_root = detector.hierarchy()
    rori = get_center(reference_root)
    rf = col(reference_root.get_fast_axis())
    rs = col(reference_root.get_slow_axis())
    r_norm = col(reference_root.get_normal())
    s0 = col(flex.vec3_double([col(b.get_s0()) for b in experiments.beams()]).mean())

    summary_table_header = ["Hierarchy","Delta XY","Delta XY","R Offsets","R Offsets","T Offsets","T Offsets","Z Offsets","Z Offsets","dR Norm","dR Norm","dT Norm","dT Norm","Local dNorm", "Local dNorm", "Rot Z","Rot Z"]
    summary_table_header2 = ["Level","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma"]
    summary_table_header3 = ["","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)"]
    summary_table_data = []
    summary_table_data.append(summary_table_header)
    summary_table_data.append(summary_table_header2)
    summary_table_data.append(summary_table_header3)

    table_header = ["PanelG","BC dist","Delta XY","R Offsets","T Offsets","Z Offsets","dR Norm","dT Norm","Local dNorm","Rot Z","N Refls"]
    table_header2 = ["ID","(mm)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)",""]

    from xfel.cftbx.detector.cspad_cbf_tbx import basis
    def get_full_basis_shift(pg):
      """Compute basis shift from pg to lab space"""
      # Compose this group's basis with every ancestor's, walking up the
      # hierarchy until there is no parent.
      shift = basis(panelgroup=pg)
      while True:
        parent = pg.parent()
        if parent is None:
          break
        shift = basis(panelgroup=parent) * shift
        pg = parent
      return shift

    # Iterate through the hierarchy levels
    for level in xrange(params.max_hierarchy_level+1):
      # Per-level accumulators, one entry per panel group at this level.
      delta_xy = flex.double()
      r_offsets = flex.double()
      t_offsets = flex.double()
      z_offsets = flex.double()
      rot_z = flex.double()
      delta_r_norm = flex.double()
      delta_t_norm = flex.double()
      local_dnorm = flex.double()
      bc_dists = flex.double()
      weights = flex.double()

      rows = []

      # Pair up corresponding groups of the reference and moving hierarchies.
      for pg_id, (pg1, pg2) in enumerate(zip(iterate_detector_at_level(reference_root, 0, level),
                                             iterate_detector_at_level(moving_root, 0, level))):
        # Sum the reflections on every panel of the moving group; used to
        # weight the per-group statistics below.
        weight = 0
        for panel_id, p in enumerate(iterate_panels(pg2)):
          weight += len(reflections.select(reflections['panel'] == id_from_name(detector, p.get_name())))
        weights.append(weight)

        # Distance of the reference group's center from its beam centre.
        bc = col(pg1.get_beam_centre_lab(s0))
        ori = get_center(pg1)
        bc_dist = (ori-bc).length()
        bc_dists.append(bc_dist)

        # Local-frame origins: XY shift magnitude and Z offset between the
        # reference and moving groups. The *1000 assumes origins are in mm,
        # giving microns -- TODO confirm units.
        z_dists = []
        ori_xy = []
        for pg in [pg1,pg2]:
          ori = pg.get_local_origin()
          ori_xy.append(col((ori[0], ori[1])))
          z_dists.append(ori[2]*1000)
        dxy = (ori_xy[1]-ori_xy[0]).length()*1000
        delta_xy.append(dxy)

        z_off = z_dists[1]-z_dists[0]
        z_offsets.append(z_off)

        pgo1 = col(pg1.get_origin())
        ro_pgo = pgo1 - rori # vector from the detector origin to the panel group origin
        if ro_pgo.length() == 0:
          # Group origin coincides with the detector origin; the radial
          # direction is undefined, so use null vectors.
          radial = col((0,0,0))
          transverse = col((0,0,0))
        else:
          radial = ((rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)).normalize() # component of ro_pgo in rf rs plane
          transverse = r_norm.cross(radial).normalize()
        # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
        # radial points at the panel group origin

        # compute shift in local frame, then convert that shift to lab space, then make it relative to the reference's origin, in lab space
        lpgo1 = col(pg1.get_local_origin())
        lpgo2 = col(pg2.get_local_origin())
        delta_pgo = (get_full_basis_shift(pg1) * (lpgo2-lpgo1)) - pgo1

        # v is the component of delta_pgo along the radial vector
        v = (radial.dot(delta_pgo) * radial)
        r_offset = v.length() * 1000
        # NOTE(review): 'angle' is assigned here and again below but never
        # read -- confirm it can be removed.
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(transverse) < 0:
          r_offset = -r_offset
        r_offsets.append(r_offset)
        # v is the component of delta_pgo along the transverse vector
        v = (transverse.dot(delta_pgo) * transverse)
        t_offset = v.length() * 1000
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(radial) < 0:
          t_offset = -t_offset
        t_offsets.append(t_offset)

        # Lab-space axes of both groups.
        # NOTE(review): pgs1 is never used below -- confirm removable.
        pgn1 = col(pg1.get_normal())
        pgf1 = col(pg1.get_fast_axis())
        pgs1 = col(pg1.get_slow_axis())
        pgn2 = col(pg2.get_normal())
        pgf2 = col(pg2.get_fast_axis())

        # v1 and v2 are the component of pgf1 and pgf2 in the rf rs plane
        v1 = (rf.dot(pgf1) * rf) + (rs.dot(pgf1) * rs)
        v2 = (rf.dot(pgf2) * rf) + (rs.dot(pgf2) * rs)
        rz = v1.angle(v2, deg=True)
        rot_z.append(rz)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm radial plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (radial.dot(pgn1) * radial)
        v2 = (r_norm.dot(pgn2) * r_norm) + (radial.dot(pgn2) * radial)
        drn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(transverse) < 0:
          drn = -drn
        delta_r_norm.append(drn)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm transverse plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (transverse.dot(pgn1) * transverse)
        v2 = (r_norm.dot(pgn2) * r_norm) + (transverse.dot(pgn2) * transverse)
        dtn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(radial) < 0:
          dtn = -dtn
        delta_t_norm.append(dtn)

        # Determine angle between normals in local space
        lpgf1 = col(pg1.get_local_fast_axis())
        lpgs1 = col(pg1.get_local_slow_axis())
        lpgn1 = lpgf1.cross(lpgs1)
        lpgf2 = col(pg2.get_local_fast_axis())
        lpgs2 = col(pg2.get_local_slow_axis())
        lpgn2 = lpgf2.cross(lpgs2)
        ldn = lpgn1.angle(lpgn2, deg=True)
        local_dnorm.append(ldn)

        # One detail-table row per panel group at this level.
        row = ["%3d"%pg_id, "%6.1f"%bc_dist, "%6.1f"%dxy,
               "%6.1f"%r_offset, "%6.1f"%t_offset, "%6.1f"%z_off,
               "%.4f"%drn, "%.4f"%dtn, "%.4f"%ldn, "%.4f"%rz, "%8d"%weight]
        rows.append(row)

      # Per-level summary: weighted mean / stddev over all groups, weighted
      # by the reflection counts collected above.
      wm_row = ["Weighted mean", ""]
      ws_row = ["Weighted stddev", ""]
      s_row = ["%d"%level]
      iterable = zip([delta_xy, r_offsets, t_offsets, z_offsets, delta_r_norm, delta_t_norm, local_dnorm, rot_z],
                     ["%6.1f","%6.1f","%6.1f","%6.1f","%.4f","%.4f","%.4f","%.4f"])
      if len(z_offsets) == 0:
        # No panel groups at this level: emit zero-filled rows.
        wm_row.extend(["%6.1f"%0]*8)
        ws_row.extend(["%6.1f"%0]*8)
        s_row.extend(["%6.1f"%0]*8)
      elif len(z_offsets) == 1:
        # Single group: the mean is the value itself, stddev is zero.
        for data, fmt in iterable:
          wm_row.append(fmt%data[0])
          ws_row.append(fmt%0)
          s_row.append(fmt%data[0])
          s_row.append(fmt%0)
      else:
        for data, fmt in iterable:
          stats = flex.mean_and_variance(data, weights)
          wm_row.append(fmt%stats.mean())
          ws_row.append(fmt%stats.gsl_stats_wsd())
          s_row.append(fmt%stats.mean())
          s_row.append(fmt%stats.gsl_stats_wsd())
      wm_row.append("")
      ws_row.append("")
      summary_table_data.append(s_row)

      # Detail rows sorted by distance from the beam centre.
      # NOTE(review): groups with identical bc_dist keys would collide in
      # table_d and drop rows -- confirm distances are unique.
      table_data = [table_header, table_header2]
      table_d = {d:row for d, row in zip(bc_dists, rows)}
      table_data.extend([table_d[key] for key in sorted(table_d)])
      table_data.append(wm_row)
      table_data.append(ws_row)

      from libtbx import table_utils
      print "Hierarchy level %d Detector shifts"%level
      print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    print "Detector shifts summary"
    print table_utils.format(summary_table_data,has_header=3,justify='center',delim=" ")

    print
    print """