Example #1
File: report.py  Project: biochem-fan/dials
  def __init__(self, experiments, profile_fitter, reflections, num_folds):
    '''
    Create the integration report

    :param experiments: The experiment list
    :param profile_fitter: The profile fitter
    :param reflections: The reflection table
    :param num_folds: The number of validation sub-samples (folds)

    '''
    from collections import OrderedDict

    # Initialise the report class
    super(ProfileValidationReport, self).__init__()

    # Create the table
    table = Table()

    # Set the title
    table.name = 'validation.summary'
    table.title = 'Summary of profile validation'

    # Add the columns
    table.cols.append(('id', 'ID'))
    table.cols.append(('subsample', 'Sub-sample'))
    table.cols.append(('n_valid', '# validated'))
    table.cols.append(('cc', '<CC>'))
    table.cols.append(('nrmsd', '<NRMSD>'))

    # Split the reflections
    reflection_tables = reflections.split_by_experiment_id()
    assert len(reflection_tables) == len(experiments)
    assert len(profile_fitter) == num_folds

    # Create the summary for each profile model
    for i in range(len(reflection_tables)):
      reflection_table = reflection_tables[i]
      reflection_table = reflection_table.select(
        reflection_table.get_flags(
          reflection_table.flags.integrated_prf))
      index = reflection_table['profile.index']
      cc = reflection_table['profile.correlation']
      nrmsd = reflection_table['profile.rmsd']
      for j in range(num_folds):
        mask = index == j
        num_validated = mask.count(True)
        if num_validated == 0:
          mean_cc = 0
          mean_nrmsd = 0
        else:
          mean_cc = flex.mean(cc.select(mask))
          mean_nrmsd = flex.mean(nrmsd.select(mask))
        table.rows.append([
          '%d'   % i,
          '%d'   % j,
          '%d'   % num_validated,
          '%.2f' % mean_cc,
          '%.2f' % mean_nrmsd])

    # Add the table
    self.add_table(table)
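
For reference, a minimal, self-contained sketch of the select-and-average pattern used above, on made-up data (assumes scitbx is installed; flex.mean raises on an empty array, which is why the loop guards with num_validated == 0):

from scitbx.array_family import flex

# Hypothetical per-reflection data: validation fold index and profile CC
index = flex.int([0, 1, 0, 1, 0])
cc = flex.double([0.91, 0.85, 0.88, 0.79, 0.95])

for j in range(2):
    mask = index == j                           # flex.bool selection mask
    num_validated = mask.count(True)
    mean_cc = flex.mean(cc.select(mask)) if num_validated else 0.0
    print('fold %d: n=%d <CC>=%.2f' % (j, num_validated, mean_cc))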
Example #2
 def tst_for_dataset(self, creator, filename):
   from dials.array_family import flex
   from dials.algorithms.shoebox import MaskCode
   print filename
   rlist = flex.reflection_table.from_pickle(filename)
   shoebox = rlist['shoebox']
   background = [sb.background.deep_copy() for sb in shoebox]
   success = creator(shoebox)
   assert(success.count(True) == len(success))
   diff = []
   for i in range(len(rlist)):
     mask = flex.bool([(m & MaskCode.Foreground) != 0 for m in shoebox[i].mask])
     px1 = background[i].select(mask)
     px2 = shoebox[i].background.select(mask)
     den = max([flex.mean(px1), 1.0])
     diff.append(flex.mean(px2 - px1) / den)
   diff = flex.double(diff)
   mv = flex.mean_and_variance(diff)
   mean = mv.mean()
   sdev = mv.unweighted_sample_standard_deviation()
   try:
     assert(abs(mean) < 0.01)
   except Exception:
     print "Mean: %f, Sdev: %f", mean, sdev
     from matplotlib import pylab
     pylab.hist(diff)
     pylab.show()
     raise
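
A small sketch of the flex.mean_and_variance accessors used above, on toy numbers (assumes scitbx):

from scitbx.array_family import flex

diff = flex.double([0.002, -0.001, 0.003, 0.000])
mv = flex.mean_and_variance(diff)
print("Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation()))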
Example #3
File: report.py  Project: biochem-fan/dials
  def overall_report(data):

    # Start by adding some overall numbers
    report = OrderedDict()
    report['n']                   = len(reflections)
    report['n_full']              = data['full'].count(True)
    report['n_partial']           = data['full'].count(False)
    report['n_overload']          = data['over'].count(True)
    report['n_ice']               = data['ice'].count(True)
    report['n_summed']            = data['sum'].count(True)
    report['n_fitted']            = data['prf'].count(True)
    report['n_integrated']        = data['int'].count(True)
    report['n_invalid_bg']        = data['ninvbg'].count(True)
    report['n_invalid_fg']        = data['ninvfg'].count(True)
    report['n_failed_background'] = data['fbgd'].count(True)
    report['n_failed_summation']  = data['fsum'].count(True)
    report['n_failed_fitting']    = data['fprf'].count(True)

    # Compute mean background
    try:
      report['mean_background'] = flex.mean(
        data['background.mean'].select(data['int']))
    except Exception:
      report['mean_background'] = 0.0

    # Compute mean I/Sigma summation
    try:
      report['ios_sum'] = flex.mean(
        data['intensity.sum.ios'].select(data['sum']))
    except Exception:
      report['ios_sum'] = 0.0

    # Compute mean I/Sigma profile fitting
    try:
      report['ios_prf'] = flex.mean(
        data['intensity.prf.ios'].select(data['prf']))
    except Exception:
      report['ios_prf'] = 0.0

    # Compute the mean profile correlation
    try:
      report['cc_prf'] = flex.mean(
        data['profile.correlation'].select(data['prf']))
    except Exception:
      report['cc_prf'] = 0.0

    # Compute the correlations between summation and profile fitting
    try:
      mask = data['sum'] & data['prf']
      Isum = data['intensity.sum.value'].select(mask)
      Iprf = data['intensity.prf.value'].select(mask)
      report['cc_pearson_sum_prf'] = pearson_correlation_coefficient(Isum, Iprf)
      report['cc_spearman_sum_prf'] = spearman_correlation_coefficient(Isum, Iprf)
    except Exception:
      report['cc_pearson_sum_prf'] = 0.0
      report['cc_spearman_sum_prf'] = 0.0

    # Return the overall report
    return report
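
flex.mean raises when handed an empty array, which is what each try/except above defends against. The same defaulting pattern condensed into a helper, with hypothetical data (assumes scitbx):

from scitbx.array_family import flex

def safe_mean(values, sel, default=0.0):
    # Mean of the selected values, or the default when nothing is
    # selected (flex.mean raises on empty input).
    try:
        return flex.mean(values.select(sel))
    except Exception:
        return default

ios = flex.double([3.2, 7.1, 0.4])
integrated = flex.bool([True, True, False])
print(safe_mean(ios, integrated))  # 5.15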
Example #4
File: img_viewer.py  Project: ndevenish/DUI
    def ini_contrast(self):
        if not self.contrast_initiated:
            try:
                n_of_imgs = len(self.my_sweep.indices())
                logger.debug("n_of_imgs(ini_contrast) = %s", n_of_imgs)

                img_arr_n0 = self.my_sweep.get_raw_data(0)[0]
                img_arr_n1 = self.my_sweep.get_raw_data(1)[0]
                img_arr_n2 = self.my_sweep.get_raw_data(2)[0]

                tst_sample = (
                    img_arr_n0[0:25, 0:25].as_double()
                    + img_arr_n1[0:25, 0:25].as_double()
                    + img_arr_n2[0:25, 0:25].as_double()
                ) / 3.0
                logger.debug("tst_sample = %s", tst_sample)

                i_mean = flex.mean(tst_sample)
                tst_new_max = (i_mean + 1) * 25

                logger.debug("flex.mean(tst_sample) = %s", i_mean)
                logger.debug("tst_new_max = %s", tst_new_max)
                self.try_change_max(tst_new_max)
                self.try_change_min(-3)
                self.contrast_initiated = True
            except BaseException as e:
                # We don't want to catch bare exceptions but don't know
                # what this was supposed to catch. Log it.
                logger.error(
                    "Caught unknown exception type %s: %s", type(e).__name__, e
                )
                logger.debug("Unable to calculate mean and adjust contrast")
Example #5
  def plot_histograms(self, reflections, panel = None, ax = None, bounds = None):
    data = reflections['difference_vector_norms']
    colors = ['b-', 'g-', 'g--', 'r-', 'b-', 'b--']
    n_slots = 20
    if self.params.residuals.histogram_max is None:
      h = flex.histogram(data, n_slots=n_slots)
    else:
      h = flex.histogram(data.select(data <= self.params.residuals.histogram_max), n_slots=n_slots)

    n = len(reflections)
    rmsd_obs = math.sqrt((reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).sum_sq()/n)
    sigma = mode = h.slot_centers()[list(h.slots()).index(flex.max(h.slots()))]
    mean_obs = flex.mean(data)
    median = flex.median(data)
    mean_rayleigh = math.sqrt(math.pi/2)*sigma
    rmsd_rayleigh = math.sqrt(2)*sigma

    data = flex.vec2_double([(i,j) for i, j in zip(h.slot_centers(), h.slots())])
    n = len(data)
    for i in [mean_obs, mean_rayleigh, mode, rmsd_obs, rmsd_rayleigh]:
      data.extend(flex.vec2_double([(i, 0), (i, flex.max(h.slots()))]))
    data = self.get_bounded_data(data, bounds)
    tmp = [data[:n]]
    for i in xrange(len(colors)):
      tmp.append(data[n+(i*2):n+((i+1)*2)])
    data = tmp

    for d, c in zip(data, colors):
      ax.plot(d.parts()[0], d.parts()[1], c)

    if ax.get_legend() is None:
      ax.legend([r"$\Delta$XY", "MeanObs", "MeanRayl", "Mode", "RMSDObs", "RMSDRayl"])
Example #6
  def compute_functional_and_gradients_test_code(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150. # limits on the exponent, please
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.func*self.func)
    self.f = functional
    jacobian = self.refinery.jacobian_callable(values)
    self.gg_0 = flex.sum(2. * self.func * jacobian[0])
    self.gg_1 = flex.sum(2. * self.func * jacobian[1])
    self.gg_3 = flex.sum(2. * self.func * jacobian[3])
    self.gg_4 = flex.sum(2. * self.func * jacobian[4])
    DELTA = 1.E-7
    self.g = flex.double()
    for x in xrange(self.n):
      templist = list(self.x)
      templist[x]+=DELTA
      dvalues = flex.double(templist)

      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc*dfunc)
      #calculate by finite_difference
      self.g.append( ( dfunctional-functional )/DELTA )
    self.g[2]=0.

    print >> self.out, "rms %10.3f"%math.sqrt(flex.mean(self.func*self.func)),
    values.show(self.out)
    print >>self.out, "derivatives--> %15.5f    %15.5f    %9.7f   %5.2f   %5.2f"%tuple(self.g)
    print >>self.out, "  analytical-> %15.5f    %15.5f                %5.2f   %5.2f"%(
      self.gg_0,self.gg_1, self.gg_3,self.gg_4)
    self.g[0]=self.gg_0
    self.g[1]=self.gg_1
    self.g[3]=self.gg_3
    self.g[4]=self.gg_4
    return self.f, self.g
Example #7
def outlier_rejection(reflections):
  # http://scripts.iucr.org/cgi-bin/paper?ba0032
  if len(reflections) == 1:
    return reflections
  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  i_max = flex.max_index(intensities)

  sel = flex.bool(len(reflections), True)
  sel[i_max] = False

  i_test = intensities[i_max]
  var_test = variances[i_max]

  intensities_subset = intensities.select(sel)
  var_subset = variances.select(sel)

  var_prior = var_test + 1/flex.sum(1/var_subset)
  p_prior = 1/math.sqrt(2*math.pi * var_prior) * math.exp(
    -(i_test - flex.mean(intensities_subset))**2/(2 * var_prior))
  #print p_prior

  if p_prior > 1e-10:
    return reflections

  return outlier_rejection(reflections.select(sel))
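
A sketch of the leave-one-out selection that outlier_rejection builds above, on hypothetical intensities (assumes scitbx):

from scitbx.array_family import flex

intensities = flex.double([10.0, 12.0, 11.0, 250.0])
i_max = flex.max_index(intensities)      # index of the candidate outlier
sel = flex.bool(len(intensities), True)
sel[i_max] = False                       # select everything except the maximum
print("%d %.1f" % (i_max, flex.mean(intensities.select(sel))))  # 3 11.0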
Example #8
def calc_2D_rmsd_and_displacements(reflections):

  # Observed minus calculated spot centroids, in detector pixels
  obs_x, obs_y, _ = reflections['xyzobs.px.value'].parts()
  cal_x, cal_y, _ = reflections['xyzcal.px'].parts()
  displacements = flex.vec2_double(obs_x, obs_y) - flex.vec2_double(cal_x, cal_y)
  rmsd = math.sqrt(flex.mean(displacements.dot(displacements)))

  return rmsd, displacements
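
The 2D RMSD above is the root mean squared displacement length; dot() of a vec2_double with itself gives per-element squared lengths. A numeric check on toy vectors (assumes scitbx):

import math
from scitbx.array_family import flex

displacements = flex.vec2_double([(3.0, 4.0), (0.0, 0.0)])
rmsd = math.sqrt(flex.mean(displacements.dot(displacements)))
print(rmsd)  # sqrt((25.0 + 0.0) / 2) ~= 3.536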
Example #9
 def build_up(pfh, objective_only=False):
   values = pfh.parameterization(pfh.x)
   assert 0. < values.G , "G-scale value out of range ( < 0 ) within LevMar build_up"
   # XXX revisit these limits.  Seems like an ad hoc approach to have to set these limits
   # However, the assertions are necessary to avoid floating point exceptions at the C++ level
   # Regardless, these tests throw out ~30% of LM14 data, thus search for another approach
   assert -150. < values.BFACTOR < 150. ,"B-factor out of range (+/-150) within LevMar build_up"
   assert -0.5 < 180.*values.thetax/math.pi < 0.5 , "thetax out of range ( |rotx|>.5 degrees ) within LevMar build_up"
   assert -0.5 < 180.*values.thetay/math.pi < 0.5 , "thetay out of range ( |roty|>.5 degrees ) within LevMar build_up"
   assert 0.000001 < values.RS , "RLP size out of range (<0.000001) within LevMar build_up"
   assert values.RS < 0.001 , "RLP size out of range (>0.001) within LevMar build_up"
   residuals = pfh.refinery.fvec_callable(values)
   pfh.reset()
   if objective_only:
     pfh.add_residuals(residuals, weights=pfh.refinery.WEIGHTS)
   else:
     grad_r = pfh.refinery.jacobian_callable(values)
     jacobian = flex.double(
       flex.grid(len(pfh.refinery.MILLER), pfh.n_parameters))
     for j, der_r in enumerate(grad_r):
       jacobian.matrix_paste_column_in_place(der_r,j)
       #print >> pfh.out, "COL",j, list(der_r)
     pfh.add_equations(residuals, jacobian, weights=pfh.refinery.WEIGHTS)
   print >> pfh.out, "rms %10.3f"%math.sqrt(flex.mean(pfh.refinery.WEIGHTS*residuals*residuals)),
   values.show(pfh.out)
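
A tiny sketch of assembling a dense Jacobian column by column, as build_up does above (assumes scitbx; the derivative values are made up):

from scitbx.array_family import flex

n_obs, n_params = 3, 2
jacobian = flex.double(flex.grid(n_obs, n_params))
for j in range(n_params):
    der_r = flex.double(n_obs, j + 1.0)   # hypothetical d(residual)/d(param j)
    jacobian.matrix_paste_column_in_place(der_r, j)
print(list(jacobian))  # row-major: [1.0, 2.0, 1.0, 2.0, 1.0, 2.0]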
Example #10
File: model.py  Project: dials/dials
    def wrapper(experiment):
      from dials.algorithms.profile_model.gaussian_rs import GaussianRSProfileModeller
      from math import ceil

      # Return if no scan or gonio
      if (experiment.scan is None or
          experiment.goniometer is None or
          experiment.scan.get_oscillation()[1] == 0):
        return None

      # Compute the scan step
      phi0, phi1 = experiment.scan.get_oscillation_range(deg=True)
      assert(phi1 > phi0)
      phi_range = phi1 - phi0
      num_scan_points = int(ceil(phi_range / self.params.gaussian_rs.fitting.scan_step))
      assert(num_scan_points > 0)

      # Create the grid method
      GridMethod = GaussianRSProfileModeller.GridMethod
      FitMethod = GaussianRSProfileModeller.FitMethod
      grid_method = int(GridMethod.names[self.params.gaussian_rs.fitting.grid_method].real)
      fit_method = int(FitMethod.names[self.params.gaussian_rs.fitting.fit_method].real)

      if self._scan_varying:
        sigma_b = flex.mean(self.sigma_b(deg=False))
        sigma_m = flex.mean(self.sigma_m(deg=False))
      else:
        sigma_b = self.sigma_b(deg=False)
        sigma_m = self.sigma_m(deg=False)

      # Create the modeller
      return GaussianRSProfileModeller(
        experiment.beam,
        experiment.detector,
        experiment.goniometer,
        experiment.scan,
        sigma_b,
        sigma_m,
        self.n_sigma(),
        self.params.gaussian_rs.fitting.grid_size,
        num_scan_points,
        self.params.gaussian_rs.fitting.threshold,
        grid_method,
        fit_method)
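
The scan sampling above reduces to a ceiling division over the oscillation range; a one-line check with made-up numbers:

from math import ceil

phi0, phi1, scan_step = 0.0, 90.0, 4.0   # hypothetical oscillation range and step (degrees)
num_scan_points = int(ceil((phi1 - phi0) / scan_step))
print(num_scan_points)  # 23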
Example #11
 def compute_functional_and_gradients(self):
   values = self.parameterization(self.x)
   assert -150. < values.BFACTOR < 150. # limits on the exponent, please
   self.func = self.refinery.fvec_callable(values)
   functional = flex.sum(self.func*self.func)
   self.f = functional
   jacobian = self.refinery.jacobian_callable(values)
   self.g = flex.double(self.n)
   for ix in xrange(self.n):
     self.g[ix] = flex.sum(2. * self.func * jacobian[ix])
   print >> self.out, "rms %10.3f"%math.sqrt(flex.mean(self.func*self.func)),
   values.show(self.out)
   return self.f, self.g
Example #12
File: modeller.py  Project: dials/dials
 def compute(self, reflections):
   from dials.algorithms.background.gmodel import Fitter
   from dials.array_family import flex
   assert self.finalized()
   fitter = Fitter(self.background)
   scale = fitter(reflections['shoebox'])
   success = scale >= 0
   mean = flex.double([
     flex.mean(sbox.background)
     for sbox in reflections['shoebox']
   ])
   reflections['background.mean'] = mean
   reflections['background.scale'] = scale
   reflections.set_flags(success != True, reflections.flags.dont_integrate)
   return success
Example #13
  def histogram(self, reflections, title):
    data = reflections['difference_vector_norms']
    n_slots = 100
    if self.params.residuals.histogram_max is None:
      h = flex.histogram(data, n_slots=n_slots)
    else:
      h = flex.histogram(data.select(data <= self.params.residuals.histogram_max), n_slots=n_slots)

    n = len(reflections)
    rmsd = math.sqrt((reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).sum_sq()/n)
    sigma = mode = h.slot_centers()[list(h.slots()).index(flex.max(h.slots()))]
    mean = flex.mean(data)
    median = flex.median(data)
    print "RMSD (microns)", rmsd * 1000
    print "Histogram mode (microns):", mode * 1000
    print "Overall mean (microns):", mean * 1000
    print "Overall median (microns):", median * 1000
    mean2 = math.sqrt(math.pi/2)*sigma
    rmsd2 = math.sqrt(2)*sigma
    print "Rayleigh Mean (microns)", mean2 * 1000
    print "Rayleigh RMSD (microns)", rmsd2 * 1000

    r = reflections['radial_displacements']
    t = reflections['transverse_displacements']
    print "Overall radial RMSD (microns)", math.sqrt(flex.sum_sq(r)/len(r)) * 1000
    print "Overall transverse RMSD (microns)", math.sqrt(flex.sum_sq(t)/len(t)) * 1000

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(h.slot_centers().as_numpy_array(), h.slots().as_numpy_array(), '-')

    vmax = self.params.residuals.plot_max
    if self.params.residuals.histogram_xmax is not None:
      ax.set_xlim((0,self.params.residuals.histogram_xmax))
    if self.params.residuals.histogram_ymax is not None:
      ax.set_ylim((0,self.params.residuals.histogram_ymax))
    plt.title(title)


    ax.plot((mean, mean), (0, flex.max(h.slots())), 'g-')
    ax.plot((mean2, mean2), (0, flex.max(h.slots())), 'g--')
    ax.plot((mode, mode), (0, flex.max(h.slots())), 'r-')
    ax.plot((rmsd, rmsd), (0, flex.max(h.slots())), 'b-')
    ax.plot((rmsd2, rmsd2), (0, flex.max(h.slots())), 'b--')

    ax.legend([r"$\Delta$XY", "MeanObs", "MeanRayl", "Mode", "RMSDObs", "RMSDRayl"])
    ax.set_xlabel("(mm)")
    ax.set_ylabel("Count")
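
All the Rayleigh-derived quantities printed above follow from the histogram mode, which estimates the Rayleigh sigma: mean = sigma*sqrt(pi/2) and RMSD = sigma*sqrt(2). A numeric sketch with a hypothetical mode:

import math

sigma = 0.015  # hypothetical histogram mode (mm)
print("Rayleigh Mean (microns) %.1f" % (math.sqrt(math.pi / 2) * sigma * 1000))  # ~18.8
print("Rayleigh RMSD (microns) %.1f" % (math.sqrt(2) * sigma * 1000))            # ~21.2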
Example #14
  def compute_functional_and_gradients(self):
    values = self.parameterization(self.x)
    assert -150. < values.BFACTOR < 150. # limits on the exponent, please
    self.func = self.refinery.fvec_callable(values)
    functional = flex.sum(self.func*self.func)
    self.f = functional
    DELTA = 1.E-7
    self.g = flex.double()
    for x in xrange(self.n):
      templist = list(self.x)
      templist[x]+=DELTA
      dvalues = flex.double(templist)

      dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
      dfunctional = flex.sum(dfunc*dfunc)
      #calculate by finite_difference
      self.g.append( ( dfunctional-functional )/DELTA )
    self.g[2]=0.
    print >> self.out, "rms %10.3f"%math.sqrt(flex.mean(self.func*self.func)),
    values.show(self.out)
    return self.f, self.g
Example #15
def wilson_outliers(reflections, ice_sel=None, p_cutoff=1e-2):
  # http://scripts.iucr.org/cgi-bin/paper?ba0032
  if ice_sel is None:
    ice_sel = flex.bool(len(reflections), False)

  E_cutoff = math.sqrt(-math.log(p_cutoff))
  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  Sigma_n = flex.mean(intensities.select(~ice_sel))
  normalised_amplitudes = flex.sqrt(intensities)/math.sqrt(Sigma_n)

  outliers = normalised_amplitudes >= E_cutoff

  if outliers.count(True):
    # iterative outlier rejection
    inliers = ~outliers
    outliers.set_selected(
      inliers, wilson_outliers(
        reflections.select(inliers), ice_sel.select(inliers)))

  return outliers
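
The cutoff above comes from the acentric Wilson distribution of normalised amplitudes, where the probability of exceeding E is exp(-E^2), so a p-value cutoff maps to E = sqrt(-ln p). A quick check:

import math

p_cutoff = 1e-2
print(math.sqrt(-math.log(p_cutoff)))  # E_cutoff ~= 2.146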
Example #16
def wilson_outliers(reflections, ice_sel=None, p_cutoff=1e-2):
    # http://scripts.iucr.org/cgi-bin/paper?ba0032
    if ice_sel is None:
        ice_sel = flex.bool(len(reflections), False)

    E_cutoff = math.sqrt(-math.log(p_cutoff))
    intensities = reflections["intensity.sum.value"]

    Sigma_n = flex.mean(intensities.select(~ice_sel))
    normalised_amplitudes = flex.sqrt(intensities) / math.sqrt(Sigma_n)

    outliers = normalised_amplitudes >= E_cutoff

    if outliers.count(True):
        # iterative outlier rejection
        inliers = ~outliers
        outliers.set_selected(
            inliers,
            wilson_outliers(reflections.select(inliers),
                            ice_sel.select(inliers)),
        )

    return outliers
Example #17
    def compute_functional_and_gradients(self):
        values = self.parameterization(self.x)
        assert -150. < values.BFACTOR < 150.  # limits on the exponent, please
        self.func = self.refinery.fvec_callable(values)
        functional = flex.sum(self.func * self.func)
        self.f = functional
        DELTA = 1.E-7
        self.g = flex.double()
        for x in range(self.n):
            templist = list(self.x)
            templist[x] += DELTA
            dvalues = flex.double(templist)

            dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
            dfunctional = flex.sum(dfunc * dfunc)
            #calculate by finite_difference
            self.g.append((dfunctional - functional) / DELTA)
        self.g[2] = 0.
        print("rms %10.3f" % math.sqrt(flex.mean(self.func * self.func)),
              end=' ',
              file=self.out)
        values.show(self.out)
        return self.f, self.g
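
A self-contained sketch of the forward-difference gradient loop used above, on a toy sum-of-squares target (pure Python, no refinery required):

DELTA = 1.E-7

def functional(x):
    # toy least-squares target with minimum at (1, 2)
    return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2

x = [3.0, 5.0]
f0 = functional(x)
g = []
for i in range(len(x)):
    shifted = list(x)
    shifted[i] += DELTA
    g.append((functional(shifted) - f0) / DELTA)  # finite difference
print(g)  # close to the analytical gradient [4.0, 6.0]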
Example #18
  def setup_work_arrays(self, reflections):
    '''Select multiply-measured HKLs. Calculate and cache reflection deltas, deltas squared, and HKL means for every reflection'''
    self.deltas     = flex.double()
    self.work_table = flex.reflection_table()
    delta_sq        = flex.double()
    mean            = flex.double() # mean = <I'_hj>
    biased_mean     = flex.double() # biased_mean = <I_h>, so don't leave out any reflection
    var             = flex.double()
    all_biased_mean = flex.double()

    for refls in reflection_table_utils.get_next_hkl_reflection_table(reflections):
      number_of_measurements = refls.size()
      if number_of_measurements == 0: # if the returned "refls" list is empty, it's the end of the input "reflections" list
        break
      refls_biased_mean = flex.double(len(refls), flex.mean(refls['intensity.sum.value']))
      all_biased_mean.extend(refls_biased_mean)

      if number_of_measurements > self.params.merging.minimum_multiplicity:
        nn_factor_sqrt = math.sqrt((number_of_measurements - 1) / number_of_measurements)
        i_sum = flex.double(number_of_measurements, flex.sum(refls['intensity.sum.value']))
        i_sum_minus_val = i_sum - refls['intensity.sum.value']
        mean_without_val = i_sum_minus_val/(number_of_measurements-1)
        delta = nn_factor_sqrt * (refls['intensity.sum.value'] - mean_without_val)
        self.deltas.extend(delta/flex.sqrt(refls['intensity.sum.variance'])) # Please be careful about where to put the var
        delta_sq.extend(delta**2)
        mean.extend(mean_without_val)
        biased_mean.extend(refls_biased_mean)
        var.extend(refls['intensity.sum.variance'])

    self.work_table["delta_sq"]    = delta_sq
    self.work_table["mean"]        = mean
    self.work_table["biased_mean"] = biased_mean
    self.work_table["var"]         = var
    reflections['biased_mean'] = all_biased_mean
    self.logger.log("Number of work reflections selected: %d"%self.deltas.size())
    return reflections
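
The mean_without_val trick above avoids recomputing a mean per measurement: with S the sum of the n measurements, the leave-one-out mean for measurement i is (S - I_i)/(n - 1). A numeric check:

values = [10.0, 12.0, 14.0]
s, n = sum(values), len(values)
for v in values:
    print((s - v) / (n - 1))  # 13.0, 12.0, 11.0: the mean of the other two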
Example #19
    def compute_functional_and_gradients_test_code(self):
        values = self.parameterization(self.x)
        assert -150. < values.BFACTOR < 150.  # limits on the exponent, please
        self.func = self.refinery.fvec_callable(values)
        functional = flex.sum(self.func * self.func)
        self.f = functional
        jacobian = self.refinery.jacobian_callable(values)
        self.gg_0 = flex.sum(2. * self.func * jacobian[0])
        self.gg_1 = flex.sum(2. * self.func * jacobian[1])
        self.gg_3 = flex.sum(2. * self.func * jacobian[3])
        self.gg_4 = flex.sum(2. * self.func * jacobian[4])
        DELTA = 1.E-7
        self.g = flex.double()
        for x in xrange(self.n):
            templist = list(self.x)
            templist[x] += DELTA
            dvalues = flex.double(templist)

            dfunc = self.refinery.fvec_callable(self.parameterization(dvalues))
            dfunctional = flex.sum(dfunc * dfunc)
            #calculate by finite_difference
            self.g.append((dfunctional - functional) / DELTA)
        self.g[2] = 0.

        print >> self.out, "rms %10.3f" % math.sqrt(
            flex.mean(self.func * self.func)),
        values.show(self.out)
        print >> self.out, "derivatives--> %15.5f    %15.5f    %9.7f   %5.2f   %5.2f" % tuple(
            self.g)
        print >> self.out, "  analytical-> %15.5f    %15.5f                %5.2f   %5.2f" % (
            self.gg_0, self.gg_1, self.gg_3, self.gg_4)
        self.g[0] = self.gg_0
        self.g[1] = self.gg_1
        self.g[3] = self.gg_3
        self.g[4] = self.gg_4
        return self.f, self.g
Example #20
    def __call__(self):
        """Determine optimal mosaicity and domain size model (monochromatic)"""
        if self.refinery is None:
            RR = self.reflections
        else:
            RR = self.refinery.predict_for_reflection_table(self.reflections)

        all_crystals = []
        self.nv_acceptance_flags = flex.bool(len(self.reflections["id"]))
        from dxtbx.model import MosaicCrystalSauter2014

        for iid, experiment in enumerate(self.experiments):
            excursion_rad = RR["delpsical.rad"].select(RR["id"] == iid)
            delta_psi_deg = excursion_rad * 180.0 / math.pi
            logger.info("")
            logger.info("%s %s", flex.max(delta_psi_deg),
                        flex.min(delta_psi_deg))
            mean_excursion = flex.mean(delta_psi_deg)
            logger.info(
                "The mean excursion is %7.3f degrees, r.m.s.d %7.3f",
                mean_excursion,
                math.sqrt(flex.mean(RR["delpsical2"].select(RR["id"] == iid))),
            )

            crystal = MosaicCrystalSauter2014(self.experiments[iid].crystal)
            self.experiments[iid].crystal = crystal
            beam = self.experiments[iid].beam
            miller_indices = self.reflections["miller_index"].select(
                self.reflections["id"] == iid)

            # FIXME XXX revise this formula so as to use a different wavelength potentially for each reflection
            two_thetas = crystal.get_unit_cell().two_theta(
                miller_indices, beam.get_wavelength(), deg=True)
            dspacings = crystal.get_unit_cell().d(miller_indices)

            # First -- try to get a reasonable envelope for the observed excursions.
            # minimum of three regions; maximum of 50 measurements in each bin
            logger.info("fitting parameters on %d spots", len(excursion_rad))
            n_bins = min(max(3, len(excursion_rad) // 25), 50)
            bin_sz = len(excursion_rad) // n_bins
            logger.info("nbins %s bin_sz %s", n_bins, bin_sz)
            order = flex.sort_permutation(two_thetas)
            two_thetas_env = flex.double()
            dspacings_env = flex.double()
            excursion_rads_env = flex.double()
            for x in range(0, n_bins):
                subset = order[x * bin_sz:(x + 1) * bin_sz]
                two_thetas_env.append(flex.mean(two_thetas.select(subset)))
                dspacings_env.append(flex.mean(dspacings.select(subset)))
                excursion_rads_env.append(
                    flex.max(flex.abs(excursion_rad.select(subset))))

            # Second -- parameter fit
            # solve the normal equations
            sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
            sum_inv_u = flex.sum(dspacings_env)
            sum_te_u = flex.sum(dspacings_env * excursion_rads_env)
            sum_te = flex.sum(excursion_rads_env)
            Normal_Mat = sqr(
                (sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
            Vector = col((sum_te_u, sum_te))
            solution = Normal_Mat.inverse() * Vector
            s_ang = 1.0 / (2 * solution[0])
            logger.info("Best LSQ fit Scheerer domain size is %9.2f ang",
                        s_ang)

            k_degrees = solution[1] * 180.0 / math.pi
            logger.info(
                "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f",
                2 * k_degrees,
                k_degrees,
            )

            from xfel.mono_simulation.max_like import minimizer

            # coerce the estimates to be positive for max-likelihood
            lower_limit_domain_size = (
                math.pow(crystal.get_unit_cell().volume(), 1.0 / 3.0) * 3
            )  # params.refinement.domain_size_lower_limit

            d_estimate = max(s_ang, lower_limit_domain_size)
            M = minimizer(
                d_i=dspacings,
                psi_i=excursion_rad,
                eta_rad=abs(2.0 * solution[1]),
                Deff=d_estimate,
            )
            logger.info(
                "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots",
                M.x[1] * 180.0 / math.pi,
                2.0 / M.x[0],
                len(two_thetas),
            )
            tan_phi_rad_ML = dspacings / (2.0 / M.x[0])
            tan_phi_deg_ML = tan_phi_rad_ML * 180.0 / math.pi
            tan_outer_deg_ML = tan_phi_deg_ML + 0.5 * M.x[1] * 180.0 / math.pi

            # Only set the flags for those reflections that were indexed for this lattice
            self.nv_acceptance_flags.set_selected(
                self.reflections["id"] == iid,
                flex.abs(delta_psi_deg) < tan_outer_deg_ML,
            )

            if (
                    self.graph_verbose
            ):  # params.refinement.mosaic.enable_AD14F7B: # Excursion vs resolution fit
                AD1TF7B_MAX2T = 30.0
                AD1TF7B_MAXDP = 1.0
                from matplotlib import pyplot as plt

                plt.plot(two_thetas, delta_psi_deg, "bo")
                minplot = flex.min(two_thetas)
                plt.plot([0, minplot], [mean_excursion, mean_excursion], "k-")
                LR = flex.linear_regression(two_thetas, delta_psi_deg)
                model_y = LR.slope() * two_thetas + LR.y_intercept()
                plt.plot(two_thetas, model_y, "k-")

                plt.title(
                    "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots" %
                    (M.x[1] * 180.0 / math.pi, 2.0 / M.x[0], len(two_thetas)))
                plt.plot(two_thetas, tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, tan_outer_deg_ML, "g.")
                plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
                plt.xlim([0, AD1TF7B_MAX2T])
                plt.ylim([-AD1TF7B_MAXDP, AD1TF7B_MAXDP])
                plt.show()
                plt.close()

            from xfel.mono_simulation.util import green_curve_area

            self.green_curve_area = green_curve_area(two_thetas,
                                                     tan_outer_deg_ML)
            logger.info("The green curve area is %s", self.green_curve_area)

            crystal.set_half_mosaicity_deg(M.x[1] * 180.0 / (2.0 * math.pi))
            crystal.set_domain_size_ang(2.0 / M.x[0])
            self._ML_full_mosaicity_rad = M.x[1]
            self._ML_domain_size_ang = 2.0 / M.x[0]

            # params.refinement.mosaic.model_expansion_factor
            """The expansion factor should be initially set to 1, then expanded so that the # reflections matched becomes
        as close as possible to # of observed reflections input, in the last integration call.  Determine this by
        inspecting the output log file interactively.  Do not exceed the bare minimum threshold needed.
        The intention is to find an optimal value, global for a given dataset."""
            model_expansion_factor = 1.4
            crystal.set_half_mosaicity_deg(crystal.get_half_mosaicity_deg() *
                                           model_expansion_factor)
            crystal.set_domain_size_ang(crystal.get_domain_size_ang() /
                                        model_expansion_factor)

            if (self.ewald_proximal_volume(iid) >
                    self.params.indexing.stills.ewald_proximal_volume_max):
                raise DialsIndexError("Ewald proximity volume too high, %f" %
                                      self.ewald_proximal_volume(iid))

            all_crystals.append(crystal)
        return all_crystals
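
The "Second -- parameter fit" step above solves 2x2 normal equations for the envelope line excursion = m*d + c, and the Scherrer-style domain size is read off as 1/(2m). A pure-Python sketch on hypothetical envelope points:

# Hypothetical envelope: d-spacings (Angstrom) vs excursion (rad)
d = [4.0, 3.0, 2.0]
e = [0.0030, 0.0024, 0.0018]

s_dd, s_d = sum(x * x for x in d), sum(d)
s_ed, s_e = sum(x * y for x, y in zip(d, e)), sum(e)
n = len(d)
det = s_dd * n - s_d * s_d
m = (n * s_ed - s_d * s_e) / det   # slope of the fitted envelope line
print(1.0 / (2.0 * m))             # domain size estimate, ~833 Angstrom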
Example #21
  def __init__(self, measurements_orig, params, i_model, miller_set, result, out):
    measurements = measurements_orig.deep_copy()
    # Now manipulate the data to conform to unit cell, asu, and space group
    # of reference.  The resolution will be cut later.
    # Only works if there is NOT an indexing ambiguity!
    observations = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      ).map_to_asu()

    observations_original_index = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      )

    # Ensure that match_multi_indices() will return identical results
    # when a frame's observations are matched against the
    # pre-generated Miller set, self.miller_set, and the reference
    # data set, self.i_model.  The implication is that the same match
    # can be used to map Miller indices to array indices for intensity
    # accumulation, and for determination of the correlation
    # coefficient in the presence of a scaling reference.

    assert len(i_model.indices()) == len(miller_set.indices()) \
        and  (i_model.indices() ==
              miller_set.indices()).count(False) == 0
    matches = miller.match_multi_indices(
      miller_indices_unique=miller_set.indices(),
      miller_indices=observations.indices())

    pair1 = flex.int([pair[1] for pair in matches.pairs()])
    pair0 = flex.int([pair[0] for pair in matches.pairs()])
    # narrow things down to the set that matches, only
    observations_pair1_selected = observations.customized_copy(
      indices = flex.miller_index([observations.indices()[p] for p in pair1]),
      data = flex.double([observations.data()[p] for p in pair1]),
      sigmas = flex.double([observations.sigmas()[p] for p in pair1]),
    )
    observations_original_index_pair1_selected = observations_original_index.customized_copy(
      indices = flex.miller_index([observations_original_index.indices()[p] for p in pair1]),
      data = flex.double([observations_original_index.data()[p] for p in pair1]),
      sigmas = flex.double([observations_original_index.sigmas()[p] for p in pair1]),
    )
###################
    I_observed = observations_pair1_selected.data()
    chosen = chosen_weights(observations_pair1_selected, params)

    MILLER = observations_original_index_pair1_selected.indices()
    ORI = result["current_orientation"][0]
    Astar = matrix.sqr(ORI.reciprocal_matrix())
    WAVE = result["wavelength"]
    BEAM = matrix.col((0.0,0.0,-1./WAVE))
    BFACTOR = 0.

    #calculation of correlation here
    I_reference = flex.double([i_model.data()[pair[0]] for pair in matches.pairs()])
    use_weights = False # New facility for getting variance-weighted correlation

    if use_weights:
       #variance weighting
      I_weight = flex.double(
        [1./(observations_pair1_selected.sigmas()[pair[1]])**2 for pair in matches.pairs()])
    else:
      I_weight = flex.double(len(observations_pair1_selected.sigmas()), 1.)

    """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
       include_negatives = True
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)

       include_negatives = False
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)
    """
    if params.include_negatives:
      SWC = simple_weighted_correlation(I_weight, I_reference, I_observed)
    else:
      non_positive = ( observations_pair1_selected.data() <= 0 )
      SWC = simple_weighted_correlation(I_weight.select(~non_positive),
            I_reference.select(~non_positive), I_observed.select(~non_positive))

    print >> out, "Old correlation is", SWC.corr
    assert params.postrefinement.algorithm=="rs_hybrid"
    Rhall = flex.double()
    for mill in MILLER:
        H = matrix.col(mill)
        Xhkl = Astar*H
        Rh = ( Xhkl + BEAM ).length() - (1./WAVE)
        Rhall.append(Rh)
    Rs = math.sqrt(flex.mean(Rhall*Rhall))

    RS = 1./10000. # reciprocal effective domain size of 1 micron
    RS = Rs        # try this empirically determined approximate, monochrome, a-mosaic value

    self.rs2_current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])
    self.rs2_parameterization_class = rs_parameterization

    self.rs2_refinery = rs2_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed, WEIGHTS = chosen)
    self.rs2_refinery.set_profile_shape(params.postrefinement.lineshape)
    self.nave1_refinery = nave1_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed, WEIGHTS = chosen)
    self.nave1_refinery.set_profile_shape(params.postrefinement.lineshape)

    self.out = out
    self.params = params
    self.miller_set = miller_set
    self.observations_pair1_selected = observations_pair1_selected
    self.observations_original_index_pair1_selected = observations_original_index_pair1_selected
    self.i_model = i_model
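
The initial RS estimate above is the root mean square of the excursions Rh of each reflection from the Ewald sphere; a compact pure-Python equivalent on made-up values:

import math

rhall = [0.0002, -0.0001, 0.00015]  # hypothetical excursions (1/Angstrom)
rs = math.sqrt(sum(r * r for r in rhall) / len(rhall))
print(rs)  # ~1.55e-4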
Example #22
  def run(self, experiments, reflections):

    self.logger.log_step_time("POLARIZATION_CORRECTION")

    result = flex.reflection_table()

    for experiment in experiments:
      refls = reflections.select(reflections['exp_id'] == experiment.identifier)
      beam = experiment.beam
      # Remove the need for pixel size within cxi.merge.  Allows multipanel detector with dissimilar panels.
      # Relies on new frame extractor code called by dials.stills_process that writes s0, s1 and polarization normal
      # vectors all to the integration pickle.  Future path (IE THIS CODE): use dials json and reflection file.
      s0_vec = matrix.col(beam.get_s0()).normalize()
      s0_polar_norm = beam.get_polarization_normal()
      s1_vec = refls['s1']
      Ns1 = len(s1_vec)
      # project the s1_vector onto the plane normal to s0.  Get result by subtracting the
      # projection of s1 onto s0, which is (s1.dot.s0_norm)s0_norm
      s0_norm = flex.vec3_double(Ns1,s0_vec)
      s1_proj = (s1_vec.dot(s0_norm))*s0_norm
      s1_in_normal_plane = s1_vec - s1_proj
      # Now want the polar angle between the projected s1 and the polarization normal
      s0_polar_norms = flex.vec3_double(Ns1,s0_polar_norm)
      dotprod = (s1_in_normal_plane.dot(s0_polar_norms))
      costheta = dotprod/(s1_in_normal_plane.norms())
      theta = flex.acos(costheta)
      cos_two_polar_angle = flex.cos(2.0*theta)
      # gives same as old answer to ~1% but not exact.  Not sure why, should not matter.

      tt_vec = experiment.crystal.get_unit_cell().two_theta(miller_indices = refls['miller_index'],
                                                            wavelength = beam.get_wavelength())
      cos_tt_vec = flex.cos(tt_vec)
      sin_tt_vec = flex.sin(tt_vec)
      cos_sq_tt_vec = cos_tt_vec * cos_tt_vec
      sin_sq_tt_vec = sin_tt_vec * sin_tt_vec
      P_nought_vec = 0.5 * (1. + cos_sq_tt_vec)

      F_prime = -1.0 # Hard-coded value defines the incident polarization axis
      P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq_tt_vec

      # added as a diagnostic
      #prange=P_nought_vec - P_prime
      #other_F_prime = 1.0
      #otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec
      #otherprange=P_nought_vec - otherP_prime
      #diff2 = flex.abs(prange - otherprange)
      #print >> out, "mean diff is",flex.mean(diff2), "range",flex.min(diff2), flex.max(diff2)
      # done

      correction = 1 / ( P_nought_vec - P_prime )
      refls['intensity.sum.value'] = refls['intensity.sum.value'] * correction
      refls['intensity.sum.variance'] = refls['intensity.sum.variance'] * correction**2 # propagated error
      # This corrects observations for polarization assuming 100% polarization on
      # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0)
      # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre
      # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15.

      result.extend(refls)

    if len(reflections) > 0:
      self.logger.log("Applied polarization correction. Mean intensity changed from %.2f to %.2f"%(flex.mean(reflections['intensity.sum.value']), flex.mean(result['intensity.sum.value'])))

    self.logger.log_step_time("POLARIZATION_CORRECTION", True)

    return experiments, result
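
The correction applied above scales each intensity by 1/(P0 - P'), with P0 = (1 + cos^2 2theta)/2 and P' = F' cos(2 phi) sin^2(2 theta)/2, following Kahn et al. (1982). A scalar sketch for one hypothetical reflection:

import math

two_theta = math.radians(30.0)  # hypothetical scattering angle 2theta
polar = math.radians(10.0)      # hypothetical angle to the polarization normal
F_prime = -1.0                  # fully polarized along one axis, as above

P_nought = 0.5 * (1.0 + math.cos(two_theta) ** 2)
P_prime = 0.5 * F_prime * math.cos(2.0 * polar) * math.sin(two_theta) ** 2
correction = 1.0 / (P_nought - P_prime)
print(correction)  # intensities scale by this, variances by its square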
Example #23
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)

    # Find all detector objects
    detectors = experiments.detectors()

    # Verify inputs
    if len(params.input.reflections) == len(detectors) and len(detectors) > 1:
      # case for passing in multiple images on the command line
      assert len(params.input.reflections) == len(detectors)
      reflections = flex.reflection_table()
      for expt_id in xrange(len(detectors)):
        subset = params.input.reflections[expt_id].data
        subset['id'] = flex.int(len(subset), expt_id)
        reflections.extend(subset)
    else:
      # case for passing in combined experiments and reflections
      reflections = flatten_reflections(params.input.reflections)[0]

    detector = detectors[0]

    #from dials.algorithms.refinement.prediction import ExperimentsPredictor
    #ref_predictor = ExperimentsPredictor(experiments, force_stills=experiments.all_stills())

    print "N reflections total:", len(reflections)
    if params.residuals.exclude_outliers:
      reflections = reflections.select(reflections.get_flags(reflections.flags.used_in_refinement))
      print "N reflections used in refinement:", len(reflections)
      print "Reporting only on those reflections used in refinement"

    if self.params.residuals.i_sigi_cutoff is not None:
      sel = (reflections['intensity.sum.value']/flex.sqrt(reflections['intensity.sum.variance'])) >= self.params.residuals.i_sigi_cutoff
      reflections = reflections.select(sel)
      print "After filtering by I/sigi cutoff of %f, there are %d reflections left"%(self.params.residuals.i_sigi_cutoff,len(reflections))

    reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()

    n = len(reflections)
    rmsd = self.get_weighted_rmsd(reflections)
    print "Dataset RMSD (microns)", rmsd * 1000

    if params.tag is None:
      tag = ''
    else:
      tag = '%s '%params.tag

    # set up delta-psi ratio heatmap
    p = flex.int() # positive
    n = flex.int() # negative
    for i in set(reflections['id']):
      exprefls = reflections.select(reflections['id']==i)
      p.append(len(exprefls.select(exprefls['delpsical.rad']>0)))
      n.append(len(exprefls.select(exprefls['delpsical.rad']<0)))
    plt.hist2d(p, n, bins=30)
    cb = plt.colorbar()
    cb.set_label("N images")
    plt.title(r"%s2D histogram of pos vs. neg $\Delta\Psi$ per image"%tag)
    plt.xlabel(r"N reflections with $\Delta\Psi$ > 0")
    plt.ylabel(r"N reflections with $\Delta\Psi$ < 0")

    self.delta_scalar = 50

    # Iterate through the detectors, computing detector statistics at the per-panel level (IE one statistic per panel)
    # Per panel dictionaries
    rmsds = {}
    refl_counts = {}
    transverse_rmsds = {}
    radial_rmsds = {}
    ttdpcorr = {}
    pg_bc_dists = {}
    mean_delta_two_theta = {}
    # per panelgroup flex arrays
    pg_rmsds = flex.double()
    pg_r_rmsds = flex.double()
    pg_t_rmsds = flex.double()
    pg_refls_count = flex.int()
    pg_refls_count_d = {}
    table_header = ["PG id", "RMSD","Radial", "Transverse", "N refls"]
    table_header2 = ["","(um)","RMSD (um)","RMSD (um)",""]
    table_data = []
    table_data.append(table_header)
    table_data.append(table_header2)

    # Compute a set of radial and transverse displacements for each reflection
    print "Setting up stats..."
    tmp = flex.reflection_table()
    # Need to construct a variety of vectors
    for panel_id, panel in enumerate(detector):
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      bcl = flex.vec3_double()
      tto = flex.double()
      ttc = flex.double()
      # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
      # the panel, if it did intersect the panel)
      for expt_id in set(panel_refls['id']):
        beam = experiments[expt_id].beam
        s0 = beam.get_s0()
        expt_refls = panel_refls.select(panel_refls['id'] == expt_id)
        beam_centre = panel.get_beam_centre_lab(s0)
        bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
        obs_x, obs_y, _ = expt_refls['xyzobs.px.value'].parts()
        cal_x, cal_y, _ = expt_refls['xyzcal.px'].parts()
        tto.extend(flex.double([panel.get_two_theta_at_pixel(s0, (obs_x[i], obs_y[i])) for i in xrange(len(expt_refls))]))
        ttc.extend(flex.double([panel.get_two_theta_at_pixel(s0, (cal_x[i], cal_y[i])) for i in xrange(len(expt_refls))]))
      panel_refls['beam_centre_lab'] = bcl
      panel_refls['two_theta_obs'] = tto * (180/math.pi)
      panel_refls['two_theta_cal'] = ttc * (180/math.pi) #+ (0.5*panel_refls['delpsical.rad']*panel_refls['two_theta_obs'])
      # Compute obs in lab space
      x, y, _ = panel_refls['xyzobs.mm.value'].parts()
      c = flex.vec2_double(x, y)
      panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
      # Compute deltaXY in panel space. This vector is relative to the panel origin
      x, y, _ = (panel_refls['xyzcal.mm'] - panel_refls['xyzobs.mm.value']).parts()
      # Convert deltaXY to lab space, subtracting off of the panel origin
      panel_refls['delta_lab_coords'] = panel.get_lab_coord(flex.vec2_double(x,y)) - panel.get_origin()
      tmp.extend(panel_refls)
    reflections = tmp
    # The radial vector points from the center of the reflection to the beam center
    radial_vectors = (reflections['obs_lab_coords'] - reflections['beam_centre_lab']).each_normalize()
    # The transverse vector is orthogonal to the radial vector and the beam vector
    transverse_vectors = radial_vectors.cross(reflections['beam_centre_lab']).each_normalize()
    # Compute the radial and transverse components of each deltaXY
    reflections['radial_displacements']     = reflections['delta_lab_coords'].dot(radial_vectors)
    reflections['transverse_displacements'] = reflections['delta_lab_coords'].dot(transverse_vectors)

    # Iterate through the detector at the specified hierarchy level
    for pg_id, pg in enumerate(iterate_detector_at_level(detector.hierarchy(), 0, params.hierarchy_level)):
      pg_msd_sum = 0
      pg_r_msd_sum = 0
      pg_t_msd_sum = 0
      pg_refls = 0
      pg_delpsi = flex.double()
      pg_deltwotheta = flex.double()
      for p in iterate_panels(pg):
        panel_id = id_from_name(detector, p.get_name())
        panel_refls = reflections.select(reflections['panel'] == panel_id)
        n = len(panel_refls)
        pg_refls += n

        delta_x = panel_refls['xyzcal.mm'].parts()[0] - panel_refls['xyzobs.mm.value'].parts()[0]
        delta_y = panel_refls['xyzcal.mm'].parts()[1] - panel_refls['xyzobs.mm.value'].parts()[1]

        tmp = flex.sum((delta_x**2)+(delta_y**2))
        pg_msd_sum += tmp

        r = panel_refls['radial_displacements']
        t = panel_refls['transverse_displacements']
        pg_r_msd_sum += flex.sum_sq(r)
        pg_t_msd_sum += flex.sum_sq(t)

        pg_delpsi.extend(panel_refls['delpsical.rad']*180/math.pi)
        pg_deltwotheta.extend(panel_refls['two_theta_obs'] - panel_refls['two_theta_cal'])

      bc = col(pg.get_beam_centre_lab(s0))
      ori = get_center(pg)
      pg_bc_dists[pg.get_name()] = (ori-bc).length()
      if len(pg_deltwotheta) > 0:
        mean_delta_two_theta[pg.get_name()] = flex.mean(pg_deltwotheta)
      else:
        mean_delta_two_theta[pg.get_name()] = 0

      if pg_refls == 0:
        pg_rmsd = pg_r_rmsd = pg_t_rmsd = 0
      else:
        pg_rmsd = math.sqrt(pg_msd_sum/pg_refls) * 1000
        pg_r_rmsd = math.sqrt(pg_r_msd_sum/pg_refls) * 1000
        pg_t_rmsd = math.sqrt(pg_t_msd_sum/pg_refls) * 1000
      pg_rmsds.append(pg_rmsd)
      pg_r_rmsds.append(pg_r_rmsd)
      pg_t_rmsds.append(pg_t_rmsd)
      pg_refls_count.append(pg_refls)
      pg_refls_count_d[pg.get_name()] = pg_refls
      table_data.append(["%d"%pg_id, "%.1f"%pg_rmsd, "%.1f"%pg_r_rmsd, "%.1f"%pg_t_rmsd, "%6d"%pg_refls])

      refl_counts[pg.get_name()] = pg_refls
      if pg_refls == 0:
        rmsds[pg.get_name()] = -1
        radial_rmsds[pg.get_name()] = -1
        transverse_rmsds[pg.get_name()] = -1
        ttdpcorr[pg.get_name()] = -1
      else:
        rmsds[pg.get_name()] = pg_rmsd
        radial_rmsds[pg.get_name()]     = pg_r_rmsd
        transverse_rmsds[pg.get_name()] = pg_t_rmsd

        lc = flex.linear_correlation(pg_delpsi, pg_deltwotheta)
        ttdpcorr[pg.get_name()] = lc.coefficient()


    r1 = ["Weighted mean"]
    r2 = ["Weighted stddev"]
    if len(pg_rmsds) > 1:
      stats = flex.mean_and_variance(pg_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_r_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
      stats = flex.mean_and_variance(pg_t_rmsds, pg_refls_count.as_double())
      r1.append("%.1f"%stats.mean())
      r2.append("%.1f"%stats.gsl_stats_wsd())
    else:
      r1.extend([""]*3)
      r2.extend([""]*3)
    r1.append("")
    r2.append("")
    table_data.append(r1)
    table_data.append(r2)
    table_data.append(["Mean", "", "", "", "%8.1f"%flex.mean(pg_refls_count.as_double())])

    from libtbx import table_utils
    print "Detector statistics.  Angles in degrees, RMSDs in microns"
    print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    self.histogram(reflections, '%sDifference vector norms (mm)'%tag)

    if params.show_plots:
      if self.params.tag is None:
        t = ""
      else:
        t = "%s "%self.params.tag
      self.image_rmsd_histogram(reflections, tag)

      # Plots! these are plots with callbacks to draw on individual panels
      self.detector_plot_refls(detector, reflections, '%sOverall positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltas)
      self.detector_plot_refls(detector, reflections, '%sRadial positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_radial_deltas)
      self.detector_plot_refls(detector, reflections, '%sTransverse positional displacements (mm)'%tag, show=False, plot_callback=self.plot_obs_colored_by_transverse_deltas)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta\Psi$'%tag, show=False, plot_callback=self.plot_obs_colored_by_deltapsi, colorbar_units=r"$\circ$")
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY*%s'%(tag, self.delta_scalar), show=False, plot_callback=self.plot_deltas)
      self.detector_plot_refls(detector, reflections, '%sSP Manual CDF'%tag, show=False, plot_callback=self.plot_cdf_manually)
      self.detector_plot_refls(detector, reflections, r'%s$\Delta$XY Histograms'%tag, show=False, plot_callback=self.plot_histograms)
      self.detector_plot_refls(detector, reflections, r'%sRadial displacements vs. $\Delta\Psi$, colored by $\Delta$XY'%tag, show=False, plot_callback=self.plot_radial_displacements_vs_deltapsi)
      self.detector_plot_refls(detector, reflections, r'%sDistance vector norms'%tag, show=False, plot_callback=self.plot_difference_vector_norms_histograms)

      # Plot intensity vs. radial_displacement
      fig = plt.figure()
      panel_id = 15
      panel_refls = reflections.select(reflections['panel'] == panel_id)
      a = panel_refls['radial_displacements']
      b = panel_refls['intensity.sum.value']
      sel = (a > -0.2) & (a < 0.2) & (b < 50000)
      plt.hist2d(a.select(sel), b.select(sel), bins=100)
      plt.title("%s2D histogram of intensity vs. radial displacement for panel %d"%(tag, panel_id))
      plt.xlabel("Radial displacement (mm)")
      plt.ylabel("Intensity")
      ax = plt.colorbar()
      ax.set_label("Counts")

      # Plot delta 2theta vs. deltapsi
      n_bins = 10
      bin_size = len(reflections)//n_bins
      bin_low = []
      bin_high = []
      data = flex.sorted(reflections['two_theta_obs'])
      for i in xrange(n_bins):
        bin_low = data[i*bin_size]
        if (i+1)*bin_size >= len(reflections):
          bin_high = data[-1]
        else:
          bin_high = data[(i+1)*bin_size]
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low) &
                                   (reflections['two_theta_obs'] <= bin_high))
        a = refls['delpsical.rad']*180/math.pi
        b = refls['two_theta_obs'] - refls['two_theta_cal']
        fig = plt.figure()
        sel = (a > -0.2) & (a < 0.2) & (b > -0.05) & (b < 0.05)
        plt.hist2d(a.select(sel), b.select(sel), bins=50, range = [[-0.2, 0.2], [-0.05, 0.05]])
        cb = plt.colorbar()
        cb.set_label("N reflections")
        plt.title(r'%sBin %d (%.02f, %.02f 2$\Theta$) $\Delta2\Theta$ vs. $\Delta\Psi$. Showing %d of %d refls'%(tag,i,bin_low,bin_high,len(a.select(sel)),len(a)))
        plt.xlabel(r'$\Delta\Psi \circ$')
        plt.ylabel(r'$\Delta2\Theta \circ$')

      # Plot delta 2theta vs. 2theta
      a = reflections['two_theta_obs']
      b = reflections['two_theta_obs'] - reflections['two_theta_cal']
      fig = plt.figure()
      limits = -0.05, 0.05
      sel = (b > limits[0]) & (b < limits[1])
      plt.hist2d(a.select(sel), b.select(sel), bins=100, range=((0,50), limits))
      plt.clim((0,100))
      cb = plt.colorbar()
      cb.set_label("N reflections")
      plt.title(r'%s$\Delta2\Theta$ vs. 2$\Theta$. Showing %d of %d refls'%(tag,len(a.select(sel)),len(a)))
      plt.xlabel(r'2$\Theta \circ$')
      plt.ylabel(r'$\Delta2\Theta \circ$')

      # calc the trendline
      z = np.polyfit(a.select(sel), b.select(sel), 1)
      print 'y=%.7fx+(%.7f)'%(z[0],z[1])

      # Plots with single values per panel
      self.detector_plot_dict(detector, refl_counts, u"%s N reflections"%t, u"%6d", show=False)
      self.detector_plot_dict(detector, rmsds, "%s Positional RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, radial_rmsds, "%s Radial RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, transverse_rmsds, "%s Transverse RMSDs (microns)"%t, u"%4.1f", show=False)
      self.detector_plot_dict(detector, ttdpcorr, r"%s $\Delta2\Theta$ vs. $\Delta\Psi$ CC"%t, u"%5.3f", show=False)

      self.plot_unitcells(experiments)
      self.plot_data_by_two_theta(reflections, tag)

      # Plot data by panel group
      sorted_values = sorted(pg_bc_dists.values())
      vdict = {}
      for k in pg_bc_dists:
        vdict[pg_bc_dists[k]] = k
      sorted_keys = [vdict[v] for v in sorted_values if vdict[v] in rmsds]
      x = [sorted_values[i] for i in xrange(len(sorted_values)) if pg_bc_dists.keys()[i] in rmsds]

      self.plot_multi_data(x,
                           [[pg_refls_count_d[k] for k in sorted_keys],
                            ([rmsds[k] for k in sorted_keys],
                             [radial_rmsds[k] for k in sorted_keys],
                             [transverse_rmsds[k] for k in sorted_keys]),
                            [radial_rmsds[k]/transverse_rmsds[k] for k in sorted_keys],
                            [mean_delta_two_theta[k] for k in sorted_keys]],
                           "Panel group distance from beam center (mm)",
                           ["N reflections",
                            ("Overall RMSD",
                             "Radial RMSD",
                             "Transverse RMSD"),
                            "R/T RMSD ratio",
                            "Delta two theta"],
                           ["N reflections",
                            "RMSD (microns)",
                            "R/T RMSD ratio",
                            "Delta two theta (degrees)"],
                           "%sData by panelgroup"%tag)

      if self.params.save_pdf:
        pp = PdfPages('residuals_%s.pdf'%(tag.strip()))
        for i in plt.get_fignums():
          pp.savefig(plt.figure(i))
        pp.close()
      else:
        plt.show()
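
The trendline computed above is a plain first-degree least-squares fit. A minimal, self-contained sketch of the same pattern, with synthetic numpy arrays standing in for the DIALS reflection columns:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
two_theta = rng.uniform(0.0, 50.0, 5000)                   # simulated 2-theta (degrees)
delta_tt = 1e-4 * two_theta + rng.normal(0.0, 0.01, 5000)  # simulated residuals

sel = (delta_tt > -0.05) & (delta_tt < 0.05)               # same windowing as above
slope, intercept = np.polyfit(two_theta[sel], delta_tt[sel], 1)
print('y=%.7fx+(%.7f)' % (slope, intercept))

plt.hist2d(two_theta[sel], delta_tt[sel], bins=100, range=((0, 50), (-0.05, 0.05)))
plt.plot([0, 50], [intercept, intercept + slope * 50.0], 'r-')  # overlay the fit
plt.show()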
Example #24
0
def test_constraints_manager_simple_test():

    x = flex.random_double(10)

    # constrain parameters 2 and 4 and 6, 7 and 8
    c1 = EqualShiftConstraint([1, 3], x)
    c2 = EqualShiftConstraint([5, 6, 7], x)

    cm = ConstraintManager([c1, c2], len(x))
    constrained_x = cm.constrain_parameters(x)

    # check the constrained parameters are as expected
    assert len(constrained_x) == 7
    assert constrained_x[5] == flex.mean(x.select([1, 3]))
    assert constrained_x[6] == flex.mean(x[5:8])

    # minimiser would modify the constrained parameters
    mod_constrained_x = constrained_x + 10.0

    # check the expanded parameters are as expected
    expanded = cm.expand_parameters(mod_constrained_x)
    assert (x + 10.0 == expanded).all_eq(True)

    # make a matrix to exercise jacobian compaction
    j = flex.random_double(20 * 10)
    j.reshape(flex.grid(20, 10))

    # for constrained columns, elements that are non-zero in one column are
    # zero in the other columns. Enforce that in this example
    mask2 = flex.bool([True] * 10 + [False] * 10)
    mask4 = ~mask2
    col2 = j.matrix_copy_column(1)
    col2.set_selected(mask2, 0)
    j.matrix_paste_column_in_place(col2, 1)
    col4 = j.matrix_copy_column(3)
    col4.set_selected(mask4, 0)
    j.matrix_paste_column_in_place(col4, 3)

    mask6 = flex.bool([False] * 7 + [True] * 13)
    mask7 = mask6.reversed()
    mask8 = ~(mask6 & mask7)
    col6 = j.matrix_copy_column(5)
    col6.set_selected(mask6, 0)
    j.matrix_paste_column_in_place(col6, 5)
    col7 = j.matrix_copy_column(6)
    col7.set_selected(mask7, 0)
    j.matrix_paste_column_in_place(col7, 6)
    col8 = j.matrix_copy_column(7)
    col8.set_selected(mask8, 0)
    j.matrix_paste_column_in_place(col8, 7)

    cj = cm.constrain_jacobian(j)

    # check expected dimensions
    assert cj.all() == (20, 7)

    # check that the constrained columns are equal to sums of the relevant
    # columns in the original Jacobian
    tmp = j.matrix_copy_column(1) + j.matrix_copy_column(3)
    assert (cj.matrix_copy_column(5) == tmp).all_eq(True)

    tmp = j.matrix_copy_column(5) + j.matrix_copy_column(
        6) + j.matrix_copy_column(7)
    assert (cj.matrix_copy_column(6) == tmp).all_eq(True)

    # convert to a sparse matrix to exercise the sparse Jacobian compaction
    j2 = sparse.matrix(20, 10)
    mask = flex.bool(20, True)
    for i, c in enumerate(j2.cols()):
        c.set_selected(mask, j.matrix_copy_column(i))
    assert (j2.as_dense_matrix() == j).all_eq(True)

    cm2 = SparseConstraintManager([c1, c2], len(x))
    cj2 = cm2.constrain_jacobian(j2)

    # ensure dense and sparse calculations give the same result
    assert (cj2.as_dense_matrix() == cj).all_eq(True)

    # construct derivatives of the objective dL/dp from the Jacobian to test
    # constrain_gradient_vector. Here assume unit weights
    dL_dp = [sum(col.as_dense_vector()) for col in j2.cols()]
    constr_dL_dp = cm.constrain_gradient_vector(dL_dp)

    # check constrained values are equal to sums of relevant elements in the
    # original gradient vector
    assert constr_dL_dp[5] == dL_dp[1] + dL_dp[3]
    assert constr_dL_dp[6] == dL_dp[5] + dL_dp[6] + dL_dp[7]
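
A toy restatement of the behaviour this test exercises may help: an EqualShiftConstraint-style constraint replaces a group of parameters by their mean, and expansion re-applies whatever shift the minimiser gave that mean to every member of the group. The sketch below is a plain-Python illustration of that contract, not the DIALS implementation:

# Toy sketch (not the DIALS code): a group collapses to its mean; a shift
# applied to that mean is re-applied to every member on expansion.
class ToyEqualShift:
    def __init__(self, indices, values):
        self.indices = indices
        self.start = [values[i] for i in indices]

def constrain(params, groups):
    free = [p for i, p in enumerate(params)
            if not any(i in g.indices for g in groups)]
    return free + [sum(g.start) / len(g.start) for g in groups]

def expand(constrained, params, groups):
    shifts = {i: constrained[len(constrained) - len(groups) + k]
                 - sum(g.start) / len(g.start)
              for k, g in enumerate(groups) for i in g.indices}
    free_iter = iter(constrained)
    return [p + shifts[i] if i in shifts else next(free_iter)
            for i, p in enumerate(params)]

x = [float(i) for i in range(10)]
g1, g2 = ToyEqualShift([1, 3], x), ToyEqualShift([5, 6, 7], x)
cx = constrain(x, [g1, g2])
assert len(cx) == 7                      # 5 free parameters + 2 group means
mod = [v + 10.0 for v in cx]             # the "minimiser" shifts everything
assert expand(mod, x, [g1, g2]) == [v + 10.0 for v in x]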
Example #25
0
 def __del__(self):
   values = self.parameterization(self.x)
   print >> self.out, "FINALMODEL",
   print >> self.out, "rms %10.3f"%math.sqrt(flex.mean(self.func*self.func)),
   values.show(self.out)
Example #26
0
    def overall_report(data):

        # Start by adding some overall numbers
        report = collections.OrderedDict()
        report["n"] = len(reflections)
        report["n_full"] = data["full"].count(True)
        report["n_partial"] = data["full"].count(False)
        report["n_overload"] = data["over"].count(True)
        report["n_ice"] = data["ice"].count(True)
        report["n_summed"] = data["sum"].count(True)
        report["n_fitted"] = data["prf"].count(True)
        report["n_integated"] = data["int"].count(True)
        report["n_invalid_bg"] = data["ninvbg"].count(True)
        report["n_invalid_fg"] = data["ninvfg"].count(True)
        report["n_failed_background"] = data["fbgd"].count(True)
        report["n_failed_summation"] = data["fsum"].count(True)
        report["n_failed_fitting"] = data["fprf"].count(True)

        # Compute mean background
        try:
            report["mean_background"] = flex.mean(
                data["background.mean"].select(data["int"]))
        except Exception:
            report["mean_background"] = 0.0

        # Compute mean I/Sigma summation
        try:
            report["ios_sum"] = flex.mean(data["intensity.sum.ios"].select(
                data["sum"]))
        except Exception:
            report["ios_sum"] = 0.0

        # Compute mean I/Sigma profile fitting
        try:
            report["ios_prf"] = flex.mean(data["intensity.prf.ios"].select(
                data["prf"]))
        except Exception:
            report["ios_prf"] = 0.0

        # Compute the mean profile correlation
        try:
            report["cc_prf"] = flex.mean(data["profile.correlation"].select(
                data["prf"]))
        except Exception:
            report["cc_prf"] = 0.0

        # Compute the correlations between summation and profile fitting
        try:
            mask = data["sum"] & data["prf"]
            Isum = data["intensity.sum.value"].select(mask)
            Iprf = data["intensity.prf.value"].select(mask)
            report["cc_pearson_sum_prf"] = pearson_correlation_coefficient(
                Isum, Iprf)
            report["cc_spearman_sum_prf"] = spearman_correlation_coefficient(
                Isum, Iprf)
        except Exception:
            report["cc_pearson_sum_prf"] = 0.0
            report["cc_spearman_sum_prf"] = 0.0

        # Return the overall report
        return report
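
The try/except blocks above all follow one pattern: take the mean of a masked column, defaulting to 0.0 when the selection is empty or the column is absent. A hypothetical helper capturing that pattern (assuming flex from dials.array_family is in scope, as in the other examples; this helper is not part of the DIALS API):

def safe_masked_mean(data, key, mask_key):
    # Mean of data[key] over rows where data[mask_key] is True; 0.0 on failure.
    try:
        return flex.mean(data[key].select(data[mask_key]))
    except Exception:
        return 0.0

# e.g. report["mean_background"] = safe_masked_mean(data, "background.mean", "int")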
Example #27
0
  def integration_concept_detail(self, experiments, reflections, spots,image_number,cb_op_to_primitive,**kwargs):
    detector = experiments[0].detector
    crystal = experiments[0].crystal
    from cctbx.crystal import symmetry
    c_symmetry = symmetry(space_group = crystal.get_space_group(), unit_cell = crystal.get_unit_cell())

    self.image_number = image_number
    NEAR = 10
    pxlsz = detector[0].get_pixel_size()

    Predicted = self.get_predictions_accounting_for_centering(experiments,reflections,cb_op_to_primitive,**kwargs)

    FWMOSAICITY = self.inputai.getMosaicity()
    self.DOMAIN_SZ_ANG = kwargs.get("domain_size_ang",  self.__dict__.get("actual",0)  )
    refineflag = {True:0,False:1}[kwargs.get("domain_size_ang",0)==0]
    c_symmetry.show_summary(prefix="EXCURSION%1d REPORT FWMOS= %6.4f DOMAIN= %6.1f "%(refineflag,FWMOSAICITY,self.DOMAIN_SZ_ANG))
    from annlib_ext import AnnAdaptor
    self.cell = c_symmetry.unit_cell()

    query = flex.double()
    print len(self.predicted)

    for pred in self.predicted: # predicted spot coord in pixels
      query.append(pred[0]/pxlsz[0])
      query.append(pred[1]/pxlsz[1])

    self.reserve_hkllist_for_signal_search = self.hkllist

    reference = flex.double()

    assert self.length > NEAR  # can't do spot/pred matching with too few spots
    for spot in spots:
      reference.append(spot.ctr_mass_x())
      reference.append(spot.ctr_mass_y())

    IS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    IS_adapt.query(query)
    idx_cutoff = float(min(self.mask_focus[image_number]))

    from rstbx.apps.slip_helpers import slip_callbacks
    cache_refinement_spots = getattr(slip_callbacks.slip_callback,"requires_refinement_spots",False)

    indexed_pairs_provisional = []
    correction_vectors_provisional = []
    c_v_p_flex = flex.vec3_double()
    this_setting_matched_indices = reflections["miller_index"]
    for j,item in enumerate(this_setting_matched_indices):
      this_setting_index = self.hkllist.first_index(item)
      if this_setting_index is not None:  # a bare truth test would wrongly skip a match at index 0
        Match = dict(spot=j,pred=this_setting_index)
        indexed_pairs_provisional.append(Match)
        vector = matrix.col(
            [reflections["xyzobs.px.value"][j][0] - self.predicted[Match["pred"]][0]/pxlsz[0],
             reflections["xyzobs.px.value"][j][1] - self.predicted[Match["pred"]][1]/pxlsz[1]])
        correction_vectors_provisional.append(vector)
        c_v_p_flex.append((vector[0],vector[1],0.))
    self.N_correction_vectors = len(correction_vectors_provisional)
    self.rmsd_px = math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))
    print "... %d provisional matches"%self.N_correction_vectors,
    print "r.m.s.d. in pixels: %6.3f"%(self.rmsd_px)

    if self.horizons_phil.integration.enable_residual_scatter:
      from matplotlib import pyplot as plt
      fig = plt.figure()
      for cv in correction_vectors_provisional:
        plt.plot([cv[1]],[-cv[0]],"r.")
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"res")
      plt.close()

    if self.horizons_phil.integration.enable_residual_map:
      from matplotlib import pyplot as plt
      PX = reflections["xyzobs.px.value"]
      fig = plt.figure()
      for match,cv in zip(indexed_pairs_provisional,correction_vectors_provisional):
        plt.plot([PX[match["spot"]][1]],[-PX[match["spot"]][0]],"r.")
        plt.plot([self.predicted[match["pred"]][1]/pxlsz[1]],[-self.predicted[match["pred"]][0]/pxlsz[0]],"g.")
        plt.plot([PX[match["spot"]][1], PX[match["spot"]][1] + 10.*cv[1]],
                 [-PX[match["spot"]][0], -PX[match["spot"]][0] - 10.*cv[0]],'r-')
      if kwargs.get("user-reentrant") is not None and self.horizons_phil.integration.spot_prediction == "dials" \
             and self.horizons_phil.integration.enable_residual_map_deltapsi:
        from rstbx.apps.stills.util import residual_map_special_deltapsi_add_on
        residual_map_special_deltapsi_add_on(
          reflections = self.dials_spot_prediction,
          matches = indexed_pairs_provisional, experiments=experiments,
          hkllist = self.hkllist,
          predicted = self.predicted, plot=plt, eta_deg=FWMOSAICITY, deff=self.DOMAIN_SZ_ANG
          )
      plt.xlim([0,detector[0].get_image_size()[1]])
      plt.ylim([-detector[0].get_image_size()[0],0])
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"map")
      plt.close()

    indexed_pairs = indexed_pairs_provisional
    correction_vectors = correction_vectors_provisional
    ########### skip outlier rejection for this derived class

    ### However must retain the ability to write out correction vectors.
    if True: # at Aaron's request; test later
      correction_lengths = flex.double([v.length() for v in correction_vectors_provisional])
      clorder = flex.sort_permutation(correction_lengths)
      sorted_cl = correction_lengths.select(clorder)
      indexed_pairs = []
      correction_vectors = []
      self.correction_vectors = []
      for icand in xrange(len(sorted_cl)):
        # somewhat arbitrary sigma = 1.0 cutoff for outliers
        indexed_pairs.append(indexed_pairs_provisional[clorder[icand]])
        correction_vectors.append(correction_vectors_provisional[clorder[icand]])
        if cache_refinement_spots:
          self.spotfinder.images[self.frame_numbers[self.image_number]]["refinement_spots"].append(
          spots[reflections[indexed_pairs[-1]["spot"]]['spotfinder_lookup']])
        if kwargs.get("verbose_cv")==True:
            print "CV OBSCENTER %7.2f %7.2f REFINEDCENTER %7.2f %7.2f"%(
              float(self.inputpd["size1"])/2.,float(self.inputpd["size2"])/2.,
              self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1]),
            print "OBSSPOT %7.2f %7.2f PREDSPOT %7.2f %7.2f"%(
              reflections[indexed_pairs[-1]["spot"]]['xyzobs.px.value'][0],
              reflections[indexed_pairs[-1]["spot"]]['xyzobs.px.value'][1],
              self.predicted[indexed_pairs[-1]["pred"]][0]/pxlsz[0],
              self.predicted[indexed_pairs[-1]["pred"]][1]/pxlsz[1]),
            the_hkl = self.hkllist[indexed_pairs[-1]["pred"]]
            print "HKL %4d %4d %4d"%the_hkl,"%2d"%self.setting_id,
            radial, azimuthal = spots[indexed_pairs[-1]["spot"]].get_radial_and_azimuthal_size(
              self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1])
            print "RADIALpx %5.3f AZIMUTpx %5.3f"%(radial,azimuthal)

        # Store a list of correction vectors in self.
        radial, azimuthal = spots[indexed_pairs[-1]['spot']].get_radial_and_azimuthal_size(
          self.inputai.xbeam()/pxlsz[0], self.inputai.ybeam()/pxlsz[1])
        self.correction_vectors.append(
          dict(obscenter=(float(self.inputpd['size1']) / 2,
                          float(self.inputpd['size2']) / 2),
               refinedcenter=(self.inputai.xbeam() / pxlsz[0],
                              self.inputai.ybeam() / pxlsz[1]),
               obsspot=(reflections[indexed_pairs[-1]['spot']]['xyzobs.px.value'][0],
                        reflections[indexed_pairs[-1]['spot']]['xyzobs.px.value'][1]),
               predspot=(self.predicted[indexed_pairs[-1]['pred']][0] / pxlsz[0],
                         self.predicted[indexed_pairs[-1]['pred']][1] / pxlsz[1]),
               hkl=(self.hkllist[indexed_pairs[-1]['pred']][0],
                    self.hkllist[indexed_pairs[-1]['pred']][1],
                    self.hkllist[indexed_pairs[-1]['pred']][2]),
               setting_id=self.setting_id,
               radial=radial,
               azimuthal=azimuthal))


    self.inputpd["symmetry"] = c_symmetry
    self.inputpd["symmetry"].show_summary(prefix="SETTING ")


    if self.horizons_phil.integration.model == "user_supplied":
      # Not certain of whether the reentrant_* dictionary keys create a memory leak
      if kwargs.get("user-reentrant") is None:
        kwargs["reentrant_experiments"] = experiments
        kwargs["reentrant_reflections"] = reflections
        from cxi_user import post_outlier_rejection
        self.indexed_pairs = indexed_pairs
        self.spots = spots
        post_outlier_rejection(self,image_number,cb_op_to_primitive,self.horizons_phil,kwargs)
        return
    ########### finished with user-supplied code


    correction_lengths=flex.double([v.length() for v in correction_vectors])

    self.r_residual = pxlsz[0]*flex.mean(correction_lengths)

    #assert len(indexed_pairs)>NEAR # must have enough indexed spots
    if (len(indexed_pairs) <= NEAR):
      raise Sorry("Not enough indexed spots, only found %d, need %d" % (len(indexed_pairs), NEAR))

    reference = flex.double()
    for item in indexed_pairs:
      reference.append(spots[item["spot"]].ctr_mass_x())
      reference.append(spots[item["spot"]].ctr_mass_y())

    PS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    PS_adapt.query(query)

    self.BSmasks = []
    # do not use null: self.null_correction_mapping( predicted=self.predicted,
    self.positional_correction_mapping( predicted=self.predicted,
                                        correction_vectors = correction_vectors,
                                        PS_adapt = PS_adapt,
                                        IS_adapt = IS_adapt,
                                        spots = spots)

    # which spots are close enough to interfere with background?
    MAXOVER=6
    OS_adapt = AnnAdaptor(data=query,dim=2,k=MAXOVER) #six near nbrs
    OS_adapt.query(query)
    if self.mask_focus[image_number] is None:
      raise Sorry("No observed/predicted spot agreement; no Spotfinder masks; skip integration")
    nbr_cutoff = 2.0* max(self.mask_focus[image_number])
    FRAME = int(nbr_cutoff/2)
    #print "The overlap cutoff is %d pixels"%nbr_cutoff
    nbr_cutoff_sq = nbr_cutoff * nbr_cutoff

    #print "Optimized C++ section...",
    self.set_frame(FRAME)
    self.set_background_factor(kwargs["background_factor"])
    self.set_nbr_cutoff_sq(nbr_cutoff_sq)
    self.set_guard_width_sq(self.horizons_phil.integration.guard_width_sq)
    self.set_detector_gain(self.horizons_phil.integration.detector_gain)
    flex_sorted = flex.int()
    for item in self.sorted:
      flex_sorted.append(item[0])
      flex_sorted.append(item[1])

    if self.horizons_phil.integration.mask_pixel_value is not None:
      self.set_mask_pixel_val(self.horizons_phil.integration.mask_pixel_value)

    image_obj = self.imagefiles.imageindex(self.frame_numbers[self.image_number])
    image_obj.read()
    rawdata = image_obj.linearintdata # assume image #1

    if self.inputai.active_areas is not None:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted,
                          tiles=self.inputai.active_areas.IT,
                          tile_id=self.inputai.active_areas.tile_id)
    else:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted)
    for i in xrange(len(self.predicted)): # loop over predicteds
      B_S_mask = {}
      keys = self.get_bsmask(i)
      for k in xrange(0,len(keys),2):
        B_S_mask[(keys[k],keys[k+1])]=True
      self.BSmasks.append(B_S_mask)
    #print "Done"
    return
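
The AnnAdaptor calls above perform approximate nearest-neighbour matching between predicted and observed spot centroids, with x,y coordinates interleaved into flat flex.double arrays. The same matching idea, sketched with scipy's cKDTree as a stand-in (illustration only, synthetic data):

import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
observed = rng.uniform(0.0, 2000.0, (200, 2))        # spot centroids (pixels)
predicted = observed + rng.normal(0.0, 0.5, observed.shape)

tree = cKDTree(observed)
dists, idx = tree.query(predicted, k=1)              # nearest observed spot per prediction
rmsd_px = float(np.sqrt(np.mean(dists ** 2)))
print("... %d provisional matches, r.m.s.d. in pixels: %6.3f" % (len(idx), rmsd_px))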
Example #28
0
    def __call__(self):
        """Determine optimal mosaicity and domain size model (monochromatic)"""
        RR = self.refinery.predict_for_reflection_table(self.reflections)
        excursion_rad = RR["delpsical.rad"]
        delta_psi_deg = excursion_rad * 180. / math.pi
        print
        print flex.max(delta_psi_deg), flex.min(delta_psi_deg)
        mean_excursion = flex.mean(delta_psi_deg)
        print "The mean excursion is %7.3f degrees, r.m.s.d %7.3f" % (
            mean_excursion, math.sqrt(flex.mean(RR["delpsical2"])))

        crystal = self.experiments[0].crystal
        beam = self.experiments[0].beam
        miller_indices = self.reflections["miller_index"]

        # FIXME XXX revise this formula so as to use a different wavelength potentially for each reflection
        two_thetas = crystal.get_unit_cell().two_theta(miller_indices,
                                                       beam.get_wavelength(),
                                                       deg=True)
        dspacings = crystal.get_unit_cell().d(miller_indices)
        dspace_sq = dspacings * dspacings

        #  First -- try to get a reasonable envelope for the observed excursions.
        ## use at least 3 and at most 50 bins, aiming for roughly 25 measurements per bin
        print "fitting parameters on %d spots" % len(excursion_rad)
        n_bins = min(max(3, len(excursion_rad) // 25), 50)
        bin_sz = len(excursion_rad) // n_bins
        print "nbins", n_bins, "bin_sz", bin_sz
        order = flex.sort_permutation(two_thetas)
        two_thetas_env = flex.double()
        dspacings_env = flex.double()
        excursion_rads_env = flex.double()
        for x in xrange(0, n_bins):
            subset = order[x * bin_sz:(x + 1) * bin_sz]
            two_thetas_env.append(flex.mean(two_thetas.select(subset)))
            dspacings_env.append(flex.mean(dspacings.select(subset)))
            excursion_rads_env.append(
                flex.max(flex.abs(excursion_rad.select(subset))))

        #  Second -- parameter fit
        ## solve the normal equations
        sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
        sum_inv_u = flex.sum(dspacings_env)
        sum_te_u = flex.sum(dspacings_env * excursion_rads_env)
        sum_te = flex.sum(excursion_rads_env)
        Normal_Mat = sqr(
            (sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
        Vector = col((sum_te_u, sum_te))
        solution = Normal_Mat.inverse() * Vector
        s_ang = 1. / (2 * solution[0])
        print "Best LSQ fit Scherrer domain size is %9.2f ang" % (s_ang)

        tan_phi_rad = dspacings / (2. * s_ang)
        tan_phi_deg = tan_phi_rad * 180. / math.pi
        k_degrees = solution[1] * 180. / math.pi
        print "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f" % (
            2 * k_degrees, k_degrees)
        tan_outer_deg = tan_phi_deg + k_degrees

        from xfel.mono_simulation.max_like import minimizer
        # coerce the estimates to be positive for max-likelihood
        lower_limit_domain_size = math.pow(
            crystal.get_unit_cell().volume(),
            1. / 3.) * 3  # params.refinement.domain_size_lower_limit

        d_estimate = max(s_ang, lower_limit_domain_size)
        M = minimizer(d_i=dspacings,
                      psi_i=excursion_rad,
                      eta_rad=abs(2. * solution[1]),
                      Deff=d_estimate)
        print "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots" % (
            M.x[1] * 180. / math.pi, 2. / M.x[0], len(two_thetas))
        tan_phi_rad_ML = dspacings / (2. / M.x[0])
        tan_phi_deg_ML = tan_phi_rad_ML * 180. / math.pi
        tan_outer_deg_ML = tan_phi_deg_ML + 0.5 * M.x[1] * 180. / math.pi

        self.nv_acceptance_flags = flex.abs(delta_psi_deg) < tan_outer_deg_ML

        if self.graph_verbose:  #params.refinement.mosaic.enable_AD14F7B: # Excursion vs resolution fit
            AD1TF7B_MAX2T = 30.
            AD1TF7B_MAXDP = 1.
            from matplotlib import pyplot as plt
            plt.plot(two_thetas, delta_psi_deg, "bo")
            minplot = flex.min(two_thetas)
            plt.plot([0, minplot], [mean_excursion, mean_excursion], "k-")
            LR = flex.linear_regression(two_thetas, delta_psi_deg)
            model_y = LR.slope() * two_thetas + LR.y_intercept()
            plt.plot(two_thetas, model_y, "k-")

            plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots" %
                      (M.x[1] * 180. / math.pi, 2. / M.x[0], len(two_thetas)))
            plt.plot(two_thetas, tan_phi_deg_ML, "r.")
            plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
            plt.plot(two_thetas, tan_outer_deg_ML, "g.")
            plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
            plt.xlim([0, AD1TF7B_MAX2T])
            plt.ylim([-AD1TF7B_MAXDP, AD1TF7B_MAXDP])
            plt.show()
            plt.close()

        from xfel.mono_simulation.util import green_curve_area
        self.green_curve_area = green_curve_area(two_thetas, tan_outer_deg_ML)
        print "The green curve area is ", self.green_curve_area

        crystal._ML_half_mosaicity_deg = M.x[1] * 180. / (2. * math.pi)
        crystal._ML_domain_size_ang = 2. / M.x[0]
        self._ML_full_mosaicity_rad = M.x[1]
        self._ML_domain_size_ang = 2. / M.x[0]

        #params.refinement.mosaic.model_expansion_factor
        """The expansion factor should be initially set to 1, then expanded so that the # reflections matched becomes
    as close as possible to # of observed reflections input, in the last integration call.  Determine this by
    inspecting the output log file interactively.  Do not exceed the bare minimum threshold needed.
    The intention is to find an optimal value, global for a given dataset."""
        model_expansion_factor = 1.4
        crystal._ML_half_mosaicity_deg *= model_expansion_factor
        crystal._ML_domain_size_ang /= model_expansion_factor

        return crystal
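
The 2x2 normal-equations solve above is an ordinary least-squares line fit of the excursion envelope against d-spacing, psi ~ slope*d + k, with the Scherrer-style domain size recovered as 1/(2*slope) and the half-mosaicity as k. An equivalent numpy sketch on synthetic data:

import numpy as np

d = np.linspace(1.5, 10.0, 40)                       # d-spacings (Angstrom)
true_deff, true_k = 500.0, np.radians(0.05)
psi_env = d / (2.0 * true_deff) + true_k             # excursion envelope (radians)

slope, k = np.polyfit(d, psi_env, 1)                 # same fit, without the hand-rolled 2x2 solve
print("domain size %.1f A, half-mosaicity %.5f deg"
      % (1.0 / (2.0 * slope), np.degrees(k)))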
Example #29
0
    def abs_bounding_lines_in_mm(self, detector):
        """Return bounding lines of kapton"""
        # first get bounding directions from detector:
        detz = flex.mean(flex.double([panel.get_origin()[2] for panel in detector]))
        edges = []
        for ii, panel in enumerate(detector):
            f_size, s_size = panel.get_image_size()
            for point in [(0, 0), (0, s_size), (f_size, 0), (f_size, s_size)]:
                x, y = panel.get_pixel_lab_coord(point)[0:2]
                edges.append((x, y, detz))
        # Use the idea that the corners of the detector are the end points of the
        # diagonals, i.e. the two largest pairwise distances among all end points
        dlist = flex.double()
        dlist_idx = []
        n_edges = len(edges)
        for ii in range(n_edges - 1):
            for jj in range(ii + 1, n_edges):
                pt_1 = col(edges[ii])
                pt_2 = col(edges[jj])
                distance = (pt_1 - pt_2).length()
                dlist.append(distance)
                dlist_idx.append((ii, jj))
        sorted_idx = flex.sort_permutation(dlist, reverse=True)

        edge_pts = [
            edges[dlist_idx[sorted_idx[0]][0]],
            edges[dlist_idx[sorted_idx[1]][0]],
            edges[dlist_idx[sorted_idx[0]][1]],
            edges[dlist_idx[sorted_idx[1]][1]],
        ]

        self.detector_edges = edge_pts
        # Now get the maximum extent of the intersection of the rays with the detector
        all_ints = []
        kapton_path_list = []

        for ii, edge_point in enumerate(self.edge_points):
            s1 = edge_point.normalize()
            kapton_path_mm = self.get_kapton_path_mm(s1)
            for panel in detector:
                try:
                    x_int, y_int = panel.get_lab_coord(panel.get_ray_intersection(s1))[
                        0:2
                    ]
                except RuntimeError:
                    continue  # ray misses this panel; x_int/y_int would be stale or undefined
                int_point = (x_int, y_int, detz)
                # Arbitrary tolerance of a couple of pixels; otherwise these points get clustered together
                tolerance = min(panel.get_pixel_size()) * 2.0
                if (
                    sum(
                        (col(trial_pt) - col(int_point)).length() <= tolerance
                        for trial_pt in all_ints
                    )
                    == 0
                ):
                    all_ints.append(int_point)
                kapton_path_list.append(kapton_path_mm)
        # Use the idea that the extreme intersection points are the end points of the
        # diagonals, i.e. the two largest pairwise distances among all intersections
        dlist = flex.double()
        dlist_idx = []
        n_edges = len(all_ints)
        for ii in range(n_edges - 1):
            pt_1 = col(all_ints[ii])
            for jj in range(ii + 1, n_edges):
                pt_2 = col(all_ints[jj])
                distance = (pt_1 - pt_2).length()
                dlist.append(distance)
                dlist_idx.append((ii, jj))
        sorted_idx = flex.sort_permutation(dlist, reverse=True)

        int_edge_pts = [
            all_ints[dlist_idx[sorted_idx[0]][0]],
            all_ints[dlist_idx[sorted_idx[1]][0]],
            all_ints[dlist_idx[sorted_idx[0]][1]],
            all_ints[dlist_idx[sorted_idx[1]][1]],
        ]

        # Sort out the edge points and the int_edge_points which are on the same side
        kapton_edge_1 = (col(int_edge_pts[0]) - col(int_edge_pts[1])).normalize()
        kapton_edge_2 = (col(int_edge_pts[2]) - col(int_edge_pts[3])).normalize()
        min_loss_func = -999.9
        edge_idx = None
        for edge_idx_combo in [(0, 1, 2, 3), (0, 3, 1, 2)]:
            side_1 = (
                col(edge_pts[edge_idx_combo[0]]) - col(edge_pts[edge_idx_combo[1]])
            ).normalize()
            side_2 = (
                col(edge_pts[edge_idx_combo[2]]) - col(edge_pts[edge_idx_combo[3]])
            ).normalize()
            loss_func = abs(kapton_edge_1.dot(side_1)) + abs(kapton_edge_2.dot(side_2))
            if loss_func > min_loss_func:
                edge_idx = edge_idx_combo
                min_loss_func = loss_func
        # Make sure the edges of the detector and the kapton are in the same orientation
        # first for kapton edge 1
        side_1 = (col(edge_pts[edge_idx[0]]) - col(edge_pts[edge_idx[1]])).normalize()
        side_2 = (col(edge_pts[edge_idx[2]]) - col(edge_pts[edge_idx[3]])).normalize()
        v1 = kapton_edge_1.dot(side_1)
        v2 = kapton_edge_2.dot(side_2)
        if v1 < 0.0:
            edge_idx = (edge_idx[1], edge_idx[0], edge_idx[2], edge_idx[3])
        if v2 < 0.0:
            edge_idx = (edge_idx[0], edge_idx[1], edge_idx[3], edge_idx[2])

        # Now make sure the edges and the kapton lines are on the right side (i.e. not swapped).
        # Let's look at edge_idx[0:2] i,e the first edge of detector parallel to the kapton
        pt1 = edge_pts[edge_idx[0]]
        pt2 = edge_pts[edge_idx[1]]
        # Now find the distance between each of these points and the kapton lines.
        d1_kapton_1 = self.distance_of_point_from_line(
            pt1, int_edge_pts[0], int_edge_pts[1]
        )
        d1_kapton_2 = self.distance_of_point_from_line(
            pt1, int_edge_pts[2], int_edge_pts[3]
        )
        d2_kapton_1 = self.distance_of_point_from_line(
            pt2, int_edge_pts[0], int_edge_pts[1]
        )
        d2_kapton_2 = self.distance_of_point_from_line(
            pt2, int_edge_pts[2], int_edge_pts[3]
        )
        if d1_kapton_1 < d1_kapton_2:  # closer to max than edge
            assert (
                d2_kapton_1 < d2_kapton_2
            ), "Distance mismatch. Edge of detector might be on wrong side of kapton tape ... please check"
            pair_values = [
                (
                    edge_pts[edge_idx[0]],
                    edge_pts[edge_idx[1]],
                    edge_pts[edge_idx[2]],
                    edge_pts[edge_idx[3]],
                ),
                (int_edge_pts[0], int_edge_pts[1], int_edge_pts[2], int_edge_pts[3]),
            ]
        else:
            pair_values = [
                (
                    edge_pts[edge_idx[0]],
                    edge_pts[edge_idx[1]],
                    edge_pts[edge_idx[2]],
                    edge_pts[edge_idx[3]],
                ),
                (int_edge_pts[2], int_edge_pts[3], int_edge_pts[0], int_edge_pts[1]),
            ]

        return pair_values
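
The corner-finding trick used twice above (for the panel corners and again for the ray intersections) takes the two largest pairwise distances and treats their endpoints as the corners of the two diagonals. A compact numpy restatement (illustration only):

import numpy as np
from itertools import combinations

pts = np.array([(0, 0), (0, 100), (100, 0), (100, 100), (50, 50)], float)
pairs = list(combinations(range(len(pts)), 2))
lengths = [np.linalg.norm(pts[i] - pts[j]) for i, j in pairs]
(i1, j1), (i2, j2) = (pairs[k] for k in np.argsort(lengths)[::-1][:2])
corners = [pts[i1], pts[i2], pts[j1], pts[j2]]       # the four extreme corners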
Example #30
0
    def __call__(self, index):
        """
        Extract strong pixels from an image

        :param index: The index of the image
        """
        # Get the frame number
        if isinstance(self.imageset, ImageSequence):
            frame = self.imageset.get_array_range()[0] + index
        else:
            ind = self.imageset.indices()
            if len(ind) > 1:
                assert all(i1 + 1 == i2
                           for i1, i2 in zip(ind[:-1], ind[1:]))
            frame = ind[index]

        # Create the list of pixel lists
        pixel_list = []

        # Get the image and mask
        image = self.imageset.get_corrected_data(index)
        mask = self.imageset.get_mask(index)

        # Set the mask
        if self.mask is not None:
            assert len(self.mask) == len(mask)
            mask = tuple(m1 & m2 for m1, m2 in zip(mask, self.mask))

        logger.debug(
            "Number of masked pixels for image %i: %i",
            index,
            sum(m.count(False) for m in mask),
        )

        # Add the images to the pixel lists
        num_strong = 0
        average_background = 0
        for im, mk in zip(image, mask):
            if self.region_of_interest is not None:
                x0, x1, y0, y1 = self.region_of_interest
                height, width = im.all()
                assert x0 < x1, "x0 < x1"
                assert y0 < y1, "y0 < y1"
                assert x0 >= 0, "x0 >= 0"
                assert y0 >= 0, "y0 >= 0"
                assert x1 <= width, "x1 <= width"
                assert y1 <= height, "y1 <= height"
                im_roi = im[y0:y1, x0:x1]
                mk_roi = mk[y0:y1, x0:x1]
                tm_roi = self.threshold_function.compute_threshold(
                    im_roi, mk_roi)
                threshold_mask = flex.bool(im.accessor(), False)
                threshold_mask[y0:y1, x0:x1] = tm_roi
            else:
                threshold_mask = self.threshold_function.compute_threshold(
                    im, mk)

            # Add the pixel list
            plist = PixelList(frame, im, threshold_mask)
            pixel_list.append(plist)

            # Get average background
            if self.compute_mean_background:
                background = im.as_1d().select((mk & ~threshold_mask).as_1d())
                average_background += flex.mean(background)

            # Add to the spot count
            num_strong += len(plist)

        # Make average background
        average_background /= len(image)

        # Check total number of strong pixels
        if self.max_strong_pixel_fraction < 1:
            num_image = 0
            for im in image:
                num_image += len(im)
            max_strong = int(
                math.ceil(self.max_strong_pixel_fraction * num_image))
            if num_strong > max_strong:
                raise RuntimeError(f"""
          The number of strong pixels found ({num_strong}) is greater than the
          maximum allowed ({max_strong}). Try changing spot finding parameters
        """)

        # Print some info
        if self.compute_mean_background:
            logger.info(
                "Found %d strong pixels on image %d with average background %f",
                num_strong,
                frame + 1,
                average_background,
            )
        else:
            logger.info("Found %d strong pixels on image %d", num_strong,
                        frame + 1)

        # Return the result
        return pixel_list
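
The region-of-interest branch above computes a threshold only inside the ROI and then pastes the result into a full-size mask that is False elsewhere. A numpy stand-in for that pattern, with a toy mean-plus-n-sigma threshold in place of the DIALS threshold_function (names here are hypothetical):

import numpy as np

def threshold_roi(image, roi, nsigma=3.0):
    # roi follows the (x0, x1, y0, y1) convention used above.
    x0, x1, y0, y1 = roi
    block = image[y0:y1, x0:x1]
    local = block > block.mean() + nsigma * block.std()  # toy threshold
    full = np.zeros(image.shape, dtype=bool)             # False outside the ROI
    full[y0:y1, x0:x1] = local
    return full

rng = np.random.default_rng(0)
img = rng.poisson(10.0, (100, 120)).astype(float)
img[40, 60] = 500.0                                      # one "strong" pixel inside the ROI
mask = threshold_roi(img, (50, 80, 30, 50))
print("strong pixels in ROI:", int(mask.sum()))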
Example #31
0
  def plot_one_model(self,nrow,out):
    fig = plt.subplot(self.gs[nrow*self.ncols])
    two_thetas = self.reduction.get_two_theta_deg()
    degrees = self.reduction.get_delta_psi_deg()

    if self.color_encoding=="conventional":
          positive = (self.reduction.i_sigi>=0.)
          fig.plot(two_thetas.select(positive), degrees.select(positive), "bo")
          fig.plot(two_thetas.select(~positive), degrees.select(~positive), "r+")
    elif self.color_encoding=="I/sigma":
          positive = (self.reduction.i_sigi>=0.)
          tt_selected = two_thetas.select(positive)
          dp_selected = degrees.select(positive)
          i_sigi_select = self.reduction.i_sigi.select(positive)
          order = flex.sort_permutation(i_sigi_select)
          tt_selected = tt_selected.select(order)
          dp_selected = dp_selected.select(order)
          i_sigi_selected = i_sigi_select.select(order)
          from matplotlib.colors import Normalize
          dnorm = Normalize()
          dcolors = i_sigi_selected.as_numpy_array()
          dnorm.autoscale(dcolors)
          N = len(dcolors)
          CMAP = plt.get_cmap("rainbow")
          if self.refined.get("partiality_array",None) is None:
            for n in xrange(N):
              fig.plot([tt_selected[n]],[dp_selected[n]],
              color=CMAP(dnorm(dcolors[n])),marker=".", markersize=10)
          else:
            partials = self.refined.get("partiality_array")
            partials_select = partials.select(positive)
            partials_selected = partials_select.select(order)
            assert len(partials)==len(positive)
            for n in xrange(N):
              fig.plot([tt_selected[n]],[dp_selected[n]],
              color=CMAP(dnorm(dcolors[n])),marker=".", markersize=20*partials_selected[n])
              # change the markersize to indicate partiality.
          negative = (self.reduction.i_sigi<0.)
          fig.plot(two_thetas.select(negative), degrees.select(negative), "r+", linewidth=1)
    else:
          strong = (self.reduction.i_sigi>=10.)
          positive = ((~strong) & (self.reduction.i_sigi>=0.))
          negative = (self.reduction.i_sigi<0.)
          assert (strong.count(True)+positive.count(True)+negative.count(True) ==
                  len(self.reduction.i_sigi))
          fig.plot(two_thetas.select(positive), degrees.select(positive), "bo")
          fig.plot(two_thetas.select(strong), degrees.select(strong), marker='.',linestyle='None',
           markerfacecolor='#00ee00', markersize=10)
          fig.plot(two_thetas.select(negative), degrees.select(negative), "r+")

    # indicate the imposed resolution filter
    wavelength = self.reduction.experiment.beam.get_wavelength()
    imposed_res_filter = self.reduction.get_imposed_res_filter(out)
    resolution_markers = [
      a for a in [imposed_res_filter,self.reduction.measurements.d_min()] if a is not None]
    for RM in resolution_markers:
          two_th = (180./math.pi)*2.*math.asin(wavelength/(2.*RM))
          plt.plot([two_th, two_th],[self.AD1TF7B_MAXDP*-0.8,self.AD1TF7B_MAXDP*0.8],'k-')
          plt.text(two_th,self.AD1TF7B_MAXDP*-0.9,"%4.2f"%RM)

    #indicate the linefit
    mean = flex.mean(degrees)
    minplot = flex.min(two_thetas)
    plt.plot([0,minplot],[mean,mean],"k-")
    LR = flex.linear_regression(two_thetas, degrees)
    model_y = LR.slope()*two_thetas + LR.y_intercept()
    plt.plot(two_thetas, model_y, "k-")

    #Now let's take care of the red and green lines.
    half_mosaic_rotation_deg = self.refined["half_mosaic_rotation_deg"]
    mosaic_domain_size_ang = self.refined["mosaic_domain_size_ang"]
    red_curve_domain_size_ang = self.refined.get("red_curve_domain_size_ang",mosaic_domain_size_ang)
    a_step = self.AD1TF7B_MAX2T / 50.
    a_range = flex.double([a_step*x for x in xrange(1,50)]) # domain two-theta array
    #Bragg law [d=L/2sinTH]
    d_spacing = (wavelength/(2.*flex.sin(math.pi*a_range/360.)))
    # convert two_theta to a delta-psi.  Formula for Deffective [Dpsi=d/2Deff]
    inner_phi_deg = flex.asin((d_spacing / (2.*red_curve_domain_size_ang)) )*(180./math.pi)
    outer_phi_deg = flex.asin((d_spacing / (2.*mosaic_domain_size_ang)) + \
      half_mosaic_rotation_deg*math.pi/180. )*(180./math.pi)
    plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots\n%s"%(
          2.*half_mosaic_rotation_deg, mosaic_domain_size_ang, len(two_thetas),
          os.path.basename(self.reduction.filename)))
    plt.plot(a_range, inner_phi_deg, "r-")
    plt.plot(a_range,-inner_phi_deg, "r-")
    plt.plot(a_range, outer_phi_deg, "g-")
    plt.plot(a_range, -outer_phi_deg, "g-")
    plt.xlim([0,self.AD1TF7B_MAX2T])
    plt.ylim([-self.AD1TF7B_MAXDP,self.AD1TF7B_MAXDP])

    #second plot shows histogram
    fig = plt.subplot(self.gs[1+nrow*self.ncols])
    plt.xlim([-self.AD1TF7B_MAXDP,self.AD1TF7B_MAXDP])
    nbins = 50
    n,bins,patches = plt.hist(dp_selected, nbins,
           range=(-self.AD1TF7B_MAXDP,self.AD1TF7B_MAXDP),
           weights=self.reduction.i_sigi.select(positive),
           normed=0, facecolor="orange", alpha=0.75)
    #ersatz: take the 90th-percentile i_sigi as the cutoff (despite the name isi_median):
    isi_positive = self.reduction.i_sigi.select(positive)
    isi_order = flex.sort_permutation(isi_positive)
    reordered = isi_positive.select(isi_order)
    isi_median = reordered[int(len(isi_positive)*0.9)]
    isi_top_half_selection = (isi_positive>isi_median)
    n,bins,patches = plt.hist(dp_selected.select(isi_top_half_selection), nbins,
           range=(-self.AD1TF7B_MAXDP,self.AD1TF7B_MAXDP),
           weights=isi_positive.select(isi_top_half_selection),
           normed=0, facecolor="#ff0000", alpha=0.75)
    plt.xlabel("(degrees)")
    plt.title("Weighted histogram of Delta-psi")
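
The red and green curves drawn above come from Bragg's law plus the effective-domain-size relation: d = lambda / (2 sin theta), and delta_psi = asin(d / (2 Deff)), with the half-mosaicity added inside the asin for the outer (green) envelope. A numpy sketch of the same curves:

import numpy as np

wavelength, deff, half_mos_deg = 1.3, 500.0, 0.05
two_theta = np.linspace(0.5, 30.0, 50)                   # degrees
d = wavelength / (2.0 * np.sin(np.radians(two_theta / 2.0)))   # Bragg's law
inner = np.degrees(np.arcsin(d / (2.0 * deff)))                # red curves
outer = np.degrees(np.arcsin(d / (2.0 * deff) + np.radians(half_mos_deg)))  # green curves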
Example #32
0
    def integration_concept_detail(self, experiments, reflections, spots,
                                   image_number, cb_op_to_primitive, **kwargs):
        detector = experiments[0].detector
        crystal = experiments[0].crystal
        from cctbx.crystal import symmetry
        c_symmetry = symmetry(space_group=crystal.get_space_group(),
                              unit_cell=crystal.get_unit_cell())

        self.image_number = image_number
        NEAR = 10
        pxlsz = detector[0].get_pixel_size()

        Predicted = self.get_predictions_accounting_for_centering(
            experiments, reflections, cb_op_to_primitive, **kwargs)

        FWMOSAICITY = self.inputai.getMosaicity()
        self.DOMAIN_SZ_ANG = kwargs.get("domain_size_ang",
                                        self.__dict__.get("actual", 0))
        refineflag = {True: 0, False: 1}[kwargs.get("domain_size_ang", 0) == 0]
        c_symmetry.show_summary(
            prefix="EXCURSION%1d REPORT FWMOS= %6.4f DOMAIN= %6.1f " %
            (refineflag, FWMOSAICITY, self.DOMAIN_SZ_ANG))
        from annlib_ext import AnnAdaptor
        self.cell = c_symmetry.unit_cell()

        query = flex.double()
        print len(self.predicted)

        for pred in self.predicted:  # predicted spot coord in pixels
            query.append(pred[0] / pxlsz[0])
            query.append(pred[1] / pxlsz[1])

        self.reserve_hkllist_for_signal_search = self.hkllist

        reference = flex.double()

        assert self.length > NEAR  # Can't do spot/pred matching with too few spots
        for spot in spots:
            reference.append(spot.ctr_mass_x())
            reference.append(spot.ctr_mass_y())

        IS_adapt = AnnAdaptor(data=reference, dim=2, k=NEAR)
        IS_adapt.query(query)
        idx_cutoff = float(min(self.mask_focus[image_number]))

        from rstbx.apps.slip_helpers import slip_callbacks
        cache_refinement_spots = getattr(slip_callbacks.slip_callback,
                                         "requires_refinement_spots", False)

        indexed_pairs_provisional = []
        correction_vectors_provisional = []
        c_v_p_flex = flex.vec3_double()
        this_setting_matched_indices = reflections["miller_index"]
        for j, item in enumerate(this_setting_matched_indices):
            this_setting_index = self.hkllist.first_index(item)
            if this_setting_index is not None:  # a bare truth test would wrongly skip a match at index 0
                Match = dict(spot=j, pred=this_setting_index)
                indexed_pairs_provisional.append(Match)
                vector = matrix.col([
                    reflections["xyzobs.px.value"][j][0] -
                    self.predicted[Match["pred"]][0] / pxlsz[0],
                    reflections["xyzobs.px.value"][j][1] -
                    self.predicted[Match["pred"]][1] / pxlsz[1]
                ])
                correction_vectors_provisional.append(vector)
                c_v_p_flex.append((vector[0], vector[1], 0.))
        self.N_correction_vectors = len(correction_vectors_provisional)
        self.rmsd_px = math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))
        print "... %d provisional matches" % self.N_correction_vectors,
        print "r.m.s.d. in pixels: %6.3f" % (self.rmsd_px)

        if self.horizons_phil.integration.enable_residual_scatter:
            from matplotlib import pyplot as plt
            fig = plt.figure()
            for cv in correction_vectors_provisional:
                plt.plot([cv[1]], [-cv[0]], "r.")
            plt.title(" %d matches, r.m.s.d. %5.2f pixels" %
                      (len(correction_vectors_provisional),
                       math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
            plt.axes().set_aspect("equal")
            self.show_figure(plt, fig, "res")
            plt.close()

        if self.horizons_phil.integration.enable_residual_map:
            from matplotlib import pyplot as plt
            PX = reflections["xyzobs.px.value"]
            fig = plt.figure()
            for match, cv in zip(indexed_pairs_provisional,
                                 correction_vectors_provisional):
                plt.plot([PX[match["spot"]][1]], [-PX[match["spot"]][0]], "r.")
                plt.plot([self.predicted[match["pred"]][1] / pxlsz[1]],
                         [-self.predicted[match["pred"]][0] / pxlsz[0]], "g.")
                plt.plot(
                    [PX[match["spot"]][1], PX[match["spot"]][1] + 10. * cv[1]],
                    [
                        -PX[match["spot"]][0],
                        -PX[match["spot"]][0] - 10. * cv[0]
                    ], 'r-')
            if kwargs.get("user-reentrant") is not None and self.horizons_phil.integration.spot_prediction == "dials" \
                   and self.horizons_phil.integration.enable_residual_map_deltapsi:
                from rstbx.apps.stills.util import residual_map_special_deltapsi_add_on
                residual_map_special_deltapsi_add_on(
                    reflections=self.dials_spot_prediction,
                    matches=indexed_pairs_provisional,
                    experiments=experiments,
                    hkllist=self.hkllist,
                    predicted=self.predicted,
                    plot=plt,
                    eta_deg=FWMOSAICITY,
                    deff=self.DOMAIN_SZ_ANG)
            plt.xlim([0, detector[0].get_image_size()[1]])
            plt.ylim([-detector[0].get_image_size()[0], 0])
            plt.title(" %d matches, r.m.s.d. %5.2f pixels" %
                      (len(correction_vectors_provisional),
                       math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
            plt.axes().set_aspect("equal")
            self.show_figure(plt, fig, "map")
            plt.close()

        indexed_pairs = indexed_pairs_provisional
        correction_vectors = correction_vectors_provisional
        ########### skip outlier rejection for this derived class

        ### However must retain the ability to write out correction vectors.
        if True:  # at Aaron's request; test later
            correction_lengths = flex.double(
                [v.length() for v in correction_vectors_provisional])
            clorder = flex.sort_permutation(correction_lengths)
            sorted_cl = correction_lengths.select(clorder)
            indexed_pairs = []
            correction_vectors = []
            self.correction_vectors = []
            for icand in xrange(len(sorted_cl)):
                # somewhat arbitrary sigma = 1.0 cutoff for outliers
                indexed_pairs.append(indexed_pairs_provisional[clorder[icand]])
                correction_vectors.append(
                    correction_vectors_provisional[clorder[icand]])
                if cache_refinement_spots:
                    self.spotfinder.images[self.frame_numbers[
                        self.image_number]]["refinement_spots"].append(
                            spots[reflections[indexed_pairs[-1]["spot"]]
                                  ['spotfinder_lookup']])
                if kwargs.get("verbose_cv") == True:
                    print "CV OBSCENTER %7.2f %7.2f REFINEDCENTER %7.2f %7.2f" % (
                        float(self.inputpd["size1"]) / 2.,
                        float(self.inputpd["size2"]) / 2.,
                        self.inputai.xbeam() / pxlsz[0],
                        self.inputai.ybeam() / pxlsz[1]),
                    print "OBSSPOT %7.2f %7.2f PREDSPOT %7.2f %7.2f" % (
                        reflections[indexed_pairs[-1]["spot"]]
                        ['xyzobs.px.value'][0], reflections[
                            indexed_pairs[-1]["spot"]]['xyzobs.px.value'][1],
                        self.predicted[indexed_pairs[-1]["pred"]][0] /
                        pxlsz[0], self.predicted[indexed_pairs[-1]["pred"]][1]
                        / pxlsz[1]),
                    the_hkl = self.hkllist[indexed_pairs[-1]["pred"]]
                    print "HKL %4d %4d %4d" % the_hkl, "%2d" % self.setting_id,
                    radial, azimuthal = spots[indexed_pairs[-1][
                        "spot"]].get_radial_and_azimuthal_size(
                            self.inputai.xbeam() / pxlsz[0],
                            self.inputai.ybeam() / pxlsz[1])
                    print "RADIALpx %5.3f AZIMUTpx %5.3f" % (radial, azimuthal)

                # Store a list of correction vectors in self.
                radial, azimuthal = spots[
                    indexed_pairs[-1]['spot']].get_radial_and_azimuthal_size(
                        self.inputai.xbeam() / pxlsz[0],
                        self.inputai.ybeam() / pxlsz[1])
                self.correction_vectors.append(
                    dict(obscenter=(float(self.inputpd['size1']) / 2,
                                    float(self.inputpd['size2']) / 2),
                         refinedcenter=(self.inputai.xbeam() / pxlsz[0],
                                        self.inputai.ybeam() / pxlsz[1]),
                         obsspot=(reflections[indexed_pairs[-1]
                                              ['spot']]['xyzobs.px.value'][0],
                                  reflections[indexed_pairs[-1]
                                              ['spot']]['xyzobs.px.value'][1]),
                         predspot=(
                             self.predicted[indexed_pairs[-1]['pred']][0] /
                             pxlsz[0],
                             self.predicted[indexed_pairs[-1]['pred']][1] /
                             pxlsz[1]),
                         hkl=(self.hkllist[indexed_pairs[-1]['pred']][0],
                              self.hkllist[indexed_pairs[-1]['pred']][1],
                              self.hkllist[indexed_pairs[-1]['pred']][2]),
                         setting_id=self.setting_id,
                         radial=radial,
                         azimuthal=azimuthal))

        self.inputpd["symmetry"] = c_symmetry
        self.inputpd["symmetry"].show_summary(prefix="SETTING ")

        if self.horizons_phil.integration.model == "user_supplied":
            # Not certain of whether the reentrant_* dictionary keys create a memory leak
            if kwargs.get("user-reentrant") is None:
                kwargs["reentrant_experiments"] = experiments
                kwargs["reentrant_reflections"] = reflections
                from cxi_user import post_outlier_rejection
                self.indexed_pairs = indexed_pairs
                self.spots = spots
                post_outlier_rejection(self, image_number, cb_op_to_primitive,
                                       self.horizons_phil, kwargs)
                return
        ########### finished with user-supplied code

        correction_lengths = flex.double(
            [v.length() for v in correction_vectors])

        self.r_residual = pxlsz[0] * flex.mean(correction_lengths)

        #assert len(indexed_pairs)>NEAR # must have enough indexed spots
        if (len(indexed_pairs) <= NEAR):
            raise Sorry("Not enough indexed spots, only found %d, need %d" %
                        (len(indexed_pairs), NEAR))

        reference = flex.double()
        for item in indexed_pairs:
            reference.append(spots[item["spot"]].ctr_mass_x())
            reference.append(spots[item["spot"]].ctr_mass_y())

        PS_adapt = AnnAdaptor(data=reference, dim=2, k=NEAR)
        PS_adapt.query(query)

        self.BSmasks = []
        # do not use null: self.null_correction_mapping( predicted=self.predicted,
        self.positional_correction_mapping(
            predicted=self.predicted,
            correction_vectors=correction_vectors,
            PS_adapt=PS_adapt,
            IS_adapt=IS_adapt,
            spots=spots)

        # which spots are close enough to interfere with background?
        MAXOVER = 6
        OS_adapt = AnnAdaptor(data=query, dim=2, k=MAXOVER)  #six near nbrs
        OS_adapt.query(query)
        if self.mask_focus[image_number] is None:
            raise Sorry(
                "No observed/predicted spot agreement; no Spotfinder masks; skip integration"
            )
        nbr_cutoff = 2.0 * max(self.mask_focus[image_number])
        FRAME = int(nbr_cutoff / 2)
        #print "The overlap cutoff is %d pixels"%nbr_cutoff
        nbr_cutoff_sq = nbr_cutoff * nbr_cutoff

        #print "Optimized C++ section...",
        self.set_frame(FRAME)
        self.set_background_factor(kwargs["background_factor"])
        self.set_nbr_cutoff_sq(nbr_cutoff_sq)
        self.set_guard_width_sq(self.horizons_phil.integration.guard_width_sq)
        self.set_detector_gain(self.horizons_phil.integration.detector_gain)
        flex_sorted = flex.int()
        for item in self.sorted:
            flex_sorted.append(item[0])
            flex_sorted.append(item[1])

        if self.horizons_phil.integration.mask_pixel_value is not None:
            self.set_mask_pixel_val(
                self.horizons_phil.integration.mask_pixel_value)

        image_obj = self.imagefiles.imageindex(
            self.frame_numbers[self.image_number])
        image_obj.read()
        rawdata = image_obj.linearintdata  # assume image #1

        if self.inputai.active_areas is not None:
            self.detector_xy_draft = self.safe_background(
                rawdata=rawdata,
                predicted=self.predicted,
                OS_adapt=OS_adapt,
                sorted=flex_sorted,
                tiles=self.inputai.active_areas.IT,
                tile_id=self.inputai.active_areas.tile_id)
        else:
            self.detector_xy_draft = self.safe_background(
                rawdata=rawdata,
                predicted=self.predicted,
                OS_adapt=OS_adapt,
                sorted=flex_sorted)
        for i in xrange(len(self.predicted)):  # loop over predicteds
            B_S_mask = {}
            keys = self.get_bsmask(i)
            for k in xrange(0, len(keys), 2):
                B_S_mask[(keys[k], keys[k + 1])] = True
            self.BSmasks.append(B_S_mask)
        #print "Done"
        return
Example #33
0
File: finder.py Project: dials/dials
  def __call__(self, index):
    '''
    Extract strong pixels from an image

    :param index: The index of the image

    '''
    from dials.model.data import PixelList
    from dxtbx.imageset import ImageSweep
    from dials.array_family import flex
    from math import ceil

    # Parallel reading of HDF5 from the same handle is not allowed. Python
    # multiprocessing uses fork on Linux, so the file must be closed and
    # reopened in the worker process.
    if self.first:
      from dxtbx.imageset import SingleFileReader
      if isinstance(self.imageset.reader(), SingleFileReader):
        self.imageset.reader().nullify_format_instance()
      self.first = False

    # Get the frame number
    if isinstance(self.imageset, ImageSweep):
      frame = self.imageset.get_array_range()[0] + index
    else:
      ind = self.imageset.indices()
      if len(ind) > 1:
        assert(all(i1+1 == i2 for i1, i2 in zip(ind[0:-1], ind[1:])))
      frame = ind[index]

    # Create the list of pixel lists
    pixel_list = []

    # Get the image and mask
    image = self.imageset.get_corrected_data(index)
    mask = self.imageset.get_mask(index)

    # Set the mask
    if self.mask is not None:
      assert(len(self.mask) == len(mask))
      mask = tuple(m1 & m2 for m1, m2 in zip(mask, self.mask))

    logger.debug("Number of masked pixels for image %i: %i" %
                 (index, sum(m.count(False) for m in mask)))

    # Add the images to the pixel lists
    num_strong = 0
    average_background = 0
    for im, mk in zip(image, mask):
      if self.region_of_interest is not None:
        x0, x1, y0, y1 = self.region_of_interest
        height, width = im.all()
        assert x0 < x1, "x0 < x1"
        assert y0 < y1, "y0 < y1"
        assert x0 >= 0, "x0 >= 0"
        assert y0 >= 0, "y0 >= 0"
        assert x1 <= width, "x1 <= width"
        assert y1 <= height, "y1 <= height"
        im_roi = im[y0:y1,x0:x1]
        mk_roi = mk[y0:y1,x0:x1]
        tm_roi = self.threshold_function.compute_threshold(im_roi, mk_roi)
        threshold_mask = flex.bool(im.accessor(),False)
        threshold_mask[y0:y1,x0:x1] = tm_roi
      else:
        threshold_mask = self.threshold_function.compute_threshold(im, mk)

      # Add the pixel list
      plist = PixelList(frame, im, threshold_mask)
      pixel_list.append(plist)

      # Get average background
      if self.compute_mean_background:
        background = im.as_1d().select((mk & ~threshold_mask).as_1d())
        average_background += flex.mean(background)

      # Add to the spot count
      num_strong += len(plist)

    # Make average background
    average_background /= len(image)

    # Check total number of strong pixels
    if self.max_strong_pixel_fraction < 1:
      num_image = 0
      for im in image:
        num_image += len(im)
      max_strong = int(ceil(self.max_strong_pixel_fraction * num_image))
      if num_strong > max_strong:
        raise RuntimeError(
          '''
          The number of strong pixels found (%d) is greater than the
          maximum allowed (%d). Try changing spot finding parameters
        ''' % (num_strong, max_strong))

    # Print some info
    if self.compute_mean_background:
      logger.info("Found %d strong pixels on image %d with average background %f"
           % (num_strong,
              frame+1,
              average_background))
    else:
      logger.info("Found %d strong pixels on image %d" % (num_strong, frame+1))

    # Return the result
    return Result(pixel_list)
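The region-of-interest branch above validates the ROI bounds, thresholds only the sub-image, and embeds the result back into a full-size, otherwise-False mask. A pure-Python sketch of that guard-and-embed pattern, using nested lists instead of flex arrays and a hypothetical mean-based threshold standing in for threshold_function:

def threshold_roi(image, roi):
    """Threshold a region of interest; pixels outside the ROI stay False.

    image: 2-D list of numbers; roi: (x0, x1, y0, y1), half-open bounds.
    """
    x0, x1, y0, y1 = roi
    height, width = len(image), len(image[0])
    assert 0 <= x0 < x1 <= width, "bad x bounds"
    assert 0 <= y0 < y1 <= height, "bad y bounds"
    sub = [row[x0:x1] for row in image[y0:y1]]
    cutoff = sum(sum(r) for r in sub) / float((x1 - x0) * (y1 - y0))
    full = [[False] * width for _ in range(height)]
    for j, row in enumerate(sub):
        for i, value in enumerate(row):
            full[y0 + j][x0 + i] = value > cutoff  # strong if above ROI mean
    return full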
Example #34
0
    def run(self, experiments, reflections):

        self.logger.log_step_time("POSTREFINEMENT")

        if not self.params.postrefinement.enable:
            self.logger.log("Postrefinement was not done")
            if self.mpi_helper.rank == 0:
                self.logger.main_log("Postrefinement was not done")
            return experiments, reflections

        target_symm = symmetry(
            unit_cell=self.params.scaling.unit_cell,
            space_group_info=self.params.scaling.space_group)
        i_model = self.params.scaling.i_model
        miller_set = self.params.scaling.miller_set

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        assert len(i_model.indices()) == len(miller_set.indices())
        assert (i_model.indices() == miller_set.indices()).count(False) == 0

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        experiments_rejected_by_reason = {}  # reason:how_many_rejected

        for experiment in experiments:

            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)

            # Build a miller array for the experiment reflections with original miller indexes
            exp_miller_indices_original = miller.set(
                target_symm, exp_reflections['miller_index'], True)
            observations_original_index = miller.array(
                exp_miller_indices_original,
                exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            assert exp_reflections.size() == exp_miller_indices_original.size()
            assert observations_original_index.size() == exp_miller_indices_original.size()

            # Build a miller array for the experiment reflections with asu miller indexes
            exp_miller_indices_asu = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            observations = miller.array(
                exp_miller_indices_asu, exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            matches = miller.match_multi_indices(
                miller_indices_unique=miller_set.indices(),
                miller_indices=observations.indices())

            pair1 = flex.int([pair[1] for pair in matches.pairs()
                              ])  # refers to the observations
            pair0 = flex.int([pair[0] for pair in matches.pairs()
                              ])  # refers to the model

            assert exp_reflections.size() == exp_miller_indices_original.size()
            assert observations_original_index.size() == exp_miller_indices_original.size()

            # narrow things down to the set that matches, only
            observations_pair1_selected = observations.customized_copy(
                indices=flex.miller_index(
                    [observations.indices()[p] for p in pair1]),
                data=flex.double([observations.data()[p] for p in pair1]),
                sigmas=flex.double([observations.sigmas()[p] for p in pair1]))

            observations_original_index_pair1_selected = observations_original_index.customized_copy(
                indices=flex.miller_index(
                    [observations_original_index.indices()[p] for p in pair1]),
                data=flex.double(
                    [observations_original_index.data()[p] for p in pair1]),
                sigmas=flex.double(
                    [observations_original_index.sigmas()[p] for p in pair1]))

            I_observed = observations_pair1_selected.data()
            MILLER = observations_original_index_pair1_selected.indices()

            ORI = crystal_orientation(experiment.crystal.get_A(),
                                      basis_type.reciprocal)
            Astar = matrix.sqr(ORI.reciprocal_matrix())
            Astar_from_experiment = matrix.sqr(experiment.crystal.get_A())
            assert Astar == Astar_from_experiment

            WAVE = experiment.beam.get_wavelength()
            BEAM = matrix.col((0.0, 0.0, -1. / WAVE))
            BFACTOR = 0.
            MOSAICITY_DEG = experiment.crystal.get_half_mosaicity_deg()
            DOMAIN_SIZE_A = experiment.crystal.get_domain_size_ang()

            # calculation of correlation here
            I_reference = flex.double(
                [i_model.data()[pair[0]] for pair in matches.pairs()])
            I_invalid = flex.bool(
                [i_model.sigmas()[pair[0]] < 0. for pair in matches.pairs()])
            use_weights = False  # New facility for getting variance-weighted correlation

            if use_weights:
                # variance weighting
                I_weight = flex.double([
                    1. / (observations_pair1_selected.sigmas()[pair[1]])**2
                    for pair in matches.pairs()
                ])
            else:
                I_weight = flex.double(
                    len(observations_pair1_selected.sigmas()), 1.)

            I_weight.set_selected(I_invalid, 0.)
            """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
         include_negatives = True
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)

         include_negatives = False
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)
      """

            # RB: By design, for MPI-Merge "include negatives" is implicitly True
            SWC = simple_weighted_correlation(I_weight, I_reference,
                                              I_observed)
            if self.params.output.log_level == 0:
                self.logger.log("Old correlation is: %f" % SWC.corr)

            if self.params.postrefinement.algorithm == "rs":

                Rhall = flex.double()

                for mill in MILLER:
                    H = matrix.col(mill)
                    Xhkl = Astar * H
                    Rh = (Xhkl + BEAM).length() - (1. / WAVE)
                    Rhall.append(Rh)

                Rs = math.sqrt(flex.mean(Rhall * Rhall))

                RS = 1. / 10000.  # reciprocal effective domain size of 1 micron
                RS = Rs  # try this empirically determined approximate, monochrome, a-mosaic value
                current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

                parameterization_class = rs_parameterization
                refinery = rs_refinery(ORI=ORI,
                                       MILLER=MILLER,
                                       BEAM=BEAM,
                                       WAVE=WAVE,
                                       ICALCVEC=I_reference,
                                       IOBSVEC=I_observed)

            elif self.params.postrefinement.algorithm == "eta_deff":

                eta_init = 2. * MOSAICITY_DEG * math.pi / 180.
                D_eff_init = 2. * DOMAIN_SIZE_A
                current = flex.double(
                    [SWC.slope, BFACTOR, eta_init, 0., 0., D_eff_init])

                parameterization_class = eta_deff_parameterization
                refinery = eta_deff_refinery(ORI=ORI,
                                             MILLER=MILLER,
                                             BEAM=BEAM,
                                             WAVE=WAVE,
                                             ICALCVEC=I_reference,
                                             IOBSVEC=I_observed)

            func = refinery.fvec_callable(parameterization_class(current))
            functional = flex.sum(func * func)

            if self.params.output.log_level == 0:
                self.logger.log("functional: %f" % functional)

            self.current = current
            self.parameterization_class = parameterization_class
            self.refinery = refinery

            self.observations_pair1_selected = observations_pair1_selected
            self.observations_original_index_pair1_selected = observations_original_index_pair1_selected

            error_detected = False

            try:
                self.run_plain()

                (result_observations_original_index, result_observations,
                 result_matches) = self.result_for_cxi_merge()

                assert result_observations_original_index.size() == result_observations.size()
                assert result_matches.pairs().size() == result_observations_original_index.size()

            except (AssertionError, ValueError, RuntimeError) as e:
                error_detected = True
                reason = repr(e)
                if not reason:
                    reason = "Unknown error"
                if reason not in experiments_rejected_by_reason:
                    experiments_rejected_by_reason[reason] = 1
                else:
                    experiments_rejected_by_reason[reason] += 1

            if not error_detected:
                new_experiments.append(experiment)

                new_exp_reflections = flex.reflection_table()
                new_exp_reflections[
                    'miller_index_asymmetric'] = flex.miller_index(
                        result_observations.indices())
                new_exp_reflections['intensity.sum.value'] = flex.double(
                    result_observations.data())
                new_exp_reflections['intensity.sum.variance'] = flex.double(
                    flex.pow(result_observations.sigmas(), 2))
                new_exp_reflections['exp_id'] = flex.std_string(
                    len(new_exp_reflections), experiment.identifier)
                new_reflections.extend(new_exp_reflections)
            '''
      # debugging
      elif reason.startswith("ValueError"):
        self.logger.log("Rejected b/c of value error exp id: %s; unit cell: %s"%(exp_id, str(experiment.crystal.get_unit_cell())) )
      '''

        # report rejected experiments, reflections
        experiments_rejected_by_postrefinement = len(experiments) - len(new_experiments)
        reflections_rejected_by_postrefinement = reflections.size() - new_reflections.size()

        self.logger.log("Experiments rejected by post-refinement: %d" %
                        experiments_rejected_by_postrefinement)
        self.logger.log("Reflections rejected by post-refinement: %d" %
                        reflections_rejected_by_postrefinement)

        all_reasons = []
        for reason, count in experiments_rejected_by_reason.items():
            self.logger.log("Experiments rejected due to %s: %d" %
                            (reason, count))
            all_reasons.append(reason)

        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI

        # Collect all rejection reasons from all ranks. Use allreduce to let each rank have all reasons.
        all_reasons = comm.allreduce(all_reasons, MPI.SUM)
        all_reasons = set(all_reasons)

        # Now that each rank has all reasons from all ranks, we can treat the reasons in a uniform way.
        total_experiments_rejected_by_reason = {}
        for reason in all_reasons:
            rejected_experiment_count = 0
            if reason in experiments_rejected_by_reason:
                rejected_experiment_count = experiments_rejected_by_reason[reason]
            total_experiments_rejected_by_reason[reason] = comm.reduce(
                rejected_experiment_count, MPI.SUM, 0)

        total_accepted_experiment_count = comm.reduce(len(new_experiments),
                                                      MPI.SUM, 0)

        # how many reflections have we rejected due to post-refinement?
        rejected_reflections = len(reflections) - len(new_reflections)
        total_rejected_reflections = self.mpi_helper.sum(rejected_reflections)

        if self.mpi_helper.rank == 0:
            for reason, count in total_experiments_rejected_by_reason.items():
                self.logger.main_log(
                    "Total experiments rejected due to %s: %d" %
                    (reason, count))
            self.logger.main_log("Total experiments accepted: %d" %
                                 total_accepted_experiment_count)
            self.logger.main_log(
                "Total reflections rejected due to post-refinement: %d" %
                total_rejected_reflections)

        self.logger.log_step_time("POSTREFINEMENT", True)

        return new_experiments, new_reflections
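This run() method and the legacy constructor below both call simple_weighted_correlation to seed the scale factor G (the slope) and to report a correlation, with invalid reference intensities silenced by zero weights. A minimal sketch of a variance-weighted slope and correlation in plain Python, assuming the standard weighted least-squares definitions (the actual cctbx helper may differ in detail):

import math

def weighted_slope_and_corr(w, x, y):
    """Weighted LS slope of y on x, plus weighted Pearson correlation.

    Zero weights drop points entirely, mirroring I_weight.set_selected(I_invalid, 0.).
    """
    sw = sum(w)
    xm = sum(wi * xi for wi, xi in zip(w, x)) / sw
    ym = sum(wi * yi for wi, yi in zip(w, y)) / sw
    cov = sum(wi * (xi - xm) * (yi - ym) for wi, xi, yi in zip(w, x, y))
    vx = sum(wi * (xi - xm) ** 2 for wi, xi in zip(w, x))
    vy = sum(wi * (yi - ym) ** 2 for wi, yi in zip(w, y))
    return cov / vx, cov / math.sqrt(vx * vy)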
Example #35
0
File: merge.py Project: huwjenkins/dials
def dano_over_sigdano(anomalous_amplitudes):
    """Calculate < |F(+) - F(-)| / sigma(F(+) - F(-))> i.e. <DANO/SIGDANO>."""
    diff = anomalous_amplitudes.anomalous_differences()
    if not diff.data() or not diff.sigmas():
        return 0.0
    return flex.mean(flex.abs(diff.data()) / diff.sigmas())
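As a plain-Python illustration (hypothetical inputs, not the cctbx miller-array API), the same <DANO/SIGDANO> average over pre-extracted anomalous differences and their sigmas would be:

def dano_over_sigdano_plain(danos, sigdanos):
    """Mean |DANO| / SIGDANO over paired lists; 0.0 when there is no data."""
    if not danos or not sigdanos:
        return 0.0
    return sum(abs(d) / s for d, s in zip(danos, sigdanos)) / len(danos)

# dano_over_sigdano_plain([1.2, -0.8], [0.4, 0.5]) -> 2.3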
Example #36
0
    def __init__(self, measurements_orig, params, i_model, miller_set, result,
                 out):
        measurements = measurements_orig.deep_copy()

        # Now manipulate the data to conform to unit cell, asu, and space group
        # of reference.  The resolution will be cut later.
        # Only works if there is NOT an indexing ambiguity!
        observations = measurements.customized_copy(
            anomalous_flag=not params.merge_anomalous,
            crystal_symmetry=miller_set.crystal_symmetry()).map_to_asu()

        observations_original_index = measurements.customized_copy(
            anomalous_flag=not params.merge_anomalous,
            crystal_symmetry=miller_set.crystal_symmetry())

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.

        assert len(i_model.indices()) == len(miller_set.indices()) \
            and  (i_model.indices() ==
                  miller_set.indices()).count(False) == 0

        matches = miller.match_multi_indices(
            miller_indices_unique=miller_set.indices(),
            miller_indices=observations.indices())

        pair1 = flex.int([pair[1] for pair in matches.pairs()])
        pair0 = flex.int([pair[0] for pair in matches.pairs()])
        # narrow things down to the set that matches, only
        observations_pair1_selected = observations.customized_copy(
            indices=flex.miller_index(
                [observations.indices()[p] for p in pair1]),
            data=flex.double([observations.data()[p] for p in pair1]),
            sigmas=flex.double([observations.sigmas()[p] for p in pair1]),
        )
        observations_original_index_pair1_selected = observations_original_index.customized_copy(
            indices=flex.miller_index(
                [observations_original_index.indices()[p] for p in pair1]),
            data=flex.double(
                [observations_original_index.data()[p] for p in pair1]),
            sigmas=flex.double(
                [observations_original_index.sigmas()[p] for p in pair1]),
        )
        ###################
        I_observed = observations_pair1_selected.data()
        MILLER = observations_original_index_pair1_selected.indices()
        ORI = result["current_orientation"][0]
        Astar = matrix.sqr(ORI.reciprocal_matrix())
        WAVE = result["wavelength"]
        BEAM = matrix.col((0.0, 0.0, -1. / WAVE))
        BFACTOR = 0.

        #calculation of correlation here
        I_reference = flex.double(
            [i_model.data()[pair[0]] for pair in matches.pairs()])
        I_invalid = flex.bool(
            [i_model.sigmas()[pair[0]] < 0. for pair in matches.pairs()])
        use_weights = False  # New facility for getting variance-weighted correlation

        if use_weights:
            #variance weighting
            I_weight = flex.double([
                1. / (observations_pair1_selected.sigmas()[pair[1]])**2
                for pair in matches.pairs()
            ])
        else:
            I_weight = flex.double(len(observations_pair1_selected.sigmas()),
                                   1.)
        I_weight.set_selected(I_invalid, 0.)
        """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
       include_negatives = True
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)

       include_negatives = False
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)
    """
        if params.include_negatives:
            SWC = simple_weighted_correlation(I_weight, I_reference,
                                              I_observed)
        else:
            non_positive = (observations_pair1_selected.data() <= 0)
            SWC = simple_weighted_correlation(
                I_weight.select(~non_positive),
                I_reference.select(~non_positive),
                I_observed.select(~non_positive))

        print >> out, "Old correlation is", SWC.corr
        if params.postrefinement.algorithm == "rs":
            Rhall = flex.double()
            for mill in MILLER:
                H = matrix.col(mill)
                Xhkl = Astar * H
                Rh = (Xhkl + BEAM).length() - (1. / WAVE)
                Rhall.append(Rh)
            Rs = math.sqrt(flex.mean(Rhall * Rhall))

            RS = 1. / 10000.  # reciprocal effective domain size of 1 micron
            RS = Rs  # try this empirically determined approximate, monochrome, a-mosaic value
            current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

            parameterization_class = rs_parameterization
            refinery = rs_refinery(ORI=ORI,
                                   MILLER=MILLER,
                                   BEAM=BEAM,
                                   WAVE=WAVE,
                                   ICALCVEC=I_reference,
                                   IOBSVEC=I_observed)

        elif params.postrefinement.algorithm == "eta_deff":
            eta_init = 2. * result["ML_half_mosaicity_deg"][0] * math.pi / 180.
            D_eff_init = 2. * result["ML_domain_size_ang"][0]
            current = flex.double([
                SWC.slope,
                BFACTOR,
                eta_init,
                0.,
                0.,
                D_eff_init,
            ])

            parameterization_class = eta_deff_parameterization
            refinery = eta_deff_refinery(ORI=ORI,
                                         MILLER=MILLER,
                                         BEAM=BEAM,
                                         WAVE=WAVE,
                                         ICALCVEC=I_reference,
                                         IOBSVEC=I_observed)

        func = refinery.fvec_callable(parameterization_class(current))
        functional = flex.sum(func * func)
        print >> out, "functional", functional
        self.current = current
        self.parameterization_class = parameterization_class
        self.refinery = refinery
        self.out = out
        self.params = params
        self.miller_set = miller_set
        self.observations_pair1_selected = observations_pair1_selected
        self.observations_original_index_pair1_selected = observations_original_index_pair1_selected
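Both postrefinement variants build Rh, the signed distance of each reciprocal-lattice point from the Ewald sphere, as (A*·h + s0).length() - 1/λ with s0 = (0, 0, -1/λ), and then seed RS with sqrt(<Rh²>). A self-contained sketch of that geometry with plain tuples (illustration only, no scitbx types):

import math

def ewald_offset(astar, hkl, wavelength):
    """Rh = |A* h + s0| - 1/lambda, with astar given as three rows."""
    x = [sum(astar[i][j] * hkl[j] for j in range(3)) for i in range(3)]
    s0 = (0.0, 0.0, -1.0 / wavelength)
    q = [xi + si for xi, si in zip(x, s0)]
    return math.sqrt(sum(c * c for c in q)) - 1.0 / wavelength

# RS is then estimated as sqrt(mean(Rh ** 2)) over all matched reflections.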
Example #37
0
File: modeller.py Project: dials/dials
  def finalize(self, data, mask):
    '''
    Finalize the model

    :param data: The data array
    :param mask: The mask array

    '''
    from dials.algorithms.image.filter import median_filter, mean_filter
    from dials.algorithms.image.fill_holes import diffusion_fill
    from dials.algorithms.image.fill_holes import simple_fill
    from dials.array_family import flex

    # Print some image properties
    sub_data = data.as_1d().select(mask.as_1d())
    logger.info('Raw image statistics:')
    logger.info('  min:  %d' % int(flex.min(sub_data)))
    logger.info('  max:  %d' % int(flex.max(sub_data)))
    logger.info('  mean: %d' % int(flex.mean(sub_data)))
    logger.info('')

    # Transform to polar
    logger.info('Transforming image data to polar grid')
    result = self.transform.to_polar(data, mask)
    data = result.data()
    mask = result.mask()
    sub_data = data.as_1d().select(mask.as_1d())
    logger.info('Polar image statistics:')
    logger.info('  min:  %d' % int(flex.min(sub_data)))
    logger.info('  max:  %d' % int(flex.max(sub_data)))
    logger.info('  mean: %d' % int(flex.mean(sub_data)))
    logger.info('')

    # Filter the image to remove noise
    if self.kernel_size > 0:
      if self.filter_type == 'median':
        logger.info('Applying median filter')
        data = median_filter(data, mask, (self.kernel_size, 0))
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info('Median polar image statistics:')
        logger.info('  min:  %d' % int(flex.min(sub_data)))
        logger.info('  max:  %d' % int(flex.max(sub_data)))
        logger.info('  mean: %d' % int(flex.mean(sub_data)))
        logger.info('')
      elif self.filter_type == 'mean':
        logger.info('Applying mean filter')
        mask_as_int = mask.as_1d().as_int()
        mask_as_int.reshape(mask.accessor())
        data = mean_filter(data, mask_as_int, (self.kernel_size, 0), 1)
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info('Mean polar image statistics:')
        logger.info('  min:  %d' % int(flex.min(sub_data)))
        logger.info('  max:  %d' % int(flex.max(sub_data)))
        logger.info('  mean: %d' % int(flex.mean(sub_data)))
        logger.info('')
      else:
        raise RuntimeError('Unknown filter_type: %s' % self.filter_type)

    # Fill any remaining holes
    logger.info("Filling holes")
    data = simple_fill(data, mask)
    data = diffusion_fill(data, mask, self.niter)
    mask = flex.bool(data.accessor(), True)
    sub_data = data.as_1d().select(mask.as_1d())
    logger.info('Filled polar image statistics:')
    logger.info('  min:  %d' % int(flex.min(sub_data)))
    logger.info('  max:  %d' % int(flex.max(sub_data)))
    logger.info('  mean: %d' % int(flex.mean(sub_data)))
    logger.info('')

    # Transform back
    logger.info('Transforming image data from polar grid')
    result = self.transform.from_polar(data, mask)
    data = result.data()
    mask = result.mask()
    sub_data = data.as_1d().select(mask.as_1d())
    logger.info('Final image statistics:')
    logger.info('  min:  %d' % int(flex.min(sub_data)))
    logger.info('  max:  %d' % int(flex.max(sub_data)))
    logger.info('  mean: %d' % int(flex.mean(sub_data)))
    logger.info('')

    # Fill in any discontinuities
    # FIXME NEED TO HANDLE DISCONTINUITY
    # mask = ~self.transform.discontinuity()[:-1,:-1]
    # data = diffusion_fill(data, mask, self.niter)

    # Get and apply the mask
    mask = self.experiment.imageset.get_mask(0)[0]
    mask = mask.as_1d().as_int().as_double()
    mask.reshape(data.accessor())
    data *= mask

    # Return the result
    return data
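The filtering step above runs a median (or mean) filter over the polar grid with a (kernel_size, 0) kernel, honouring the mask. A toy 1-D analogue, assuming the intended behaviour is that masked-out neighbours are ignored and masked-out points pass through unchanged (the real dials.algorithms.image.filter routines work on 2-D flex arrays):

def masked_median_1d(values, mask, half_width):
    """Median filter that skips masked-out neighbours."""
    out = list(values)
    for i in range(len(values)):
        if not mask[i]:
            continue  # leave masked-out points untouched
        lo, hi = max(0, i - half_width), min(len(values), i + half_width + 1)
        window = sorted(v for v, m in zip(values[lo:hi], mask[lo:hi]) if m)
        out[i] = window[len(window) // 2]
    return out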
Example #38
0
    def finalize(self, data, mask):
        """
        Finalize the model

        :param data: The data array
        :param mask: The mask array
        """
        from dials.algorithms.image.filter import median_filter, mean_filter
        from dials.algorithms.image.fill_holes import diffusion_fill
        from dials.algorithms.image.fill_holes import simple_fill
        from dials.array_family import flex

        # Print some image properties
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info("Raw image statistics:")
        logger.info("  min:  %d" % int(flex.min(sub_data)))
        logger.info("  max:  %d" % int(flex.max(sub_data)))
        logger.info("  mean: %d" % int(flex.mean(sub_data)))
        logger.info("")

        # Transform to polar
        logger.info("Transforming image data to polar grid")
        result = self.transform.to_polar(data, mask)
        data = result.data()
        mask = result.mask()
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info("Polar image statistics:")
        logger.info("  min:  %d" % int(flex.min(sub_data)))
        logger.info("  max:  %d" % int(flex.max(sub_data)))
        logger.info("  mean: %d" % int(flex.mean(sub_data)))
        logger.info("")

        # Filter the image to remove noise
        if self.kernel_size > 0:
            if self.filter_type == "median":
                logger.info("Applying median filter")
                data = median_filter(data,
                                     mask, (self.kernel_size, 0),
                                     periodic=True)
                sub_data = data.as_1d().select(mask.as_1d())
                logger.info("Median polar image statistics:")
                logger.info("  min:  %d" % int(flex.min(sub_data)))
                logger.info("  max:  %d" % int(flex.max(sub_data)))
                logger.info("  mean: %d" % int(flex.mean(sub_data)))
                logger.info("")
            elif self.filter_type == "mean":
                logger.info("Applying mean filter")
                mask_as_int = mask.as_1d().as_int()
                mask_as_int.reshape(mask.accessor())
                data = mean_filter(data, mask_as_int, (self.kernel_size, 0), 1)
                sub_data = data.as_1d().select(mask.as_1d())
                logger.info("Mean polar image statistics:")
                logger.info("  min:  %d" % int(flex.min(sub_data)))
                logger.info("  max:  %d" % int(flex.max(sub_data)))
                logger.info("  mean: %d" % int(flex.mean(sub_data)))
                logger.info("")
            else:
                raise RuntimeError("Unknown filter_type: %s" %
                                   self.filter_type)

        # Fill any remaining holes
        logger.info("Filling holes")
        data = simple_fill(data, mask)
        data = diffusion_fill(data, mask, self.niter)
        mask = flex.bool(data.accessor(), True)
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info("Filled polar image statistics:")
        logger.info("  min:  %d" % int(flex.min(sub_data)))
        logger.info("  max:  %d" % int(flex.max(sub_data)))
        logger.info("  mean: %d" % int(flex.mean(sub_data)))
        logger.info("")

        # Transform back
        logger.info("Transforming image data from polar grid")
        result = self.transform.from_polar(data, mask)
        data = result.data()
        mask = result.mask()
        sub_data = data.as_1d().select(mask.as_1d())
        logger.info("Final image statistics:")
        logger.info("  min:  %d" % int(flex.min(sub_data)))
        logger.info("  max:  %d" % int(flex.max(sub_data)))
        logger.info("  mean: %d" % int(flex.mean(sub_data)))
        logger.info("")

        # Fill in any discontinuities
        mask = ~self.transform.discontinuity()[:-1, :-1]
        data = diffusion_fill(data, mask, self.niter)

        # Get and apply the mask
        mask = self.experiment.imageset.get_mask(0)[0]
        mask = mask.as_1d().as_int().as_double()
        mask.reshape(data.accessor())
        data *= mask

        # Return the result
        return data
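This variant also enables the discontinuity fill that the previous example left commented out. diffusion_fill itself spreads known values into masked-out holes over niter iterations; a toy 1-D sketch of that idea (not the actual DIALS implementation) is:

def diffusion_fill_1d(values, mask, niter):
    """Iteratively replace masked-out points by the mean of their neighbours."""
    data = list(values)
    for _ in range(niter):
        step = list(data)
        for i in range(len(data)):
            if mask[i]:
                continue  # known pixels stay fixed
            nbrs = [data[j] for j in (i - 1, i + 1) if 0 <= j < len(data)]
            step[i] = sum(nbrs) / float(len(nbrs))
        data = step
    return data

# e.g. diffusion_fill_1d([1.0, 0.0, 3.0], [True, False, True], 10)[1] -> 2.0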
Example #39
0
  def __call__(self, experiments, reflections):
    results = flex.reflection_table()
    table_header = ["","","","I","IsigI","N >","RMSD","Cutoff"]
    table_header2 = ["Bin","Resolution Range","Completeness","","","cutoff","(um)",""]

    for exp_id in range(len(experiments)):
      print("*"*80)
      print("Significance filtering experiment", exp_id)
      table_data = []
      table_data.append(table_header)
      table_data.append(table_header2)
      experiment = experiments[exp_id]

      # Find the bins for this experiment
      crystal = experiment.crystal
      refls = reflections.select(reflections['id'] == exp_id)
      sym = symmetry(unit_cell = crystal.get_unit_cell(), space_group = crystal.get_space_group())
      d = crystal.get_unit_cell().d(refls['miller_index'])
      mset = sym.miller_set(indices = refls['miller_index'], anomalous_flag=False)
      binner = mset.setup_binner(n_bins=self.params.n_bins)
      acceptable_resolution_bins = []

      # Iterate through the bins, examining I/sigI at each bin
      for i in binner.range_used():
        d_max, d_min = binner.bin_d_range(i)
        sel = (d <= d_max) & (d > d_min)
        sel &= refls['intensity.sum.value'] > 0
        bin_refls = refls.select(sel)
        n_refls = len(bin_refls)
        avg_i = flex.mean(bin_refls['intensity.sum.value']) if n_refls > 0 else 0
        avg_i_sigi = flex.mean(bin_refls['intensity.sum.value'] /
                               flex.sqrt(bin_refls['intensity.sum.variance'])) if n_refls > 0 else 0
        acceptable_resolution_bins.append(avg_i_sigi >= self.params.isigi_cutoff)

        bright_refls = bin_refls.select((bin_refls['intensity.sum.value']/flex.sqrt(bin_refls['intensity.sum.variance'])) >= self.params.isigi_cutoff)
        n_bright = len(bright_refls)

        rmsd_obs = 1000*math.sqrt((bright_refls['xyzcal.mm']-bright_refls['xyzobs.mm.value']).sum_sq()/n_bright) if n_bright > 0 else 0

        table_row = []
        table_row.append("%3d"%i)
        table_row.append("%-13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                   show_d_range=True, show_counts=False))
        table_row.append("%13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                  show_d_range=False, show_counts=True))

        table_row.append("%.1f"%(avg_i))
        table_row.append("%.1f"%(avg_i_sigi))
        table_row.append("%3d"%n_bright)
        table_row.append("%.1f"%(rmsd_obs))
        table_data.append(table_row)

      # Throw out bins that go back above the cutoff after the first non-passing bin is found
      acceptable_resolution_bins = [acceptable_resolution_bins[i] for i in range(len(acceptable_resolution_bins))
                                    if False not in acceptable_resolution_bins[:i+1]]

      for b, row in zip(acceptable_resolution_bins, table_data[2:]):
        if b:
          row.append("X")
      print(table_utils.format(table_data,has_header=2,justify='center',delim=" "))

      # Save the results
      if any(acceptable_resolution_bins):
        best_index = acceptable_resolution_bins.count(True)-1
        best_row = table_data[best_index+2]
        d_min = binner.bin_d_range(binner.range_used()[best_index])[1]
        print("best row:", " ".join(best_row))
        if self.params.enable:
          results.extend(refls.select(d >= d_min))
      else:
        print("Data didn't pass cutoff")
    if self.params.enable:
      return results
    else:
      return reflections
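The bin-filtering comprehension above keeps only the leading run of bins that pass the I/sigI cutoff: once one bin fails, every later bin is discarded even if it passes again, and best_index is simply the count of surviving bins minus one. The same logic written out explicitly:

def keep_until_first_failure(flags):
    """Truncate a list of pass/fail flags at the first failure."""
    kept = []
    for ok in flags:
        if not ok:
            break
        kept.append(ok)
    return kept

# keep_until_first_failure([True, True, False, True]) -> [True, True]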
Example #40
0
def blank_integrated_analysis(reflections, scan, phi_step, fractional_loss):
    prf_sel = reflections.get_flags(reflections.flags.integrated_prf)
    if prf_sel.count(True) > 0:
        reflections = reflections.select(prf_sel)
        intensities = reflections["intensity.prf.value"]
        variances = reflections["intensity.prf.variance"]
    else:
        sum_sel = reflections.get_flags(reflections.flags.integrated_sum)
        reflections = reflections.select(sum_sel)
        intensities = reflections["intensity.sum.value"]
        variances = reflections["intensity.sum.variance"]

    i_sigi = intensities / flex.sqrt(variances)

    xyz_px = reflections["xyzobs.px.value"]
    x_px, y_px, z_px = xyz_px.parts()
    phi = scan.get_angle_from_array_index(z_px)

    osc = scan.get_oscillation()[1]
    n_images_per_step = iceil(phi_step / osc)
    phi_step = n_images_per_step * osc

    array_range = scan.get_array_range()
    phi_min = flex.min(phi)
    phi_max = flex.max(phi)
    n_steps = int(round((phi_max - phi_min) / phi_step))
    hist = flex.histogram(
        z_px, data_min=array_range[0], data_max=array_range[1], n_slots=n_steps
    )
    logger.debug("Histogram:")
    logger.debug(hist.as_str())

    mean_i_sigi = flex.double()
    for i, slot_info in enumerate(hist.slot_infos()):
        sel = (z_px >= slot_info.low_cutoff) & (z_px < slot_info.high_cutoff)
        if sel.count(True) == 0:
            mean_i_sigi.append(0)
        else:
            mean_i_sigi.append(flex.mean(i_sigi.select(sel)))

    potential_blank_sel = mean_i_sigi <= (fractional_loss * flex.max(mean_i_sigi))

    xmin, xmax = zip(
        *[
            (slot_info.low_cutoff, slot_info.high_cutoff)
            for slot_info in hist.slot_infos()
        ]
    )

    d = {
        "data": [
            {
                "x": list(hist.slot_centers()),
                "y": list(mean_i_sigi),
                "xlow": xmin,
                "xhigh": xmax,
                "blank": list(potential_blank_sel),
                "type": "bar",
                "name": "blank_counts_analysis",
            }
        ],
        "layout": {
            "xaxis": {"title": "z observed (images)"},
            "yaxis": {"title": "Number of reflections"},
            "bargap": 0,
        },
    }

    blank_regions = blank_regions_from_sel(d["data"][0])
    d["blank_regions"] = blank_regions

    return d
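At its core the blank detection selects histogram slots whose mean I/sigI drops below fractional_loss times the best slot. Distilled to plain Python:

def potential_blank_slots(mean_i_sigi, fractional_loss):
    """Flag slots whose <I/sigI> falls below a fraction of the maximum."""
    cutoff = fractional_loss * max(mean_i_sigi)
    return [v <= cutoff for v in mean_i_sigi]

# potential_blank_slots([10.0, 9.5, 0.2, 8.0], 0.1) -> [False, False, True, False]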
Example #41
0
  def centroid_mean_diff_vs_phi(self, rlist, threshold):
    from os.path import join
    import math
    I = rlist['intensity.sum.value']
    I_sig = flex.sqrt(rlist['intensity.sum.variance'])
    I_over_S = I / I_sig
    mask = I_over_S > threshold
    rlist = rlist.select(mask)
    assert(len(rlist) > 0)

    xc, yc, zc = rlist['xyzcal.mm'].parts()
    xo, yo, zo = rlist['xyzobs.mm.value'].parts()

    dx = xc - xo
    dy = yc - yo
    dphi = (zc - zo) * RAD2DEG

    mean_residuals_x = flex.double()
    mean_residuals_y = flex.double()
    mean_residuals_phi = flex.double()
    rmsd_x = flex.double()
    rmsd_y = flex.double()
    rmsd_phi = flex.double()
    frame = []
    phi_obs_deg = RAD2DEG * zo
    phi = []

    for i_phi in range(int(math.floor(flex.min(phi_obs_deg))),
                   int(math.ceil(flex.max(phi_obs_deg)))):
      sel = (phi_obs_deg >= i_phi) & (phi_obs_deg < (i_phi+1))
      if sel.count(True) == 0:
        continue
      mean_residuals_x.append(flex.mean(dx.select(sel)))
      mean_residuals_y.append(flex.mean(dy.select(sel)))
      mean_residuals_phi.append(flex.mean(dphi.select(sel)))
      rmsd_x.append(math.sqrt(flex.mean_sq(dx.select(sel))))
      rmsd_y.append(math.sqrt(flex.mean_sq(dy.select(sel))))
      rmsd_phi.append(math.sqrt(flex.mean_sq(dphi.select(sel))))
      phi.append(i_phi)

    from matplotlib import pyplot
    fig = pyplot.figure()
    ax = fig.add_subplot(311)
    #fig.subplots_adjust(hspace=0.5)
    pyplot.axhline(0, color='grey')
    ax.scatter(phi, mean_residuals_x)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel(r'mean $\Delta$ x (mm)')
    ax = fig.add_subplot(312)
    pyplot.axhline(0, color='grey')
    ax.scatter(phi, mean_residuals_y)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel(r'mean $\Delta$ y (mm)')
    ax = fig.add_subplot(313)
    pyplot.axhline(0, color='grey')
    ax.scatter(phi, mean_residuals_phi)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel(r'mean $\Delta$ phi (deg)')
    pyplot.savefig(join(self.directory, "centroid_mean_diff_vs_phi.png"))
    pyplot.close()

    fig = pyplot.figure()
    ax = fig.add_subplot(311)
    #fig.subplots_adjust(hspace=0.5)
    pyplot.axhline(flex.mean(rmsd_x), color='grey')
    ax.scatter(phi, rmsd_x)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel('rmsd x (mm)')
    ax = fig.add_subplot(312)
    pyplot.axhline(flex.mean(rmsd_y), color='grey')
    ax.scatter(phi, rmsd_y)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel('rmsd y (mm)')
    ax = fig.add_subplot(313)
    pyplot.axhline(flex.mean(rmsd_phi), color='grey')
    ax.scatter(phi, rmsd_phi)
    ax.set_xlabel('phi (deg)')
    ax.set_ylabel('rmsd phi (deg)')
    pyplot.savefig(join(self.directory, "centroid_rmsd_vs_phi.png"))
    pyplot.close()
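The per-degree loop above computes, for each 1-degree slice of observed phi, the mean and the RMS of the centroid residuals. A compact plain-Python equivalent (hypothetical helper, lists instead of flex arrays):

import math

def binned_residual_stats(phi_deg, residuals):
    """Per 1-degree bin of phi: (mean residual, RMS residual)."""
    bins = {}
    for p, r in zip(phi_deg, residuals):
        bins.setdefault(int(math.floor(p)), []).append(r)
    return {b: (sum(v) / len(v), math.sqrt(sum(x * x for x in v) / len(v)))
            for b, v in sorted(bins.items())}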
Example #42
0
    def __call__(self, index):
        """
        Extract strong pixels from an image

        :param index: The index of the image
        """
        from dials.model.data import PixelList
        from dxtbx.imageset import ImageSequence

        # Parallel reading of HDF5 from the same handle is not allowed. Python
        # multiprocessing is a bit messed up and used fork on linux so need to
        # close and reopen file.
        if self.first:
            if self.imageset.reader().is_single_file_reader():
                self.imageset.reader().nullify_format_instance()
            self.first = False

        # Get the frame number
        if isinstance(self.imageset, ImageSequence):
            frame = self.imageset.get_array_range()[0] + index
        else:
            ind = self.imageset.indices()
            if len(ind) > 1:
                # indices must be consecutive
                assert all(i1 + 1 == i2
                           for i1, i2 in zip(ind[0:-1], ind[1:]))
            frame = ind[index]

        # Create the list of pixel lists
        pixel_list = []

        # Get the image and mask
        image = self.imageset.get_corrected_data(index)
        mask = self.imageset.get_mask(index)

        # Set the mask
        if self.mask is not None:
            assert len(self.mask) == len(mask)
            mask = tuple(m1 & m2 for m1, m2 in zip(mask, self.mask))

        logger.debug("Number of masked pixels for image %i: %i" %
                     (index, sum(m.count(False) for m in mask)))

        # Add the images to the pixel lists
        num_strong = 0
        average_background = 0
        for im, mk in zip(image, mask):
            if self.region_of_interest is not None:
                x0, x1, y0, y1 = self.region_of_interest
                height, width = im.all()
                assert x0 < x1, "x0 < x1"
                assert y0 < y1, "y0 < y1"
                assert x0 >= 0, "x0 >= 0"
                assert y0 >= 0, "y0 >= 0"
                assert x1 <= width, "x1 <= width"
                assert y1 <= height, "y1 <= height"
                im_roi = im[y0:y1, x0:x1]
                mk_roi = mk[y0:y1, x0:x1]
                tm_roi = self.threshold_function.compute_threshold(
                    im_roi, mk_roi)
                threshold_mask = flex.bool(im.accessor(), False)
                threshold_mask[y0:y1, x0:x1] = tm_roi
            else:
                threshold_mask = self.threshold_function.compute_threshold(
                    im, mk)

            # Add the pixel list
            plist = PixelList(frame, im, threshold_mask)
            pixel_list.append(plist)

            # Get average background
            if self.compute_mean_background:
                background = im.as_1d().select((mk & ~threshold_mask).as_1d())
                average_background += flex.mean(background)

            # Add to the spot count
            num_strong += len(plist)

        # Make average background
        average_background /= len(image)

        # Check total number of strong pixels
        if self.max_strong_pixel_fraction < 1:
            num_image = 0
            for im in image:
                num_image += len(im)
            max_strong = int(
                math.ceil(self.max_strong_pixel_fraction * num_image))
            if num_strong > max_strong:
                raise RuntimeError("""
          The number of strong pixels found (%d) is greater than the
          maximum allowed (%d). Try changing spot finding parameters
        """ % (num_strong, max_strong))

        # Print some info
        if self.compute_mean_background:
            logger.info(
                "Found %d strong pixels on image %d with average background %f"
                % (num_strong, frame + 1, average_background))
        else:
            logger.info("Found %d strong pixels on image %d" %
                        (num_strong, frame + 1))

        # Return the result
        return Result(pixel_list)
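The max_strong_pixel_fraction guard appears in both spot-finder variants; in isolation it is just a ceiling on the strong-pixel count relative to the total pixel count. A minimal sketch:

import math

def check_strong_fraction(num_strong, num_pixels, max_fraction):
    """Raise if strong pixels exceed the allowed fraction of the image."""
    max_strong = int(math.ceil(max_fraction * num_pixels))
    if num_strong > max_strong:
        raise RuntimeError(
            "The number of strong pixels found (%d) is greater than the "
            "maximum allowed (%d). Try changing spot finding parameters."
            % (num_strong, max_strong))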
Example #43
0
    def overall_report(data):

        # Start by adding some overall numbers
        report = OrderedDict()
        report['n'] = len(reflections)
        report['n_full'] = data['full'].count(True)
        report['n_partial'] = data['full'].count(False)
        report['n_overload'] = data['over'].count(True)
        report['n_ice'] = data['ice'].count(True)
        report['n_summed'] = data['sum'].count(True)
        report['n_fitted'] = data['prf'].count(True)
        report['n_integated'] = data['int'].count(True)
        report['n_invalid_bg'] = data['ninvbg'].count(True)
        report['n_invalid_fg'] = data['ninvfg'].count(True)
        report['n_failed_background'] = data['fbgd'].count(True)
        report['n_failed_summation'] = data['fsum'].count(True)
        report['n_failed_fitting'] = data['fprf'].count(True)

        # Compute mean background
        try:
            report['mean_background'] = flex.mean(
                data['background.mean'].select(data['int']))
        except Exception:
            report['mean_background'] = 0.0

        # Compute mean I/Sigma summation
        try:
            report['ios_sum'] = flex.mean(data['intensity.sum.ios'].select(
                data['sum']))
        except Exception:
            report['ios_sum'] = 0.0

        # Compute mean I/Sigma profile fitting
        try:
            report['ios_prf'] = flex.mean(data['intensity.prf.ios'].select(
                data['prf']))
        except Exception:
            report['ios_prf'] = 0.0

        # Compute the mean profile correlation
        try:
            report['cc_prf'] = flex.mean(data['profile.correlation'].select(
                data['prf']))
        except Exception:
            report['cc_prf'] = 0.0

        # Compute the correlations between summation and profile fitting
        try:
            mask = data['sum'] & data['prf']
            Isum = data['intensity.sum.value'].select(mask)
            Iprf = data['intensity.prf.value'].select(mask)
            report['cc_pearson_sum_prf'] = pearson_correlation_coefficient(
                Isum, Iprf)
            report['cc_spearman_sum_prf'] = spearman_correlation_coefficient(
                Isum, Iprf)
        except Exception:
            report['cc_pearson_sum_prf'] = 0.0
            report['cc_spearman_sum_prf'] = 0.0

        # Return the overall report
        return report
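Every statistic above follows the same pattern: average a column over a flagged subset and fall back to 0.0 when the selection is empty or the column is missing. That pattern, factored into a hypothetical helper:

def safe_mean(values, selection):
    """Mean over selected values; 0.0 when the selection is empty or invalid."""
    try:
        picked = [v for v, keep in zip(values, selection) if keep]
        return sum(picked) / float(len(picked))
    except (ZeroDivisionError, TypeError):
        return 0.0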
Example #44
0
def run(params, root, common_set=None):
    iterable = []
    iterable2 = []
    debug_root = os.path.join(root, 'debug')
    print('start appending')
    for filename in os.listdir(debug_root):
        if os.path.splitext(filename)[1] != ".txt": continue
        iterable.append(filename)
    for filename in os.listdir(root):
        if 'refined_experiments' not in os.path.splitext(
                filename)[0] or os.path.splitext(filename)[1] != ".json":
            continue
        iterable2.append(filename)
    print('done appending')
    #if command_line.options.mpi:
    if params.mpi:
        try:
            from mpi4py import MPI
        except ImportError:
            raise Sorry("MPI not found")
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        print(rank, size)
        # get hits and indexing info
        iterable = [
            iterable[i] for i in range(len(iterable)) if (i + rank) % size == 0
        ]
        results = get_hits_and_indexing_stats(iterable, debug_root, rank)
        results = comm.gather(results, root=0)
        # Now get uc and rmsd info
        iterable2 = [
            iterable2[i] for i in range(len(iterable2))
            if (i + rank) % size == 0
        ]
        results2 = get_uc_and_rmsd_stats(iterable2,
                                         root,
                                         rank=rank,
                                         common_set=common_set)
        results2 = comm.gather(results2, root=0)
        if rank != 0: return
    else:
        results = [get_hits_and_indexing_stats(iterable, debug_root)]
        results2 = [
            get_uc_and_rmsd_stats(iterable2,
                                  root,
                                  rank=0,
                                  common_set=common_set)
        ]
    # Now evaluate summary statistics
    print('Now evaluating summary statistics')
    n_hits = 0
    n_idx = 0
    t_idx = 0
    t_idx_success = 0
    all_idx_cutoff_time_exceeded_event = []
    total_xray_events = 0
    total_images_analyzed = 0
    for ii, r in enumerate(results):
        n_hits += r[0]
        n_idx += r[1]
        t_idx += r[2]
        t_idx_success += r[3]
        all_idx_cutoff_time_exceeded_event.extend(r[4])
        total_xray_events += r[5]
        total_images_analyzed += r[6]
    # Write out the cutoff time exceeded events in a format that xtc_process.py can interpret for skipping events
    if indexing_time_cutoff is not None:
        fts = open('timestamps_to_skip.dat', 'w')
        for evt in all_idx_cutoff_time_exceeded_event:
            fts.write('psanagpu999,%s,%s,fail\n' % (evt, evt))
        fts.close()
    node_hours = None
    core_hours = None
    if out_logfile is not None and wall_time is None:
        total_time = []
        run_number = int(os.path.abspath(root).strip().split('/')[-3][1:])
        print(run_number)
        with open(out_logfile, 'r') as flog:
            for line in flog:
                if string_to_search_for in line:
                    ax = line.split()
                    if int(ax[-1]) == run_number:
                        total_time.append(float(ax[1]))
        node_hours = max(total_time) * num_nodes / 3600.0
        if num_cores is not None:
            core_hours = max(total_time) * num_cores / 3600.0
        else:
            core_hours = max(
                total_time) * num_nodes * num_cores_per_node / 3600.0

    if wall_time is not None:
        node_hours = wall_time * num_nodes / 3600.0
        if num_cores is not None:
            core_hours = wall_time * num_cores / 3600.0
        else:
            core_hours = wall_time * num_nodes * num_cores_per_node / 3600.0

    all_uc_a = flex.double()
    all_uc_b = flex.double()
    all_uc_c = flex.double()
    all_uc_alpha = flex.double()
    all_uc_beta = flex.double()
    all_uc_gamma = flex.double()
    dR = flex.double()
    info_list = []
    info = []
    for ii, r in enumerate(results2):
        all_uc_a.extend(r[0])
        all_uc_b.extend(r[1])
        all_uc_c.extend(r[2])
        all_uc_alpha.extend(r[3])
        all_uc_beta.extend(r[4])
        all_uc_gamma.extend(r[5])
        dR.extend(r[6])
        if show_plot:
            for jj, aa in enumerate(r[0]):
                info.append({
                    'a': r[0][jj],
                    'b': r[1][jj],
                    'c': r[2][jj],
                    'alpha': r[3][jj],
                    'beta': r[4][jj],
                    'gamma': r[5][jj],
                    'n_img': 0
                })
    info_list.append(info)
    n_lattices = len(all_uc_a)
    # Now print out all relevant statistics
    if True:
        print('-' * 80)
        print('|' + ' ' * 80 + '|\n' + '|' + ' ' * 20 +
              'Analytics Package for Indexing' + ' ' * 30 + '|\n|' + ' ' * 80 +
              '|')
        print('-' * 80)
        print('Getting stats for data in : ', root)
        print(
            '====================== Indexing and Timing Statistics ============================'
        )
        print('Total number of X-ray events = ', total_xray_events)
        print('Total number of images analyzed = ', total_images_analyzed)
        print('Number of Hits = ', n_hits)
        print('Number of images successfully indexed = ', n_idx)
        if common_set is None:
            print('Number of lattices = ', n_lattices)
        else:
            print('Number of common lattices', n_lattices)
        print('Total time spent in indexing (hrs) = ', t_idx)
        print('Time spent in indexing successfully (core-hrs) = ',
              t_idx_success)
        print('Average time spent indexing (core-secs) = ',
              3600 * t_idx / n_hits)
        print('Average time spent indexing successfully (core-secs) = ',
              3600 * t_idx_success / n_idx)
        if node_hours is not None:
            print('Total Node-hours with %d nodes = %.2f (hrs)' %
                  (num_nodes, node_hours))
            print(
                '% core utilization i.e (total indexing time)/(total core-hrs) = ',
                100.0 * t_idx / core_hours)
        if common_set is None:
            print(
                '====================== Unit Cell & RMSD Statistics ============================'
            )
        else:
            print(
                '====================== Unit Cell & RMSD Statistics from Common Set ============================'
            )
        print('a-edge (A) : %.2f +/- %.2f' %
              (flex.mean(all_uc_a), flex.mean_and_variance(
                  all_uc_a).unweighted_sample_standard_deviation()))
        print('b-edge (A) : %.2f +/- %.2f' %
              (flex.mean(all_uc_b), flex.mean_and_variance(
                  all_uc_b).unweighted_sample_standard_deviation()))
        print('c-edge (A) : %.2f +/- %.2f' %
              (flex.mean(all_uc_c), flex.mean_and_variance(
                  all_uc_c).unweighted_sample_standard_deviation()))
        print('alpha (deg) : %.2f +/- %.2f' %
              (flex.mean(all_uc_alpha), flex.mean_and_variance(
                  all_uc_alpha).unweighted_sample_standard_deviation()))
        print('beta (deg) : %.2f +/- %.2f' %
              (flex.mean(all_uc_beta), flex.mean_and_variance(
                  all_uc_beta).unweighted_sample_standard_deviation()))
        print('gamma (deg) : %.2f +/- %.2f' %
              (flex.mean(all_uc_gamma), flex.mean_and_variance(
                  all_uc_gamma).unweighted_sample_standard_deviation()))
        print('Total RMSD i.e calc - obs for Bragg spots (um) = ',
              1000.0 * math.sqrt(dR.dot(dR) / len(dR)))
    print('-' * 80)
    if show_plot:
        import xfel.ui.components.xfel_gui_plotter as pltr
        plotter = pltr.PopUpCharts()
        plotter.plot_uc_histogram(info_list=info_list,
                                  legend_list=['combined'],
                                  iqr_ratio=None)
        plotter.plt.show()
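The MPI branch above distributes work by dealing items out cyclically: rank r takes every index i with (i + r) % size == 0, so the ranks' subsets are disjoint and together cover the whole list. Isolated as a sketch:

def round_robin(items, rank, size):
    """Cyclic work distribution; each index is claimed by exactly one rank."""
    return [items[i] for i in range(len(items)) if (i + rank) % size == 0]

# the union of round_robin(items, r, size) over r = 0..size-1 recovers every item exactly once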
Example #46
0
  def per_sensor_analysis(self): # hardcoded Jungfrau 16M geometry
    for isensor in range(32):
      print ("Panel Sensor  <Δx>(μm)     <Δy>(μm)      Nrefl  RMS Δx(μm)  RMS Δy(μm) ")

      if len(self.cumCALC[isensor]) < 2: continue

      for ipanel in range(8*isensor, 8*(1+isensor)):
        if len(self.panel_deltax[ipanel])<2: continue
        Sx = flex.mean_and_variance(1000.*self.panel_deltax[ipanel])
        Sy = flex.mean_and_variance(1000.*self.panel_deltay[ipanel])
        RMSDx = 1000.*math.sqrt(flex.mean(self.panel_deltax[ipanel]*self.panel_deltax[ipanel]))
        RMSDy = 1000.*math.sqrt(flex.mean(self.panel_deltay[ipanel]*self.panel_deltay[ipanel]))
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(self.panel_deltax[ipanel])),
            "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )
      print("")
      cumD = (self.cumCALC[isensor]-self.cumOBS[isensor]).parts()
      print ( "All  %3d %7.2f        %7.2f        %6d"%(isensor,1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("")

      # Now we'll do a linear least squares refinement over sensors:
      # Method 1. Simple rectilinear translation.
      if self.params.verbose:
        veclength = len(self.cumCALC[isensor])
        correction = flex.vec3_double( veclength, (flex.mean(cumD[0]), flex.mean(cumD[1]), flex.mean(cumD[2])) )

        new_delta = (self.cumCALC[isensor]-correction ) -self.cumOBS[isensor]
        for ipanel in range(8*isensor, 8*(1+isensor)):
          panel_delta = new_delta.select(self.cumPANNO[isensor]==ipanel)
          if len(panel_delta)<2: continue
          deltax_part, deltay_part = panel_delta.parts()[0:2]
          RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
          RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
          Sx = flex.mean_and_variance(1000.*deltax_part)
          Sy = flex.mean_and_variance(1000.*deltay_part)
          print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
          "    %5.1f   %5.1f"%(RMSDx,RMSDy),
          )
        print()
      # Method 2: translation + rotation.
      src = []
      dst = []
      for icoord in range(len(self.cumCALC[isensor])):
        src.append(self.cumCALC[isensor][icoord][0:2])
        dst.append(self.cumOBS[isensor][icoord][0:2])
      src = np.array(src)
      dst = np.array(dst)

      # estimate affine transform model using all coordinates
      model = SimilarityTransform()
      model.estimate(src, dst)

      # robustly estimate affine transform model with RANSAC
      model_robust, inliers = ransac((src, dst), SimilarityTransform, min_samples=3,
                               residual_threshold=2, max_trials=10)
      outliers = flex.bool(inliers == False)

      # compare "true" and estimated transform parameters
      if self.params.verbose:
        print("Similarity transform:")
        print("%2d"%isensor, "Scale: %.5f,"%(model.scale),
        "Translation(μm): (%7.2f,"%(1000.*model.translation[0]),
        "%7.2f),"%(1000.*model.translation[1]),
        "Rotation (°): %7.4f"%((180./math.pi)*model.rotation))
      print("RANSAC:")
      print("%2d"%isensor, "Scale: %.5f,"%(model_robust.scale),
      "Translation(μm): (%7.2f,"%(1000.*model_robust.translation[0]),
      "%7.2f),"%(1000.*model_robust.translation[1]),
      "Rotation (°): %7.4f,"%((180./math.pi)*model_robust.rotation),
      "Outliers:",outliers.count(True)
      )
      """from documentation:
      X = a0 * x - b0 * y + a1 = s * x * cos(rotation) - s * y * sin(rotation) + a1
      Y = b0 * x + a0 * y + b1 = s * x * sin(rotation) + s * y * cos(rotation) + b1"""

      oldCALC = self.cumCALC[isensor].parts()

      ransacCALC = flex.vec3_double(
               (float(model_robust.scale) * oldCALC[0] * math.cos(model_robust.rotation) -
               float(model_robust.scale) * oldCALC[1] * math.sin(model_robust.rotation) +
               float(model_robust.translation[0])),
               (float(model_robust.scale) * oldCALC[0] * math.sin(model_robust.rotation) +
               float(model_robust.scale) * oldCALC[1] * math.cos(model_robust.rotation) +
               float(model_robust.translation[1])),
               oldCALC[2]
               )
      new_delta = ransacCALC - self.cumOBS[isensor]
      inlier_delta = new_delta.select(~outliers)
      inlier_panno = self.cumPANNO[isensor].select(~outliers)

      for ipanel in range(8*isensor, 8*(1+isensor)):
        panel_delta = inlier_delta.select(inlier_panno==ipanel)
        if len(panel_delta)<2: continue
        deltax_part, deltay_part = panel_delta.parts()[0:2]
        RMSDx = 1000.*math.sqrt( flex.mean(deltax_part * deltax_part) )
        RMSDy = 1000.*math.sqrt( flex.mean(deltay_part * deltay_part) )
        Sx = flex.mean_and_variance(1000.*deltax_part)
        Sy = flex.mean_and_variance(1000.*deltay_part)
        print("%3d  %3d"%(ipanel,ipanel//8),"%7.2f±%6.2f %7.2f±%6.2f %6d"%(Sx.mean(),Sx.unweighted_standard_error_of_mean(),
                                                 Sy.mean(),Sy.unweighted_standard_error_of_mean(), len(deltax_part)),
        "    %5.1f   %5.1f"%(RMSDx,RMSDy),
        )

      if self.params.verbose:
        print("")
        cumD = inlier_delta.parts()
        print("     %3d %7.2f        %7.2f        %6d\n" % (isensor, 1000.*flex.mean(cumD[0]), 1000.*flex.mean(cumD[1]), len(cumD[0])))
      print("----\n")
Example #47
0
File: Index.py Project: xia2/xia2
    def run(self, method):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running dials.index')

      self.clear_command_line()
      for f in self._sweep_filenames:
        self.add_command_line(f)
      for f in self._spot_filenames:
        self.add_command_line(f)
      self.add_command_line('indexing.method=%s' % method)
      nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
      self.set_cpu_threads(nproc)
      self.add_command_line('indexing.nproc=%i' % nproc)
      if PhilIndex.params.xia2.settings.small_molecule:
        self.add_command_line('filter_ice=false')
      if self._reflections_per_degree is not None:
        self.add_command_line(
          'reflections_per_degree=%i' %self._reflections_per_degree)
      if self._fft3d_n_points is not None:
        self.add_command_line(
          'fft3d.reciprocal_space_grid.n_points=%i' %self._fft3d_n_points)
      if self._close_to_spindle_cutoff is not None:
        self.add_command_line(
          'close_to_spindle_cutoff=%f' %self._close_to_spindle_cutoff)
      if self._outlier_algorithm:
        self.add_command_line('outlier.algorithm=%s' % self._outlier_algorithm)
      if self._max_cell:
        self.add_command_line('max_cell=%d' % self._max_cell)
      if self._min_cell:
        self.add_command_line('min_cell=%d' % self._min_cell)
      if self._histogram_binning is not None:
        self.add_command_line('max_cell_estimation.histogram_binning=%s' %self._histogram_binning)
      if self._d_min_start:
        self.add_command_line('d_min_start=%f' % self._d_min_start)
      if self._indxr_input_lattice is not None:
        from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
        self._symm = lattice_to_spacegroup_number(
            self._indxr_input_lattice)
        self.add_command_line('known_symmetry.space_group=%s' % self._symm)
      if self._indxr_input_cell is not None:
        self.add_command_line(
          'known_symmetry.unit_cell="%s,%s,%s,%s,%s,%s"' %self._indxr_input_cell)
      if self._maximum_spot_error:
        self.add_command_line('maximum_spot_error=%.f' %
                              self._maximum_spot_error)
      if self._detector_fix:
        self.add_command_line('detector.fix=%s' % self._detector_fix)
      if self._beam_fix:
        self.add_command_line('beam.fix=%s' % self._beam_fix)
      if self._phil_file is not None:
        self.add_command_line("%s" %self._phil_file)

      self._experiment_filename = os.path.join(
        self.get_working_directory(), '%d_experiments.json' %self.get_xpid())
      self._indexed_filename = os.path.join(
        self.get_working_directory(), '%d_indexed.pickle' %self.get_xpid())
      self.add_command_line("output.experiments=%s" %self._experiment_filename)
      self.add_command_line("output.reflections=%s" %self._indexed_filename)

      self.start()
      self.close_wait()
      self.check_for_errors()

      from dials.array_family import flex
      from dxtbx.serialize import load
      self._experiment_list = load.experiment_list(self._experiment_filename)
      self._reflections = flex.reflection_table.from_pickle(
        self._indexed_filename)

      crystal = self._experiment_list.crystals()[0]
      self._p1_cell = crystal.get_unit_cell().parameters()

      refined_sel = self._reflections.get_flags(self._reflections.flags.used_in_refinement)
      refl = self._reflections.select(refined_sel)
      xc, yc, zc = refl['xyzcal.px'].parts()
      xo, yo, zo = refl['xyzobs.px.value'].parts()
      import math
      self._nref = refl.size()
      self._rmsd_x = math.sqrt(flex.mean(flex.pow2(xc - xo)))
      self._rmsd_y = math.sqrt(flex.mean(flex.pow2(yc - yo)))
      self._rmsd_z = math.sqrt(flex.mean(flex.pow2(zc - zo)))

      return
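
The RMSD bookkeeping at the end of run() reduces to one formula per axis; a standalone sketch assuming the DIALS flex API:

import math
from dials.array_family import flex

def axis_rmsd(calc, obs):
    # sqrt(mean((calc - obs)^2)), as computed for _rmsd_x/_rmsd_y/_rmsd_z
    return math.sqrt(flex.mean(flex.pow2(calc - obs)))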
Example #48
0
def integrate(experiment):
    from dials.algorithms.spot_prediction import PixelToMillerIndex
    from dials.array_family import flex
    from math import floor, sqrt
    from collections import defaultdict

    detector = experiment.detector
    assert len(detector) == 1
    panel = detector[0]

    xsize, ysize = panel.get_image_size()

    transform = PixelToMillerIndex(experiment.beam, experiment.detector,
                                   experiment.crystal)

    data = experiment.imageset.get_raw_data(0)[0]

    mask = flex.bool(flex.grid(ysize, xsize), False)
    reflections = defaultdict(list)

    print("Doing pixel labelling")
    for j in range(ysize):
        for i in range(xsize):
            h = transform.h(0, i, j)
            h0 = tuple(map(lambda x: int(floor(x + 0.5)), h))

            d = sqrt(sum(map(lambda x, y: (x - y)**2, h, h0)))
            # if not hasattr(reflections[h0], "xd"):
            #   reflections[h0].xd = d
            #   reflections[h0].xc = i
            #   reflections[h0].yc = j
            # elif reflections[h0].xd > d:
            #   reflections[h0].xd = d
            #   reflections[h0].xc = i
            #   reflections[h0].yc = j

            if d < 0.3:
                mask[j, i] = True
            reflections[h0].append((j, i))

    # from matplotlib import pylab
    # #pylab.imshow(mask.as_numpy_array(), interpolation='none')
    # pylab.show()

    print("Integrating reflections")
    miller_index = flex.miller_index()
    intensity = flex.double()
    variance = flex.double()
    bbox = flex.int6()
    xyz = flex.vec3_double()
    for h, r in reflections.items():

        # xc = r.xc
        # yc = r.yc

        b_sum = 0
        f_sum = 0
        b_cnt = 0
        f_cnt = 0
        for i in range(len(r)):
            y, x = r[i]
            m = mask[y, x]
            if data[y, x] >= 0:
                if m:
                    f_sum += data[y, x]
                    f_cnt += 1
                else:
                    b_sum += data[y, x]
                    b_cnt += 1
        Y, X = zip(*r)
        x0, x1, y0, y1 = min(X), max(X), min(Y), max(Y)
        if f_cnt > 0 and b_cnt > 0:
            B = b_sum / b_cnt
            I = f_sum - B * f_cnt
            V = f_sum + B * (1 + f_cnt / b_cnt)
            miller_index.append(h)
            intensity.append(I)
            variance.append(V)
            bbox.append((x0, x1, y0, y1, 0, 1))
            # xyz.append((xc, yc, 0))

    print("Integrated %d reflections" % len(reflections))
    print(flex.min(intensity), flex.max(intensity), flex.mean(intensity))
    reflections = flex.reflection_table()
    reflections["miller_index"] = miller_index
    reflections["intensity.sum.value"] = intensity
    reflections["intensity.sum.variance"] = variance
    reflections["bbox"] = bbox
    reflections["panel"] = flex.size_t(len(reflections), 0)
    reflections["id"] = flex.size_t(len(reflections), 0)
    # reflections["xyzcal.px"] = xyz
    # reflections["xyzobs.px"] = xyz
    reflections.set_flags(flex.size_t(range(len(reflections))),
                          reflections.flags.integrated_sum)
    return reflections
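
A hedged usage sketch for integrate(); the loader call and file name are assumptions, since the function only requires an experiment whose detector has a single panel:

from dxtbx.model.experiment_list import ExperimentListFactory

experiments = ExperimentListFactory.from_json_file("experiments.json")  # illustrative path
refl = integrate(experiments[0])
print("table holds %d reflections" % len(refl))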
Example #49
0
File: test_index.py Project: jmp1985/dials
def run_indexing(
    reflections,
    experiment,
    working_directory,
    extra_args,
    expected_unit_cell,
    expected_rmsds,
    expected_hall_symbol,
    n_expected_lattices=1,
    relative_length_tolerance=0.005,
    absolute_angle_tolerance=0.5,
):
    commands = ["dials.index"]
    if isinstance(reflections, list):
        commands.extend(reflections)
    else:
        commands.append(reflections)
    if isinstance(experiment, list):
        commands.extend(experiment)
    else:
        commands.append(experiment)
    commands.extend(extra_args)

    result = procrunner.run(commands, working_directory=working_directory)
    assert not result.returncode and not result.stderr

    out_expts = working_directory.join("indexed.expt")
    out_refls = working_directory.join("indexed.refl")
    assert out_expts.check()
    assert out_refls.check()

    experiments_list = load.experiment_list(out_expts.strpath,
                                            check_format=False)
    assert len(experiments_list.crystals()) == n_expected_lattices
    indexed_reflections = flex.reflection_table.from_file(out_refls.strpath)
    indexed_reflections.assert_experiment_identifiers_are_consistent(
        experiments_list)
    rmsds = None

    for i, experiment in enumerate(experiments_list):
        assert unit_cells_are_similar(
            experiment.crystal.get_unit_cell(),
            expected_unit_cell,
            relative_length_tolerance=relative_length_tolerance,
            absolute_angle_tolerance=absolute_angle_tolerance,
        ), (
            experiment.crystal.get_unit_cell().parameters(),
            expected_unit_cell.parameters(),
        )
        sg = experiment.crystal.get_space_group()
        assert sg.type().hall_symbol() == expected_hall_symbol, (
            sg.type().hall_symbol(),
            expected_hall_symbol,
        )
        reflections = indexed_reflections.select(
            indexed_reflections["id"] == i)
        mi = reflections["miller_index"]
        assert (mi != (0, 0, 0)).count(False) == 0
        reflections = reflections.select(mi != (0, 0, 0))
        reflections = reflections.select(
            reflections.get_flags(reflections.flags.used_in_refinement))
        assert len(reflections) > 0
        obs_x, obs_y, obs_z = reflections["xyzobs.mm.value"].parts()
        calc_x, calc_y, calc_z = reflections["xyzcal.mm"].parts()
        rmsd_x = flex.mean(flex.pow2(obs_x - calc_x))**0.5
        rmsd_y = flex.mean(flex.pow2(obs_y - calc_y))**0.5
        rmsd_z = flex.mean(flex.pow2(obs_z - calc_z))**0.5
        rmsds = (rmsd_x, rmsd_y, rmsd_z)
        for actual, expected in zip(rmsds, expected_rmsds):
            assert actual <= expected, "%s %s" % (rmsds, expected_rmsds)
        assert experiment.identifier != ""
        expt = ExperimentList()
        expt.append(experiment)
        reflections.assert_experiment_identifiers_are_consistent(expt)

    return _indexing_result(indexed_reflections, experiments_list, rmsds)
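
A hypothetical pytest-style invocation; file names, unit cell, tolerances, and Hall symbol are illustrative, and tmpdir supplies the working_directory.join interface used above:

from cctbx import uctbx

def test_indexing_runs(tmpdir):
    run_indexing(
        "strong.refl",
        "imported.expt",
        tmpdir,
        extra_args=["indexing.method=fft3d"],
        expected_unit_cell=uctbx.unit_cell((57.8, 57.8, 150.0, 90, 90, 90)),
        expected_rmsds=(0.06, 0.05, 0.0004),
        expected_hall_symbol=" P 4",
    )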
Example #50
0
    def __del__(self):
        values = self.parameterization(self.x)
        print("FINALMODEL", end=" ", file=self.out)
        print("rms %10.3f" % math.sqrt(flex.mean(self.func * self.func)), end=" ", file=self.out)
        values.show(self.out)
Example #51
0
    def plot_one_model(self, nrow, out):
        fig = plt.subplot(self.gs[nrow * self.ncols])
        two_thetas = self.reduction.get_two_theta_deg()
        degrees = self.reduction.get_delta_psi_deg()

        if self.color_encoding == "conventional":
            positive = (self.reduction.i_sigi >= 0.)
            fig.plot(two_thetas.select(positive), degrees.select(positive),
                     "bo")
            fig.plot(two_thetas.select(~positive), degrees.select(~positive),
                     "r+")
        elif self.color_encoding == "I/sigma":
            positive = (self.reduction.i_sigi >= 0.)
            tt_selected = two_thetas.select(positive)
            dp_selected = degrees.select(positive)
            i_sigi_select = self.reduction.i_sigi.select(positive)
            order = flex.sort_permutation(i_sigi_select)
            tt_selected = tt_selected.select(order)
            dp_selected = dp_selected.select(order)
            i_sigi_selected = i_sigi_select.select(order)
            from matplotlib.colors import Normalize
            dnorm = Normalize()
            dcolors = i_sigi_selected.as_numpy_array()
            dnorm.autoscale(dcolors)
            N = len(dcolors)
            CMAP = plt.get_cmap("rainbow")
            if self.refined.get("partiality_array", None) is None:
                for n in range(N):
                    fig.plot([tt_selected[n]], [dp_selected[n]],
                             color=CMAP(dnorm(dcolors[n])),
                             marker=".",
                             markersize=10)
            else:
                partials = self.refined.get("partiality_array")
                partials_select = partials.select(positive)
                partials_selected = partials_select.select(order)
                assert len(partials) == len(positive)
                for n in range(N):
                    fig.plot([tt_selected[n]], [dp_selected[n]],
                             color=CMAP(dnorm(dcolors[n])),
                             marker=".",
                             markersize=20 * partials_selected[n])
                    # change the markersize to indicate partiality.
            negative = (self.reduction.i_sigi < 0.)
            fig.plot(two_thetas.select(negative),
                     degrees.select(negative),
                     "r+",
                     linewidth=1)
        else:
            strong = (self.reduction.i_sigi >= 10.)
            positive = ((~strong) & (self.reduction.i_sigi >= 0.))
            negative = (self.reduction.i_sigi < 0.)
            assert (strong.count(True) + positive.count(True) +
                    negative.count(True) == len(self.reduction.i_sigi))
            fig.plot(two_thetas.select(positive), degrees.select(positive),
                     "bo")
            fig.plot(two_thetas.select(strong),
                     degrees.select(strong),
                     marker='.',
                     linestyle='None',
                     markerfacecolor='#00ee00',
                     markersize=10)
            fig.plot(two_thetas.select(negative), degrees.select(negative),
                     "r+")

        # indicate the imposed resolution filter
        wavelength = self.reduction.experiment.beam.get_wavelength()
        imposed_res_filter = self.reduction.get_imposed_res_filter(out)
        resolution_markers = [
            a
            for a in [imposed_res_filter,
                      self.reduction.measurements.d_min()] if a is not None
        ]
        for RM in resolution_markers:
            two_th = (180. / math.pi) * 2. * math.asin(wavelength / (2. * RM))
            plt.plot([two_th, two_th],
                     [self.AD1TF7B_MAXDP * -0.8, self.AD1TF7B_MAXDP * 0.8],
                     'k-')
            plt.text(two_th, self.AD1TF7B_MAXDP * -0.9, "%4.2f" % RM)

        # indicate the line fit
        mean = flex.mean(degrees)
        minplot = flex.min(two_thetas)
        plt.plot([0, minplot], [mean, mean], "k-")
        LR = flex.linear_regression(two_thetas, degrees)
        model_y = LR.slope() * two_thetas + LR.y_intercept()
        plt.plot(two_thetas, model_y, "k-")

        # Now let's take care of the red and green lines.
        half_mosaic_rotation_deg = self.refined["half_mosaic_rotation_deg"]
        mosaic_domain_size_ang = self.refined["mosaic_domain_size_ang"]
        red_curve_domain_size_ang = self.refined.get(
            "red_curve_domain_size_ang", mosaic_domain_size_ang)
        a_step = self.AD1TF7B_MAX2T / 50.
        a_range = flex.double([a_step * x for x in range(1, 50)])  # domain two-theta array
        #Bragg law [d=L/2sinTH]
        d_spacing = (wavelength / (2. * flex.sin(math.pi * a_range / 360.)))
        # convert two_theta to a delta-psi.  Formula for Deffective [Dpsi=d/2Deff]
        inner_phi_deg = flex.asin(
            (d_spacing / (2. * red_curve_domain_size_ang))) * (180. / math.pi)
        outer_phi_deg = flex.asin(
            (d_spacing / (2. * mosaic_domain_size_ang)) +
            half_mosaic_rotation_deg * math.pi / 180.) * (180. / math.pi)
        plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots\n%s" %
                  (2. * half_mosaic_rotation_deg, mosaic_domain_size_ang,
                   len(two_thetas), os.path.basename(self.reduction.filename)))
        plt.plot(a_range, inner_phi_deg, "r-")
        plt.plot(a_range, -inner_phi_deg, "r-")
        plt.plot(a_range, outer_phi_deg, "g-")
        plt.plot(a_range, -outer_phi_deg, "g-")
        plt.xlim([0, self.AD1TF7B_MAX2T])
        plt.ylim([-self.AD1TF7B_MAXDP, self.AD1TF7B_MAXDP])

        # second plot shows histogram
        fig = plt.subplot(self.gs[1 + nrow * self.ncols])
        plt.xlim([-self.AD1TF7B_MAXDP, self.AD1TF7B_MAXDP])
        nbins = 50
        # Re-derive matched value/weight pairs here so this works for every
        # color_encoding branch: dp_selected is otherwise only defined in the
        # "I/sigma" branch, where its reordering would also break the pairing
        # with the unsorted weights below.
        dp_selected = degrees.select(positive)
        isi_positive = self.reduction.i_sigi.select(positive)
        n, bins, patches = plt.hist(
            dp_selected,
            nbins,
            range=(-self.AD1TF7B_MAXDP, self.AD1TF7B_MAXDP),
            weights=isi_positive,
            density=False,  # "normed" was removed from matplotlib
            facecolor="orange",
            alpha=0.75)
        # ersatz "median": actually the 90th-percentile I/sigma cut, keeping
        # only the strongest ~10% of reflections in the red histogram
        isi_order = flex.sort_permutation(isi_positive)
        reordered = isi_positive.select(isi_order)
        isi_median = reordered[int(len(isi_positive) * 0.9)]
        isi_top_half_selection = (isi_positive > isi_median)
        n, bins, patches = plt.hist(
            dp_selected.select(isi_top_half_selection),
            nbins,
            range=(-self.AD1TF7B_MAXDP, self.AD1TF7B_MAXDP),
            weights=isi_positive.select(isi_top_half_selection),
            density=False,
            facecolor="#ff0000",
            alpha=0.75)
        plt.xlabel("(degrees)")
        plt.title("Weighted histogram of Delta-psi")
Example #52
0
File: show.py Project: kmdalton/dials
def show_reflections(
    reflections,
    show_intensities=False,
    show_profile_fit=False,
    show_centroids=False,
    show_all_reflection_data=False,
    show_flags=False,
    max_reflections=None,
    show_identifiers=False,
):

    text = []

    from orderedset import OrderedSet

    formats = {
        "miller_index": "%i, %i, %i",
        "d": "%.2f",
        "qe": "%.3f",
        "dqe": "%.3f",
        "id": "%i",
        "imageset_id": "%i",
        "panel": "%i",
        "flags": "%i",
        "background.mean": "%.1f",
        "background.dispersion": "%.1f",
        "background.mse": "%.1f",
        "background.sum.value": "%.1f",
        "background.sum.variance": "%.1f",
        "intensity.prf.value": "%.1f",
        "intensity.prf.variance": "%.1f",
        "intensity.sum.value": "%.1f",
        "intensity.sum.variance": "%.1f",
        "intensity.cor.value": "%.1f",
        "intensity.cor.variance": "%.1f",
        "intensity.scale.value": "%.1f",
        "intensity.scale.variance": "%.1f",
        "Ih_values": "%.1f",
        "lp": "%.3f",
        "num_pixels.background": "%i",
        "num_pixels.background_used": "%i",
        "num_pixels.foreground": "%i",
        "num_pixels.valid": "%i",
        "partial_id": "%i",
        "partiality": "%.4f",
        "profile.correlation": "%.3f",
        "profile.rmsd": "%.3f",
        "xyzcal.mm": "%.2f, %.2f, %.2f",
        "xyzcal.px": "%.2f, %.2f, %.2f",
        "delpsical.rad": "%.3f",
        "delpsical2": "%.3f",
        "delpsical.weights": "%.3f",
        "xyzobs.mm.value": "%.2f, %.2f, %.2f",
        "xyzobs.mm.variance": "%.4e, %.4e, %.4e",
        "xyzobs.px.value": "%.2f, %.2f, %.2f",
        "xyzobs.px.variance": "%.4f, %.4f, %.4f",
        "s1": "%.4f, %.4f, %.4f",
        "s2": "%.4f, %.4f, %.4f",
        "shoebox": "%.1f",
        "rlp": "%.4f, %.4f, %.4f",
        "zeta": "%.3f",
        "x_resid": "%.3f",
        "x_resid2": "%.3f",
        "y_resid": "%.3f",
        "y_resid2": "%.3f",
        "kapton_absorption_correction": "%.3f",
        "kapton_absorption_correction_sigmas": "%.3f",
        "inverse_scale_factor": "%.3f",
        "inverse_scale_factor_variance": "%.3f",
    }

    for rlist in reflections:
        from dials.algorithms.shoebox import MaskCode

        foreground_valid = MaskCode.Valid | MaskCode.Foreground
        text.append("")
        text.append(f"Reflection list contains {len(rlist)} reflections")

        if len(rlist) == 0:
            continue

        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if k in formats and "%" not in formats.get(k, "%s"):
                # Allow blanking out of entries that wouldn't make sense
                rows.append([
                    k,
                    formats.get(k, "%s"),
                    formats.get(k, "%s"),
                    formats.get(k, "%s"),
                ])
            elif type(col) in (flex.double, flex.int, flex.size_t):
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append([
                    k,
                    formats.get(k, "%s") % flex.min(col),
                    formats.get(k, "%s") % flex.max(col),
                    formats.get(k, "%s") % flex.mean(col),
                ])
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if isinstance(col, flex.miller_index):
                    col = col.as_vec3_double()
                rows.append([
                    k,
                    formats.get(k, "%s") % col.min(),
                    formats.get(k, "%s") % col.max(),
                    formats.get(k, "%s") % col.mean(),
                ])
            elif isinstance(col, flex.shoebox):
                rows.append([k, "", "", ""])
                si = col.summed_intensity().observed_value()
                rows.append([
                    "  summed I",
                    formats.get(k, "%s") % flex.min(si),
                    formats.get(k, "%s") % flex.max(si),
                    formats.get(k, "%s") % flex.mean(si),
                ])
                x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                rows.append([
                    "  N pix",
                    formats.get(k, "%s") % flex.min(bbox_sizes),
                    formats.get(k, "%s") % flex.max(bbox_sizes),
                    formats.get(k, "%s") % flex.mean(bbox_sizes),
                ])
                fore_valid = col.count_mask_values(
                    foreground_valid).as_double()
                rows.append([
                    "  N valid foreground pix",
                    formats.get(k, "%s") % flex.min(fore_valid),
                    formats.get(k, "%s") % flex.max(fore_valid),
                    formats.get(k, "%s") % flex.mean(fore_valid),
                ])

        text.append(tabulate(rows, headers="firstrow"))

        if show_flags:
            text.append(_create_flag_count_table(rlist))

        if show_identifiers:
            if rlist.experiment_identifiers():
                text.append(
                    """Experiment identifiers id-map values:\n%s""" %
                    ("\n".join(
                        "id:" + str(k) + " -> experiment identifier:" +
                        str(rlist.experiment_identifiers()[k])
                        for k in rlist.experiment_identifiers().keys())))

    intensity_keys = (
        "miller_index",
        "d",
        "intensity.prf.value",
        "intensity.prf.variance",
        "intensity.sum.value",
        "intensity.sum.variance",
        "background.mean",
        "profile.correlation",
        "profile.rmsd",
    )

    profile_fit_keys = ("miller_index", "d")

    centroid_keys = (
        "miller_index",
        "d",
        "xyzcal.mm",
        "xyzcal.px",
        "xyzobs.mm.value",
        "xyzobs.mm.variance",
        "xyzobs.px.value",
        "xyzobs.px.variance",
    )

    keys_to_print = OrderedSet()

    if show_intensities:
        for k in intensity_keys:
            keys_to_print.add(k)
    if show_profile_fit:
        for k in profile_fit_keys:
            keys_to_print.add(k)
    if show_centroids:
        for k in centroid_keys:
            keys_to_print.add(k)
    if show_all_reflection_data:
        for k in formats:
            keys_to_print.add(k)

    def format_column(key, data, format_strings=None):
        if isinstance(data, flex.vec3_double):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.parts())
            ]
        elif isinstance(data, flex.miller_index):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.as_vec3_double().parts())
            ]
        elif isinstance(data, flex.size_t):
            c_strings = [data.as_int().as_string(format_strings[0].strip())]
        elif isinstance(data, flex.shoebox):
            x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
            bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
            c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
            key += " (N pix)"
        else:
            c_strings = [data.as_string(format_strings[0].strip())]

        column = flex.std_string()
        max_element_lengths = [c.max_element_length() for c in c_strings]
        for i in range(len(c_strings[0])):

            column.append(f"%{len(key)}s" % ", ".join(
                ("%%%is" % max_element_lengths[j]) % c_strings[j][i]
                for j in range(len(c_strings))))
        return column

    if keys_to_print:
        keys = [k for k in keys_to_print if k in rlist]
        if max_reflections is not None:
            max_reflections = min(len(rlist), max_reflections)
        else:
            max_reflections = len(rlist)

        columns = []

        for k in keys:
            columns.append(
                format_column(k,
                              rlist[k],
                              format_strings=formats[k].split(",")))

        text.append("")
        text.append("Printing %i of %i reflections:" %
                    (max_reflections, len(rlist)))
        line = []
        for j in range(len(columns)):
            key = keys[j]
            if key == "shoebox":
                key += " (N pix)"
            width = max(len(key), columns[j].max_element_length())
            line.append("%%%is" % width % key)
        text.append(" ".join(line))
        for i in range(max_reflections):
            line = (c[i] for c in columns)
            text.append(" ".join(line))

    return "\n".join(text)
Example #53
0
    def _create_summation_matrix(self):
        """ "Create a summation matrix to allow sums into intensity bins.

        This routine attempts to bin into bins equally spaced in log(intensity),
        to give a representative sample across all intensities. To avoid
        undersampling, it is required that there are at least 100 reflections
        per intensity bin unless there are very few reflections."""
        n = self.Ih_table.size
        self.binning_info["n_reflections"] = n
        summation_matrix = sparse.matrix(n, self.n_bins)
        Ih = self.Ih_table.Ih_values * self.Ih_table.inverse_scale_factors
        size_order = flex.sort_permutation(Ih, reverse=True)
        Imax = max(Ih)
        Imin = max(1.0, min(Ih))  # avoid log issues
        spacing = (log(Imax) - log(Imin)) / float(self.n_bins)
        boundaries = [Imax] + [
            exp(log(Imax) - (i * spacing)) for i in range(1, self.n_bins + 1)
        ]
        boundaries[-1] = min(Ih) - 0.01
        self.binning_info["bin_boundaries"] = boundaries
        self.binning_info["refl_per_bin"] = flex.double()

        n_cumul = 0
        if Ih.size() > 100 * self.min_reflections_required:
            self.min_reflections_required = int(Ih.size() / 100.0)
        min_per_bin = min(self.min_reflections_required,
                          int(n / (3.0 * self.n_bins)))
        for i in range(len(boundaries) - 1):
            maximum = boundaries[i]
            minimum = boundaries[i + 1]
            sel1 = Ih <= maximum
            sel2 = Ih > minimum
            sel = sel1 & sel2
            isel = sel.iselection()
            n_in_bin = isel.size()
            if n_in_bin < min_per_bin:  # need more in this bin
                m = n_cumul + min_per_bin
                if m < n:  # still some refl left to use
                    idx = size_order[m]
                    intensity = Ih[idx]
                    boundaries[i + 1] = intensity
                    minimum = boundaries[i + 1]
                    sel = sel1 & (Ih > minimum)
                    isel = sel.iselection()
                    n_in_bin = isel.size()
            self.binning_info["refl_per_bin"].append(n_in_bin)
            for j in isel:
                summation_matrix[j, i] = 1
            n_cumul += n_in_bin
        cols_to_del = []
        for i, col in enumerate(summation_matrix.cols()):
            if col.non_zeroes < min_per_bin - 5:
                cols_to_del.append(i)
        n_new_cols = summation_matrix.n_cols - len(cols_to_del)
        if n_new_cols == self.n_bins:
            for i in range(len(boundaries) - 1):
                maximum = boundaries[i]
                minimum = boundaries[i + 1]
                sel1 = Ih <= maximum
                sel2 = Ih > minimum
                sel = sel1 & sel2
                m = flex.mean(Ih.select(sel))
                self.binning_info["mean_intensities"].append(m)
            return summation_matrix
        new_sum_matrix = sparse.matrix(summation_matrix.n_rows, n_new_cols)
        next_col = 0
        refl_per_bin = flex.double()
        new_bounds = []
        for i, col in enumerate(summation_matrix.cols()):
            if i not in cols_to_del:
                new_sum_matrix[:, next_col] = col
                next_col += 1
                new_bounds.append(boundaries[i])
                refl_per_bin.append(self.binning_info["refl_per_bin"][i])
        self.binning_info["refl_per_bin"] = refl_per_bin
        new_bounds.append(boundaries[-1])
        self.binning_info["bin_boundaries"] = new_bounds
        for i in range(len(new_bounds) - 1):
            maximum = new_bounds[i]
            minimum = new_bounds[i + 1]
            sel1 = Ih <= maximum
            sel2 = Ih > minimum
            sel = sel1 & sel2
            m = flex.mean(Ih.select(sel))
            self.binning_info["mean_intensities"].append(m)
        return new_sum_matrix
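
The boundary construction above in isolation: a minimal sketch producing n_bins edges descending from Imax, equally spaced in log(I):

from math import exp, log

def log_bin_boundaries(Imax, Imin, n_bins):
    Imin = max(1.0, Imin)  # avoid log issues, as in the class above
    spacing = (log(Imax) - log(Imin)) / float(n_bins)
    return [Imax] + [exp(log(Imax) - i * spacing) for i in range(1, n_bins + 1)]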
Example #54
0
File: Index.py Project: BlenderCN-Org/xia2
        def run(self, method):
            from xia2.Handlers.Streams import Debug

            Debug.write("Running dials.index")

            self.clear_command_line()
            for f in self._sweep_filenames:
                self.add_command_line(f)
            for f in self._spot_filenames:
                self.add_command_line(f)
            if len(self._sweep_filenames) > 1:
                self.add_command_line("auto_reduction.action=fix")
            self.add_command_line("indexing.method=%s" % method)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            self.set_cpu_threads(nproc)
            self.add_command_line("indexing.nproc=%i" % nproc)
            if PhilIndex.params.xia2.settings.small_molecule:
                self.add_command_line("filter_ice=false")
            if self._reflections_per_degree is not None:
                self.add_command_line("reflections_per_degree=%i" %
                                      self._reflections_per_degree)
            if self._fft3d_n_points is not None:
                self.add_command_line(
                    "fft3d.reciprocal_space_grid.n_points=%i" %
                    self._fft3d_n_points)
            if self._close_to_spindle_cutoff is not None:
                self.add_command_line("close_to_spindle_cutoff=%f" %
                                      self._close_to_spindle_cutoff)
            if self._outlier_algorithm:
                self.add_command_line("outlier.algorithm=%s" %
                                      self._outlier_algorithm)
            if self._max_cell:
                self.add_command_line("max_cell=%g" % self._max_cell)
            if self._max_cell_max_height_fraction is not None:
                self.add_command_line("max_height_fraction=%g" %
                                      self._max_cell_max_height_fraction)
            if self._min_cell:
                self.add_command_line("min_cell=%d" % self._min_cell)
            if self._histogram_binning is not None:
                self.add_command_line(
                    "max_cell_estimation.histogram_binning=%s" %
                    self._histogram_binning)
            if self._nearest_neighbor_percentile is not None:
                self.add_command_line(
                    "max_cell_estimation.nearest_neighbor_percentile=%s" %
                    self._nearest_neighbor_percentile)
            if self._d_min_start:
                self.add_command_line("d_min_start=%f" % self._d_min_start)
            if self._indxr_input_lattice is not None:
                from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number

                self._symm = lattice_to_spacegroup_number(
                    self._indxr_input_lattice)
                self.add_command_line("known_symmetry.space_group=%s" %
                                      self._symm)
            if self._indxr_input_cell is not None:
                self.add_command_line(
                    'known_symmetry.unit_cell="%s,%s,%s,%s,%s,%s"' %
                    self._indxr_input_cell)
            if self._maximum_spot_error:
                self.add_command_line("maximum_spot_error=%.f" %
                                      self._maximum_spot_error)
            if self._detector_fix:
                self.add_command_line("detector.fix=%s" % self._detector_fix)
            if self._beam_fix:
                self.add_command_line("beam.fix=%s" % self._beam_fix)
            if self._phil_file is not None:
                self.add_command_line("%s" % self._phil_file)

            self._experiment_filename = os.path.join(
                self.get_working_directory(),
                "%d_indexed.expt" % self.get_xpid())
            self._indexed_filename = os.path.join(
                self.get_working_directory(),
                "%d_indexed.refl" % self.get_xpid())
            self.add_command_line("output.experiments=%s" %
                                  self._experiment_filename)
            self.add_command_line("output.reflections=%s" %
                                  self._indexed_filename)

            self.start()
            self.close_wait()

            if not os.path.isfile(
                    self._experiment_filename) or not os.path.isfile(
                        self._indexed_filename):
                # Indexing failed
                with open(self.get_log_file(), "r") as fh:
                    if "No suitable lattice could be found" in fh.read():
                        raise libtbx.utils.Sorry(
                            "No suitable indexing solution could be found.\n\n"
                            "You can view the reciprocal space with:\n"
                            "dials.reciprocal_lattice_viewer %s" % " ".join(
                                os.path.normpath(
                                    os.path.join(self.get_working_directory(),
                                                 p))
                                for p in self._sweep_filenames +
                                self._spot_filenames))
                    else:
                        raise RuntimeError(
                            "dials.index failed, see log file for more details: %s"
                            % self.get_log_file())

            self.check_for_errors()

            for record in self.get_all_output():
                if "Too few reflections to parameterise" in record:
                    Debug.write(record.strip())

            from dials.array_family import flex
            from dxtbx.serialize import load

            self._experiment_list = load.experiment_list(
                self._experiment_filename)
            self._reflections = flex.reflection_table.from_file(
                self._indexed_filename)

            crystal = self._experiment_list.crystals()[0]
            self._p1_cell = crystal.get_unit_cell().parameters()

            refined_sel = self._reflections.get_flags(
                self._reflections.flags.used_in_refinement)
            refl = self._reflections.select(refined_sel)
            xc, yc, zc = refl["xyzcal.px"].parts()
            xo, yo, zo = refl["xyzobs.px.value"].parts()
            import math

            self._nref = refl.size()
            self._rmsd_x = math.sqrt(flex.mean(flex.pow2(xc - xo)))
            self._rmsd_y = math.sqrt(flex.mean(flex.pow2(yc - yo)))
            self._rmsd_z = math.sqrt(flex.mean(flex.pow2(zc - zo)))
Example #55
0
  def __call__(self):
    """Determine optimal mosaicity and domain size model (monochromatic)"""
    RR = self.refinery.predict_for_reflection_table(self.reflections)
    excursion_rad = RR["delpsical.rad"]
    delta_psi_deg = excursion_rad * 180./math.pi
    print()
    print(flex.max(delta_psi_deg), flex.min(delta_psi_deg))
    mean_excursion = flex.mean(delta_psi_deg)
    print("The mean excursion is %7.3f degrees, r.m.s.d. %7.3f" % (mean_excursion, math.sqrt(flex.mean(RR["delpsical2"]))))

    crystal = self.experiments[0].crystal
    beam = self.experiments[0].beam
    miller_indices = self.reflections["miller_index"]

    # FIXME XXX revise this formula so as to use a different wavelength potentially for each reflection
    two_thetas = crystal.get_unit_cell().two_theta(miller_indices,beam.get_wavelength(),deg=True)
    dspacings = crystal.get_unit_cell().d(miller_indices)
    dspace_sq = dspacings * dspacings

    # First -- try to get a reasonable envelope for the observed excursions:
    # at least three bins, at most 50, aiming for ~25 measurements per bin.
    print("fitting parameters on %d spots" % len(excursion_rad))
    n_bins = min(max(3, len(excursion_rad)//25), 50)
    bin_sz = len(excursion_rad)//n_bins
    print("nbins", n_bins, "bin_sz", bin_sz)
    order = flex.sort_permutation(two_thetas)
    two_thetas_env = flex.double()
    dspacings_env = flex.double()
    excursion_rads_env = flex.double()
    for x in range(n_bins):
      subset = order[x*bin_sz:(x+1)*bin_sz]
      two_thetas_env.append(flex.mean(two_thetas.select(subset)))
      dspacings_env.append(flex.mean(dspacings.select(subset)))
      excursion_rads_env.append(flex.max(flex.abs(excursion_rad.select(subset))))

    # Second -- parameter fit: solve the normal equations
    sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
    sum_inv_u    = flex.sum(dspacings_env)
    sum_te_u     = flex.sum(dspacings_env * excursion_rads_env)
    sum_te       = flex.sum(excursion_rads_env)
    Normal_Mat   = sqr((sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
    Vector       = col((sum_te_u, sum_te))
    solution     = Normal_Mat.inverse() * Vector
    s_ang = 1./(2*solution[0])
    print "Best LSQ fit Scheerer domain size is %9.2f ang"%(
      s_ang)

    tan_phi_rad = dspacings / (2. * s_ang)
    tan_phi_deg = tan_phi_rad * 180./math.pi
    k_degrees = solution[1]* 180./math.pi
    print "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f"%(2*k_degrees, k_degrees)
    tan_outer_deg = tan_phi_deg + k_degrees

    from xfel.mono_simulation.max_like import minimizer
    # coerce the estimates to be positive for max-likelihood
    lower_limit_domain_size = math.pow(
        crystal.get_unit_cell().volume(), 1./3.) * 3  # params.refinement.domain_size_lower_limit

    d_estimate = max(s_ang, lower_limit_domain_size)
    M = minimizer(d_i = dspacings, psi_i = excursion_rad, eta_rad = abs(2. * solution[1]),
                  Deff = d_estimate)
    print "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots"%(M.x[1]*180./math.pi, 2./M.x[0], len(two_thetas))
    tan_phi_rad_ML = dspacings / (2. / M.x[0])
    tan_phi_deg_ML = tan_phi_rad_ML * 180./math.pi
    tan_outer_deg_ML = tan_phi_deg_ML + 0.5*M.x[1]*180./math.pi

    self.nv_acceptance_flags = flex.abs(delta_psi_deg) < tan_outer_deg_ML

    if self.graph_verbose:  # params.refinement.mosaic.enable_AD14F7B: excursion vs resolution fit
      AD1TF7B_MAX2T = 30.
      AD1TF7B_MAXDP = 1.
      from matplotlib import pyplot as plt
      plt.plot(two_thetas, delta_psi_deg, "bo")
      minplot = flex.min(two_thetas)
      plt.plot([0,minplot],[mean_excursion,mean_excursion],"k-")
      LR = flex.linear_regression(two_thetas, delta_psi_deg)
      model_y = LR.slope()*two_thetas + LR.y_intercept()
      plt.plot(two_thetas, model_y, "k-")

      plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots"%(M.x[1]*180./math.pi, 2./M.x[0], len(two_thetas)))
      plt.plot(two_thetas, tan_phi_deg_ML, "r.")
      plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
      plt.plot(two_thetas, tan_outer_deg_ML, "g.")
      plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
      plt.xlim([0,AD1TF7B_MAX2T])
      plt.ylim([-AD1TF7B_MAXDP,AD1TF7B_MAXDP])
      plt.show()
      plt.close()

    from xfel.mono_simulation.util import green_curve_area
    self.green_curve_area = green_curve_area(two_thetas, tan_outer_deg_ML)
    print "The green curve area is ", self.green_curve_area

    crystal._ML_half_mosaicity_deg = M.x[1]*180./(2.*math.pi)
    crystal._ML_domain_size_ang = 2./M.x[0]
    self._ML_full_mosaicity_rad = M.x[1]
    self._ML_domain_size_ang = 2./M.x[0]

    # params.refinement.mosaic.model_expansion_factor:
    # The expansion factor should initially be set to 1, then expanded so that
    # the number of reflections matched becomes as close as possible to the
    # number of observed reflections input, in the last integration call.
    # Determine this by inspecting the output log file interactively.  Do not
    # exceed the bare minimum threshold needed.  The intention is to find an
    # optimal value, global for a given dataset.
    model_expansion_factor = 1.4
    crystal._ML_half_mosaicity_deg *= model_expansion_factor
    crystal._ML_domain_size_ang /= model_expansion_factor

    return crystal
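
The envelope fit above solves a 2x2 normal-equations system for psi(d) = a*d + b, where a yields the Scheerer domain size (Deff = 1/(2a)) and b the half-mosaicity in radians; a standalone sketch using the same scitbx types:

from scitbx.matrix import col, sqr

def fit_envelope(d_env, psi_env):
    # normal equations for the straight-line fit psi = a*d + b
    su2 = sum(d * d for d in d_env)
    su = sum(d_env)
    stu = sum(d * p for d, p in zip(d_env, psi_env))
    st = sum(psi_env)
    solution = sqr((su2, su, su, len(d_env))).inverse() * col((stu, st))
    return solution[0], solution[1]  # (a, b)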