Example #1
    def calculate_gold3d(cls):
        from scitbx.array_family import flex
        from scitbx import matrix

        f_tot = 0.0
        r_tot = 0.0
        c_tot = 0.0
        d_tot = 0.0

        for (f, r, c), d in zip(cls.points3d, cls.pixels3d):
            f_tot += d * f
            r_tot += d * r
            c_tot += d * c
            d_tot += d

        cls.gold3d = matrix.col((f_tot / d_tot, r_tot / d_tot, c_tot / d_tot))

        _f, _r, _c = cls.gold3d

        f_tot = 0.0
        r_tot = 0.0
        c_tot = 0.0

        for (f, r, c), d in zip(cls.points3d, cls.pixels3d):
            f_tot += d * (f - _f) ** 2
            r_tot += d * (r - _r) ** 2
            c_tot += d * (c - _c) ** 2

        _sf = f_tot / d_tot
        _sr = r_tot / d_tot
        _sc = c_tot / d_tot

        cls.gold3dvar = matrix.col((_sf, _sr, _sc))

        # f_tot = 0.0
        # r_tot = 0.0
        # c_tot = 0.0

        # for (f, r, c), d in zip(cls.points3d, cls.pixels3d):
        #  f_tot += d * (f - _f) ** 2
        #  r_tot += d * (r - _r) ** 2
        #  c_tot += d * (c - _c) ** 2

        # _sf = f_tot / (d_tot-1)
        # _sr = r_tot / (d_tot-1)
        # _sc = c_tot / (d_tot-1)

        # cls.gold3dubvar = matrix.col((_sf, _sr, _sc))

        pixel_x, pixel_y, pixel_z = zip(*cls.points3d)
        xc = flex.mean_and_variance(flex.double(pixel_x), cls.pixels3d.as_1d())
        yc = flex.mean_and_variance(flex.double(pixel_y), cls.pixels3d.as_1d())
        zc = flex.mean_and_variance(flex.double(pixel_z), cls.pixels3d.as_1d())
        cls.gold3dubvar = matrix.col(
            (
                xc.gsl_stats_wvariance(),
                yc.gsl_stats_wvariance(),
                zc.gsl_stats_wvariance(),
            )
        )
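
For reference, the two estimates above differ only in their normalisation: the hand-rolled loops divide by d_tot (a biased, frequency-weighted variance), while gsl_stats_wvariance() applies GSL's V1/(V1^2 - V2) correction for an unbiased weighted estimate. A minimal pure-Python sketch of both (no scitbx required; the GSL formula here is quoted from the GSL documentation, not from this code):

def weighted_variances(values, weights):
    # biased: divide the weighted sum of squares by V1 = sum(w), as the loops above do;
    # unbiased: scale by V1 / (V1**2 - V2) with V2 = sum(w**2), the GSL-style correction
    v1 = sum(weights)
    v2 = sum(w * w for w in weights)
    mean = sum(w * x for x, w in zip(values, weights)) / v1
    ss = sum(w * (x - mean) ** 2 for x, w in zip(values, weights))
    return ss / v1, ss * v1 / (v1 * v1 - v2)

print(weighted_variances([1.0, 2.0, 4.0], [1.0, 1.0, 1.0]))  # unit weights: divide by n vs. n - 1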
Example #2
  def estimate_cc_true(self):

    # A1.2. Estimation of E(CC; S).

    # (i)

    var_intensities = flex.mean_and_variance(
      self.intensities.data()).unweighted_sample_variance()
    var_sigmas = flex.mean_and_variance(
      flex.pow2(self.intensities.sigmas())).mean()
    self.E_cc_true = var_intensities/(var_intensities + var_sigmas)

    # (ii)

    reindexed_intensities = self.intensities.change_basis(
      sgtbx.change_of_basis_op('-x,-y,-z')).map_to_asu()
    x, y = self.intensities.common_sets(
      reindexed_intensities, assert_is_similar_symmetry=False)
    self.cc_identity = CorrelationCoefficientAccumulator(x.data(), y.data())
    n_identity = self.cc_identity.n()

    min_sd = 0.05
    sigma_1 = max(min_sd, self.cc_sig_fac/200**0.5)
    sigma_2 = max(min_sd, self.cc_sig_fac/n_identity**0.5)
    w1 = 1/sigma_1**2
    w2 = 1/sigma_2**2

    self.cc_true = (w1 * self.E_cc_true + w2 * self.cc_identity.coefficient())/(w1 + w2)

    logger.debug('cc_true = (w1 * E_cc_true + w2 * cc_identity)/(w1 + w2)')
    logger.debug('w1: %g', w1)
    logger.debug('w2: %g', w2)
    logger.debug('E_cc_true: %g', self.E_cc_true)
    logger.debug('cc_identity: %g', self.cc_identity.coefficient())
    logger.debug('cc_true: %g', self.cc_true)
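
The cc_true combination above is plain inverse-variance weighting of two estimates. A standalone sketch (the names here are illustrative, not part of the original API):

def combine_inverse_variance(est1, sd1, est2, sd2):
    # weight each estimate by 1/sigma**2, exactly as w1 and w2 are built above
    w1, w2 = 1.0 / sd1 ** 2, 1.0 / sd2 ** 2
    return (w1 * est1 + w2 * est2) / (w1 + w2)

print(combine_inverse_variance(0.9, 0.05, 0.7, 0.10))  # 0.86, pulled toward the tighter estimate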
Example #3
      def target(self, vector):
        """ Compute the functional by first applying the current values for the sd parameters
        to the input data, then computing the complete set of normalized deviations and finally
        using those normalized deviations to compute the functional."""
        sdfac, sdb, sdadd = vector[0],0.0,vector[1]

        a_new_variance, b_new_variance = ccp4_model.apply_sd_error_params(
          vector, a_data, b_data, a_sigmas, b_sigmas)

        mean_num = (a_data/ (a_new_variance) ) + (b_data/ (b_new_variance) )
        mean_den = (1./ (a_new_variance) ) + (1./ (b_new_variance) )
        mean_values = mean_num / mean_den

        delta_I_a = a_data - mean_values
        normal_a = delta_I_a / flex.sqrt(a_new_variance)

        delta_I_b = b_data - mean_values
        normal_b = delta_I_b / flex.sqrt(b_new_variance)

        mean_order = flex.sort_permutation(mean_values)
        scatters = flex.double(50)
        scattersb = flex.double(50)
        for isubsection in range(50):
          subselect = mean_order[isubsection*len(mean_order)//50:(isubsection+1)*len(mean_order)//50]
          vals = normal_a.select(subselect)
          scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_variance()

          valsb = normal_b.select(subselect)
          scattersb[isubsection] = flex.mean_and_variance(valsb).unweighted_sample_variance()

        f = flex.sum( flex.pow(1.-scatters, 2) )
        print "f: % 12.1f, sdfac: %8.5f, sdb: %8.5f, sdadd: %8.5f"%(f, sdfac, sdb, sdadd)
        return f
Example #4
      def target(self, vector):
        """ Compute the functional by first applying the current values for the sd parameters
        to the input data, then computing the complete set of normalized deviations and finally
        using those normalized deviations to compute the functional."""
        sdfac, sdb, sdadd = vector[0],0.0,vector[1]

        a_new_variance, b_new_variance = ccp4_model.apply_sd_error_params(
          vector, a_data, b_data, a_sigmas, b_sigmas)

        mean_num = (a_data/ (a_new_variance) ) + (b_data/ (b_new_variance) )
        mean_den = (1./ (a_new_variance) ) + (1./ (b_new_variance) )
        mean_values = mean_num / mean_den

        delta_I_a = a_data - mean_values
        normal_a = delta_I_a / flex.sqrt(a_new_variance)

        delta_I_b = b_data - mean_values
        normal_b = delta_I_b / flex.sqrt(b_new_variance)

        mean_order = flex.sort_permutation(mean_values)
        scatters = flex.double(50)
        scattersb = flex.double(50)
        for isubsection in range(50):
          subselect = mean_order[isubsection*len(mean_order)//50:(isubsection+1)*len(mean_order)//50]
          vals = normal_a.select(subselect)
          scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_variance()

          valsb = normal_b.select(subselect)
          scattersb[isubsection] = flex.mean_and_variance(valsb).unweighted_sample_variance()

        f = flex.sum( flex.pow(1.-scatters, 2) )
        print "f: % 12.1f, sdfac: %8.5f, sdb: %8.5f, sdadd: %8.5f"%(f, sdfac, sdb, sdadd)
        return f
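
The two target() examples above score the fit by how far each bin's sample variance of the normalized deviations departs from 1. A self-contained sketch of that functional using plain lists (assumes len(normal) is much larger than n_bins, so no bin is empty):

def binned_variance_functional(normal, mean_values, n_bins=50):
    # sort indices by the weighted-mean intensity, as sort_permutation does above
    order = sorted(range(len(mean_values)), key=lambda k: mean_values[k])
    n = len(order)
    total = 0.0
    for b in range(n_bins):
        sel = order[b * n // n_bins:(b + 1) * n // n_bins]
        vals = [normal[k] for k in sel]
        m = sum(vals) / len(vals)
        var = sum((v - m) ** 2 for v in vals) / (len(vals) - 1)  # unweighted sample variance
        total += (1.0 - var) ** 2
    return total

print(binned_variance_functional([0.1, -1.2, 0.9, 0.3, -0.5, 1.1], [1, 5, 2, 6, 3, 4], n_bins=2))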
Example #5
  def calculate_gold2d(self):

    from scitbx.array_family import flex
    from scitbx import matrix

    r_tot = 0.0
    c_tot = 0.0
    d_tot = 0.0

    for (r, c), d in zip(self.points2d, self.pixels2d):
      r_tot += d * r
      c_tot += d * c
      d_tot += d

    self.gold2d = matrix.col((r_tot / d_tot, c_tot / d_tot))
    _r, _c = self.gold2d

    r_tot = 0.0
    c_tot = 0.0

    for (r, c), d in zip(self.points2d, self.pixels2d):
      r_tot += d * (r - _r) ** 2
      c_tot += d * (c - _c) ** 2

    _sr = r_tot / d_tot
    _sc = c_tot / d_tot

    self.gold2dvar = matrix.col((_sr, _sc))

    pixel_x, pixel_y = zip(*self.points2d)
    xc = flex.mean_and_variance(flex.double(pixel_x), self.pixels2d.as_1d())
    yc = flex.mean_and_variance(flex.double(pixel_y), self.pixels2d.as_1d())
    self.gold2dubvar = matrix.col((xc.gsl_stats_wvariance(),
                                   yc.gsl_stats_wvariance()))
Example #6
def restrain_II(plt):
  OX = GS_ROI(full_path("data_sherrell/pf-rd-ox_fftkk.out"))
  RD = GS_ROI(full_path("data_sherrell/pf-rd-red_fftkk.out"))
  MT = GS_ROI(full_path("data_sherrell/Fe_fake.dat")) # with interpolated points
  r_mean = flex.double(200)
  r_sigma = flex.double(200)
  for ichannel in range(100):
    fp_pop = flex.mean_and_variance(flex.double([OX.fp[ichannel],RD.fp[ichannel],MT.fp[ichannel]]))
    fdp_pop = flex.mean_and_variance(flex.double([OX.fdp[ichannel],RD.fdp[ichannel],MT.fdp[ichannel]]))
    r_mean[ichannel] = fp_pop.mean(); r_mean[100+ichannel] = fdp_pop.mean()
    r_sigma[ichannel] = fp_pop.unweighted_sample_standard_deviation()
    r_sigma[100+ichannel] = fdp_pop.unweighted_sample_standard_deviation()

  plt.stackplot(OX.energy,r_mean[0:100]-r_sigma[0:100],color=('lightgreen'))
  plt.stackplot(OX.energy,r_mean[0:100]+r_sigma[0:100],color=('white'))
  plt.stackplot(OX.energy,r_mean[100:200]+r_sigma[100:200],color=('lightgreen'))
  plt.stackplot(OX.energy,r_mean[100:200]-r_sigma[100:200],color=('white'))
  plt.plot(OX.energy,r_mean[0:100]+r_sigma[0:100],'g-')
  plt.plot(OX.energy,r_mean[100:200]+r_sigma[100:200],'g-')
  plt.plot(OX.energy,r_mean[0:100]-r_sigma[0:100],'g-')
  plt.plot(OX.energy,r_mean[100:200]-r_sigma[100:200],'g-')
  plt.plot(OX.energy,r_mean[0:100],'g-')
  plt.plot(OX.energy,r_mean[100:200],'g-')
  OX.plot_them(plt,f1="b.",f2="b.")
  OX.plot_them(plt,f1="b-",f2="b-")
  RD = GS_ROI(full_path("data_sherrell/pf-rd-red_fftkk.out"))
  RD.plot_them(plt,f1="r.",f2="r.")
  RD.plot_them(plt,f1="r-",f2="r-")
  MT = GS_ROI(full_path("data_sherrell/Fe_fake.dat")) # with interpolated points
  MT.plot_them(plt,f1="m-",f2="m-")
  plt.gca().set_ylim((-8.3,4.2))
  plt.show()
Example #7
def run(args):
  if "-c" in args or "-h" in args or "--help" in args:
    print(help_message)
  user_phil = []
  for arg in args :
    try :
      user_phil.append(phil.parse(arg))
    except RuntimeError as e :
      raise Sorry("Unrecognized argument '%s' (error: %s)" % (arg, str(e)))
  params = phil_scope.fetch(sources=user_phil).extract()

  img = dxtbx.load(params.average)
  dataset_name = "exp=%s:run=%s:idx"%(params.experiment,params.run)
  ds = psana.DataSource(dataset_name)
  run = next(ds.runs())

  psana_det = psana.Detector(params.address, ds.env())
  psana_gain_mask = psana_det.gain_mask()
  psana_gain_mask = flex.bool(psana_gain_mask==1)

  gain_masks = []
  assert psana_gain_mask.focus() == (32, 185, 388)
  for i in range(32):
    gain_masks.append(psana_gain_mask[i:i+1,:,:194])
    gain_masks[-1].reshape(flex.grid(185,194))
    gain_masks.append(psana_gain_mask[i:i+1,:,194:])
    gain_masks[-1].reshape(flex.grid(185,194))

  ratios = flex.double()
  counts = flex.int()
  for panel_id, (data, mask) in enumerate(zip(img.get_raw_data(), gain_masks)):
    if mask.all_eq(True) or mask.all_eq(False):
      continue

    panel_sum = 0
    panel_count = 0
    for s in range(data.focus()[1]):
      for f in range(data.focus()[0]):
        if f+1 == data.focus()[0]:
          continue
        if (not mask[f,s]) and mask[f+1,s] and data[f+1,s] != 0:
          panel_sum += data[f,s]/data[f+1,s]
          panel_count += 1
        elif mask[f,s] and not mask[f+1,s] and data[f,s] != 0:
          panel_sum += data[f+1,s]/data[f,s]
          panel_count += 1
    if panel_count > 0:
      ratio = panel_sum/panel_count
      ratios.append(ratio)
      counts.append(panel_count)
      print "Panel", panel_id, "ratio:", ratio, "N pairs", panel_count

  if len(ratios) <= 1:
    return
  print "Mean:", flex.mean(ratios)
  print "Standard deviation", flex.mean_and_variance(ratios).unweighted_sample_standard_deviation()

  stats = flex.mean_and_variance(ratios, counts.as_double())
  print "Weighted mean:", stats.mean()
  print "Weighted standard deviation", stats.gsl_stats_wsd()
Example #8
  def calculate_gold_masked2d(self):

    from scitbx.array_family import flex
    from scitbx import matrix

    r_tot = 0.0
    c_tot = 0.0
    d_tot = 0.0

    for (r, c), d, m in zip(self.points2d, self.pixels2d, self.mask2d):
      if m:
        r_tot += d * r
        c_tot += d * c
        d_tot += d

    self.goldmasked2d = matrix.col((r_tot / d_tot, c_tot / d_tot))

    _r, _c = self.goldmasked2d

    r_tot = 0.0
    c_tot = 0.0

    for (r, c), d, m in zip(self.points2d, self.pixels2d, self.mask2d):
      if m:
        r_tot += d * (r - _r) ** 2
        c_tot += d * (c - _c) ** 2

    _sr = r_tot / d_tot
    _sc = c_tot / d_tot

    self.goldmasked2dvar = matrix.col((_sr, _sc))

    #r_tot = 0.0
    #c_tot = 0.0

    #for (r, c), d, m in zip(self.points2d, self.pixels2d, self.mask2d):
    #  if m:
    #    r_tot += d * (r - _r) ** 2
    #    c_tot += d * (c - _c) ** 2

    #_sr = r_tot / (d_tot-1)
    #_sc = c_tot / (d_tot-1)

    #self.goldmasked2dubvar = matrix.col((_sr, _sc))

    pixel_x = flex.double()
    pixel_y = flex.double()
    pixel_d = flex.double()
    for (x, y), d, m in zip(self.points2d, self.pixels2d, self.mask2d):
      if m:
        pixel_x.append(x)
        pixel_y.append(y)
        pixel_d.append(d)

    xc = flex.mean_and_variance(flex.double(pixel_x), pixel_d)
    yc = flex.mean_and_variance(flex.double(pixel_y), pixel_d)
    self.goldmasked2dubvar = matrix.col((xc.gsl_stats_wvariance(),
                                         yc.gsl_stats_wvariance()))
Example #9
    def calculate_gold_masked2d(cls):
        from scitbx.array_family import flex
        from scitbx import matrix

        r_tot = 0.0
        c_tot = 0.0
        d_tot = 0.0

        for (r, c), d, m in zip(cls.points2d, cls.pixels2d, cls.mask2d):
            if m:
                r_tot += d * r
                c_tot += d * c
                d_tot += d

        cls.goldmasked2d = matrix.col((r_tot / d_tot, c_tot / d_tot))

        _r, _c = cls.goldmasked2d

        r_tot = 0.0
        c_tot = 0.0

        for (r, c), d, m in zip(cls.points2d, cls.pixels2d, cls.mask2d):
            if m:
                r_tot += d * (r - _r) ** 2
                c_tot += d * (c - _c) ** 2

        _sr = r_tot / d_tot
        _sc = c_tot / d_tot

        cls.goldmasked2dvar = matrix.col((_sr, _sc))

        # r_tot = 0.0
        # c_tot = 0.0

        # for (r, c), d, m in zip(cls.points2d, cls.pixels2d, cls.mask2d):
        #  if m:
        #    r_tot += d * (r - _r) ** 2
        #    c_tot += d * (c - _c) ** 2

        # _sr = r_tot / (d_tot-1)
        # _sc = c_tot / (d_tot-1)

        # cls.goldmasked2dubvar = matrix.col((_sr, _sc))

        pixel_x = flex.double()
        pixel_y = flex.double()
        pixel_d = flex.double()
        for (x, y), d, m in zip(cls.points2d, cls.pixels2d, cls.mask2d):
            if m:
                pixel_x.append(x)
                pixel_y.append(y)
                pixel_d.append(d)

        xc = flex.mean_and_variance(flex.double(pixel_x), pixel_d)
        yc = flex.mean_and_variance(flex.double(pixel_y), pixel_d)
        cls.goldmasked2dubvar = matrix.col(
            (xc.gsl_stats_wvariance(), yc.gsl_stats_wvariance())
        )
Example #10
  def calculate_gold3d(self):

    from scitbx.array_family import flex
    from scitbx import matrix

    f_tot = 0.0
    r_tot = 0.0
    c_tot = 0.0
    d_tot = 0.0

    for (f, r, c), d in zip(self.points3d, self.pixels3d):
      f_tot += d * f
      r_tot += d * r
      c_tot += d * c
      d_tot += d

    self.gold3d = matrix.col((f_tot / d_tot, r_tot / d_tot, c_tot / d_tot))

    _f, _r, _c = self.gold3d

    f_tot = 0.0
    r_tot = 0.0
    c_tot = 0.0

    for (f, r, c), d in zip(self.points3d, self.pixels3d):
      f_tot += d * (f - _f) ** 2
      r_tot += d * (r - _r) ** 2
      c_tot += d * (c - _c) ** 2

    _sf = f_tot / d_tot
    _sr = r_tot / d_tot
    _sc = c_tot / d_tot

    self.gold3dvar = matrix.col((_sf, _sr, _sc))

    #f_tot = 0.0
    #r_tot = 0.0
    #c_tot = 0.0

    #for (f, r, c), d in zip(self.points3d, self.pixels3d):
    #  f_tot += d * (f - _f) ** 2
    #  r_tot += d * (r - _r) ** 2
    #  c_tot += d * (c - _c) ** 2

    #_sf = f_tot / (d_tot-1)
    #_sr = r_tot / (d_tot-1)
    #_sc = c_tot / (d_tot-1)

    #self.gold3dubvar = matrix.col((_sf, _sr, _sc))

    pixel_x, pixel_y, pixel_z = zip(*self.points3d)
    xc = flex.mean_and_variance(flex.double(pixel_x), self.pixels3d.as_1d())
    yc = flex.mean_and_variance(flex.double(pixel_y), self.pixels3d.as_1d())
    zc = flex.mean_and_variance(flex.double(pixel_z), self.pixels3d.as_1d())
    self.gold3dubvar = matrix.col((xc.gsl_stats_wvariance(),
                                   yc.gsl_stats_wvariance(),
                                   zc.gsl_stats_wvariance()))
Example #11
    def plots(a_data, b_data, a_sigmas, b_sigmas):

        # Diagnostic use of the (I - <I>) / sigma distribution, should have mean=0, std=1
        a_variance = a_sigmas * a_sigmas
        b_variance = b_sigmas * b_sigmas
        mean_num = (a_data / (a_variance)) + (b_data / (b_variance))
        mean_den = (1. / (a_variance)) + (1. / (b_variance))
        mean_values = mean_num / mean_den

        delta_I_a = a_data - mean_values
        normal_a = delta_I_a / (a_sigmas)
        stats_a = flex.mean_and_variance(normal_a)
        print "\nA mean %7.4f std %7.4f" % (
            stats_a.mean(), stats_a.unweighted_sample_standard_deviation())
        order_a = flex.sort_permutation(normal_a)

        delta_I_b = b_data - mean_values
        normal_b = delta_I_b / (b_sigmas)
        stats_b = flex.mean_and_variance(normal_b)
        print "B mean %7.4f std %7.4f" % (
            stats_b.mean(), stats_b.unweighted_sample_standard_deviation())
        order_b = flex.sort_permutation(normal_b)
        # plots for debugging
        from matplotlib import pyplot as plt
        cumnorm = plt.subplot(321)
        cumnorm.plot(range(len(order_a)), normal_a.select(order_a), "b.")
        cumnorm.plot(range(len(order_b)), normal_b.select(order_b), "r.")
        #plt.show()
        logger = plt.subplot(324)
        logger.loglog(a_data, b_data, "r.")
        delta = plt.subplot(322)
        delta.plot(a_data, delta_I_a, "g.")
        #plt.show()
        #nselection = (flex.abs(normal_a) < 2.).__and__(flex.abs(normal_b) < 2.)
        gam = plt.subplot(323)
        gam.plot(mean_values, normal_a, "b.")
        sigs = plt.subplot(326)
        sigs.plot(a_sigmas, b_sigmas, "g.")
        mean_order = flex.sort_permutation(mean_values)
        scatters = flex.double(50)
        scattersb = flex.double(50)
        for isubsection in range(50):
            subselect = mean_order[isubsection * len(mean_order) //
                                   50:(isubsection + 1) * len(mean_order) //
                                   50]
            vals = normal_a.select(subselect)
            #scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_standard_deviation()
            scatters[isubsection] = flex.mean_and_variance(
                vals).unweighted_sample_variance()

            valsb = normal_b.select(subselect)
            #scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_standard_deviation()
            scattersb[isubsection] = flex.mean_and_variance(
                valsb).unweighted_sample_variance()
        aaronsplot = plt.subplot(325)
        aaronsplot.plot(range(50), 2. * scatters, "b.")
        plt.show()
Example #12
    def plot_uc_3Dplot(self, info, iqr_ratio=1.5):
        assert self.interactive

        import numpy as np
        from mpl_toolkits.mplot3d import Axes3D  # import dependency

        fig = self.plt.figure(figsize=(12, 10))
        # Extract uc dimensions from info list
        a = flex.double([i['a'] for i in info])
        b = flex.double([i['b'] for i in info])
        c = flex.double([i['c'] for i in info])
        alpha = flex.double([i['alpha'] for i in info])
        beta = flex.double([i['beta'] for i in info])
        gamma = flex.double([i['gamma'] for i in info])
        n_total = len(a)

        accepted = flex.bool(n_total, True)
        for d in [a, b, c, alpha, beta, gamma]:
            outliers = self.reject_outliers(d, iqr_ratio)
            accepted &= ~outliers

        a = a.select(accepted)
        b = b.select(accepted)
        c = c.select(accepted)

        AA = "a-edge (%.2f +/- %.2f $\AA$)" % (
            flex.mean(a),
            flex.mean_and_variance(a).unweighted_sample_standard_deviation())
        BB = "b-edge (%.2f +/- %.2f $\AA$)" % (
            flex.mean(b),
            flex.mean_and_variance(b).unweighted_sample_standard_deviation())
        CC = "c-edge (%.2f +/- %.2f $\AA$)" % (
            flex.mean(c),
            flex.mean_and_variance(c).unweighted_sample_standard_deviation())

        subset = min(len(a), 1000)

        flex.set_random_seed(123)
        rnd_sel = flex.random_double(len(a)) < (subset / n_total)

        a = a.select(rnd_sel)
        b = b.select(rnd_sel)
        c = c.select(rnd_sel)

        fig.suptitle('{} randomly selected cells out of total {} images'
                     ''.format(len(a), n_total),
                     fontsize=18)

        ax = fig.add_subplot(111, projection='3d')

        for ia in range(len(a)):
            ax.scatter(a[ia], b[ia], c[ia], c='r', marker='+')

        ax.set_xlabel(AA)
        ax.set_ylabel(BB)
        ax.set_zlabel(CC)
Example #13
  def calculate_gold_masked3d(self):

    from scitbx.array_family import flex
    from scitbx import matrix

    f_tot = 0.0
    r_tot = 0.0
    c_tot = 0.0
    d_tot = 0.0

    for (f, r, c), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
      if m:
        f_tot += d * f
        r_tot += d * r
        c_tot += d * c
        d_tot += d

    self.goldmasked3d = matrix.col((f_tot / d_tot,
        r_tot / d_tot, c_tot / d_tot))

    _f, _r, _c = self.goldmasked3d

    f_tot = 0.0
    r_tot = 0.0
    c_tot = 0.0

    for (f, r, c), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
      if m:
        f_tot += d * (f - _f) ** 2
        r_tot += d * (r - _r) ** 2
        c_tot += d * (c - _c) ** 2

    _sf = f_tot / d_tot
    _sr = r_tot / d_tot
    _sc = c_tot / d_tot

    self.goldmasked3dvar = matrix.col((_sf, _sr, _sc))

    pixel_x = flex.double()
    pixel_y = flex.double()
    pixel_z = flex.double()
    pixel_d = flex.double()
    for (x, y, z), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
      if m:
        pixel_x.append(x)
        pixel_y.append(y)
        pixel_z.append(z)
        pixel_d.append(d)

    xc = flex.mean_and_variance(flex.double(pixel_x), pixel_d)
    yc = flex.mean_and_variance(flex.double(pixel_y), pixel_d)
    zc = flex.mean_and_variance(flex.double(pixel_z), pixel_d)
    self.goldmasked3dubvar = matrix.col((xc.gsl_stats_wvariance(),
                                         yc.gsl_stats_wvariance(),
                                         zc.gsl_stats_wvariance()))
Example #14
    def calculate_gold_masked3d(self):

        from scitbx.array_family import flex
        from scitbx import matrix

        f_tot = 0.0
        r_tot = 0.0
        c_tot = 0.0
        d_tot = 0.0

        for (f, r, c), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
            if m:
                f_tot += d * f
                r_tot += d * r
                c_tot += d * c
                d_tot += d

        self.goldmasked3d = matrix.col(
            (f_tot / d_tot, r_tot / d_tot, c_tot / d_tot))

        _f, _r, _c = self.goldmasked3d

        f_tot = 0.0
        r_tot = 0.0
        c_tot = 0.0

        for (f, r, c), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
            if m:
                f_tot += d * (f - _f)**2
                r_tot += d * (r - _r)**2
                c_tot += d * (c - _c)**2

        _sf = f_tot / d_tot
        _sr = r_tot / d_tot
        _sc = c_tot / d_tot

        self.goldmasked3dvar = matrix.col((_sf, _sr, _sc))

        pixel_x = flex.double()
        pixel_y = flex.double()
        pixel_z = flex.double()
        pixel_d = flex.double()
        for (x, y, z), d, m in zip(self.points3d, self.pixels3d, self.mask3d):
            if m:
                pixel_x.append(x)
                pixel_y.append(y)
                pixel_z.append(z)
                pixel_d.append(d)

        xc = flex.mean_and_variance(flex.double(pixel_x), pixel_d)
        yc = flex.mean_and_variance(flex.double(pixel_y), pixel_d)
        zc = flex.mean_and_variance(flex.double(pixel_z), pixel_d)
        self.goldmasked3dubvar = matrix.col(
            (xc.gsl_stats_wvariance(), yc.gsl_stats_wvariance(),
             zc.gsl_stats_wvariance()))
Example #15
  def plot_uc_3Dplot(self, info):
    assert self.interactive

    import numpy as np
    from mpl_toolkits.mplot3d import Axes3D # import dependency

    fig = self.plt.figure(figsize=(12, 10))
    # Extract uc dimensions from info list
    a = flex.double([i['a'] for i in info])
    b = flex.double([i['b'] for i in info])
    c = flex.double([i['c'] for i in info])
    alpha = flex.double([i['alpha'] for i in info])
    beta = flex.double([i['beta'] for i in info])
    gamma = flex.double([i['gamma'] for i in info])
    n_total = len(a)

    accepted = flex.bool(n_total, True)
    for d in [a, b, c, alpha, beta, gamma]:
      outliers = self.reject_outliers(d)
      accepted &= ~outliers

    a = a.select(accepted)
    b = b.select(accepted)
    c = c.select(accepted)

    AA = "a-edge (%.2f +/- %.2f $\AA$)" % (flex.mean(a),
                                        flex.mean_and_variance(a).unweighted_sample_standard_deviation())
    BB = "b-edge (%.2f +/- %.2f $\AA$)" % (flex.mean(b),
                                        flex.mean_and_variance(b).unweighted_sample_standard_deviation())
    CC = "c-edge (%.2f +/- %.2f $\AA$)" % (flex.mean(c),
                                        flex.mean_and_variance(c).unweighted_sample_standard_deviation())


    subset = min(len(a),1000)

    flex.set_random_seed(123)
    rnd_sel = flex.random_double(len(a))<(subset/n_total)

    a = a.select(rnd_sel)
    b = b.select(rnd_sel)
    c = c.select(rnd_sel)

    fig.suptitle('{} randomly selected cells out of total {} images'
                 ''.format(len(a),n_total), fontsize=18)

    ax = fig.add_subplot(111, projection='3d')

    for ia in range(len(a)):
      ax.scatter(a[ia], b[ia], c[ia], c='r', marker='+')

    ax.set_xlabel(AA)
    ax.set_ylabel(BB)
    ax.set_zlabel(CC)
Example #16
def simulate(n, size):
    from scitbx.array_family import flex
    from scitbx.random import variate, poisson_distribution

    shoeboxes = []

    B = 10

    # Generate shoeboxes with uniform random background
    for l in range(n):
        sbox = flex.double(flex.grid(size), 0)
        g = variate(poisson_distribution(mean=B))
        for k in range(size[0]):
            for j in range(size[1]):
                for i in range(size[2]):
                    sbox[k, j, i] += next(g)
        shoeboxes.append(sbox)

    # Calculate the Intensity (should be zero)
    import random

    I_cal = []
    mean = []
    for i in range(len(shoeboxes)):
        nn = len(shoeboxes[i])
        mm = int(1.0 * nn)
        index = flex.size_t(random.sample(range(nn), mm))
        assert len(set(index)) == mm
        data = shoeboxes[i].select(index)
        II = flex.sum(data)
        # II = flex.mean(data)
        BB = mm * B
        # BB = B
        I_cal.append(II - BB)
        m = flex.mean(data)
        mean.append(m)
    I_cal = flex.double(I_cal)

    mv = flex.mean_and_variance(flex.double(mean))
    print(mv.mean() - B, mv.unweighted_sample_variance())
    v1 = B / (size[0] * size[1] * size[2])
    v2 = B * (size[0] * size[1] * size[2])
    print(v1)
    print(v2)
    print(I_cal[0])

    from math import sqrt

    Z = (I_cal - 0) / sqrt(v2)

    # Return the mean and sample variance
    mv = flex.mean_and_variance(Z)
    return mv.mean(), mv.unweighted_sample_variance()
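
The v2 used above follows from Poisson statistics: a sum of n independent Poisson(B) pixels has variance n*B. A quick numerical check, with numpy standing in for scitbx:

import numpy as np

rng = np.random.default_rng(0)
B, npix, ntrials = 10, 4 * 4 * 4, 5000
sums = rng.poisson(B, size=(ntrials, npix)).sum(axis=1)
print(sums.var(ddof=1), B * npix)  # the two numbers should agree closely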
Example #17
def restrain_II_values():
  OX = GS_ROI(full_path("data_sherrell/pf-rd-ox_fftkk.out"))
  RD = GS_ROI(full_path("data_sherrell/pf-rd-red_fftkk.out"))
  MT = GS_ROI(full_path("data_sherrell/Fe_fake.dat")) # with interpolated points
  r_mean = flex.double(200)
  r_sigma = flex.double(200)
  for ichannel in range(100):
    fp_pop = flex.mean_and_variance(flex.double([OX.fp[ichannel],RD.fp[ichannel],MT.fp[ichannel]]))
    fdp_pop = flex.mean_and_variance(flex.double([OX.fdp[ichannel],RD.fdp[ichannel],MT.fdp[ichannel]]))
    r_mean[ichannel] = fp_pop.mean(); r_mean[100+ichannel] = fdp_pop.mean()
    r_sigma[ichannel] = (1./SEVERITY_FACTOR)*fp_pop.unweighted_sample_standard_deviation()
    r_sigma[100+ichannel] = (1./SEVERITY_FACTOR)*fdp_pop.unweighted_sample_standard_deviation()
  return r_mean, r_sigma
Example #18
  def plots(a_data, b_data, a_sigmas, b_sigmas):

    # Diagnostic use of the (I - <I>) / sigma distribution, should have mean=0, std=1
    a_variance = a_sigmas * a_sigmas
    b_variance = b_sigmas * b_sigmas
    mean_num = (a_data/ (a_variance) ) + (b_data/ (b_variance) )
    mean_den = (1./ (a_variance) ) + (1./ (b_variance) )
    mean_values = mean_num / mean_den

    delta_I_a = a_data - mean_values
    normal_a = delta_I_a / (a_sigmas)
    stats_a = flex.mean_and_variance(normal_a)
    print "\nA mean %7.4f std %7.4f"%(stats_a.mean(),stats_a.unweighted_sample_standard_deviation())
    order_a = flex.sort_permutation(normal_a)

    delta_I_b = b_data - mean_values
    normal_b = delta_I_b / (b_sigmas)
    stats_b = flex.mean_and_variance(normal_b)
    print "B mean %7.4f std %7.4f"%(stats_b.mean(),stats_b.unweighted_sample_standard_deviation())
    order_b = flex.sort_permutation(normal_b)
    # plots for debugging
    from matplotlib import pyplot as plt
    cumnorm = plt.subplot(321)
    cumnorm.plot(range(len(order_a)),normal_a.select(order_a),"b.")
    cumnorm.plot(range(len(order_b)),normal_b.select(order_b),"r.")
    #plt.show()
    logger = plt.subplot(324)
    logger.loglog(a_data,b_data,"r.")
    delta = plt.subplot(322)
    delta.plot(a_data, delta_I_a, "g.")
    #plt.show()
    #nselection = (flex.abs(normal_a) < 2.).__and__(flex.abs(normal_b) < 2.)
    gam = plt.subplot(323)
    gam.plot(mean_values,normal_a,"b.")
    sigs = plt.subplot(326)
    sigs.plot(a_sigmas,b_sigmas,"g.")
    mean_order = flex.sort_permutation(mean_values)
    scatters = flex.double(50)
    scattersb = flex.double(50)
    for isubsection in range(50):
      subselect = mean_order[isubsection*len(mean_order)//50:(isubsection+1)*len(mean_order)//50]
      vals = normal_a.select(subselect)
      #scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_standard_deviation()
      scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_variance()

      valsb = normal_b.select(subselect)
      #scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_standard_deviation()
      scattersb[isubsection] = flex.mean_and_variance(valsb).unweighted_sample_variance()
    aaronsplot = plt.subplot(325)
    aaronsplot.plot(range(50), 2. * scatters, "b.")
    plt.show()
Example #19
def simulate(n, size):
  from scitbx.array_family import flex
  from scitbx.random import variate, poisson_distribution
  shoeboxes = []

  B = 10

  # Generate shoeboxes with uniform random background
  for l in range(n):
    sbox = flex.double(flex.grid(size),0)
    g = variate(poisson_distribution(mean = B))
    for k in range(size[0]):
      for j in range(size[1]):
        for i in range(size[2]):
          sbox[k, j, i] += next(g)
    shoeboxes.append(sbox)

  # Calculate the Intensity (should be zero)
  import random
  I_cal = []
  mean = []
  for i in range(len(shoeboxes)):
    nn = len(shoeboxes[i])
    mm = int(1.0 * nn)
    index = flex.size_t(random.sample(range(nn), mm))
    assert(len(set(index)) == mm)
    data = shoeboxes[i].select(index)
    II = flex.sum(data)
    #II = flex.mean(data)
    BB = mm * B
    #BB = B
    I_cal.append(II - BB)
    m = flex.mean(data)
    mean.append(m)
  I_cal = flex.double(I_cal)

  mv = flex.mean_and_variance(flex.double(mean))
  print(mv.mean() - B, mv.unweighted_sample_variance())
  v1 = B / (size[0] * size[1] * size[2])
  v2 = B * (size[0] * size[1] * size[2])
  print(v1)
  print(v2)
  print(I_cal[0])

  from math import sqrt
  Z = (I_cal - 0) / sqrt(v2)


  # Return the mean and sample variance
  mv = flex.mean_and_variance(Z)
  return mv.mean(), mv.unweighted_sample_variance()
Example #20
  def detector_origin_analysis(self):
    self.FRAMES["detector_origin_x_refined"]=flex.double()
    self.FRAMES["detector_origin_y_refined"]=flex.double()
    self.FRAMES["distance_refined"]=flex.double()
    for iframe in range(len(self.FRAMES["frame_id"])):
      if iframe < self.n_refined_frames:
        SIGN = -1.
        PIXEL_SZ = 0.11 # mm/pixel
        detector_origin = col((-self.FRAMES["beam_x"][iframe]
                               + SIGN * PIXEL_SZ * self.frame_translations.x[2*iframe],
                               -self.FRAMES["beam_y"][iframe]
                               + SIGN * PIXEL_SZ * self.frame_translations.x[1+2*iframe],
                               0.))
        self.FRAMES["detector_origin_x_refined"].append(detector_origin[0])
        self.FRAMES["detector_origin_y_refined"].append(detector_origin[1])
        self.FRAMES["distance_refined"].append(
           self.frame_distances.x[iframe] +
           self.FRAMES["distance"][iframe]
        )

    xm = flex.mean_and_variance(self.FRAMES["detector_origin_x_refined"])
    ym = flex.mean_and_variance(self.FRAMES["detector_origin_y_refined"])
    print "Beam x mean %7.3f sigma %7.3f mm"%(
      xm.mean(), xm.unweighted_sample_standard_deviation())
    print "Beam y mean %7.3f sigma %7.3f mm"%(
      ym.mean(), ym.unweighted_sample_standard_deviation())

    time_series = False
    import os
    files = [os.path.basename(f) for f in self.FRAMES["unique_file_name"]]
    longs = [int("".join([a for a in name if a.isdigit()]))//1000 for name in files]
    floats = flex.double([float(L) for L in longs])[
      :len(self.FRAMES["detector_origin_x_refined"])]

    order = flex.sort_permutation(floats)
    time_sorted_x_beam = self.FRAMES["detector_origin_x_refined"].select(order)
    time_sorted_y_beam = self.FRAMES["detector_origin_y_refined"].select(order)

    if time_series:
      from matplotlib import pyplot as plt
      plt.plot(range(len(order)),time_sorted_x_beam,"r-")
      plt.plot(range(len(order)),time_sorted_y_beam,"b-")
      plt.show()

    for item in order:
      print(files[item], "%8.3f %8.3f dist %8.3f" % (
        self.FRAMES["detector_origin_x_refined"][item],
        self.FRAMES["detector_origin_y_refined"][item],
        self.FRAMES["distance_refined"][item]))
Example #21
    def calculate_gold2d(self):

        from scitbx.array_family import flex
        from scitbx import matrix

        r_tot = 0.0
        c_tot = 0.0
        d_tot = 0.0

        for (r, c), d in zip(self.points2d, self.pixels2d):
            r_tot += d * r
            c_tot += d * c
            d_tot += d

        self.gold2d = matrix.col((r_tot / d_tot, c_tot / d_tot))
        _r, _c = self.gold2d

        r_tot = 0.0
        c_tot = 0.0

        for (r, c), d in zip(self.points2d, self.pixels2d):
            r_tot += d * (r - _r)**2
            c_tot += d * (c - _c)**2

        _sr = r_tot / d_tot
        _sc = c_tot / d_tot

        self.gold2dvar = matrix.col((_sr, _sc))

        #r_tot = 0.0
        #c_tot = 0.0

        #for (r, c), d in zip(self.points2d, self.pixels2d):
        #  r_tot += d * (r - _r) ** 2
        #  c_tot += d * (c - _c) ** 2

        #_sr = r_tot / (d_tot-1)
        #_sc = c_tot / (d_tot-1)

        #self.gold2dubvar = matrix.col((_sr, _sc))

        pixel_x, pixel_y = zip(*self.points2d)
        xc = flex.mean_and_variance(flex.double(pixel_x),
                                    self.pixels2d.as_1d())
        yc = flex.mean_and_variance(flex.double(pixel_y),
                                    self.pixels2d.as_1d())
        self.gold2dubvar = matrix.col(
            (xc.gsl_stats_wvariance(), yc.gsl_stats_wvariance()))
Example #22
def index_of_dispersion(data):
    from scitbx.array_family import flex

    mv = flex.mean_and_variance(data)
    mean = mv.mean()
    var = mv.unweighted_sample_variance()
    return var / mean
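
A hypothetical usage check for index_of_dispersion(): Poisson counts should give a value near 1 (assumes scitbx is importable, as in the other examples):

from scitbx.array_family import flex
from scitbx.random import variate, poisson_distribution

g = variate(poisson_distribution(mean=5))
counts = flex.double([next(g) for _ in range(10000)])
print(index_of_dispersion(counts))  # ~1.0 for Poisson data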
Example #23
  def tst_mean_and_variance_filter(self):
    from dials.algorithms.image.filter import mean_and_variance_filter
    from scitbx.array_family import flex
    from random import randint

    # Create an image
    image = flex.random_double(2000 * 2000)
    image.reshape(flex.grid(2000, 2000))

    # Calculate the summed area table
    mean_and_variance = mean_and_variance_filter(image, (3, 3))
    mean = mean_and_variance.mean()
    variance = mean_and_variance.variance()
    sample_variance = mean_and_variance.sample_variance()

    # For a selection of random points, ensure that the filtered values
    # match the mean and sample variance of the area under the kernel
    eps = 1e-7
    for i in range(10000):
      i = randint(10, 1990)
      j = randint(10, 1990)
      m1 = mean[j,i]
      v1 = variance[j,i]
      sv1 = sample_variance[j,i]
      p = image[j-3:j+4,i-3:i+4]
      mv = flex.mean_and_variance(p.as_1d())
      m2 = mv.mean()
      sv2 = mv.unweighted_sample_variance()
      assert(abs(m1 - m2) <= eps)
      assert(abs(sv1 - sv2) <= eps)

    # Test passed
    print('OK')
Example #24
    def adjust_errors(self):
        """
    Use the distribution of intensities in a given miller index to compute the error for each merged reflection
    """
        print("Computing error estimates from sample residuals", file=self.log)
        self.scaler.summed_weight = flex.double(self.scaler.n_refl, 0.)
        self.scaler.summed_wt_I = flex.double(self.scaler.n_refl, 0.)

        for hkl_id in range(self.scaler.n_refl):
            hkl = self.scaler.miller_set.indices()[hkl_id]
            if hkl not in self.scaler.ISIGI: continue

            n = len(self.scaler.ISIGI[hkl])
            if n > 1:
                variance = flex.mean_and_variance(
                    flex.double([
                        self.scaler.ISIGI[hkl][i][0] for i in range(n)
                    ])).unweighted_sample_variance()
            else:
                continue

            for i in range(n):
                Intensity = self.scaler.ISIGI[hkl][i][0]  # scaled intensity
                self.scaler.summed_wt_I[hkl_id] += Intensity / variance
                self.scaler.summed_weight[hkl_id] += 1 / variance
        print("Done computing error estimates", file=self.log)
Example #25
    def _filtered_stats(function, iterable):
        """The _filtered_stats() computes first- and second-order statistics
    on an array, after first applying a filter.

    @param function Function which returns @c True for those elements
                    of @p iterable which should be included in the
                    returned statistics, or @c None to keep all
                    truthy data items
    @param iterable An iterable sequence of data items
    @return         A four-tuple of mean, standard deviation,
                    effective sample size, and number of rejected
                    samples
    """

        filtered_data = list(filter(function, iterable))
        if len(filtered_data) == 0:
            return (0, 0, 0, 0)

        stats = flex.mean_and_variance(flex.double(filtered_data))
        mean = stats.mean()
        if len(filtered_data) > 1:
            stddev = stats.unweighted_sample_standard_deviation()
        else:
            stddev = 0

        return (mean, stddev, len(filtered_data), len(iterable) - len(filtered_data))
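
A hypothetical call of the helper above, keeping only strictly positive samples (requires scitbx for the flex statistics; shown unindented for illustration):

mean, stddev, n_used, n_rejected = _filtered_stats(lambda x: x > 0, [1.0, -2.0, 3.0, 4.0])
print(mean, stddev, n_used, n_rejected)  # stats of [1.0, 3.0, 4.0]; one sample rejected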
Example #26
    def adjust_errors(self):
        """
    Use the distribution of intensities in a given miller index to compute the error for each merged reflection
    """
        print("Computing error estimates from sample residuals", file=self.log)
        self.scaler.summed_weight = flex.double(self.scaler.n_refl, 0.)
        self.scaler.summed_wt_I = flex.double(self.scaler.n_refl, 0.)

        for hkl_id in range(self.scaler.n_refl):
            hkl = self.scaler.miller_set.indices()[hkl_id]
            if hkl not in self.scaler.ISIGI: continue

            n = len(self.scaler.ISIGI[hkl])
            if n <= 1: continue
            x = flex.double([self.scaler.ISIGI[hkl][i][0] for i in range(n)])
            if self.scaler.params.raw_data.error_models.errors_from_sample_residuals.biased:
                m = flex.mean(x)
                variance = flex.sum((x - m)**2) / n
            else:
                variance = flex.mean_and_variance(
                    x).unweighted_sample_variance()  # flex.sum((x-m)**2)/(n-1)

            for i in range(n):
                Intensity = self.scaler.ISIGI[hkl][i][0]  # scaled intensity
                self.scaler.summed_wt_I[hkl_id] += Intensity / variance
                self.scaler.summed_weight[hkl_id] += 1 / variance
        print("Done computing error estimates", file=self.log)
Example #27
def fp_distro(plt):
  OX = GS_ROI(full_path("data_sherrell/pf-rd-ox_fftkk.out"))
  RD = GS_ROI(full_path("data_sherrell/pf-rd-red_fftkk.out"))
  MM = GS_ROI(full_path("data_sherrell/Fe_fake.dat"))
  delta_fp = flex.double()

  for i in range(1,100):
    #delta_fp.append ( MM.fp[i]-MM.fp[i-1] );
    delta_fp.append ( OX.fp[i]-OX.fp[i-1] ); delta_fp.append ( RD.fp[i]-RD.fp[i-1] )
  STATS = flex.mean_and_variance(delta_fp)
  mean = STATS.mean()
  sigma = STATS.unweighted_sample_standard_deviation()
  displaynorm = 3.0
  plotx = flex.double([0.005*x for x in range(-100,100)])
  ploty = flex.double([displaynorm *
       (1./math.sqrt(2.*math.pi*sigma*sigma))*math.exp(-0.5*(math.pow(x-mean,2))/(sigma*sigma))
       for x in plotx])
  print("mean",mean,"sigma",sigma)
  compute_functional_and_gradients_fp(FE1_fp=OX.fp,FE2_fp=RD.fp,mean=mean,sigma=sigma)
  n, bins, patches = plt.hist(delta_fp, 50, density=False, facecolor="orange", alpha=0.75)
  plt.plot(plotx, ploty, "r-")
  plt.xlabel("Delta fp")
  plt.title("Histogram of Delta fp")
  plt.axis([-1,1,0,40])
  plt.show()
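
The ploty overlay above is just a normal density scaled by displaynorm; a minimal standalone version of the same curve:

import math

def scaled_normal_pdf(x, mean, sigma, displaynorm=3.0):
    # identical to the ploty expression: displaynorm * N(mean, sigma**2) density at x
    return displaynorm * math.exp(-0.5 * ((x - mean) / sigma) ** 2) / (
        sigma * math.sqrt(2.0 * math.pi))

print(scaled_normal_pdf(0.0, 0.0, 0.1))  # peak height: 3 / (0.1 * sqrt(2*pi))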
Example #28
def test_masked_mean_filter():
    import random

    import pytest
    from scitbx.array_family import flex

    from dials.algorithms.image.filter import mean_filter

    # Create an image
    image = flex.random_double(2000 * 2000)
    image.reshape(flex.grid(2000, 2000))
    mask = flex.random_bool(2000 * 2000, 0.99).as_int()
    mask.reshape(flex.grid(2000, 2000))

    # Calculate the summed area table
    mask2 = mask.deep_copy()
    mean = mean_filter(image, mask2, (3, 3), 1)

    # For a selection of random points, ensure that the value is the
    # masked mean of the area under the kernel
    eps = 1e-7
    for i in range(10000):
        i = random.randint(10, 1990)
        j = random.randint(10, 1990)
        m1 = mean[j, i]
        p = image[j - 3:j + 4, i - 3:i + 4]
        m = mask[j - 3:j + 4, i - 3:i + 4]
        if mask[j, i] == 0:
            m2 = 0.0
        else:
            p = flex.select(p, flags=m)
            mv = flex.mean_and_variance(flex.double(p))
            m2 = mv.mean()
        assert m1 == pytest.approx(m2, abs=eps)
Example #29
File: mark3.py Project: dials/cctbx
 def same_sensor_table(self, verbose=True):
     radii = flex.double()  # from-instrument-center distance in pixels
     delrot = flex.double()  # delta rotation in degrees
     weight = flex.double()  #
     displacement = []  # vector between two same-sensor ASICS in pixels
     for x in range(len(self.tiles) // 8):
         delrot.append(self.x[len(self.tiles) // 2 + 2 * x] -
                       self.x[len(self.tiles) // 2 + 1 + 2 * x])
         radii.append((self.radii[2 * x] + self.radii[2 * x + 1]) / 2)
         weight.append(
             min([self.tilecounts[2 * x], self.tilecounts[2 * x + 1]]))
         displacement.append(
             col((self.To_x[2 * x + 1], self.To_y[2 * x + 1])) -
             col((self.x[2 * (2 * x + 1)], self.x[2 * (2 * x + 1) + 1])) -
             col((self.To_x[2 * x], self.To_y[2 * x])) +
             col((self.x[2 * (2 * x)], self.x[2 * (2 * x) + 1])))
     order = flex.sort_permutation(radii)
     if verbose:
         for x in order:
             print("%02d %02d %5.0f" % (2 * x, 2 * x + 1, weight[x]),
                   end=' ')
             print("%6.1f" % radii[x], end=' ')
             print("%5.2f" % (delrot[x]), end=' ')
             print("%6.3f" % (displacement[x].length() -
                              194.))  # ASIC is 194; just print gap
     stats = flex.mean_and_variance(
         flex.double([t.length() - 194. for t in displacement]), weight)
     print("sensor gap is %7.3f px +/- %7.3f" %
           (stats.mean(), stats.gsl_stats_wsd()))
Example #30
def compare_two_raw_images(
        reference,
        test,
        tol=1.E-7):  # TODO: run more tests to decide on the default tolerance
    from six.moves import cPickle as pickle
    from scitbx.array_family import flex
    with open(reference, 'rb') as F:
        ref_array = pickle.load(F)
    with open(test, 'rb') as F:
        test_array = pickle.load(F)
    print("\nComparing raw image: '%s' with the reference: '%s'" %
          (test, reference))
    diff_array = test_array - ref_array
    if diff_array.all_eq(0.0):
        print("There are 0 differences\n")
    else:
        stats = flex.mean_and_variance(diff_array.as_1d(
        ))  # flex.mean_and_variance works only on 1d arrays
        diff_mean = stats.mean()
        diff_std = stats.unweighted_sample_standard_deviation()
        diff_min = flex.min(diff_array)
        diff_max = flex.max(diff_array)
        print("Differences: range (%.2E to %.2E); mean %.2E; std %.2E" %
              (diff_min, diff_max, diff_mean, diff_std))
        # assert acceptable differences
        assert abs(
            diff_mean) < tol, "The raw image is different from the reference."
Example #31
    def tst_mean_and_variance_filter(self):
        from dials.algorithms.image.filter import mean_and_variance_filter
        from scitbx.array_family import flex
        from random import randint

        # Create an image
        image = flex.random_double(2000 * 2000)
        image.reshape(flex.grid(2000, 2000))

        # Calculate the summed area table
        mean_and_variance = mean_and_variance_filter(image, (3, 3))
        mean = mean_and_variance.mean()
        variance = mean_and_variance.variance()
        sample_variance = mean_and_variance.sample_variance()

        # For a selection of random points, ensure that the filtered values
        # match the mean and sample variance of the area under the kernel
        eps = 1e-7
        for i in range(10000):
            i = randint(10, 1990)
            j = randint(10, 1990)
            m1 = mean[j, i]
            v1 = variance[j, i]
            sv1 = sample_variance[j, i]
            p = image[j - 3:j + 4, i - 3:i + 4]
            mv = flex.mean_and_variance(p.as_1d())
            m2 = mv.mean()
            sv2 = mv.unweighted_sample_variance()
            assert (abs(m1 - m2) <= eps)
            assert (abs(sv1 - sv2) <= eps)

        # Test passed
        print('OK')
Example #32
    def _estimate_cc_true(self):

        # A1.2. Estimation of E(CC; S).

        # (i)

        var_intensities = flex.mean_and_variance(
            self.intensities.data()
        ).unweighted_sample_variance()
        var_sigmas = flex.mean_and_variance(flex.pow2(self.intensities.sigmas())).mean()
        self.E_cc_true = var_intensities / (var_intensities + var_sigmas)

        # (ii)

        reindexed_intensities = self.intensities.change_basis(
            sgtbx.change_of_basis_op("-x,-y,-z")
        ).map_to_asu()
        x, y = self.intensities.common_sets(
            reindexed_intensities, assert_is_similar_symmetry=False
        )
        self.cc_identity = CorrelationCoefficientAccumulator(x.data(), y.data())

        min_sd = 0.05
        min_sample = 10
        sigma_1 = max(min_sd, self.cc_sig_fac / 200 ** 0.5)
        w1 = 0
        w2 = 0
        if sigma_1 > 0.0001:
            w1 = 1 / sigma_1 ** 2
        if self.cc_identity.n() > min_sample:
            sigma_2 = max(min_sd, self.cc_sig_fac / self.cc_identity.n() ** 0.5)
            w2 = 1 / sigma_2 ** 2

        assert (w1 + w2) > 0
        self.cc_true = (w1 * self.E_cc_true + w2 * self.cc_identity.coefficient()) / (
            w1 + w2
        )

        logger.debug("cc_true = w1 * E_cc_true + w2 * cc_identity)/(w1 + w2)")
        logger.debug("w1: %g", w1)
        logger.debug("w2: %g", w2)
        logger.debug("E_cc_true: %g", self.E_cc_true)
        logger.debug("cc_identity: %g", self.cc_identity.coefficient())
        logger.debug("cc_true: %g", self.cc_true)
Example #33
  def normal_probability_plot(self, data, rankits_sel=None, plot=False):
    """ Use normal probability analysis to determine if a set of data is normally distributed
    See https://en.wikipedia.org/wiki/Normal_probability_plot.
    Rankits are computed in the same way as qqnorm does in R.
    @param data flex array
    @param rankits_sel only use the rankits in a certain range. Useful for outlier rejection. Should be
    a tuple such as (-0.5,0.5).
    @param plot whether to show the normal probability plot
    """
    from scitbx.math import distributions
    import numpy as np
    norm = distributions.normal_distribution()

    n = len(data)
    if n <= 10:
      a = 3.0 / 8
    else:
      a = 0.5

    sorted_data = flex.sorted(data)
    rankits = flex.double([norm.quantile((i+1-a)/(n+1-(2*a))) for i in range(n)])

    if rankits_sel is None:
      corr, slope, offset = self.get_overall_correlation_flex(sorted_data, rankits)
    else:
      sel = (rankits >= rankits_sel[0]) & (rankits <= rankits_sel[1])
      corr, slope, offset = self.get_overall_correlation_flex(sorted_data.select(sel), rankits.select(sel))

    if plot:
      from matplotlib import pyplot as plt
      f = plt.figure(0)
      lim = -5, 5
      x = np.linspace(lim[0],lim[1],100) # 100 linearly spaced numbers
      y = slope * x + offset
      plt.plot(sorted_data, rankits, '-')
      #plt.plot(x,y)
      plt.title("CC: %.3f Slope: %.3f Offset: %.3f"%(corr, slope, offset))
      plt.xlabel("Sorted data")
      plt.ylabel("Rankits")
      plt.xlim(lim); plt.ylim(lim)
      plt.gca().set_aspect('equal')

      f = plt.figure(1)
      h = flex.histogram(sorted_data, n_slots=100, data_min = lim[0], data_max = lim[1])
      stats = flex.mean_and_variance(sorted_data)
      plt.plot(h.slot_centers().as_numpy_array(), h.slots().as_numpy_array(), '-')
      plt.xlim(lim)
      plt.xlabel("Sorted data")
      plt.ylabel("Count")
      plt.title("Normalized data mean: %.3f +/- %.3f"%(stats.mean(), stats.unweighted_sample_standard_deviation()))

      if self.scaler.params.raw_data.error_models.sdfac_refine.plot_refinement_steps:
        plt.ion()
        plt.pause(0.05)

    return corr, slope, offset
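
The rankit loop above uses Blom-style plotting positions, (i + 1 - a)/(n + 1 - 2a). A sketch of the same positions with scipy standing in for scitbx.math.distributions (an assumed substitution):

from scipy.stats import norm

def rankits(n):
    a = 3.0 / 8 if n <= 10 else 0.5   # Blom's constant for small n, 0.5 otherwise
    return [norm.ppf((i + 1 - a) / (n + 1 - 2 * a)) for i in range(n)]

print(rankits(5))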
Example #34
def volume_weight(map, threshold=0.3):
    #threshold = threshold * flex.max( map )
    map_1d = map.as_1d()
    mean_and_var = flex.mean_and_variance(map_1d)
    threshold = mean_and_var.mean(
    ) - mean_and_var.unweighted_standard_error_of_mean()
    molecule = flex.bool(map_1d >= threshold)
    molecule_tot = flex.sum(molecule.as_double())
    fraction = molecule_tot / map.size()
    return fraction
Example #35
 def is_correct(self, shoebox, mask, n_sigma, min_pixels):
   from scitbx.array_family import flex
   flags = [m & (1 << 0) for m in mask]
   pixels = flex.select(shoebox, flags=flags)
   assert(len(pixels) >= min_pixels)
   meanp = flex.mean_and_variance(flex.double(pixels))
   maxp = flex.max(flex.double(pixels))
   m = meanp.mean()
   s = meanp.unweighted_sample_standard_deviation()
   assert(maxp <= m + n_sigma * s)
Example #36
 def is_correct(self, shoebox, mask, n_sigma, min_pixels):
     from scitbx.array_family import flex
     flags = [m & (1 << 0) for m in mask]
     pixels = flex.select(shoebox, flags=flags)
     assert (len(pixels) >= min_pixels)
     meanp = flex.mean_and_variance(flex.double(pixels))
     maxp = flex.max(flex.double(pixels))
     m = meanp.mean()
     s = meanp.unweighted_sample_standard_deviation()
     assert (maxp <= m + n_sigma * s)
Example #37
    def __init__(self, rs_vectors, percentile=0.05):
        from scitbx.array_family import flex
        NEAR = 10
        self.NNBIN = 5  # target number of neighbors per histogram bin

        # nearest neighbor analysis
        from annlib_ext import AnnAdaptor
        query = flex.double()
        for spot in rs_vectors:  # spots, in reciprocal space xyz
            query.append(spot[0])
            query.append(spot[1])
            query.append(spot[2])

        assert len(
            rs_vectors) > NEAR  # Can't do nearest neighbor with too few spots

        IS_adapt = AnnAdaptor(data=query, dim=3, k=1)
        IS_adapt.query(query)

        direct = flex.double()
        for i in range(len(rs_vectors)):
            direct.append(1.0 / math.sqrt(IS_adapt.distances[i]))

        # determine the most probable nearest neighbor distance (direct space)
        hst = flex.histogram(direct, n_slots=int(len(rs_vectors) / self.NNBIN))
        centers = hst.slot_centers()
        islot = hst.slots()
        highest_bin_height = flex.max(islot)
        most_probable_neighbor = centers[list(islot).index(highest_bin_height)]

        if False:  # to print out the histogramming analysis
            smin, smax = flex.min(direct), flex.max(direct)
            stats = flex.mean_and_variance(direct)
            import sys
            out = sys.stdout
            print("     range:     %6.2f - %.2f" % (smin, smax), file=out)
            print("     mean:      %6.2f +/- %6.2f on N = %d" %
                  (stats.mean(), stats.unweighted_sample_standard_deviation(),
                   direct.size()),
                  file=out)
            hst.show(f=out, prefix="    ", format_cutoffs="%6.2f")
            print("", file=out)

        # determine the 5th-percentile direct-space distance
        perm = flex.sort_permutation(direct, reverse=True)
        percentile = direct[perm[int(percentile * len(rs_vectors))]]

        MAXTOL = 1.5  # Margin of error for max unit cell estimate
        self.max_cell = max(MAXTOL * most_probable_neighbor,
                            MAXTOL * percentile)

        if False:
            self.plot(direct)
Example #38
    def _calculate_centroids(self, coords, intensity, spots):
        """Calculate the spot centroids.

        Params:
            coords The spot coords
            intensity The spot intensities
            spots The pixel-spot mapping

        Returns:
            (centroid position, centroid variance)

        """
        from scitbx.array_family import flex

        # Loop through all the spots
        centroid_pos = flex.vec3_double()
        centroid_var = flex.vec3_double()
        for s in spots:

            # Get pixel coords and values
            pixel_coords = [[x + 0.5 for x in coords[i]] for i in s]
            pixel_values = flex.double([intensity[i] for i in s])
            pixel_x, pixel_y, pixel_z = zip(*pixel_coords)

            # Calculate the centroid and variance
            xc = flex.mean_and_variance(flex.double(pixel_x), pixel_values)
            yc = flex.mean_and_variance(flex.double(pixel_y), pixel_values)
            zc = flex.mean_and_variance(flex.double(pixel_z), pixel_values)

            # Add the centroid and variance
            centroid_pos.append((xc.mean(), yc.mean(), zc.mean()))
            centroid_var.append((
                xc.gsl_stats_wvariance(),
                yc.gsl_stats_wvariance(),
                zc.gsl_stats_wvariance(),
            ))

        # Return the centroid and variance
        return centroid_pos, centroid_var
Example #39
 def assert_std_norm(self, z):
   from dials.array_family import flex
   mv = flex.mean_and_variance(z)
   m = mv.mean()
   s = mv.unweighted_sample_standard_deviation()
   # Mean-test failure rate:
   # 5*P(abs(X/1000) > 0.1336) where X is normally distributed
   #   with mean 0 and standard deviation sqrt(1000)
   if (abs(m) > 0.1336) or (abs(s - 1.0) > 0.1):
     #from matplotlib import pylab
     #pylab.hist(list(z), 100)
     #pylab.show()
     raise Exception('Mean %f (abs. value <0.1336), Sdev %f (>0.9, <1.1)' % (m, s))
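
A back-of-envelope check of the failure rate quoted in the comment, assuming len(z) == 1000 (so the mean of z has standard deviation 1/sqrt(1000)):

from math import erf, sqrt

t = 0.1336 * sqrt(1000)                 # threshold in units of sd(mean) = 1/sqrt(1000)
p_two_sided = 1.0 - erf(t / sqrt(2.0))  # P(|Z| > t) for standard normal Z
print(5 * p_two_sided)                  # roughly 1e-4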
Example #40
  def same_sensor_table(self,verbose=True):
    radii = flex.double() # from-instrument-center distance in pixels
    delrot= flex.double() # delta rotation in degrees
    meanrot=flex.double() # mean rotation in degrees
    weight= flex.double() #
    displacement = [] # vector between two same-sensor ASICS in pixels
    for x in range(len(self.tiles) // 8):
      delrot.append(self.tile_rotations.x[2*x] - self.tile_rotations.x[1+2*x])
      meanrot.append(0.5*(self.tile_rotations.x[2*x] + self.tile_rotations.x[1+2*x]))
      radii.append((self.radii[2*x]+self.radii[2*x+1])/2)
      weight.append(min([self.tilecounts[2*x],self.tilecounts[2*x+1]]))
      displacement.append(   col((self.To_x[2*x+1], self.To_y[2*x+1]))
                            -col((self.tile_translations.x[2*(2*x+1)], self.tile_translations.x[2*(2*x+1)+1]))
                            -col((self.To_x[2*x], self.To_y[2*x]))
                            +col((self.tile_translations.x[2*(2*x)], self.tile_translations.x[2*(2*x)+1]))  )

    unrotated_displacement = [] # same, except correct for the off-square rotation of this sensor
    for i,a in enumerate(displacement):
      corrected_a = a.rotate_2d(angle=meanrot[i],deg=True)
      while (corrected_a[0] < 0. or abs(corrected_a[1]) > abs(corrected_a[0])):
        corrected_a = corrected_a.rotate_2d(angle=90., deg=True)
      unrotated_displacement.append( corrected_a )

    order = flex.sort_permutation(radii)
    if verbose:
      for x in order:
        print "%02d %02d %5.0f"%(2*x,2*x+1,weight[x]),
        print "%6.1f"%radii[x],
        print "%5.2f"%(delrot[x]),
        print "%6.3f"%(displacement[x].length()-194.), # ASIC is 194; just print gap
        #print "  %6.3f"%(self.tile_distances.x[x])
        print "lateral %7.3f transverse %7.3f pix"%(unrotated_displacement[x][0], unrotated_displacement[x][1])
    stats = flex.mean_and_variance(flex.double([t.length()-194. for t in displacement]),weight)
    print " sensor gap is %7.3f px +/- %7.3f"%(stats.mean(), stats.gsl_stats_wsd())
    stats = flex.mean_and_variance(flex.double([t[0] for t in unrotated_displacement]),weight)
    print "lateral gap is %7.3f px +/- %7.3f"%(stats.mean(), stats.gsl_stats_wsd())
    stats = flex.mean_and_variance(flex.double([t[1] for t in unrotated_displacement]),weight)
    print "transverse gap is %7.3f px +/- %7.3f"%(stats.mean(), stats.gsl_stats_wsd())
Example #41
0
  def __init__(self, rs_vectors, percentile=0.05):
    from scitbx.array_family import flex
    NEAR = 10
    self.NNBIN = 5 # target number of neighbors per histogram bin

    # nearest neighbor analysis
    from annlib_ext import AnnAdaptor
    query = flex.double()
    for spot in rs_vectors: # spots, in reciprocal space xyz
      query.append(spot[0])
      query.append(spot[1])
      query.append(spot[2])

    assert len(rs_vectors)>NEAR # Can't do nearest neighbor with too few spots

    IS_adapt = AnnAdaptor(data=query,dim=3,k=1)
    IS_adapt.query(query)

    direct = flex.double()
    for i in xrange(len(rs_vectors)):
       # AnnAdaptor distances are squared; 1/sqrt converts the squared
       # reciprocal-space distance into a direct-space distance
       direct.append(1.0/math.sqrt(IS_adapt.distances[i]))

    # determine the most probable nearest neighbor distance (direct space)
    hst = flex.histogram(direct, n_slots=int(len(rs_vectors)/self.NNBIN))
    centers = hst.slot_centers()
    islot = hst.slots()
    highest_bin_height = flex.max(islot)
    most_probable_neighbor = centers[list(islot).index(highest_bin_height)]

    if False:  # to print out the histogramming analysis
      smin, smax = flex.min(direct), flex.max(direct)
      stats = flex.mean_and_variance(direct)
      import sys
      out = sys.stdout
      print >> out, "     range:     %6.2f - %.2f" % (smin, smax)
      print >> out, "     mean:      %6.2f +/- %6.2f on N = %d" % (
        stats.mean(), stats.unweighted_sample_standard_deviation(), direct.size())
      hst.show(f=out, prefix="    ", format_cutoffs="%6.2f")
      print >> out, ""

    # determine the direct-space distance at the given percentile from the
    # top of the distribution (default 0.05, i.e. the 95th percentile)
    perm = flex.sort_permutation(direct, reverse=True)
    percentile = direct[perm[int(percentile * len(rs_vectors))]]

    MAXTOL = 1.5 # Margin of error for max unit cell estimate
    self.max_cell = max( MAXTOL * most_probable_neighbor,
                         MAXTOL * percentile)

    if False:
      self.plot(direct)
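A standalone sketch of the histogram step above, on synthetic direct-space distances: the most probable nearest-neighbour distance is simply the centre of the tallest histogram slot (the flex calls are the same ones used in the example; the numbers are invented).

    from scitbx.array_family import flex

    direct = flex.double([9.8, 10.1, 10.0, 10.2, 9.9, 10.0, 25.0])  # toy data
    hst = flex.histogram(direct, n_slots=5)
    centers = hst.slot_centers()
    islot = hst.slots()
    highest_bin_height = flex.max(islot)
    most_probable = centers[list(islot).index(highest_bin_height)]
    print(most_probable)  # centre of the most populated slot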
Example #42
0
    def assert_std_norm(self, z):
        from dials.array_family import flex

        mv = flex.mean_and_variance(z)
        m = mv.mean()
        s = mv.unweighted_sample_standard_deviation()
        # Mean-test failure rate:
        # 5*P(abs(X/1000) > 0.1336) where X is normally distributed
        #   with mean 0 and standard deviation sqrt(1000)
        if (abs(m) > 0.1336) or (abs(s - 1.0) > 0.1):
            # from matplotlib import pylab
            # pylab.hist(list(z), 100)
            # pylab.show()
            raise Exception("Mean %f (abs. value <0.1336), Sdev %f (>0.9, <1.1)" % (m, s))
Example #43
0
        def __init__(self, crystal, beam, detector, goniometer, scan,
                     reflections):

            from dials.array_family import flex

            # Calculate a list of angles and zeta's
            tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector,
                                                     goniometer, scan,
                                                     reflections)

            # Calculate zeta * (tau +- dphi / 2) / math.sqrt(2)
            # (only X = tau * zeta is needed for this variance estimate)
            X = tau * zeta
            mv = flex.mean_and_variance(X)
            self.sigma = math.sqrt(mv.unweighted_sample_variance())
Example #44
0
  def _calculate_centroids(self, coords, intensity, spots):
    '''Calculate the spot centroids.

    Params:
        coords The spot coords
        intensity The spot intensities
        spots The pixel-spot mapping

    Returns:
        (centroid position, centroid variance)

    '''
    from scitbx.array_family import flex

    # Loop through all the spots
    centroid_pos = flex.vec3_double()
    centroid_var = flex.vec3_double()
    for s in spots:

      # Get pixel coords (shifted by +0.5 to the pixel centre) and values
      pixel_coords = [map(lambda x: x + 0.5, coords[i]) for i in s]
      pixel_values = flex.double([intensity[i] for i in s])
      pixel_x, pixel_y, pixel_z = zip(*pixel_coords)

      # Calculate the centroid and variance
      xc = flex.mean_and_variance(flex.double(pixel_x), pixel_values)
      yc = flex.mean_and_variance(flex.double(pixel_y), pixel_values)
      zc = flex.mean_and_variance(flex.double(pixel_z), pixel_values)

      # Add the centroid and variance
      centroid_pos.append((xc.mean(), yc.mean(), zc.mean()))
      centroid_var.append((xc.gsl_stats_wvariance(),
                           yc.gsl_stats_wvariance(),
                           zc.gsl_stats_wvariance()))

    # Return the centroid and variance
    return centroid_pos, centroid_var
Example #45
0
    def __init__(self, crystal, beam, detector, goniometer, scan, reflections):

      from dials.array_family import flex
      from math import sqrt

      # Get the oscillation width
      dphi2 = scan.get_oscillation(deg=False)[1] / 2.0

      # Calculate a list of angles and zeta's
      tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector,
                                               goniometer, scan, reflections)

      # Calculate zeta * (tau +- dphi / 2) / sqrt(2)
      # (only X = tau * zeta is needed for this variance estimate)
      X = tau * zeta
      mv = flex.mean_and_variance(X)
      self.sigma = sqrt(mv.unweighted_sample_variance())
Example #46
0
  def run(self):
    from dials.algorithms.image.filter import fano_filter
    from scitbx.array_family import flex
    from random import randint

    # Create an image
    image = flex.random_double(2000 * 2000)
    image.reshape(flex.grid(2000, 2000))
    mask = flex.random_bool(2000 * 2000, 0.99).as_int()
    mask.reshape(flex.grid(2000, 2000))

    # Apply the masked Fano filter (kernel half-sizes (3, 3), i.e. a 7x7 window)
    mask2 = mask.deep_copy()
    result = fano_filter(image, mask2, (3, 3), 2)  # avoid shadowing the imported function
    mean = result.mean()
    var = result.sample_variance()
    fano = result.fano()

    # For a selection of random points, check the filter output against
    # the mean, variance and Fano factor computed directly from the
    # masked 7x7 neighbourhood
    eps = 1e-7
    for _ in range(10000):
      i = randint(10, 1990)
      j = randint(10, 1990)
      m1 = mean[j,i]
      v1 = var[j,i]
      f1 = fano[j,i]
      p = image[j-3:j+4,i-3:i+4]
      m = mask[j-3:j+4,i-3:i+4]
      if mask[j,i] == 0:
        m2 = 0.0
        v2 = 0.0
        f2 = 1.0
      else:
        p = flex.select(p, flags=m)
        mv = flex.mean_and_variance(flex.double(p))
        m2 = mv.mean()
        v2 = mv.unweighted_sample_variance()
        f2 = v2 / m2
      assert(abs(m1 - m2) <= eps)
      assert(abs(v1 - v2) <= eps)
      assert(abs(f1 - f2) <= eps)

    # Test passed
    print 'OK'
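A hedged sketch of the per-window statistics the test recomputes by hand (toy values, flex only): the Fano factor is the sample variance divided by the mean, which is what makes it useful for picking out photon-counting noise, since it is close to 1 for Poisson-distributed counts.

    from scitbx.array_family import flex

    window = flex.double([3.0, 5.0, 4.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0])  # toy counts
    mv = flex.mean_and_variance(window)
    fano = mv.unweighted_sample_variance() / mv.mean()
    print(fano)  # a value near 1 would be consistent with Poisson statistics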
Example #47
0
 def tst_zero_intensity(self):
   from math import sqrt
   from scitbx.array_family import flex
   counts = 0
   num = 100
   rlist = self.generate_profiles(num, counts)
   I = []
   S = []
   for r in rlist:
     I.append(r['intensity.sum.value'])
     S.append(sqrt(r['intensity.sum.variance']))
   Z = [(i - counts) / s for i, s in zip(I, S)]
   mv = flex.mean_and_variance(flex.double(Z))
   meanz = mv.mean()
   varz = mv.unweighted_sample_variance()
   sdevz = sqrt(varz)
   print "Z: mean=%f, sdev=%f" % (meanz, sdevz)
   assert(abs(meanz - 0.0) < (5 * sdevz / sqrt(num)))
   assert(abs(sdevz - 1.0) < 1e-1)
   print 'OK'
Example #48
0
  def get_active_data_corrected_with_fft(self):
    #data = self.imgobj.linearintdata
    data = self.imgobj.correct_gain_in_place(
      filename = self.phil.speckfinder.dark_stddev,
      adu_scale = self.phil.speckfinder.dark_adu_scale,
      phil = self.phil
    )
    indexing = []
    for iraw,raw_asic in enumerate(self.corners):
      filtered_data = self.imgobj.correct_background_by_block(raw_asic)

      active_data = filtered_data.as_double().as_1d()

      order = flex.sort_permutation(active_data)
      stats = flex.mean_and_variance(active_data)
      if self.verbose:
        #print "Stats on %d pixels"%len(active_data)
        print "stats are mean",stats.mean(),"sigma",stats.unweighted_sample_standard_deviation()
        #print "The 90-percentile pixel is ",active_data[order[int(0.9*len(active_data))]]
        #print "The 99-percentile pixel is ",active_data[order[int(0.99*len(active_data))]]

      maximas = flex.vec2_double()
      for idx in xrange(len(active_data)-1, int(0.9*len(active_data)), -1):
        if active_data[order[idx]] > stats.mean() + 12.0*stats.unweighted_sample_standard_deviation():
          if self.verbose: print "    ", idx, active_data[order[idx]]
          irow = order[idx] // (raw_asic[3]-raw_asic[1])
          icol = order[idx] % (raw_asic[3]-raw_asic[1])
          maximas.append((irow, icol))
      CLUS = clustering(maximas)
      coords = CLUS.as_spot_center_of_mass(filtered_data,raw_asic,stats.mean())
      intensities = CLUS.intensities
      for coord,height in zip(coords,intensities):
        self.green.append(coord)
        indexing.append( (
          coord[0] * float(self.inputpd["pixel_size"]),
          coord[1] * float(self.inputpd["pixel_size"]),
          0.0, # 0 -degree offset for still image
          height)
        )
    return indexing
Example #49
0
 def same_sensor_table(self,verbose=True):
   radii = flex.double() # from-instrument-center distance in pixels
   delrot= flex.double() # delta rotation in degrees
   weight= flex.double() # statistical weight: min spot count of the two ASICs
   displacement = [] # vector between two same-sensor ASICS in pixels
   for x in xrange(len(self.tiles) // 8):
     delrot.append(self.x[len(self.tiles) // 2 +2*x] - self.x[len(self.tiles) // 2 + 1 +2*x])
     radii.append((self.radii[2*x]+self.radii[2*x+1])/2)
     weight.append(min([self.tilecounts[2*x],self.tilecounts[2*x+1]]))
     displacement.append(   col((self.To_x[2*x+1], self.To_y[2*x+1]))
                           -col((self.x[2*(2*x+1)], self.x[2*(2*x+1)+1]))
                           -col((self.To_x[2*x], self.To_y[2*x]))
                           +col((self.x[2*(2*x)], self.x[2*(2*x)+1]))  )
   order = flex.sort_permutation(radii)
   if verbose:
     for x in order:
       print "%02d %02d %5.0f"%(2*x,2*x+1,weight[x]),
       print "%6.1f"%radii[x],
       print "%5.2f"%(delrot[x]),
       print "%6.3f"%(displacement[x].length()-194.) # ASIC is 194; just print gap
   stats = flex.mean_and_variance(flex.double([t.length()-194. for t in displacement]),weight)
   print "sensor gap is %7.3f px +/- %7.3f"%(stats.mean(), stats.gsl_stats_wsd())
Example #50
0
  def get_active_data_sigma(self):
    data = self.imgobj.linearintdata
    indexing = []
    for asic in self.corners:
      block = data.matrix_copy_block(
          i_row=asic[0],i_column=asic[1],
          n_rows=asic[2]-asic[0],
          n_columns=asic[3]-asic[1])
      active_data = block.as_1d().as_double()

      order = flex.sort_permutation(active_data)
      if self.verbose:
        print "The mean is ",flex.mean(active_data),"on %d pixels"%len(active_data)
        print "The 90-percentile pixel is ",active_data[order[int(0.9*len(active_data))]]
        print "The 99-percentile pixel is ",active_data[order[int(0.99*len(active_data))]]

      stats = flex.mean_and_variance(active_data)
      print "stats are mean",stats.mean(),"sigma",stats.unweighted_sample_standard_deviation()
      maximas = flex.vec2_double()
      for idx in xrange(len(active_data)-1, int(0.9*len(active_data)), -1):
        if active_data[order[idx]] > stats.mean() + 6.0*stats.unweighted_sample_standard_deviation():
          if self.verbose: print "    ", idx, active_data[order[idx]]
          irow = order[idx] // (asic[3]-asic[1])
          icol = order[idx] % (asic[3]-asic[1])
          #self.green.append((asic[0]+irow, asic[1]+icol))
          maximas.append((irow, icol))
      CLUS = clustering(maximas)
      #coords = CLUS.as_spot_max_pixels(block,asic)
      coords = CLUS.as_spot_center_of_mass(block,asic,stats.mean())
      intensities = CLUS.intensities
      for coord,height in zip(coords,intensities):
        self.green.append(coord)
        indexing.append( (
          coord[0] * float(self.inputpd["pixel_size"]),
          coord[1] * float(self.inputpd["pixel_size"]),
          0.0, # 0 -degree offset for still image
          height)
        )
    return indexing
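A minimal sketch of the peak-picking rule used in get_active_data_sigma, with invented numbers: sort ascending, then walk down from the brightest pixel and keep everything above mean + 6 sigma.

    from scitbx.array_family import flex

    active_data = flex.double([1.0] * 99 + [9.0])  # flat background, one hot pixel
    stats = flex.mean_and_variance(active_data)
    cutoff = stats.mean() + 6.0 * stats.unweighted_sample_standard_deviation()
    order = flex.sort_permutation(active_data)  # ascending
    for idx in range(len(active_data) - 1, -1, -1):
        if active_data[order[idx]] <= cutoff:
            break  # everything below this index is fainter still
        print(order[idx], active_data[order[idx]])  # flags only the hot pixel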
Example #51
0
  def tst_masked_mean_filter(self):
    from dials.algorithms.image.filter import mean_filter
    from scitbx.array_family import flex
    from random import randint

    # Create an image
    image = flex.random_double(2000 * 2000)
    image.reshape(flex.grid(2000, 2000))
    mask = flex.random_bool(2000 * 2000, 0.99).as_int()
    mask.reshape(flex.grid(2000, 2000))

    # Apply the masked mean filter (kernel half-sizes (3, 3), i.e. a 7x7 window)
    mask2 = mask.deep_copy()
    mean = mean_filter(image, mask2, (3, 3), 1)

    # For a selection of random points, check the filtered value against
    # the mean computed directly from the masked 7x7 neighbourhood
    eps = 1e-7
    for _ in range(10000):
      i = randint(10, 1990)
      j = randint(10, 1990)
      m1 = mean[j,i]
      p = image[j-3:j+4,i-3:i+4]
      m = mask[j-3:j+4,i-3:i+4]
      if mask[j,i] == 0:
        m2 = 0.0
      else:
        p = flex.select(p, flags=m)
        mv = flex.mean_and_variance(flex.double(p))
        m2 = mv.mean()
        s1 = flex.sum(flex.double(p))
        s2 = flex.sum(m.as_1d())
      assert(abs(m1 - m2) <= eps)

    # Test passed
    print 'OK'
Example #52
0
def estimate_global_threshold(image, mask=None):

  from scitbx.array_family import flex
  from scitbx import matrix

  n_above_threshold = flex.size_t()
  threshold = flex.double()
  for i in range(1, 20):
    g = 1.5**i
    g = int(g)
    n_above_threshold.append((image > g).count(True))
    threshold.append(g)

  # Find the elbow point of the curve, in the same manner as that used by
  # distl spotfinder for resolution method 1 (Zhang et al 2006).
  # See also dials/algorithms/spot_finding/per_image_analysis.py

  x = threshold.as_double()
  y = n_above_threshold.as_double()
  slopes = (y[-1] - y[:-1])/(x[-1] - x[:-1])
  p_m = flex.min_index(slopes)

  x1 = matrix.col((x[p_m], y[p_m]))
  x2 = matrix.col((x[-1], y[-1]))

  gaps = flex.double()
  v = matrix.col(((x2[1] - x1[1]), -(x2[0] - x1[0]))).normalize()

  for i in range(p_m, len(x)):
    x0 = matrix.col((x[i], y[i]))
    r = x1 - x0
    g = abs(v.dot(r))
    gaps.append(g)

  mv = flex.mean_and_variance(gaps)
  s = mv.unweighted_sample_standard_deviation()

  p_k = flex.max_index(gaps)
  g_k = gaps[p_k]
  p_g = p_k

  #x_g = x[p_g + p_m]
  #y_g = y[p_g + p_m]

  #x_g = x[p_g + p_m -1]
  #y_g = y[p_g + p_m -1]

  # more conservative, choose point 2 left of the elbow point
  x_g = x[p_g + p_m -2]
  y_g = y[p_g + p_m -2]

  #from matplotlib import pyplot
  #pyplot.scatter(threshold, n_above_threshold)
  ##for i in range(len(threshold)-1):
    ##pyplot.plot([threshold[i], threshold[-1]],
                ##[n_above_threshold[i], n_above_threshold[-1]])
  ##for i in range(1, len(threshold)):
    ##pyplot.plot([threshold[0], threshold[i]],
                ##[n_above_threshold[0], n_above_threshold[i]])
  #pyplot.plot(
    #[threshold[p_m], threshold[-1]], [n_above_threshold[p_m], n_above_threshold[-1]])
  #pyplot.plot(
    #[x_g, threshold[-1]], [y_g, n_above_threshold[-1]])
  #pyplot.show()

  return x_g
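The elbow search above projects each point onto the unit normal of the chord running from the steepest-slope point to the last point; the knee is where that perpendicular distance peaks. A self-contained sketch with invented curve values (scitbx.matrix only):

    from scitbx import matrix

    xy = [(1, 100), (2, 40), (3, 15), (4, 8), (5, 5), (6, 4), (7, 3.5)]
    x1 = matrix.col(xy[0])   # for simplicity, chord over the whole toy curve
    x2 = matrix.col(xy[-1])
    v = matrix.col((x2[1] - x1[1], -(x2[0] - x1[0]))).normalize()  # unit normal
    gaps = [abs(v.dot(x1 - matrix.col(p))) for p in xy]
    print(gaps.index(max(gaps)))  # 2, i.e. the point (3, 15) is the elbow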
Example #53
0
  def __init__ (self, pdb_hierarchy, xray_structure, ignore_hd=True,
      collect_outliers=True) :
    for name in self.__slots__ :
      setattr(self, name, None)
    validation.__init__(self)
    assert len(xray_structure.scatterers()) != 0
    from cctbx import adptbx
    from scitbx.array_family import flex
    xrs = xray_structure
    self.n_total = xrs.scatterers().size() # always include H/D
    self.results = None
    pdb_atoms = pdb_hierarchy.atoms()
    pdb_atoms.reset_i_seq()
    hd_selection = xrs.hd_selection()
    subtract_hd = True
    self.n_all = hd_selection.size()
    self.n_hd = hd_selection.count(True)
    if (ignore_hd) and (0 < self.n_hd < self.n_all) :
      xrs = xrs.select(~hd_selection)
      subtract_hd = False
    u_isos = xrs.extract_u_iso_or_u_equiv()
    occ = xrs.scatterers().extract_occupancies()
    self.n_atoms = xrs.scatterers().size()
    self.n_non_hd = self.n_all - self.n_hd
    self.n_aniso = xrs.use_u_aniso().count(True)
    self.n_aniso_h = (xray_structure.use_u_aniso() & hd_selection).count(True)
    self.n_npd = xrs.is_positive_definite_u().count(False)
    self.n_zero_b = (u_isos == 0).count(True)
    self.n_zero_occ = (occ == 0).count(True)
    u_cutoff_high = sys.maxint
    u_cutoff_low = 0
    u_non_zero = u_isos.select(u_isos > 0)
    if (len(u_non_zero) > 1) :
      mv = flex.mean_and_variance(u_non_zero)
      sigma = mv.unweighted_sample_standard_deviation()
      u_cutoff_high = mv.mean() + (4.0 * sigma)
      u_cutoff_low = mv.mean() - (4.0 * sigma)
    self.b_mean = adptbx.u_as_b(flex.mean(u_isos))
    self.b_min = adptbx.u_as_b(flex.min(u_isos))
    self.b_max = adptbx.u_as_b(flex.max(u_isos))
    self.o_mean = flex.mean(occ)
    self.o_min = flex.min(occ)
    self.o_max = flex.max(occ)
    self.n_outliers = self.n_aniso_h + self.n_npd
    self.zero_occ = []
    self.partial_occ = []
    self.different_occ = []
    self.bad_adps = []
    self.b_histogram = None # TODO
    def is_u_iso_outlier (u) :
      return (u < u_cutoff_low) or (u > u_cutoff_high) or (u <= 0)
    # these statistics cover all atoms!
    occupancies = xray_structure.scatterers().extract_occupancies()
    u_isos = xray_structure.extract_u_iso_or_u_equiv()
    collected = flex.bool(occupancies.size(), False)
    if (collect_outliers) :
      for i_seq, occ in enumerate(occupancies) :
        if (hd_selection[i_seq] and ignore_hd) or collected[i_seq] :
          continue
        pdb_atom = pdb_atoms[i_seq]
        parent = pdb_atom.parent()
        if (occ <= 0) :
          group_atoms = parent.atoms()
          labels = pdb_atom.fetch_labels()
          if (len(group_atoms) > 1) and (group_atoms.extract_occ().all_eq(0)) :
            i_seqs = group_atoms.extract_i_seq()
            b_mean = adptbx.u_as_b(flex.mean(u_isos.select(i_seqs)))
            outlier = residue_occupancy(
              chain_id=labels.chain_id,
              resseq=labels.resseq,
              icode=labels.icode,
              altloc=labels.altloc,
              resname=labels.resname,
              occupancy=occ,
              outlier=True,
              xyz=group_atoms.extract_xyz().mean(),
              b_iso=b_mean)
            self.zero_occ.append(outlier)
            self.n_outliers += 1
            collected.set_selected(i_seqs, True)
          else :
            assert (pdb_atom.occ == occ), "%s: %s <--> %s" % (pdb_atom.id_str(),
              pdb_atom.occ, occ)
            outlier = atom_occupancy(
              pdb_atom=pdb_atom,
              occupancy=occ,
              b_iso=adptbx.u_as_b(u_isos[i_seq]),
              xyz=pdb_atom.xyz,
              outlier=True)
            self.zero_occ.append(outlier)
            self.n_outliers += 1
        elif is_u_iso_outlier(u_isos[i_seq]) :
          # zero displacements will always be recorded on a per-atom basis
          if (u_isos[i_seq] <= 0) :
            outlier = atom_bfactor(
              pdb_atom=pdb_atom,
              occupancy=occ,
              b_iso=adptbx.u_as_b(u_isos[i_seq]),
              xyz=pdb_atom.xyz,
              outlier=True)
            self.bad_adps.append(outlier)
            self.n_outliers += 1
          else :
            # if the average displacement for the entire residue falls outside
            # the cutoffs, save as a single residue outlier
            group_atoms = parent.atoms()
            i_seqs = group_atoms.extract_i_seq()
            u_mean = flex.mean(u_isos.select(i_seqs))
            if is_u_iso_outlier(u_mean) :
              labels = pdb_atom.fetch_labels()
              outlier = residue_bfactor(
                chain_id=labels.chain_id,
                resseq=labels.resseq,
                icode=labels.icode,
                altloc=labels.altloc,
                resname=labels.resname,
                occupancy=occ,
                outlier=True,
                xyz=group_atoms.extract_xyz().mean(),
                b_iso=adptbx.u_as_b(u_mean))
              self.bad_adps.append(outlier)
              self.n_outliers += 1
              collected.set_selected(i_seqs, True)
            # otherwise, just save this atom
            else :
              outlier = atom_bfactor(
                pdb_atom=pdb_atom,
                occupancy=occ,
                b_iso=adptbx.u_as_b(u_isos[i_seq]),
                xyz=pdb_atom.xyz,
                outlier=True)
              self.bad_adps.append(outlier)
              self.n_outliers += 1

      # analyze occupancies for first model
      model = pdb_hierarchy.models()[0]
      for chain in model.chains() :
        residue_groups = chain.residue_groups()
        for residue_group in residue_groups :
          # get unique set of atom names
          atom_names = set()
          for atom in residue_group.atoms():
            atom_names.add(atom.name.strip())

          # check total occupancy for each atom
          for name in atom_names:
            occupancy = 0.0
            atoms = list()
            for atom_group in residue_group.atom_groups():
              atom = atom_group.get_atom(name)
              if (atom is not None):
                occupancy += atom.occ
                atoms.append(atom)

            if ( not approx_equal(occupancy, 1.0, out=None, eps=1.0e-3) ):
              for atom in atoms:
                outlier = atom_occupancy(
                  pdb_atom=atom,
                  occupancy=atom.occ,
                  b_iso=adptbx.u_as_b(atom.b),
                  xyz=atom.xyz,
                  outlier=True)
                self.partial_occ.append(outlier)
                self.n_outliers += 1

          # check that atoms in an atom group have the same occupancy
          for atom_group in residue_group.atom_groups():
            residue_is_okay = True
            base_occupancy = atom_group.atoms()[0].occ
            for atom in atom_group.atoms():
              if (not approx_equal(base_occupancy, atom.occ,
                                   out=None, eps=1.0e-3)):
                labels = atom.fetch_labels()
                i_seqs = atom_group.atoms().extract_i_seq()
                b_mean = adptbx.u_as_b(flex.mean(u_isos.select(i_seqs)))
                outlier = residue_occupancy(
                  chain_id=labels.chain_id,
                  resseq=labels.resseq,
                  icode=labels.icode,
                  altloc=labels.altloc,
                  resname=labels.resname,
                  occupancy=atom.occ,  # was a stale 'occ' left over from the earlier per-atom loop
                  outlier=True,
                  xyz=atom_group.atoms().extract_xyz().mean(),
                  b_iso=b_mean)
                self.different_occ.append(outlier)
                self.n_outliers += 1
                residue_is_okay = False
                break
            if (not residue_is_okay):
              break
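A toy sketch of the +/-4 sigma ADP rule above (numbers invented; units A^2): non-positive u_iso values are always outliers, and otherwise an atom is flagged when its u_iso falls outside mean +/- 4 sigma of the non-zero values.

    from scitbx.array_family import flex

    u_isos = flex.double([0.20, 0.21, 0.22, 0.23, 0.24] * 10 + [1.5])
    u_non_zero = u_isos.select(u_isos > 0)
    mv = flex.mean_and_variance(u_non_zero)
    sigma = mv.unweighted_sample_standard_deviation()
    u_cutoff_low = mv.mean() - 4.0 * sigma
    u_cutoff_high = mv.mean() + 4.0 * sigma
    outliers = [u for u in u_isos if u <= 0 or u < u_cutoff_low or u > u_cutoff_high]
    print(outliers)  # [1.5]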
Example #54
0
        #      err, bg_err,
        #      (flex.mean(r.shoebox_background) - expected_background) < err,
        #      r.intensity / tot_err)

        #    pixels = r.shoebox.select(flex.bool(
        #      [bool(p & MaskCode.Foreground) for p in r.shoebox_mask]))
        #    subtracted = pixels - flex.mean(r.shoebox_background)
        #    print flex.mean(pixels)
        print m, n, intensity, sqrt(variance), r.intensity, sqrt(r.intensity_variance)
        I.append(intensity)
        V.append(variance)

    S = [sqrt(v) for v in V]
    Z = [(i - expected_intensity) / s for i, s in zip(I, S)]

    mv = flex.mean_and_variance(flex.double(Z))
    meanz = mv.mean()
    varz = mv.unweighted_sample_variance()
    sdevz = sqrt(varz)

    print "Z: (mean: %f), (var: %f), (sdev: %f)" % (meanz, varz, sdevz)

    pylab.hist(Z)
    pylab.show()


#  pylab.errorbar(range(len(I)), I, yerr=E, fmt='o')
#  pylab.axhline(expected_intensity)
#  pylab.show()
#
#  pylab.scatter(range(len(ios)), ios)
Example #55
0
    def print_table(self):
        from libtbx import table_utils
        from libtbx.str_utils import format_value

        table_header = [
            "Tile",
            "Dist",
            "Nobs",
            "aRmsd",
            "Rmsd",
            "delx",
            "dely",
            "disp",
            "rotdeg",
            "Rsigma",
            "Tsigma",
            "Transx",
            "Transy",
            "DelRot",
        ]
        table_data = []
        table_data.append(table_header)
        sort_radii = flex.sort_permutation(flex.double(self.radii))
        tile_rmsds = flex.double()
        radial_sigmas = flex.double(64)
        tangen_sigmas = flex.double(64)

        wtaveg = [0.0] * 64
        for x in xrange(64):
            if self.tilecounts[x] >= 3:
                wtaveg[x] = self.weighted_average_angle_deg_from_tile(
                    x, self.post_mean_cv[x], self.correction_vector_x, self.correction_vector_y
                )

        for idx in xrange(64):
            x = sort_radii[idx]
            if self.tilecounts[x] < 3:
                radial = (0, 0)
                tangential = (0, 0)
                rmean, tmean, rsigma, tsigma = (0, 0, 1, 1)
            else:
                radial, tangential, rmean, tmean, rsigma, tsigma = get_radial_tangential_vectors(self, x)

            # paired rotations of two ASICS on the same sensor
            if x % 2 == 0:
                delrot = "%5.2f" % (wtaveg[x] - wtaveg[x + 1])
            else:
                delrot = ""

            radial_sigmas[x] = rsigma
            tangen_sigmas[x] = tsigma
            table_data.append(
                [
                    format_value("%3d", x),
                    format_value("%7.2f", self.radii[x]),
                    format_value("%6d", self.tilecounts[x]),
                    format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
                    format_value("%5.2f", self.tile_rmsd[x]),
                    format_value("%5.2f", self.post_mean_cv[x][0]),
                    format_value("%5.2f", self.post_mean_cv[x][1]),
                    format_value("%5.2f", matrix.col(self.post_mean_cv[x]).length()),
                    format_value("%6.2f", wtaveg[x]),
                    format_value("%6.2f", rsigma),
                    format_value("%6.2f", tsigma),
                    format_value("%5.2f", self.x[2 * x]),
                    format_value("%5.2f", self.x[2 * x + 1]),
                    copy.copy(delrot),
                ]
            )
        table_data.append([""] * len(table_header))
        rstats = flex.mean_and_variance(radial_sigmas, self.tilecounts.as_double())
        tstats = flex.mean_and_variance(tangen_sigmas, self.tilecounts.as_double())
        table_data.append(
            [
                format_value("%3s", "ALL"),
                format_value("%s", ""),
                format_value("%6d", self.overall_N),
                format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
                format_value("%5.2f", self.overall_rmsd),
                format_value("%5.2f", self.overall_cv[0]),
                format_value("%5.2f", self.overall_cv[1]),
                format_value("%5.2f", flex.mean(flex.double([cv.length() for cv in self.post_mean_cv]))),
                format_value("%s", ""),
                format_value("%6.2f", rstats.mean()),
                format_value("%6.2f", tstats.mean()),
                format_value("%s", ""),
                format_value("%s", ""),
                format_value("%s", ""),
            ]
        )

        print
        print table_utils.format(table_data, has_header=1, justify="center", delim=" ")
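A small sketch of how the summary row above pools the per-tile sigmas: flex.mean_and_variance with the tile observation counts as weights gives the count-weighted mean, and gsl_stats_wsd() its weighted spread (toy numbers).

    from scitbx.array_family import flex

    radial_sigmas = flex.double([0.8, 1.2, 1.0])   # per-tile sigmas
    tilecounts = flex.double([100.0, 10.0, 50.0])  # observations per tile
    rstats = flex.mean_and_variance(radial_sigmas, tilecounts)
    print(rstats.mean())           # count-weighted mean sigma (0.8875 here)
    print(rstats.gsl_stats_wsd())  # weighted standard deviation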
Example #56
0
#for k in range(9):
#    #print prof.as_numpy_array()[k].astype(numpy.int32)
#    pylab.imshow(prof.as_numpy_array()[k], vmin=flex.min(prof), vmax=flex.max(prof))
#    pylab.show()
#print ""

#for k in range(9):
##    print (3000 * rprof.as_numpy_array()[k]).astype(numpy.int32)
#    pylab.imshow(rprof.as_numpy_array()[k], vmin=flex.min(rprof), vmax=flex.max(rprof))
#    pylab.show()

# Calculate the correlation
prof_a = prof
prof_b = rprof
n = len(prof_a)
mv_a = flex.mean_and_variance(prof_a.as_1d())
mv_b = flex.mean_and_variance(prof_b.as_1d())
ma, sa = mv_a.mean(), mv_a.unweighted_sample_standard_deviation()
mb, sb = mv_b.mean(), mv_b.unweighted_sample_standard_deviation()
R = (1.0/(n-1.0)) * flex.sum((prof_a-ma) * (prof_b-mb) / (sa*sb))
print "Correlation: ", R

from dials.algorithms.reflection_basis.transform import ideal_profile
iprof = ideal_profile(4, 5)

prof_a = prof
prof_b = iprof
n = len(prof_a)
mv_a = flex.mean_and_variance(prof_a.as_1d())
mv_b = flex.mean_and_variance(prof_b.as_1d())
ma, sa = mv_a.mean(), mv_a.unweighted_sample_standard_deviation()
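The snippet breaks off before repeating the statistics for the ideal profile, but the correlation formula itself is compact enough to demonstrate in isolation: standardise both arrays and average the products of the z-scores with an n-1 normalisation (toy 1-D profiles, flex only).

    from scitbx.array_family import flex

    a = flex.double([1.0, 2.0, 3.0, 4.0])
    b = flex.double([1.1, 1.9, 3.2, 3.8])
    n = len(a)
    mv_a = flex.mean_and_variance(a)
    mv_b = flex.mean_and_variance(b)
    ma, sa = mv_a.mean(), mv_a.unweighted_sample_standard_deviation()
    mb, sb = mv_b.mean(), mv_b.unweighted_sample_standard_deviation()
    R = (1.0 / (n - 1.0)) * flex.sum((a - ma) * (b - mb) / (sa * sb))
    print(R)  # close to 1 for these nearly collinear toy values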
Example #57
0
  def print_table(self):
    from libtbx import table_utils
    from libtbx.str_utils import format_value
    table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg",
                    "Rsigma","Tsigma","Transx","Transy","DelRot","Rotdeg"]
    table_data = []
    table_data.append(table_header)
    sort_radii = flex.sort_permutation(flex.double(self.radii))
    tile_rmsds = flex.double()
    radial_sigmas = flex.double(len(self.tiles) // 4)
    tangen_sigmas = flex.double(len(self.tiles) // 4)

    wtaveg = [0.]*(len(self.tiles) // 4)
    for x in range(len(self.tiles) // 4):
      if self.tilecounts[x] >= 3:
        wtaveg[x] = self.weighted_average_angle_deg_from_tile(x, self.post_mean_cv[x], self.correction_vector_x,
          self.correction_vector_y)

    for idx in range(len(self.tiles) // 4):
      x = sort_radii[idx]
      if self.tilecounts[x] < 3:
        radial = (0,0)
        tangential = (0,0)
        rmean,tmean,rsigma,tsigma=(0,0,1,1)
      else:
        radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x,
          self.post_mean_cv[x],
          self.correction_vector_x, self.correction_vector_y,
          self.model_calcx-self.refined_cntr_x,
          self.model_calcy-self.refined_cntr_y)

      # paired rotations of two ASICS on the same sensor
      if x%2==0:
        # previous method: delrot = "%5.2f"%(wtaveg[x]-wtaveg[x+1])
        delrot = "%5.2f"%(self.tile_rotations.x[x] - self.tile_rotations.x[1+x])
      else:
        delrot = ""

      radial_sigmas[x]=rsigma
      tangen_sigmas[x]=tsigma
      table_data.append(  [
        format_value("%3d",   x),
        format_value("%7.2f", self.radii[x]),
        format_value("%6d",  self.tilecounts[x]),
        format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
        format_value("%5.2f", self.tile_rmsd[x]),
        format_value("%5.2f", self.post_mean_cv[x][0]),
        format_value("%5.2f", self.post_mean_cv[x][1]),
        format_value("%5.2f", matrix.col(self.post_mean_cv[x]).length()),
        format_value("%6.2f", wtaveg[x]),
        format_value("%6.2f", rsigma),
        format_value("%6.2f", tsigma),
        format_value("%5.2f", self.tile_translations.x[2*x]),
        format_value("%5.2f", self.tile_translations.x[2*x+1]),
        copy.copy(delrot),
        format_value("%5.2f", self.tile_rotations.x[x])
      ])
    table_data.append([""]*len(table_header))
    rstats = flex.mean_and_variance(radial_sigmas,self.tilecounts.as_double())
    tstats = flex.mean_and_variance(tangen_sigmas,self.tilecounts.as_double())
    table_data.append(  [
        format_value("%3s",   "ALL"),
        format_value("%s", ""),
        format_value("%6d",  self.overall_N),
        format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
        format_value("%5.2f", self.overall_rmsd),
        format_value("%5.2f", self.overall_cv[0]),
        format_value("%5.2f", self.overall_cv[1]),
        format_value("%5.2f", flex.mean(flex.double([cv.length() for cv in self.post_mean_cv]))),
        format_value("%s", ""),
        format_value("%6.2f", rstats.mean()),
        format_value("%6.2f", tstats.mean()),
        format_value("%s", ""),
        format_value("%s", ""),
        #root mean squared difference in same-sensor (adjacent)-ASIC rotations, weighted by minimum # of observations on either ASIC of the sensor
        format_value("%5.2f", math.sqrt(
           flex.sum(
             flex.double([
               (min([self.tilecounts[2*isen],self.tilecounts[2*isen+1]])) *
                    (self.tile_rotations.x[2*isen] - self.tile_rotations.x[1+2*isen])**2
               for isen in xrange(len(self.tiles) // 8)]
             )
           )/
           flex.sum(
             flex.double(
               [(min([self.tilecounts[2*isen],self.tilecounts[2*isen+1]])) for isen in xrange(len(self.tiles) // 8)]
             )
           )
        )),
        format_value("%s", ""),
    ])

    print
    print table_utils.format(table_data,has_header=1,justify='center',delim=" ")
Example #58
0
    def event(self, evt, env):
        """The event() function is called for every L1Accept transition.

    For now, log error and set bogus value to allow stuff to continue
    -- must check for the bogosity later

    XXX The dead time of the detector complicates checking how often
    things are updated!  Move this to the ring buffer?

    @param evt Event data object, a configure object
    @param env Environment object
    """

        from pyana.event import Event
        from acqiris_ext import acqiris_integrate, apd_hitfind

        super(mod_ledge, self).event(evt, env)
        if evt.status() != Event.Normal:
            pass  # XXX return -- Never skip because arrays will end up
            # different length, so ignore this?

        # Get the time of the event, in fractional seconds since the
        # epoch.  This is needed for all subsequent history-keeping, and
        # is hence determined first.  XXX Is history-keeping even
        # justified?
        time = cspad_tbx.evt_time(evt)
        if time is None:
            time = float("nan")
        else:
            time = time[0] + time[1] / 1e3
        self._timestamp.append(time)

        # The repetition rate is currently just used for sanity checking.
        repetition_rate = cspad_tbx.evt_repetition_rate(evt)
        if repetition_rate is None:
            repetition_rate = float("nan")
        self._repetition_rate.append(repetition_rate)

        # Get the I0.  No need to warn about it here, it will be done once
        # the image is written out.
        I0 = cspad_tbx.evt_pulse_energy(evt)
        if I0 is None:
            I0 = float("nan")
        self._I0.append(I0)

        # Get the FEE energy.  Average the two readings before and after
        # attenuation separately.  XXX What are the units?  It looks like
        # it could be mJ?
        fee_before = 0.5 * sum(evt.getFeeGasDet()[0:2])
        if fee_before is None:
            fee_before = float("nan")
        self._fee_before.append(fee_before)

        fee_after = 0.5 * sum(evt.getFeeGasDet()[2:4])
        if fee_after is None:
            fee_after = float("nan")
        self._fee_after.append(fee_after)

        # XXX Just a check: this is what xtcexplorer does:
        fee_energy = evt.get(xtc.TypeId.Type.Id_FEEGasDetEnergy)
        if fee_energy is not None:
            assert (
                evt.getFeeGasDet()[0] == fee_energy.f_11_ENRC
                and evt.getFeeGasDet()[1] == fee_energy.f_12_ENRC
                and evt.getFeeGasDet()[2] == fee_energy.f_21_ENRC
                and evt.getFeeGasDet()[3] == fee_energy.f_22_ENRC
            )

        """
    # For Bill: expect 84240 data points for r0054
    #
    # grep "^BILL_POINT" | cut -d' ' -f2,3,4,5,6 > t.dat
    # gnuplot> m=0.1 ; k=-0.01e-8; f(x) = k * x + m
    # gnuplot> fit f(x) "t.dat" using ($3):($5) via k,m
    if not hasattr(self, '_gmd_seqno'):
      self._gmd_seqno = 0
    gmd = evt.get(key=xtc.TypeId.Type.Id_GMD)
    if gmd is None:
      return
    acq_apd = evt.getAcqValue('SxrEndstation-0|Acqiris-1', 0, env)
    if acq_apd is not None and acq_apd.waveform() is not None:
      w = acq_apd.waveform()
      baseline = numpy.mean(w[0:(w.shape[0] / 5)])
      peak = numpy.min(w[(w.shape[0] / 5):w.shape[0]])
      self._gmd_seqno += 1
      print "BILL_POINT %d %s %s %s %s" % (self._gmd_seqno,
                                           repr(gmd.fBgValuePerSample),
                                           repr(gmd.fCorrectedSumPerPulse),
                                           repr(gmd.fRelativeEnergyPerPulse),
                                           repr(peak - baseline))
    return
    """

        """
    # XXX Record injector motion--note that they cannot be added--see
    # Ray's email.
    injector_micos_xyz = cspad_tbx.env_pv3_get(
      env,
      ['SXR:EXP:MZM:%02d:ENCPOSITIONGET' % i for i in [1, 2, 3]])
    if injector_micos_xyz is None:
      self.logger.error("No micos injector motor positions")
      injector_micos_xyz = (float('nan'), float('nan'), float('nan'))
    self._injector_micos_xyz.append(injector_micos_xyz)

    injector_rough_xyz = cspad_tbx.env_pv3_get(
      env,
      ['SXR:EXP:MMS:%02d.RBV' % i for i in [1, 2, 3]])
    if injector_rough_xyz is None:
      self.logger.error("No rough injector motor positions")
      injector_rough_xyz = (float('nan'), float('nan'), float('nan'))
    self._injector_rough_xyz.append(injector_rough_xyz)

    # Injector power supplies XXX There is a third PSU, no?
    #
    # The -5kV supply
    # SXR:EXP:SHV:VHS6:CH0:VoltageMeasure
    # SXR:EXP:SHV:VHS6:CH0:CurrentMeasure
    #
    # The plus 5kV supply
    # SXR:EXP:SHV:VHS2:CH0:VoltageMeasure
    # SXR:EXP:SHV:VHS2:CH0:CurrentMeasure
    injector_plus_current = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS6:CH0:CurrentMeasure')
    if injector_plus_current is None:
      self.logger.error("No plus-motor current")
      injector_plus_current = -1
    self._injector_plus_current.append(injector_plus_current)

    injector_plus_voltage = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS6:CH0:VoltageMeasure')
    if injector_plus_voltage is None:
      self.logger.error("No plus-motor voltage")
      injector_plus_voltage = -1
    self._injector_plus_voltage.append(injector_plus_voltage)

    injector_minus_current = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS2:CH0:CurrentMeasure')
    if injector_minus_current is None:
      self.logger.error("No minus-motor current")
      injector_minus_current = -1
    self._injector_minus_current.append(injector_minus_current)

    injector_minus_voltage = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS2:CH0:VoltageMeasure')
    if injector_minus_voltage is None:
      self.logger.error("No minus-motor voltage")
      injector_minus_voltage = -1
    self._injector_minus_voltage.append(injector_minus_voltage)
    """

        """
    # The spectrometer motor positions are just used for sanity
    # checking.
    spectrometer_xyz = cspad_tbx.env_spectrometer_xyz_sxr(env)
    if spectrometer_xyz is None:
      self.logger.error("No spectrometer motor positions")
      spectrometer_xyz = (float('nan'), float('nan'), float('nan'))
    self._spectrometer_xyz.append(spectrometer_xyz)
    """

        # Get the pulse energy after monochromator, and fall back on the
        # pre-monochromator energy if the former is absent.  Record in
        # list for mean and stddev.  XXX Verify that the wavelength after
        # the monochromator is updated at around 1 Hz.
        #
        # For the publication an offset and scale were calibrated.
        wavelength = cspad_tbx.env_wavelength_sxr(evt, env)
        if wavelength is None:
            wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
            energy = float("nan")
        else:
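            # 12398.4187 eV*Angstrom is h*c: converts wavelength in Angstrom to photon energy in eV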
            energy = 12398.4187 / wavelength
        self._energy.append(energy)
        self._history_energy.push(time, energy)  # XXX Not necessary?!

        """
    # Laser shutters XXX need to sort out laser numbering XXX Laser
    # power stuff? XXX Position of polarizer/analyser
    shutters = cspad_tbx.env_laser_shutters(env)
    #print "Got shutters", shutters
    """

        # Read out the diode traces from the via the Acqiris.  XXX In any
        # case, the APD and the more sensitive Opto Diode in the monitor
        # tank (i.e. the transmission diode) should be anti-correlated, so
        # check it!  The entire trace always covers 10 us.  XXX Could this
        # be figured out from xtc.TypeId.Type.Id_AcqConfig?
        #
        # XXX This appears to be suboptimal: look at the
        # skewness-transform for the APD to sort this out.
        acq_apd = evt.getAcqValue("SxrEndstation-0|Acqiris-1", 0, env)
        acq_apd_integral = float("nan")
        if acq_apd is not None:
            waveform = acq_apd.waveform()
            if waveform is not None:
                # With a 40k-point trace, one should integrate from 18200 to
                # 18400.
                waveform = waveform.flatten()
                nmemb = len(waveform) // 200
                if nmemb > 0:
                    acq_apd_integral = acqiris_integrate(flex.double(waveform), 91 * nmemb, 100 * nmemb, nmemb)
        self._acq_apd_integral.append(acq_apd_integral)

        if evt.expNum() == 208:
            # Opto diode address for L632.
            acq_opto_diode = evt.getAcqValue("SxrEndstation-0|Acqiris-1", 1, env)
        elif evt.expNum() == 363:
            # Opto diode address for LB68.
            acq_opto_diode = evt.getAcqValue("SxrEndstation-0|Acqiris-2", 2, env)
        else:
            acq_opto_diode = None  # unknown experiment: no opto-diode trace
        acq_opto_diode_integral = float("nan")
        if acq_opto_diode is not None:
            waveform = acq_opto_diode.waveform()
            if waveform is not None:
                # With a 40k-point trace, one should integrate from 16000 to
                # 24000.  With a 20k-point trace, a suitable integration
                # region is bounded by 8000 and 12000.  There is no need for
                # thresholding, because the integral of the Opto Diode will
                # not be used for hit finding.  XXX What are the "misses" we
                # record on the Opto Diode?  XXX The direct beam is completely
                # gone after it hits the sample, because soft X-rays.
                waveform = waveform.flatten()
                nmemb = len(waveform) // 5
                if nmemb > 0:
                    acq_opto_diode_integral = acqiris_integrate(flex.double(waveform), 2 * nmemb, 4 * nmemb, nmemb)
        self._acq_opto_diode_integral.append(acq_opto_diode_integral)

        # Sanity check: verify that the timestamps for the two Acqiris
        # traces are similar enough.
        if acq_apd is not None and acq_opto_diode is not None:
            assert (
                len(acq_apd.timestamps()) == len(acq_opto_diode.timestamps())
                and numpy.max(numpy.abs(acq_apd.timestamps() - acq_opto_diode.timestamps())) < 1e-6
            )

        # self.logger.info("DIODE INTEGRALS: %f %f %f" % (I0, acq_apd_integral, acq_opto_diode_integral))

        """
    import matplotlib.pyplot as plt

    hit_array_apd = apd_hitfind(
      flex.double(acq_apd.waveform()),
      len(acq_apd.waveform()) // 5)
    hit_array_opto_diode = apd_hitfind(
      flex.double(acq_opto_diode.waveform()),
      len(acq_opto_diode.waveform()) // 5)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.plot(
    #  range(len(acq_apd.timestamps())), acq_apd.waveform())
    ax.plot(
      range(len(acq_opto_diode.timestamps())), acq_opto_diode.waveform()[0, :])
    plt.show()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.plot(
    #  acq_apd.timestamps()[0:len(hit_array_apd)], hit_array)
    ax.plot(
      acq_opto_diode.timestamps()[0:len(hit_array_opto_diode)], hit_array)
    plt.show()
    """

        # Determine whether the beam hit the sample, and register the
        # outcome.  If not using any diodes for hit-finding, every shot is
        # assumed to be a hit.  XXX Unfortunately, this crucial piece is
        # very unreliable.  The threshold for the APD needs to be
        # verified--inspect all the histograms.  XXX hitfind_flags is
        # probably better as a module parameter.
        #    hitfind_flags = 0x3
        hitfind_flags = 0
        hit = False
        if not hitfind_flags:
            hit = True
        elif hitfind_flags & 0x1 and acq_apd_integral > 0.2:
            hit = True
        self._hit.append(hit)

        # Always proceed all the way through (even if some shots have
        # invalid values of e.g. I0) because images are precious.  XXX
        # Must reset counters before returning!  XXX What about skipping
        # all of the above if display is True?
        if self.cspad_img is not None:
            self._nframes += 1

            """
      # The spectrometer should not move!
      t = (self._spectrometer_xyz -
           self._spectrometer_xyz.mean()).rms_length()
      print "Spectrometer displacement", t

      # Fine/rough motor position deviations from the mean.  See Ray's
      # email.
      t = (self._injector_micos_xyz -
           self._injector_micos_xyz.mean()).rms_length()
      print "Injector micos displacement", t

      t = (self._injector_rough_xyz -
           self._injector_rough_xyz.mean()).rms_length()
      print "Injector rough displacement", t

      # Injector motor position means and deviations
      if self._injector_plus_current.size() > 1:
        t = flex.mean_and_variance(self._injector_plus_current)
        print "Injector plus current mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())
      if self._injector_plus_voltage.size() > 1:
        t = flex.mean_and_variance(self._injector_plus_voltage)
        print "Injector plus voltage mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())

      if self._injector_minus_current.size() > 1:
        t = flex.mean_and_variance(self._injector_minus_current)
        print "Injector minus current mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())
      if self._injector_minus_voltage.size() > 1:
        t = flex.mean_and_variance(self._injector_minus_voltage)
        print "Injector minus voltage mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())

      """

            # Energy statistics are collected from all shots, regardless of
            # whether they are hits or not.  Since this statistic mentions
            # the frame number, it should be reported first.  XXX The energy
            # should have a really small standard deviation.  Check
            # self._energy.size() and self._history_energy.frequency() XXX
            # verify that it works for one data point.
            (energy_mean, energy_stddev, energy_nmemb, n) = self._filtered_stats(
                lambda x: not math.isnan(x) and x > 0, self._energy
            )
            if n > 0:
                self.logger.warning("%d shots have undefined energy" % n)

            (I0_mean, I0_stddev, I0_nmemb, n) = self._filtered_stats(lambda x: not math.isnan(x), self._I0)
            if n > 0:
                self.logger.warning("%d shots have undefined I0" % n)

            self.logger.info(
                "Frame %d: E=%.3f+/-%.3f (N=%d) I0=%.0f+/-%.0f (N=%d)"
                % (self._nframes, energy_mean, energy_stddev, energy_nmemb, I0_mean, I0_stddev, I0_nmemb)
            )

            # Sanity check: unless changed while integrating the frame, the
            # repetition rate should have a standard deviation of zero.
            dt = self._timestamp[-1] - self._timestamp[0]
            rr_mean = rr_observed = rr_stddev = 0
            if dt > 0:
                rr_observed = (len(self._timestamp) - 1) / dt
                rr = filter(lambda x: not math.isnan(x) and x > 0, self._repetition_rate)
                if len(rr) > 1:
                    rr_stats = flex.mean_and_variance(flex.double(rr))
                    rr_mean = rr_stats.mean()
                    rr_stddev = rr_stats.unweighted_sample_standard_deviation()
            self.logger.info(
                "Repetition rate: %.3f Hz (observed), %.3f+/-%.3f Hz (expected)" % (rr_observed, rr_mean, rr_stddev)
            )

            # Compare observed and configured exposure time.
            config = cspad_tbx.getConfig(self.address, env)
            exposure_time = 0
            if config is not None and dt > 0 and len(self._timestamp) > 0:
                exposure_time = dt * (len(self._timestamp) + 1) / len(self._timestamp)
            if config is not None:  # config.exposureTime() is undefined otherwise
                self.logger.info(
                    "Exposure time: %.3f s (observed), %.3f s (configured)" % (exposure_time, config.exposureTime())
                )

            # Compute the leading dead time, the time between starting the
            # readout of the previous frame and the arrival of the shot
            # immediately following it.  This is an interesting statistic,
            # no matter what.  XXX Maybe look at its distribution?
            dead_time = 0
            if rr_observed > 0 and hasattr(self, "_previous_readout_time"):
                dead_time = self._timestamp[0] - self._previous_readout_time - 1 / rr_observed
                if math.isnan(dead_time):
                    dead_time = 0
            self.logger.info("Dead time: %.3f s" % dead_time)
            self._previous_readout_time = self._timestamp[-1]

            assert time == self._timestamp[-1]  # XXX ZAP once one run survives it!

            # Flag blank images (i.e. images that had no hits), because
            # these may be interesting for background subtraction.
            hits = self._hit.count(True)
            self.logger.info("Hit rate: %d/%d (%.2f%%)" % (hits, self._hit.size(), 100 * hits / self._hit.size()))
            if hits == 0:
                self.logger.info("Frame %d is blank" % self._nframes)

            # Get the normalisation factor by summing up I0 for all hits.
            # Invalid and non-positive values of I0 are treated as zeroes.
            # XXX Make this kind of summing a function of its own.
            I0 = sum(filter(lambda x: not math.isnan(x) and x > 0, self._I0.select(self._hit)))
            I0_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._I0))

            fee_before_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._fee_before))
            fee_after_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._fee_after))

            # Register the template to the image and locate the regions of
            # interest based on the registration parameters.  XXX Should
            # also give contrast: fit 2D-Gaussian to peak and report its
            # standard deviations and fit?
            if self._template is not None:
                gamma = lewis(self._template, self.cspad_img)
                p = flex.max_index(gamma)
                peak = (
                    p // gamma.focus()[1] - self._template.focus()[0] + 1,
                    p % gamma.focus()[1] - self._template.focus()[1] + 1,
                )

                # """
                ### REFERENCE CHECK ###
                from os import makedirs
                from os.path import dirname, isdir, join
                from scipy import io

                mat_dirname = dirname(cspad_tbx.pathsubst(self._mat_path, evt, env, frame_number=self._nframes))
                if not isdir(mat_dirname):
                    makedirs(mat_dirname)

                io.savemat(
                    file_name=join(mat_dirname, "cross-check-%05d.mat" % self._nframes),
                    mdict=dict(
                        image=self.cspad_img.as_numpy_array(),
                        template=self._template.as_numpy_array(),
                        gamma=gamma.as_numpy_array(),
                        peak=numpy.array(peak),
                    ),
                    appendmat=False,
                    do_compression=True,
                    oned_as="column",
                )

                return
                ### REFERENCE CHECK ###
                # """
            else:
                # Alternative: position everything with respect to the frame
                # origin.
                peak = (0, 0)

            # XXX Come up with a better way to handle the offsets!  They
            # really do depend on the template, and should therefore be
            # packaged with it.
            self.logger.info("Template registration anchor point (%d, %d)" % (peak[0], peak[1]))

            roi = []
            if evt.expNum() == 208:
                # Regions of interest for L632 (experiment number 208).  XXX
                # Could perhaps migrate the template matching here instead?

                # The left, middle, and right manganese signals.  XXX Extend the
                # rightmost ROI three pixels in upward direction (see runs 145
                # and onwards, also note narrower slit)?
                roi.append((peak[0] + 59, peak[1] - 24, 12, 5))
                roi.append((peak[0] + 61, peak[1] + 28, 12, 4))
                roi.append((peak[0] + 61, peak[1] + 79, 12, 5))

                # Two background regions between the manganese spots, with the
                # same total area as the signal.
                roi.append((peak[0] + 62, peak[1] + 1, 8, 8))
                roi.append((peak[0] + 63, peak[1] + 51, 8, 8))

                # The left and right direct reflections from the Si substrate
                # (i.e. the areas between the zone plates).  These were the
                # features used for template registration.
                roi.append((peak[0], peak[1], 40, 10))
                roi.append((peak[0], peak[1] + 50, 40, 9))

                # Spot between the direct reflections.  XXX What is this?
                roi.append((peak[0] + 1, peak[1] + 23, 22, 13))

                # The horizontal slit, where the direct reflection occurs.  This
                # is fixed.  XXX Verify this!
                roi.append((22, 0, 41, 128))

                # Background stripe, below the manganese spots.  This is fixed
                # to the bottom of the detector.
                roi.append((104, 0, 20, 128))

            elif evt.expNum() == 363:
                # Regions of interest for LB68 (experiment number 363).
                # 0-pixel are active, 255-pixel are inactive
                from scipy.misc import imread

                # Dec 5, 2013 (09:00 - 21:00): initial estimates from r0010
                """
        roi.append((peak[0] +  14, peak[1] + 138 + 23, 25, 50 - 25))
        roi.append((peak[0] +  45, peak[1] + 138 + 23, 25, 50 - 25))
        roi.append((peak[0] +  78, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 111, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 144, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 177, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 210, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 243, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 278, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 312, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 344, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 376, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 408, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 442, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 475, peak[1] + 135 + 23, 25, 50 - 25))
        """

                # Dec 6, 2013 (09:00 - 21:00): rough estimates
                """
        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  25)) # oxygen
        roi.append((peak[0] + 0, peak[1] + 160, 512,  25)) # signal
        roi.append((peak[0] + 0, peak[1] + 300, 512, 130)) # zeroth order
        """

                # Dec 7, 2013 (09:00 - 21:00): overlap between oxygen and
                # signal.  Will lose some signal.
                """
        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  50)) # oxygen
        roi.append((peak[0] + 0, peak[1] + 185, 512,  40)) # signal
        roi.append((peak[0] + 0, peak[1] + 270, 512, 170)) # zeroth order
        """

                """
        # Dec 7 2013 (09:00 - 21:00): binary masks stored in PNG
        # images.

        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  25)) # oxygen

        #roi_image = flex.float(
        #  imread('/reg/neh/home1/hattne/myrelease/LB68-r0039-max-mask.png',
        #         flatten=True))
        #roi_image = flex.float(
        #  imread('/reg/neh/home1/hattne/myrelease/LB68-r0039-std-mask.png',
        #         flatten=True))
        roi_image = flex.float(
          imread('/reg/neh/home1/hattne/myrelease/LB68-r0052-avg-mask.png',
                 flatten=True))
        roi_image = (255 - roi_image)

        #roi.append((0, 0, self.cspad_img.focus()[0], self.cspad_img.focus()[1]))
        roi.append(roi_image)

        roi.append((peak[0] + 0, peak[1] + 270, 512, 170)) # zeroth order
        """

                # Dec 9, 2013 (09:00 - 21:00)
                # """
                roi.append((peak[0] + 0, peak[1] + 25, 512, 25))  # bkg
                roi.append((peak[0] + 0, peak[1] + 135, 512, 25))  # oxygen
                # roi.append((peak[0] + 0, peak[1] + 160, 512,  25)) # signal
                roi_image = flex.float(imread("/reg/neh/home1/hattne/myrelease/LB68-r0067-max-mask.png", flatten=True))
                roi.append(roi_image)

                roi.append((peak[0] + 0, peak[1] + 240, 512, 180))  # zeroth order
                # """

            else:
                self.logger.error(
                    "No regions of interest for %s (experiment number %d)" % (env.experiment(), evt.expNum())
                )

            # Clip the regions of interest to the actual image.  If the ROI
            # does not overlap with the image at all, set its width and
            # height to zero.  XXX Do the integration here as well?
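            # For example, on a hypothetical 512x128 image an ROI of
            # (-3, 120, 10, 20) is clipped to (0, 120, 7, 8), while
            # (-20, 0, 10, 10) does not overlap at all and becomes
            # (-20, 0, 0, 0).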
            for i in range(len(roi)):
                if not isinstance(roi[i], tuple):
                    continue

                ss, fs, n_ss, n_fs = roi[i]
                if (
                    ss + n_ss < 0
                    or ss >= self.cspad_img.focus()[0]
                    or fs + n_fs < 0
                    or fs >= self.cspad_img.focus()[1]
                ):
                    roi[i] = (ss, fs, 0, 0)
                    continue

                if ss < 0:
                    n_ss += ss
                    ss = 0
                if fs < 0:
                    n_fs += fs
                    fs = 0
                n_ss = min(n_ss, self.cspad_img.focus()[0] - ss)
                n_fs = min(n_fs, self.cspad_img.focus()[1] - fs)
                roi[i] = (ss, fs, n_ss, n_fs)

            # Sum up intensities in all regions of interest, and keep track
            # of the actual number of pixels summed.  The common_mode module
            # takes care of dark-subtraction.  XXX Would like to estimate
            # sigma for spot, like in spotfinder/LABELIT.
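            # A minimal sketch of one possible sigma estimate (assumption:
            # counting statistics dominate and the summed pixels are
            # gain-corrected counts; this is not the spotfinder/LABELIT
            # procedure):
            #
            #   sigma = math.sqrt(max(I[i], 0))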
            I = flex.double(len(roi))
            I_nmemb = flex.int(len(roi))
            for i in range(len(roi)):
                if isinstance(roi[i], flex.float):
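                    # Mask images use the 0 = active, 255 = inactive
                    # convention, so thresholding at 128 selects the
                    # active pixels.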
                    sel = roi[i].as_1d() < 128
                    I[i] = flex.sum(self.cspad_img.as_1d().select(sel))
                    I_nmemb[i] = sel.count(True)
                    continue

                if roi[i][2] <= 0 or roi[i][3] <= 0:
                    I[i] = 0
                    I_nmemb[i] = 0
                else:
                    I[i] = flex.sum(
                        self.cspad_img.matrix_copy_block(
                            i_row=roi[i][0], i_column=roi[i][1], n_rows=roi[i][2], n_columns=roi[i][3]
                        )
                    )
                    I_nmemb[i] = roi[i][2] * roi[i][3]
                    """
          # Sanity check: white out the region of interest.
          self.cspad_img.matrix_paste_block_in_place(
            block=flex.double(flex.grid(roi[i][2], roi[i][3])),
            i_row=roi[i][0],
            i_column=roi[i][1])
          """

            # Sum the APD and opto-diode integrals over the hits and over
            # all shots, ignoring NaNs and non-positive values.
            acq_apd_sum = sum(x for x in self._acq_apd_integral.select(self._hit) if not math.isnan(x) and x > 0)
            acq_opto_diode_sum = sum(
                x for x in self._acq_opto_diode_integral.select(self._hit) if not math.isnan(x) and x > 0
            )

            acq_apd_sum_all = sum(x for x in self._acq_apd_integral if not math.isnan(x) and x > 0)
            acq_opto_diode_sum_all = sum(x for x in self._acq_opto_diode_integral if not math.isnan(x) and x > 0)

            # Append the data point to the stream: shots, hits, energy, and
            # I.  XXX OrderedDict requires Python 2.7; on Python 3.7+ a
            # plain dict would preserve insertion order, too.
            from collections import OrderedDict

            csv_dict = OrderedDict(
                [
                    ("n_frames", self._hit.size()),
                    ("n_hits", hits),
                    ("I0", I0),
                    ("I0_all", I0_all),
                    ("fee_before_all", fee_before_all),
                    ("fee_after_all", fee_after_all),
                    ("energy_mean", energy_mean),
                    ("acq_apd_sum", acq_apd_sum),
                    ("acq_apd_sum_all", acq_apd_sum_all),
                    ("acq_opto_diode_sum", acq_opto_diode_sum),
                    ("acq_opto_diode_sum_all", acq_opto_diode_sum_all),
                ]
            )
            # XXX Only four ROI names are defined; the ROI sets for other
            # experiments above have more regions, so fall back on a
            # generic name rather than raising an IndexError.
            roi_names = ("bkg", "oxygen", "manganese", "zeroth_order")
            for i, (r, intensity, nmemb) in enumerate(zip(roi, I, I_nmemb)):
                if i < len(roi_names):
                    key = "roi_" + roi_names[i]
                else:
                    key = "roi_%d" % i
                csv_dict["%s_nmemb" % key] = nmemb

                if isinstance(r, tuple):
                    csv_dict["%s_ss_start" % key] = r[0]
                    csv_dict["%s_fs_start" % key] = r[1]
                    csv_dict["%s_ss_size" % key] = r[2]
                    csv_dict["%s_fs_size" % key] = r[3]
                else:
                    csv_dict["%s_ss_start" % key] = 0
                    csv_dict["%s_fs_start" % key] = 0
                    csv_dict["%s_ss_size" % key] = r.focus()[0]
                    csv_dict["%s_fs_size" % key] = r.focus()[1]

                csv_dict["%s_I" % key] = intensity

            # XXX Assert that the keys match up with what's already in the
            # file?  Or exploit the error-reporting mechanism already
            # implemented?  Write the header; the column order follows the
            # fieldnames sequence passed to DictWriter, i.e. the insertion
            # order of csv_dict.
            if not hasattr(self, "_csv"):
                from csv import DictWriter

                self._csv = DictWriter(self._stream_table, csv_dict.keys())
                self._csv.writeheader()
            self._csv.writerow(csv_dict)

            # Output the non-normalised image and all other relevant data to
            # a binary MATLAB file.  XXX What if scipy is not available?
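            # A minimal guard could be (assumption: the MATLAB dump is
            # optional and may simply be skipped when scipy is missing):
            #
            #   try:
            #       from scipy import io
            #   except ImportError:
            #       return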
            from os import makedirs, path
            from scipy import io

            mat_path = cspad_tbx.pathsubst(self._mat_path, evt, env, frame_number=self._nframes)
            if not path.isdir(path.dirname(mat_path)):
                makedirs(path.dirname(mat_path))

            io.savemat(
                file_name=mat_path,
                mdict=dict(
                    DATA=self.cspad_img.as_numpy_array(),
                    DIODES=numpy.array((acq_apd_sum, acq_apd_sum_all, acq_opto_diode_sum, acq_opto_diode_sum_all)),
                    ENERGY=energy_mean,
                    HITS=numpy.array((hits, self._hit.size())),
                    I0=numpy.array((I0, I0_all)),
                    INTENSITIES=numpy.array(I),
                    ROIS=numpy.array([r for r in roi if isinstance(r, tuple)]),
                ),
                appendmat=False,
                do_compression=True,
                oned_as="column",
            )
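            # The file can be read back with scipy.io.loadmat() or
            # MATLAB's load().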

            # Optionally update the image in the viewer.  See mod_view.
            if self._display:
                from time import localtime, strftime

                # Copy over regions of interest to shared multiprocessing
                # array.  XXX Flip to honour wxPython convention.
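                # The swap maps the detector's slow-scan/fast-scan axes
                # onto the viewer's y/x axes.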
                for i in range(len(roi)):
                    if not isinstance(roi[i], tuple):
                        continue
                    self._roi[4 * i + 0] = roi[i][1]
                    self._roi[4 * i + 1] = roi[i][0]
                    self._roi[4 * i + 2] = roi[i][3]
                    self._roi[4 * i + 3] = roi[i][2]

                time_str = strftime("%H:%M:%S", localtime(evt.getTime().seconds()))
                title = "r%04d@%s: frame %d on %s" % (evt.run(), time_str, self._nframes, self.address)

                # XXX No distance in the Andor experiment.  So don't bother
                # with the fictional beam center, distance, and saturation
                # value?  See also mod_average.endjob()
                img_obj = (
                    dict(
                        BEAM_CENTER=(0, 0),
                        DATA=self.cspad_img,
                        DETECTOR_ADDRESS=self.address,
                        DISTANCE=10,  # XXX Evil kludge to keep dxtbx happy!
                        PIXEL_SIZE=13.5e-3,  # XXX Hard-coded, again!
                        SATURATED_VALUE=10000,
                        TIME_TUPLE=cspad_tbx.evt_time(evt),
                        WAVELENGTH=12398.4187 / energy,  # hc in eV * Angstrom converts energy to wavelength
                    ),
                    title,
                )

                # Wait for the viewer process to drain the queue, bailing
                # out if it has died in the meantime.
                while not self._queue.empty():
                    if not self._proc.is_alive():
                        evt.setStatus(Event.Stop)
                        return

                try:
                    from queue import Full  # Python 3
                except ImportError:
                    from Queue import Full  # Python 2
                while True:
                    try:
                        self._queue.put(img_obj, timeout=1)
                        break
                    except Full:
                        pass

            self._reset_counters()
            return