Example No. 1
  def intensities(self):
    ''' Compare the intensities. '''
    from dials.array_family import flex

    # Sort by resolution (descending d)
    d = self.refl1['d']
    index = flex.size_t(reversed(sorted(range(len(d)), key=lambda x: d[x])))
    self.refl1.reorder(index)
    self.refl2.reorder(index)
    d = self.refl1['d']  # re-read after reordering so d matches the sorted tables

    # Get the intensities
    I1 = self.refl1['intensity.sum.value']
    I2 = self.refl2['intensity.sum.value']
    S1 = flex.sqrt(self.refl1['intensity.sum.variance'])
    S2 = flex.sqrt(self.refl2['intensity.sum.variance'])
    xyz1 = self.refl1['xyzcal.px']
    xyz2 = self.refl2['xyzcal.px']

    # Compute chunked statistics
    corr = []
    R = []
    scale = []
    res = []
    for i in range(len(self.refl1) // 1000):

      # Get the chunks of data
      a = i * 1000
      b = (i+1) * 1000
      II1 = I1[a:b]
      II2 = I2[a:b]
      res.append(d[a])

      # Compute the mean and standard deviation per chunk
      mv1 = flex.mean_and_variance(II1)
      mv2 = flex.mean_and_variance(II2)
      m1 = mv1.mean()
      m2 = mv2.mean()
      s1 = mv1.unweighted_sample_standard_deviation()
      s2 = mv2.unweighted_sample_standard_deviation()

      # Compute the correlation coefficient
      r = (1/(len(II1) - 1))*sum(((II1[j] - m1) / s1) * ((II2[j] - m2) / s2)
          for j in range(len(II1)))
      corr.append(r)

      # Compute the scale between the chunks
      s = sum(II1) / sum(II2)
      scale.append(s)

      # Compute R between the chunks
      r = sum(abs(abs(II1[j]) - abs(s * II2[j])) for j in range(len(II1))) \
        / sum(abs(II1[j]) for j in range(len(II1)))
      R.append(r)

    from matplotlib import pylab
    pylab.plot(corr, label="CC")
    pylab.plot(R, label="R")
    pylab.plot(scale, label="K")
    pylab.legend()
    pylab.show()
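
The per-chunk statistic above is the Pearson correlation coefficient written out by hand. Below is a minimal, self-contained sketch of the same calculation on made-up flex arrays (the values are illustrative only); scitbx's flex.linear_correlation computes the same quantity directly:

from dials.array_family import flex

a = flex.double([1.0, 2.0, 3.0, 4.0])  # illustrative data
b = flex.double([1.1, 1.9, 3.2, 3.8])
mv_a = flex.mean_and_variance(a)
mv_b = flex.mean_and_variance(b)
# Pearson r: sum of products of standardised deviates over n - 1
r = flex.sum(((a - mv_a.mean()) / mv_a.unweighted_sample_standard_deviation()) *
             ((b - mv_b.mean()) / mv_b.unweighted_sample_standard_deviation())) / (len(a) - 1)
print(r, flex.linear_correlation(a, b).coefficient())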
Example No. 2
def cctbx_i_over_sigi_ms_from_dials_data(reflections, cctbx_crystal_symmetry):
  from dials.array_family import flex
  from cctbx.miller import set as miller_set
  refl = reflections.select(reflections['intensity.sum.variance'] > 0)
  return miller_set(cctbx_crystal_symmetry, refl['miller_index']).array(
    data=refl['intensity.sum.value'],
    sigmas=flex.sqrt(refl['intensity.sum.variance']))
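
A hedged usage sketch of the function above on a hand-made reflection table; the unit cell and space group are placeholders, and the zero-variance reflection is filtered out. Note that, despite its name, the returned miller array carries intensities with sigmas attached (from which I/sigma can be computed) rather than precomputed I/sigma values.

from cctbx import crystal
from dials.array_family import flex

refl = flex.reflection_table()
refl['miller_index'] = flex.miller_index([(1, 0, 0), (2, 0, 0)])
refl['intensity.sum.value'] = flex.double([100.0, 50.0])
refl['intensity.sum.variance'] = flex.double([25.0, 0.0])
symm = crystal.symmetry(unit_cell=(78, 78, 37, 90, 90, 90),  # placeholder cell
                        space_group_symbol='P43212')
ms = cctbx_i_over_sigi_ms_from_dials_data(refl, symm)
print(list(ms.sigmas()))  # [5.0] -- only the positive-variance reflection survives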
Example No. 3
  def _xl_unit_cell_derivatives(self, isel, parameterisation=None,
    reflections=None):

    # Get required data
    h = self._h.select(isel)
    B = self._B.select(isel)
    wl = self._wavelength.select(isel)

    # get derivatives of the B matrix wrt the parameters
    dB_dxluc_p = [None if der is None else flex.mat3_double(len(isel), der.elems) \
                  for der in parameterisation.get_ds_dp(use_none_as_null=True)]

    d2theta_dp = []

    # loop through the parameters
    for der in dB_dxluc_p:

      if der is None:
        d2theta_dp.append(None)
        continue

      r0 = B * h
      dr0 = der * h
      r0len = r0.norms()
      dr0len = dr0.dot(r0) / r0len

      # 2theta = 2 * arcsin( |r0| / (2 * |s0| ) )
      sintheta = 0.5 * r0len * wl
      fac = 1.0 / flex.sqrt(flex.double(len(wl), 1.0) - sintheta**2)
      val = fac * wl * dr0len

      d2theta_dp.append(val)

    return d2theta_dp
def get_pix_coords(wavelength, A, mill_arr, detector, delta_i=0.02):
    """ Code copied from sim.py courtesy of Aaron and Tara """
    s0=col((0,0,-1/wavelength))
    q=flex.vec3_double([A*col(idx) for idx in  mill_arr.indices().as_vec3_double()])
    s0_hat=flex.vec3_double([s0.normalize()]*len(q))
    q_hat=q.each_normalize()
    #q_hat.cross(flex.vec3_double([s0_hat]*len(q_hat)))
    e1_hat = q_hat.cross(s0_hat)
    c0_hat = s0_hat.cross(e1_hat)
    q_len_sq = flex.double([col(v).length_sq() for v in q])
    a_side=q_len_sq*wavelength/2
    b_side=flex.sqrt(q_len_sq-a_side**2)  # b = sqrt(|q|^2 - a^2), per the commented formula below
    #flex.vec3_double([sqrt(q.length_sq()-a_side**2 for idx in mill_arr)])
    r_vec=flex.vec3_double(-a_side*s0_hat+b_side*c0_hat)
    s1=r_vec+s0

    EQ=q+s0
    len_EQ=flex.double([col(v).length() for v in EQ])
    ratio=len_EQ*wavelength

    indices = flex.miller_index()
    coords =flex.vec2_double()
    for i in range(len(s1)):
        if ratio[i] > 1 - delta_i and ratio[i] < 1 + delta_i:
            indices.append(mill_arr.indices()[i])
            pix = detector[0].get_ray_intersection_px(s1[i])
            if detector[0].is_coord_valid(pix):
                coords.append(pix)

    return coords, indices
    def jacobian_callable(self,values):
      PB = self.get_partiality_array(values)
      EXP = flex.exp(-2.*values.BFACTOR*self.DSSQ)
      G_terms = (EXP * PB * self.ICALCVEC)
      B_terms = (values.G * EXP * PB * self.ICALCVEC)*(-2.*self.DSSQ)
      P_terms = (values.G * EXP * self.ICALCVEC)

      thetax = values.thetax; thetay = values.thetay;
      Rx = matrix.col((1,0,0)).axis_and_angle_as_r3_rotation_matrix(thetax)
      dRx_dthetax = matrix.col((1,0,0)).axis_and_angle_as_r3_derivative_wrt_angle(thetax)
      Ry = matrix.col((0,1,0)).axis_and_angle_as_r3_rotation_matrix(thetay)
      dRy_dthetay = matrix.col((0,1,0)).axis_and_angle_as_r3_derivative_wrt_angle(thetay)
      ref_ori = matrix.sqr(self.ORI.reciprocal_matrix())
      miller_vec = self.MILLER.as_vec3_double()
      ds1_dthetax = flex.mat3_double(len(self.MILLER),Ry * dRx_dthetax * ref_ori) * miller_vec
      ds1_dthetay = flex.mat3_double(len(self.MILLER),dRy_dthetay * Rx * ref_ori) * miller_vec

      s1vec = self.get_s1_array(values)
      s1lenvec = flex.sqrt(s1vec.dot(s1vec))
      dRh_dthetax = s1vec.dot(ds1_dthetax)/s1lenvec
      dRh_dthetay = s1vec.dot(ds1_dthetay)/s1lenvec
      rs = values.RS
      Rh = self.get_Rh_array(values)
      rs_sq = rs*rs
      denomin = (2. * Rh * Rh + rs_sq)
      dPB_dRh = -PB * 4. * Rh / denomin
      dPB_dthetax = dPB_dRh * dRh_dthetax
      dPB_dthetay = dPB_dRh * dRh_dthetay
      Px_terms = P_terms * dPB_dthetax; Py_terms = P_terms * dPB_dthetay

      dPB_drs = 4 * rs * Rh * Rh / (denomin * denomin)
      Prs_terms = P_terms * dPB_drs

      return [G_terms,B_terms,Prs_terms,Px_terms,Py_terms]
Example No. 6
def spot_resolution_shells(imagesets, reflections, params):
  goniometer = imagesets[0].get_goniometer()
  from dials.algorithms.indexing import indexer
  from dials.array_family import flex
  mapped_reflections = flex.reflection_table()
  for i, imageset in enumerate(imagesets):
    if 'imageset_id' in reflections:
      sel = (reflections['imageset_id'] == i)
    else:
      sel = (reflections['id'] == i)
    if isinstance(reflections['id'], flex.size_t):
      reflections['id'] = reflections['id'].as_int()
    refl = indexer.indexer_base.map_spots_pixel_to_mm_rad(
      reflections.select(sel),
      imageset.get_detector(), imageset.get_scan())

    indexer.indexer_base.map_centroids_to_reciprocal_space(
      refl, imageset.get_detector(), imageset.get_beam(),
      imageset.get_goniometer())
    mapped_reflections.extend(refl)
  reflections = mapped_reflections
  two_theta_array = reflections['rlp'].norms()
  h0 = flex.weighted_histogram(two_theta_array ** 2, n_slots=params.shells)
  n = h0.slots()
  d = 1.0 / flex.sqrt(h0.slot_centers())

  for j in range(params.shells):
    print('%d %f %d' % (j, d[j], n[j]))
Example No. 7
      def plot_one_panel(self, ax, rlist):
        RMSD = flex.sqrt(rlist['background.mse'])
        MEAN = rlist['background.mean']
        RMSD = RMSD / MEAN
        x, y, z = rlist['xyzcal.px'].parts()

        hex_ax = ax.hexbin(
          x.as_numpy_array(), y.as_numpy_array(),
          C=RMSD.as_numpy_array(), gridsize=self.gridsize,
          vmin=0, vmax=1
        )
        return hex_ax
Example No. 8
 def rmsd_vs_ios(self, rlist):
   ''' Plot background CVRMSD vs log I/Sigma. '''
   from os.path import join
   RMSD = flex.sqrt(rlist['background.mse'])
   MEAN = rlist['background.mean']
   RMSD = RMSD / MEAN
   I = rlist['intensity.sum.value']
   I_sig = flex.sqrt(rlist['intensity.sum.variance'])
   I_over_S = I / I_sig
   mask = I_over_S > 0.1
   I_over_S = I_over_S.select(mask)
   RMSD = RMSD.select(mask)
   fig = pyplot.figure()
   pyplot.title("Background Model CVRMSD vs Log I/Sigma")
   cax = pyplot.hexbin(flex.log(I_over_S), RMSD, gridsize=100)
   cbar = pyplot.colorbar(cax)
   cax.axes.set_xlabel("Log I/Sigma")
   cax.axes.set_ylabel("Background Model CVRMSD")
   cbar.ax.set_ylabel("# reflections")
   fig.savefig(join(self.directory, "background_model_cvrmsd_vs_ios.png"))
   pyplot.close()
Example No. 9
 def i_over_s_hist(self, rlist):
   ''' Plot a histogram of log I/Sigma. '''
   from os.path import join
   I = rlist['intensity.sum.value']
   I_sig = flex.sqrt(rlist['intensity.sum.variance'])
   I_over_S = I / I_sig
   fig = pyplot.figure()
   pyplot.title("Log I/Sigma histogram")
   pyplot.hist(flex.log(I_over_S), bins=20)
   pyplot.xlabel("Log I/Sigma")
   pyplot.ylabel("# reflections")
   fig.savefig(join(self.directory, "ioversigma_hist"))
   pyplot.close()
Example No. 10
 def centroid_diff_hist(self, rlist, threshold):
   ''' Plot a histogram of centroid position differences. '''
   from os.path import join
   I = rlist['intensity.sum.value']
   I_sig = flex.sqrt(rlist['intensity.sum.variance'])
   I_over_S = I / I_sig
   mask = I_over_S > threshold
   rlist = rlist.select(mask)
   assert(len(rlist) > 0)
   xc, yc, zc = rlist['xyzcal.px'].parts()
   xo, yo, zo = rlist['xyzobs.px.value'].parts()
   xd = xo - xc
   yd = yo - yc
   zd = zo - zc
   diff = flex.sqrt(xd*xd + yd*yd + zd*zd)
   fig = pyplot.figure()
   pyplot.title("Difference between observed and calculated")
   pyplot.hist(diff, bins=20)
   pyplot.xlabel("Difference in position")
   pyplot.ylabel("# reflections")
   fig.savefig(join(self.directory, "centroid_diff_hist.png"))
   pyplot.close()
Example No. 11
 def rmsd_hist(self, rlist):
   ''' Analyse the background RMSD. '''
   from os.path import join
   RMSD = flex.sqrt(rlist['background.mse'])
   MEAN = rlist['background.mean']
   RMSD = RMSD / MEAN
   fig = pyplot.figure()
   pyplot.title("Background Model mean histogram")
   pyplot.hist(RMSD, bins=20)
   pyplot.xlabel("mean")
   pyplot.ylabel("# reflections")
   fig.savefig(join(self.directory, "background_model_cvrmsd_hist"))
   pyplot.close()
Example No. 12
def flex_ios(val, var):
  '''
  Compute I/sigma or return zero for each element.

  '''
  assert(len(val) == len(var))
  result = flex.double(len(val),0)
  indices = flex.size_t(range(len(val))).select(var > 0)
  val = val.select(indices)
  var = var.select(indices)
  assert(var.all_gt(0))
  result.set_selected(indices, val / flex.sqrt(var))
  return result
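
A quick check of flex_ios on hand-made arrays; elements with non-positive variance come back as zero:

from dials.array_family import flex

val = flex.double([10.0, 20.0, 5.0])  # illustrative values
var = flex.double([4.0, 0.0, 25.0])
print(list(flex_ios(val, var)))  # [5.0, 0.0, 1.0]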
Example No. 13
      def plot_one_panel(self, ax, rlist):
        I_sig = flex.sqrt(rlist['intensity.%s.variance' %intensity_type])
        sel = I_sig > 0
        rlist = rlist.select(sel)
        I_sig = I_sig.select(sel)
        I = rlist['intensity.%s.value' %intensity_type]
        I_over_S = I / I_sig
        x, y, z = rlist['xyzcal.px'].parts()

        hex_ax = ax.hexbin(
          x.as_numpy_array(), y.as_numpy_array(),
          C=flex.log(I_over_S), gridsize=self.gridsize,
        )
        return hex_ax
Example No. 14
  def __init__(self, reflections, experiment, params=None):

    if params is None: params = om_scope.extract()

    # initial filter
    reflections = reflections.select(reflections.get_flags(
      reflections.flags.integrated))

    # create new column containing the reduced Miller index
    xl = experiment.crystal
    symm = cctbx.crystal.symmetry(xl.get_unit_cell(),
                                  space_group=xl.get_space_group())
    hkl_set = cctbx.miller.set(symm, reflections['miller_index'])
    asu_set = hkl_set.map_to_asu()
    reflections['asu_miller_index'] = asu_set.indices()

    # sort table by reduced hkl
    reflections.sort('asu_miller_index')

    if params.observations.integration_type == 'mix':
      raise Sorry('integration_type=mix is not supported yet')
    else:
      ikey = 'intensity.' + params.observations.integration_type + '.value'
      vkey = 'intensity.' + params.observations.integration_type + '.variance'

    # filters
    sel = reflections[vkey] > 0
    reflections = reflections.select(sel)
    if params.observations.i_over_sigma_cutoff > 0:
      ios = reflections[ikey] / flex.sqrt(reflections[vkey])
      sel = ios >= params.observations.i_over_sigma_cutoff
      reflections = reflections.select(sel)
    if params.observations.min_multiplicity > 0:
      sel = minimum_multiplicity_selection(reflections['asu_miller_index'],
        params.observations.min_multiplicity)
      reflections = reflections.select(sel)

    # extract columns of interest
    gp_idx = reflections['asu_miller_index']
    intensity = reflections[ikey]
    weight = 1. / reflections[vkey]
    phi = reflections['xyzcal.mm'].parts()[2]
    scale = flex.double(len(reflections), 1.0)

    # set up reflection grouping object
    self._go = GroupedObservations(gp_idx, intensity, weight, phi, scale)

    return
Example No. 15
 def i_over_s_vs_z(self, rlist):
   ''' Plot I/Sigma vs Z. '''
   from os.path import join
   I = rlist['intensity.sum.value']
   I_sig = flex.sqrt(rlist['intensity.sum.variance'])
   I_over_S = I / I_sig
   x, y, z = rlist['xyzcal.px'].parts()
   fig = pyplot.figure()
   pyplot.title("Distribution of I/Sigma vs Z")
   cax = pyplot.hexbin(z, flex.log(I_over_S), gridsize=100)
   cax.axes.set_xlabel("z")
   cax.axes.set_ylabel("Log I/Sigma")
   cbar = pyplot.colorbar(cax)
   cbar.ax.set_ylabel("# reflections")
   fig.savefig(join(self.directory, "ioversigma_vs_z.png"))
   pyplot.close()
Example No. 16
 def rmsd_vs_z(self, rlist):
   ''' Plot background CVRMSD vs Z. '''
   from os.path import join
   RMSD = flex.sqrt(rlist['background.mse'])
   MEAN = rlist['background.mean']
   RMSD = RMSD / MEAN
   x, y, z = rlist['xyzcal.px'].parts()
   fig = pyplot.figure()
   pyplot.title("Distribution of Background Model CVRMSD vs Z")
   cax = pyplot.hexbin(z, RMSD, gridsize=100)
   cax.axes.set_xlabel("z")
   cax.axes.set_ylabel("Background Model CVRMSD")
   cbar = pyplot.colorbar(cax)
   cbar.ax.set_ylabel("# reflections")
   fig.savefig(join(self.directory, "background_model_cvrmsd_vs_z.png"))
   pyplot.close()
  def get_beam_gradients(self, reflections):

    ds0_dbeam_p = self.beam_parameterisation.get_ds_dp()
    p_names = self.beam_parameterisation.get_param_names()

    n = len(reflections)
    U = flex.mat3_double(n, self.U)
    B = flex.mat3_double(n, self.B)
    UB = U*B

    # q is the reciprocal lattice vector, in the lab frame
    h = reflections['miller_index'].as_vec3_double()
    q = (UB * h)
    qlen = q.norms()
    qlen2 = q.dot(q)

    q_s0 = q + self.s0
    s1 = reflections['s1']
    ss = qlen2 + 2 * q.dot(self.s0) + self.s0len2
    assert (ss > 0.0).all_eq(True)
    s = flex.sqrt(ss)
    sss = s * ss
    inv_s = 1.0 / s
    inv_sss = 1.0 / sss

    # check equation 10
    tmp = self.s0len * (q_s0) / s
    for a, b in zip(s1, tmp): assert approx_equal(a, b)

    ds1_dp = {}

    # loop through the parameters
    for name, der in zip(p_names, ds0_dbeam_p):

      # term1
      term1 = self.us0.dot(der) * q_s0 + self.s0len * (der)
      term1 = term1 * inv_s

      # term2
      term2 = self.s0len * q_s0 * q_s0.dot(der)
      term2 = term2 * inv_sss

      name = 'Beam1' + name # XXXX Hack to get matching keys
      ds1_dp[name] = {'ds1':(term1 - term2)}

    return ds1_dp
  def get_crystal_unit_cell_gradients(self, reflections):

    # get derivatives of the B matrix wrt the parameters
    dB_dxluc_p = self.xl_unit_cell_parameterisation.get_ds_dp()
    p_names = self.xl_unit_cell_parameterisation.get_param_names()

    n = len(reflections)
    U = flex.mat3_double(n, self.U)
    B = flex.mat3_double(n, self.B)
    UB = U*B

    # q is the reciprocal lattice vector, in the lab frame
    h = reflections['miller_index'].as_vec3_double()
    q = (UB * h)
    qlen = q.norms()
    qlen2 = q.dot(q)

    q_s0 = q + self.s0
    s1 = reflections['s1']
    ss = qlen2 + 2 * q.dot(self.s0) + self.s0len2
    assert (ss > 0.0).all_eq(True)
    s = flex.sqrt(ss)
    sss = s * ss
    inv_s = 1.0 / s
    inv_sss = 1.0 / sss

    ds1_dp = {}

    # loop through the parameters
    for name, der in zip(p_names, dB_dxluc_p):

      # calculate the derivative of q for this parameter
      dq = U * flex.mat3_double(n, der.elems) * h

      # term1
      term1 = self.s0len * dq
      term1 = term1 * inv_s

      # term2
      term2 = self.s0len * q_s0 * q_s0.dot(dq)
      term2 = term2 * inv_sss

      name = 'Crystal1' + name # XXXX Hack to get matching keys
      ds1_dp[name] = {'ds1':(term1 - term2)}

    return ds1_dp
Example No. 19
  def check_reference(self, reference):
    ''' Check the reference spots. '''
    from dials.array_family import flex
    from dials.algorithms.image.centroid import centroid_image
    from math import sqrt

    # Get a load of stuff
    I_sim = reference['intensity.sim']
    I_exp = reference['intensity.exp']
    I_cal = reference['intensity.prf.value']
    I_var = reference['intensity.prf.variance']

    # Get the transformed shoeboxes
    profiles = reference['rs_shoebox']
    n_sigma = 3
    n_sigma2 = 5
    grid_size = 4
    step_size = n_sigma2 / (grid_size + 0.5)
    eps = 1e-7
    for i in range(len(profiles)):
      data = profiles[i].data
      #dmax = flex.max(data)
      #data = 100 * data / dmax
      #p = data.as_numpy_array()
      #p = p.astype(numpy.int)
      #print p
      print(flex.sum(data), I_exp[i], I_cal[i])
      #assert(abs(flex.sum(data) - I_exp[i]) < eps)
      centroid = centroid_image(data)
      m = centroid.mean()
      v = centroid.variance()
      s1 = tuple(sqrt(vv) for vv in v)
      s2 = tuple(ss * step_size for ss in s1)
      assert(all(abs(mm - (grid_size + 0.5)) < 0.25 for mm in m))
      assert(all(abs(ss2 - n_sigma / n_sigma2) < 0.25 for ss2 in s2))

    # Calculate Z
    Z = (I_cal - I_exp) / flex.sqrt(I_var)
    mv = flex.mean_and_variance(Z)
    Z_mean = mv.mean()
    Z_var = mv.unweighted_sample_variance()
    print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

    from matplotlib import pylab
    pylab.hist((I_cal - I_exp) / I_exp)
    pylab.show()
Example No. 20
def log_sum_i_sigi_vs_resolution(reflections, imageset, plot_filename=None):
  d_star_sq = flex.pow2(reflections['rlp'].norms())
  hist = get_histogram(d_star_sq)

  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  sel = variances > 0
  intensities = intensities.select(sel)
  variances = variances.select(sel)

  i_over_sigi = intensities/flex.sqrt(variances)
  #log_i_over_sigi = flex.log(i_over_sigi)

  slots = []
  for slot in hist.slot_infos():
    sel = (d_star_sq > slot.low_cutoff) & (d_star_sq < slot.high_cutoff)
    if sel.count(True) > 0:
      slots.append(math.log(flex.sum(i_over_sigi.select(sel))))
    else:
      slots.append(0)

  if plot_filename is not None:
    if pyplot is None:
      raise Sorry("matplotlib must be installed to generate a plot.")
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    #ax.bar(hist.slot_centers()-0.5*hist.slot_width(), hist.slots(),
    ax.scatter(hist.slot_centers()-0.5*hist.slot_width(), slots, s=20, color='blue', marker='o', alpha=0.5)
    ax.set_xlabel("d_star_sq")
    ax.set_ylabel("ln(sum(I/sigI))")

    ax_ = ax.twiny() # ax2 is responsible for "top" axis and "right" axis
    xticks = ax.get_xticks()
    xlim = ax.get_xlim()
    xticks_d = [
      uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks ]
    xticks_ = [ds2/(xlim[1]-xlim[0]) for ds2 in xticks]
    ax_.set_xticks(xticks)
    ax_.set_xlim(ax.get_xlim())
    ax_.set_xlabel(r"Resolution ($\AA$)")
    ax_.set_xticklabels(["%.1f" %d for d in xticks_d])
    #pyplot.show()
    pyplot.savefig(plot_filename)
    pyplot.close()
Example No. 22
    def _test_Imid_combinations(self):
        rows = []
        results = {}
        for Imid in self.Imids:
            combined_intensities = flex.double([])
            combined_sigmas = flex.double([])
            combined_scales = flex.double([])
            combined_indices = flex.miller_index([])
            for dataset in self.datasets:
                Int, Var = _get_Is_from_Imidval(dataset, Imid)
                Int *= dataset["prescaling_correction"]
                sigma = flex.sqrt(Var) * dataset["prescaling_correction"]
                combined_intensities.extend(Int)
                combined_sigmas.extend(sigma)
                combined_scales.extend(dataset["inverse_scale_factor"])
                combined_indices.extend(dataset["miller_index"])
            # apply scale factor before determining merging stats
            miller_set = miller.set(
                crystal_symmetry=self.active_scalers[0].experiment.crystal.
                get_crystal_symmetry(),
                indices=combined_indices,
                anomalous_flag=False,
            )
            i_obs = miller.array(miller_set,
                                 data=combined_intensities / combined_scales)
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(combined_sigmas / combined_scales)
            try:
                rmeas, cchalf = fast_merging_stats(array=i_obs)
                logger.debug("Imid: %s, Rmeas %s, cchalf %s", Imid, rmeas,
                             cchalf)
            except RuntimeError:
                raise DialsMergingStatisticsError(
                    "Unable to merge for intensity combination")

            # record the results
            results[Imid] = rmeas
            res_str = {0: "prf only", 1: "sum only"}
            if Imid not in res_str:
                res_str[Imid] = "Imid = " + str(round(Imid, 2))
            rows.append(
                [res_str[Imid],
                 str(round(cchalf, 5)),
                 str(round(rmeas, 5))])
        return rows, results
Example No. 23
def _reflection_table_to_iobs(table, unit_cell, space_group):

    miller_set = miller.set(
        crystal_symmetry=crystal.symmetry(
            unit_cell=unit_cell,
            space_group=space_group,
            assert_is_compatible_unit_cell=False,
        ),
        indices=table["asu_miller_index"],
        anomalous_flag=False,
    )
    i_obs = miller.array(
        miller_set, data=table["intensity"] / table["inverse_scale_factor"]
    )
    i_obs.set_observation_type_xray_intensity()
    i_obs.set_sigmas(flex.sqrt(table["variance"]) / table["inverse_scale_factor"])
    i_obs.set_info(miller.array_info(source="DIALS", source_type="reflection_tables"))
    return i_obs
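
A hedged usage sketch with a minimal table carrying the four columns the function reads; the cell and space group are placeholders:

from cctbx import sgtbx, uctbx
from dials.array_family import flex

table = flex.reflection_table()
table["asu_miller_index"] = flex.miller_index([(1, 0, 0)])
table["intensity"] = flex.double([100.0])
table["variance"] = flex.double([25.0])
table["inverse_scale_factor"] = flex.double([2.0])
i_obs = _reflection_table_to_iobs(
    table,
    uctbx.unit_cell((10, 10, 10, 90, 90, 90)),  # placeholder cell
    sgtbx.space_group_info("P 1").group())
print(list(i_obs.data()), list(i_obs.sigmas()))  # [50.0] [2.5]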
Example No. 24
 def unpack_stddev(self):
   # the data-to-parameter ratio will control which method for returning e.s.d.s
   data_to_parameter = float(self.N_raw_obs) / self.helper.x.size()
   self.helper.build_up()
   if data_to_parameter <= 4. and self.helper.x.size() < 500:
     # estimate standard deviations by singular value decomposition
     norm_mat_packed_upper = self.helper.get_normal_matrix()
     norm_mat_all_elems = self.packed_to_all(norm_mat_packed_upper)
     NM = sqr(norm_mat_all_elems)
     from scitbx.linalg.svd import inverse_via_svd
     svd_inverse,sigma = inverse_via_svd(NM.as_flex_double_matrix())
     IA = sqr(svd_inverse)
     estimated_stddev = flex.double([math.sqrt(IA(i,i)) for i in range(self.helper.x.size())])
   else:
     # estimate standard deviations by normal matrix curvatures
     diagonal_curvatures = self.helper.get_normal_matrix_diagonal()
     estimated_stddev = flex.sqrt(1./diagonal_curvatures)
   return self.fitted_as_annotated(estimated_stddev)
Example No. 25
def example_array(reflections):
    """Generate a miller array for a test."""
    exp_dict = {
        "__id__": "crystal",
        "real_space_a": [1.0, 0.0, 0.0],
        "real_space_b": [0.0, 1.0, 0.0],
        "real_space_c": [0.0, 0.0, 1.0],
        "space_group_hall_symbol": "-P 2yb",
    }
    crystal = Crystal.from_dict(exp_dict)
    ms = miller.set(
        crystal_symmetry=crystal.get_crystal_symmetry(),
        indices=reflections["miller_index"],
        anomalous_flag=False,
    )
    ma = miller.array(ms, data=reflections["intensity"])
    ma.set_sigmas(flex.sqrt(reflections["variance"]))
    return ma
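
A short check of example_array: the sigmas come back as the square roots of the supplied variances.

from dials.array_family import flex

refl = flex.reflection_table()
refl["miller_index"] = flex.miller_index([(1, 0, 0), (0, 1, 0)])
refl["intensity"] = flex.double([100.0, 200.0])   # illustrative values
refl["variance"] = flex.double([25.0, 16.0])
ma = example_array(refl)
print(list(ma.sigmas()))  # [5.0, 4.0]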
    def get_crystal_unit_cell_gradients(self, reflections):

        # get derivatives of the B matrix wrt the parameters
        dB_dxluc_p = self.xl_unit_cell_parameterisation.get_ds_dp()
        p_names = self.xl_unit_cell_parameterisation.get_param_names()

        n = len(reflections)
        U = flex.mat3_double(n, self.U)
        B = flex.mat3_double(n, self.B)
        UB = U * B

        # q is the reciprocal lattice vector, in the lab frame
        h = reflections["miller_index"].as_vec3_double()
        q = UB * h
        qlen2 = q.dot(q)

        q_s0 = q + self.s0
        ss = qlen2 + 2 * q.dot(self.s0) + self.s0len2
        assert (ss > 0.0).all_eq(True)
        s = flex.sqrt(ss)
        sss = s * ss
        inv_s = 1.0 / s
        inv_sss = 1.0 / sss

        ds1_dp = {}

        # loop through the parameters
        for name, der in zip(p_names, dB_dxluc_p):

            # calculate the derivative of q for this parameter
            dq = U * flex.mat3_double(n, der.elems) * h

            # term1
            term1 = self.s0len * dq
            term1 = term1 * inv_s

            # term2
            term2 = self.s0len * q_s0 * q_s0.dot(dq)
            term2 = term2 * inv_sss

            name = "Crystal1" + name  # XXXX Hack to get matching keys
            ds1_dp[name] = {"ds1": (term1 - term2)}

        return ds1_dp
Example No. 27
    def _test_Imid_combinations(self):
        """Test the different combinations, returning the rows and results dict."""
        rows = []
        results = {}

        for Imid in self.Imids:
            Int, Var = _get_Is_from_Imidval(self.dataset, Imid)
            miller_set = miller.set(
                crystal_symmetry=self.experiment.crystal.get_crystal_symmetry(
                    assert_is_compatible_unit_cell=False
                ),
                indices=self.dataset["miller_index"],
                anomalous_flag=False,
            )
            i_obs = miller.array(
                miller_set,
                data=(
                    Int
                    * self.dataset["prescaling_correction"]
                    / self.dataset["inverse_scale_factor"]
                ),
            )
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(
                flex.sqrt(Var)
                * self.dataset["prescaling_correction"]
                / self.dataset["inverse_scale_factor"]
            )
            try:
                rmeas, cchalf = fast_merging_stats(array=i_obs)
                logger.debug("Imid: %s, Rmeas %s, cchalf %s", Imid, rmeas, cchalf)
            except RuntimeError:
                raise DialsMergingStatisticsError(
                    "Unable to merge for intensity combination"
                )

            # record the results
            results[Imid] = rmeas
            res_str = {0: "prf only", 1: "sum only"}
            if Imid not in res_str:
                res_str[Imid] = "Imid = " + str(round(Imid, 2))
            rows.append([res_str[Imid], str(round(cchalf, 5)), str(round(rmeas, 5))])

        return rows, results
Example No. 28
 def mean_vs_ios(self, rlist):
     """ Analyse the correlations. """
     MEAN = rlist["background.mean"]
     I = rlist["intensity.sum.value"]
     I_sig = flex.sqrt(rlist["intensity.sum.variance"])
     I_over_S = I / I_sig
     mask = I_over_S > 0.1
     I_over_S = I_over_S.select(mask)
     MEAN = MEAN.select(mask)
     fig = pyplot.figure()
     pyplot.title("Background Model mean vs Log I/Sigma")
     cax = pyplot.hexbin(flex.log(I_over_S), MEAN, gridsize=100)
     cbar = pyplot.colorbar(cax)
     cax.axes.set_xlabel("Log I/Sigma")
     cax.axes.set_ylabel("Background Model mean")
     cbar.ax.set_ylabel("# reflections")
     fig.savefig(
         os.path.join(self.directory, "background_model_mean_vs_ios.png"))
     pyplot.close()
Example No. 29
    def get_amplitudes(self, dials_model, refl_table, test_without_mpi=True):
        D = dials_model
        R = refl_table
        from cctbx.crystal import symmetry
        from cctbx.miller import array, set as miller_set
        uc = D.crystal.get_unit_cell()
        sg = D.crystal.get_space_group()
        MS = miller_set(symmetry(unit_cell=uc, space_group=sg),
                        anomalous_flag=True,
                        indices=R["miller_index"].select(R["spots_order"]))
        self.amplitudes = array(MS,
                                data=flex.sqrt(
                                    R["spots_mockup_shoebox_sum"].select(
                                        R["spots_order"])))

        from simtbx.gpu import gpu_energy_channels
        recommend_device = int(os.environ.get("CCTBX_RECOMMEND_DEVICE", 0))
        self.gpu_channels_singleton = gpu_energy_channels(
            deviceId=recommend_device)
Example No. 30
 def unpack_stddev(self):
     # the data-to-parameter ratio will control which method for returning e.s.d.s
     data_to_parameter = float(self.N_raw_obs) / self.helper.x.size()
     self.helper.build_up()
     if data_to_parameter <= 4. and self.helper.x.size() < 500:
         # estimate standard deviations by singular value decomposition
         norm_mat_packed_upper = self.helper.get_normal_matrix()
         norm_mat_all_elems = self.packed_to_all(norm_mat_packed_upper)
         NM = sqr(norm_mat_all_elems)
         from scitbx.linalg.svd import inverse_via_svd
         svd_inverse, sigma = inverse_via_svd(NM.as_flex_double_matrix())
         IA = sqr(svd_inverse)
         estimated_stddev = flex.double(
             [math.sqrt(IA(i, i)) for i in range(self.helper.x.size())])
     else:
         # estimate standard deviations by normal matrix curvatures
         diagonal_curvatures = self.helper.get_normal_matrix_diagonal()
         estimated_stddev = flex.sqrt(1. / diagonal_curvatures)
     return self.fitted_as_annotated(estimated_stddev)
Example No. 31
    def __init__(self, reflection_file, experiment_file):

        data = pickle.load(open(reflection_file, "rb"))

        self.data = data.select(data["intensity.sum.variance"] > 0)

        i = self.data["intensity.sum.value"]
        v = self.data["intensity.sum.variance"]
        s = flex.sqrt(v)
        self.i_s = i / s

        self.scale = 2

        from dxtbx.model.experiment_list import ExperimentListFactory

        expt = ExperimentListFactory.from_json_file(experiment_file)
        panel = expt.detectors()[0][0]
        crystal = expt.crystals()[0]
        self.s0 = matrix.col(expt.beams()[0].get_s0())
        wavelength = expt.beams()[0].get_wavelength()

        # make a list of observed q positions

        self.qobs = []
        for j in range(self.data.size()):
            x, y, z = self.data["xyzobs.px.value"][j]
            p = matrix.col(panel.get_pixel_lab_coord((x, y)))
            q = p.normalize() / wavelength - self.s0
            self.qobs.append(q)

        self.wavelength = wavelength
        self.panel = panel
        self.beam = expt.beams()[0]
        self.crystal = crystal

        # slurp data from $somewhere

        imageset = expt.imagesets()[0]
        self.raw_data = imageset.get_raw_data(0)[0]
        self.imageset = imageset

        return
Example No. 32
    def __init__(self, reflection_file, experiment_file):
        # make a pie

        data = pickle.load(open(reflection_file, "rb"))
        print("%d reflections" % data.size())

        self.data = data.select(data["intensity.sum.variance"] > 0)

        i = self.data["intensity.sum.value"]
        v = self.data["intensity.sum.variance"]
        s = flex.sqrt(v)
        self.i_s = i / s

        from dxtbx.model.experiment_list import ExperimentListFactory

        expt = ExperimentListFactory.from_json_file(experiment_file)
        crystal = expt.crystals()[0]
        self.s0 = matrix.col(expt.beams()[0].get_s0())

        from dials.algorithms.refinement.parameterisation.crystal_parameters import (
            CrystalUnitCellParameterisation,
            CrystalOrientationParameterisation,
        )

        self.crystal = crystal
        self.cucp = CrystalUnitCellParameterisation(crystal)
        self.cop = CrystalOrientationParameterisation(crystal)

        # 0-point and deltas
        values = flex.double(self.cucp.get_param_vals() +
                             self.cop.get_param_vals())
        offset = flex.double([0.01 * v for v in self.cucp.get_param_vals()] +
                             [0.1, 0.1, 0.1])

        initial = crystal.get_unit_cell()
        initial_score = self.target(values)
        doohicky = simple_simplex(values, offset, self, 2000)
        best = doohicky.get_solution()
        print("Initial cell:", initial)
        print("Final cell:  ", crystal.get_unit_cell())
        print("Score change", initial_score, self.target(best, do_print=False))
        self.best = best
Example No. 33
 def centroid_diff_z(self, rlist, threshold):
     """ Look at the centroid difference in x, y """
     I = rlist["intensity.sum.value"]
     I_sig = flex.sqrt(rlist["intensity.sum.variance"])
     I_over_S = I / I_sig
     mask = I_over_S > threshold
     rlist = rlist.select(mask)
     assert len(rlist) > 0
     xc, yc, zc = rlist["xyzcal.px"].parts()
     xo, yo, zo = rlist["xyzobs.px.value"].parts()
     zd = zo - zc
     fig = pyplot.figure()
     pyplot.title("Difference between observed and calculated in Z")
     cax = pyplot.hexbin(zc, zd, gridsize=100)
     cax.axes.set_xlabel("z (images)")
     cax.axes.set_ylabel("Difference in z position")
     cbar = pyplot.colorbar(cax)
     cbar.ax.set_ylabel("# Reflections")
     fig.savefig(os.path.join(self.directory, "centroid_diff_z.png"))
     pyplot.close()
Example No. 34
        def get_correlation(cb_op=None):
            """ Helper function to get CC to the reference given an operator """
            # Build a miller array for the experiment reflections
            exp_miller_indices = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            exp_intensities = miller.array(
                exp_miller_indices, exp_reflections['intensity.sum.value'],
                flex.sqrt(exp_reflections['intensity.sum.variance']))
            if cb_op:
                exp_intensities = exp_intensities.change_basis(
                    cb_op).map_to_asu()

            # Extract an array of HKLs from the model to match the experiment HKLs
            matching_indices = miller.match_multi_indices(
                miller_indices_unique=model_intensities.indices(),
                miller_indices=exp_intensities.indices())

            # Least squares
            scaling_result = scaler.fit_experiment_to_reference(
                model_intensities, exp_intensities, matching_indices)
            return scaling_result.correlation if scaling_result.correlation is not None else -1
Example No. 35
 def centroid_diff_z(self, rlist, threshold):
   ''' Look at the centroid difference in x, y '''
   from os.path import join
   I = rlist['intensity.sum.value']
   I_sig = flex.sqrt(rlist['intensity.sum.variance'])
   I_over_S = I / I_sig
   mask = I_over_S > threshold
   rlist = rlist.select(mask)
   assert(len(rlist) > 0)
   xc, yc, zc = rlist['xyzcal.px'].parts()
   xo, yo, zo = rlist['xyzobs.px.value'].parts()
   zd = zo - zc
   fig = pyplot.figure()
   pyplot.title("Difference between observed and calculated in Z")
   cax = pyplot.hexbin(zc, zd, gridsize=100)
   cax.axes.set_xlabel("z")
   cax.axes.set_ylabel("Difference in z position")
   cbar = pyplot.colorbar(cax)
   cbar.ax.set_ylabel("# Reflections")
   fig.savefig(join(self.directory, "centroid_diff_z.png"))
   pyplot.close()
Example No. 36
  def _find_nearest_neighbours_single(self, oxyz, pxyz):
    '''
    Find the nearest predicted spot to the observed spot.

    :param observed: The observed reflections
    :param predicted: The predicted reflections

    :returns: (nearest neighbours, distance)

    '''
    from annlib_ext import AnnAdaptor
    from scitbx.array_family import flex

    # Create the KD Tree
    ann = AnnAdaptor(pxyz.as_double().as_1d(), 3)

    # Query to find all the nearest neighbours
    ann.query(oxyz.as_double().as_1d())

    # Return the nearest neighbours and distances
    return ann.nn, flex.sqrt(ann.distances)
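
The same AnnAdaptor pattern in isolation, on made-up coordinates; ann.distances holds squared distances, which is why flex.sqrt is applied:

from annlib_ext import AnnAdaptor
from scitbx.array_family import flex

obs = flex.vec3_double([(10.0, 10.0, 1.0)])   # illustrative observed spot
pred = flex.vec3_double([(10.5, 10.0, 1.0), (50.0, 50.0, 2.0)])
ann = AnnAdaptor(pred.as_double().as_1d(), 3)  # 3-dimensional KD tree
ann.query(obs.as_double().as_1d())
print(list(ann.nn), list(flex.sqrt(ann.distances)))  # nearest is index 0 at distance 0.5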
Example No. 37
    def get_amplitudes(self, dials_model, refl_table, test_without_mpi=True):
        from LS49.adse13_187.cyto_batch import parse_input
        self.params, options = parse_input()

        D = dials_model
        R = refl_table
        from cctbx.crystal import symmetry
        from cctbx.miller import array, set as miller_set
        uc = D.crystal.get_unit_cell()
        sg = D.crystal.get_space_group()
        MS = miller_set(symmetry(unit_cell=uc, space_group=sg),
                        anomalous_flag=True,
                        indices=R["miller_index"].select(R["spots_order"]))
        self.amplitudes = array(MS,
                                data=flex.sqrt(
                                    R["spots_mockup_shoebox_sum"].select(
                                        R["spots_order"])))

        from simtbx.gpu import gpu_energy_channels
        self.gpu_channels_singleton = gpu_energy_channels(
            deviceId=0)  # determine device by rank id later
Example No. 38
 def modify_errors(self, reflections):
      '''Formerly sdfac_auto; the ha14 method applies an SDFAC to each image's data, assuming negative intensities are normally distributed noise.'''
     assert 0 == (reflections['intensity.sum.variance'] <= 0.0).count(True)
     I_over_sig = reflections['intensity.sum.value'] / flex.sqrt(
         reflections['intensity.sum.variance'])
      negative_I_over_sig = I_over_sig.select(I_over_sig < 0.)
      # assert that at least a few I/sigmas are less than zero
      negative_I_over_sig_count = negative_I_over_sig.size()
     if negative_I_over_sig_count > 2:
         # get a rough estimate for the SDFAC, assuming that negative measurements
         # represent false predictions and therefore normally distributed noise.
         no_signal = negative_I_over_sig
         for xns in range(len(no_signal)):
             no_signal.append(-no_signal[xns])
         Stats = flex.mean_and_variance(no_signal)
         SDFAC = Stats.unweighted_sample_standard_deviation()
     else:
         SDFAC = 1.
     self.logger.log("The applied SDFAC is %7.4f" % SDFAC)
     reflections['intensity.sum.variance'] *= (SDFAC**2)
     return reflections
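
The SDFAC estimate in isolation, on made-up values: the negative I/sigma tail is mirrored about zero and the standard deviation of the mirrored sample is taken as the scale factor.

from dials.array_family import flex

neg = flex.double([-0.5, -1.2, -2.0])  # illustrative negative I/sigma values
no_signal = neg.deep_copy()
for x in neg:
    no_signal.append(-x)  # mirror the tail about zero
sdfac = flex.mean_and_variance(no_signal).unweighted_sample_standard_deviation()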
Example No. 39
    def _find_nearest_neighbours_single(self, oxyz, pxyz):
        '''
        Find the nearest predicted spot to the observed spot.

        :param observed: The observed reflections
        :param predicted: The predicted reflections

        :returns: (nearest neighbours, distance)

        '''
        from annlib_ext import AnnAdaptor
        from scitbx.array_family import flex

        # Create the KD Tree
        ann = AnnAdaptor(pxyz.as_double().as_1d(), 3)

        # Query to find all the nearest neighbours
        ann.query(oxyz.as_double().as_1d())

        # Return the nearest neighbours and distances
        return ann.nn, flex.sqrt(ann.distances)
Example No. 40
    def jacobian_callable(self, values):
        PB = self.get_partiality_array(values)
        EXP = flex.exp(-2. * values.BFACTOR * self.DSSQ)
        G_terms = (EXP * PB * self.ICALCVEC)
        B_terms = (values.G * EXP * PB * self.ICALCVEC) * (-2. * self.DSSQ)
        P_terms = (values.G * EXP * self.ICALCVEC)

        thetax = values.thetax
        thetay = values.thetay
        Rx = matrix.col((1, 0, 0)).axis_and_angle_as_r3_rotation_matrix(thetax)
        dRx_dthetax = matrix.col(
            (1, 0, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetax)
        Ry = matrix.col((0, 1, 0)).axis_and_angle_as_r3_rotation_matrix(thetay)
        dRy_dthetay = matrix.col(
            (0, 1, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetay)
        ref_ori = matrix.sqr(self.ORI.reciprocal_matrix())
        miller_vec = self.MILLER.as_vec3_double()
        ds1_dthetax = flex.mat3_double(len(self.MILLER),
                                       Ry * dRx_dthetax * ref_ori) * miller_vec
        ds1_dthetay = flex.mat3_double(len(self.MILLER),
                                       dRy_dthetay * Rx * ref_ori) * miller_vec

        s1vec = self.get_s1_array(values)
        s1lenvec = flex.sqrt(s1vec.dot(s1vec))
        dRh_dthetax = s1vec.dot(ds1_dthetax) / s1lenvec
        dRh_dthetay = s1vec.dot(ds1_dthetay) / s1lenvec
        rs = values.RS
        Rh = self.get_Rh_array(values)
        rs_sq = rs * rs
        denomin = (2. * Rh * Rh + rs_sq)
        dPB_dRh = {
            "lorentzian": -PB * 4. * Rh / denomin,
            "gaussian": -PB * 4. * math.log(2) * Rh / rs_sq
        }[self.profile_shape]
        dPB_dthetax = dPB_dRh * dRh_dthetax
        dPB_dthetay = dPB_dRh * dRh_dthetay
        Px_terms = P_terms * dPB_dthetax
        Py_terms = P_terms * dPB_dthetay

        return [G_terms, B_terms, 0, Px_terms, Py_terms]
Example No. 41
def wilson_outliers(reflections, ice_sel=None, p_cutoff=1e-2):
  # http://scripts.iucr.org/cgi-bin/paper?ba0032
  if ice_sel is None:
    ice_sel = flex.bool(len(reflections), False)

  E_cutoff = math.sqrt(-math.log(p_cutoff))
  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']

  Sigma_n = flex.mean(intensities.select(~ice_sel))
  normalised_amplitudes = flex.sqrt(intensities)/math.sqrt(Sigma_n)

  outliers = normalised_amplitudes >= E_cutoff

  if outliers.count(True):
    # iterative outlier rejection
    inliers = ~outliers
    outliers.set_selected(
      inliers, wilson_outliers(
        reflections.select(inliers), ice_sel.select(inliers)))

  return outliers
Example No. 42
def wilson_outliers(reflections, ice_sel=None, p_cutoff=1e-2):
    # http://scripts.iucr.org/cgi-bin/paper?ba0032
    if ice_sel is None:
        ice_sel = flex.bool(len(reflections), False)

    E_cutoff = math.sqrt(-math.log(p_cutoff))
    intensities = reflections["intensity.sum.value"]

    Sigma_n = flex.mean(intensities.select(~ice_sel))
    normalised_amplitudes = flex.sqrt(intensities) / math.sqrt(Sigma_n)

    outliers = normalised_amplitudes >= E_cutoff

    if outliers.count(True):
        # iterative outlier rejection
        inliers = ~outliers
        outliers.set_selected(
            inliers,
            wilson_outliers(reflections.select(inliers), ice_sel.select(inliers)),
        )

    return outliers
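
A sanity check on synthetic intensities, nine weak spots and one strong one; only the strong spot's normalised amplitude exceeds the E cutoff:

from dials.array_family import flex

refl = flex.reflection_table()
refl["intensity.sum.value"] = flex.double([1.0] * 9 + [100.0])  # made-up data
print(list(wilson_outliers(refl)))  # True only for the 100.0 measurement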
Example No. 43
    def _xl_unit_cell_derivatives(self,
                                  isel,
                                  parameterisation=None,
                                  reflections=None):

        # Get required data
        h = self._h.select(isel)
        B = self._B.select(isel)
        wl = self._wavelength.select(isel)

        # get derivatives of the B matrix wrt the parameters
        dB_dxluc_p = [
            None if der is None else flex.mat3_double(len(isel), der.elems)
            for der in parameterisation.get_ds_dp(use_none_as_null=True)
        ]

        d2theta_dp = []

        # loop through the parameters
        for der in dB_dxluc_p:

            if der is None:
                d2theta_dp.append(None)
                continue

            r0 = B * h
            dr0 = der * h
            r0len = r0.norms()
            dr0len = dr0.dot(r0) / r0len

            # 2theta = 2 * arcsin( |r0| / (2 * |s0| ) )
            sintheta = 0.5 * r0len * wl
            fac = 1.0 / flex.sqrt(
                flex.double(len(wl), 1.0) - flex.pow2(sintheta))
            val = fac * wl * dr0len

            d2theta_dp.append(val)

        return d2theta_dp
Example No. 44
            def correct(refl_sele, smart_sigmas=True):
                kapton_correction = image_kapton_correction(
                    panel_size_px=panel_size_px,
                    pixel_size_mm=pixel_size_mm,
                    detector_dist_mm=detector_dist_mm,
                    wavelength_ang=wavelength_ang,
                    reflections_sele=refl_sele,
                    params=self.params,
                    expt=expt,
                    refl=refl,
                    smart_sigmas=smart_sigmas,
                    logger=self.logger,
                )

                k_corr, k_sigmas = kapton_correction()
                refl_sele["kapton_absorption_correction"] = k_corr
                if smart_sigmas:
                    refl_sele["kapton_absorption_correction_sigmas"] = k_sigmas
                    # apply corrections and propagate error
                    # term1 = (sig(C)/C)^2
                    # term2 = (sig(Imeas)/Imeas)^2
                    # I' = C*I
                    # sig^2(I') = (I')^2*(term1 + term2)
                    integrated_data = refl_sele["intensity.sum.value"]
                    integrated_variance = refl_sele["intensity.sum.variance"]
                    integrated_sigma = flex.sqrt(integrated_variance)
                    term1 = flex.pow(k_sigmas / k_corr, 2)
                    term2 = flex.pow(integrated_sigma / integrated_data, 2)
                    integrated_data *= k_corr
                    integrated_variance = flex.pow(integrated_data,
                                                   2) * (term1 + term2)
                    refl_sele["intensity.sum.value"] = integrated_data
                    refl_sele["intensity.sum.variance"] = integrated_variance
                    # order is purposeful: the two lines above require that integrated_data
                    # has already been corrected!
                else:
                    refl_sele["intensity.sum.value"] *= k_corr
                    refl_sele["intensity.sum.variance"] *= flex.pow2(k_corr)
                return refl_sele
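
The propagation step in isolation, on made-up numbers: for I' = C * I the propagated variance is sig^2(I') = I'^2 * ((sig_C/C)^2 + (sig_I/I)^2).

from dials.array_family import flex

I = flex.double([100.0])     # illustrative intensity
var_I = flex.double([25.0])
C = flex.double([0.9])       # illustrative correction factor
sig_C = flex.double([0.02])  # its uncertainty
I_corr = C * I
var_corr = flex.pow2(I_corr) * (flex.pow2(sig_C / C) + flex.pow2(flex.sqrt(var_I) / I))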
Example No. 45
 def ideal_reflection_corr_vs_ios(self, rlist, filename):
   ''' Analyse the correlations. '''
   from os.path import join
   if 'correlation.ideal.profile' in rlist:
     corr = rlist['correlation.ideal.profile']
     I = rlist['intensity.prf.value']
     I_sig = flex.sqrt(rlist['intensity.prf.variance'])
     mask = I_sig > 0
     I = I.select(mask)
     I_sig = I_sig.select(mask)
     corr = corr.select(mask)
     I_over_S = I / I_sig
     mask = I_over_S > 0.1
     I_over_S = I_over_S.select(mask)
     corr = corr.select(mask)
     pyplot.title("Reflection correlations vs Log I/Sigma")
     cax = pyplot.hexbin(flex.log(I_over_S), corr, gridsize=100)
     cbar = pyplot.colorbar(cax)
     pyplot.xlabel("Log I/Sigma")
     pyplot.ylabel("Correlation with reference profile")
     cbar.ax.set_ylabel("# reflections")
     pyplot.savefig(join(self.directory, "ideal_%s_corr_vs_ios.png" % filename))
     pyplot.close()
Example No. 46
 def reflection_corr_vs_ios(self, rlist, filename):
     """ Analyse the correlations. """
     corr = rlist["profile.correlation"]
     I = rlist["intensity.prf.value"]
     I_sig = flex.sqrt(rlist["intensity.prf.variance"])
     mask = I_sig > 0
     I = I.select(mask)
     I_sig = I_sig.select(mask)
     corr = corr.select(mask)
     I_over_S = I / I_sig
     mask = I_over_S > 0.1
     I_over_S = I_over_S.select(mask)
     corr = corr.select(mask)
     fig = pyplot.figure()
     pyplot.title("Reflection correlations vs Log I/Sigma")
     cax = pyplot.hexbin(flex.log(I_over_S), corr, gridsize=100)
     cbar = pyplot.colorbar(cax)
     cax.axes.set_xlabel("Log I/Sigma")
     cax.axes.set_ylabel("Correlation with reference profile")
     cbar.ax.set_ylabel("# reflections")
     fig.savefig(
         os.path.join(self.directory, "%s_corr_vs_ios.png" % filename))
     pyplot.close()
Example No. 47
def test_as_miller_array():
    table = flex.reflection_table()
    table["intensity.1.value"] = flex.double([1.0, 2.0, 3.0])
    table["intensity.1.variance"] = flex.double([0.25, 1.0, 4.0])
    table["miller_index"] = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0)])

    crystal = Crystal(
        real_space_a=(10, 0, 0),
        real_space_b=(0, 11, 0),
        real_space_c=(0, 0, 12),
        space_group=sgtbx.space_group_info("P 222").group(),
    )
    experiment = Experiment(crystal=crystal)

    iobs = table.as_miller_array(experiment, intensity="1")
    assert list(iobs.data()) == list(table["intensity.1.value"])
    assert list(iobs.sigmas()) == list(flex.sqrt(table["intensity.1.variance"]))

    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
    table["intensity.2.value"] = flex.double([1.0, 2.0, 3.0])
    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
def match(a, b, max_separation=2, key="xyzobs.px.value", scale=(1, 1, 1)):
    """Match reflections from list a and list b, returning tuple of
    flex.size_t indices, optionally specifying the maximum distance and
    key to search on (which is assumed to be a 3-vector column). Can also
    apply relative scales to the vectors before matching in case e.g. very
    wide or very fine slicing."""

    xyz_a = a[key]
    xyz_b = b[key]

    if scale != (1, 1, 1):
        x, y, z = xyz_a.parts()
        x *= scale[0]
        y *= scale[1]
        z *= scale[2]
        xyz_a = flex.vec3_double(x, y, z)

        x, y, z = xyz_b.parts()
        x *= scale[0]
        y *= scale[1]
        z *= scale[2]
        xyz_b = flex.vec3_double(x, y, z)

    a = xyz_a.as_double().as_1d()
    b = xyz_b.as_double().as_1d()
    ann = AnnAdaptorSelfInclude(a, 3)
    ann.query(b)

    mm = flex.size_t(range(xyz_b.size()))
    nn, distance = ann.nn, flex.sqrt(ann.distances)

    sel = distance <= max_separation

    mm = mm.select(sel)
    nn = nn.select(sel)
    distance = distance.select(sel)
    return nn, mm, distance
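
A small usage sketch with two toy tables (assuming the module-level imports used by match, such as AnnAdaptorSelfInclude, are available); nn indexes into a and mm into b:

from dials.array_family import flex

a = flex.reflection_table()
a["xyzobs.px.value"] = flex.vec3_double([(1.0, 1.0, 1.0), (5.0, 5.0, 5.0)])
b = flex.reflection_table()
b["xyzobs.px.value"] = flex.vec3_double([(1.1, 1.0, 1.0)])
nn, mm, distance = match(a, b)  # one pair: b[0] matches a[0] at distance 0.1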
Example No. 49
  def setup_work_arrays(self, reflections):
    '''Select multiply-measured HKLs. Calculate and cache reflection deltas, deltas squared, and HKL means for every reflection'''
    self.deltas     = flex.double()
    self.work_table = flex.reflection_table()
    delta_sq        = flex.double()
    mean            = flex.double() # mean = <I'_hj>
    biased_mean     = flex.double() # biased_mean = <I_h>, so don't leave out any reflection
    var             = flex.double()
    all_biased_mean = flex.double()

    for refls in reflection_table_utils.get_next_hkl_reflection_table(reflections):
      number_of_measurements = refls.size()
      if number_of_measurements == 0: # if the returned "refls" list is empty, it's the end of the input "reflections" list
        break
      refls_biased_mean = flex.double(len(refls), flex.mean(refls['intensity.sum.value']))
      all_biased_mean.extend(refls_biased_mean)

      if number_of_measurements > self.params.merging.minimum_multiplicity:
        nn_factor_sqrt = math.sqrt((number_of_measurements - 1) / number_of_measurements)
        i_sum = flex.double(number_of_measurements, flex.sum(refls['intensity.sum.value']))
        i_sum_minus_val = i_sum - refls['intensity.sum.value']
        mean_without_val = i_sum_minus_val/(number_of_measurements-1)
        delta = nn_factor_sqrt * (refls['intensity.sum.value'] - mean_without_val)
        self.deltas.extend(delta/flex.sqrt(refls['intensity.sum.variance'])) # Please be careful about where to put the var
        delta_sq.extend(delta**2)
        mean.extend(mean_without_val)
        biased_mean.extend(refls_biased_mean)
        var.extend(refls['intensity.sum.variance'])

    self.work_table["delta_sq"]    = delta_sq
    self.work_table["mean"]        = mean
    self.work_table["biased_mean"] = biased_mean
    self.work_table["var"]         = var
    reflections['biased_mean'] = all_biased_mean
    self.logger.log("Number of work reflections selected: %d"%self.deltas.size())
    return reflections
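
The leave-one-out deviate in isolation: each measurement is compared with the mean of the other n - 1 measurements, and the sqrt((n-1)/n) factor restores the variance of a single observation (the values here are made up):

import math
from dials.array_family import flex

I = flex.double([10.0, 12.0, 8.0, 11.0])
n = len(I)
i_sum = flex.double(n, flex.sum(I))
mean_without = (i_sum - I) / (n - 1)                 # leave-one-out means
delta = math.sqrt((n - 1) / n) * (I - mean_without)  # variance-normalised deviates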
Example No. 50
  def __call__(self, experiments, reflections):
    results = flex.reflection_table()
    table_header = ["","","","I","IsigI","N >","RMSD","Cutoff"]
    table_header2 = ["Bin","Resolution Range","Completeness","","","cutoff","(um)",""]

    for exp_id in range(len(experiments)):
      print("*"*80)
      print("Significance filtering experiment", exp_id)
      table_data = []
      table_data.append(table_header)
      table_data.append(table_header2)
      experiment = experiments[exp_id]

      # Find the bins for this experiment
      crystal = experiment.crystal
      refls = reflections.select(reflections['id'] == exp_id)
      sym = symmetry(unit_cell = crystal.get_unit_cell(), space_group = crystal.get_space_group())
      d = crystal.get_unit_cell().d(refls['miller_index'])
      mset = sym.miller_set(indices = refls['miller_index'], anomalous_flag=False)
      binner = mset.setup_binner(n_bins=self.params.n_bins)
      acceptable_resolution_bins = []

      # Iterate through the bins, examining I/sigI at each bin
      for i in binner.range_used():
        d_max, d_min = binner.bin_d_range(i)
        sel = (d <= d_max) & (d > d_min)
        sel &= refls['intensity.sum.value'] > 0
        bin_refls = refls.select(sel)
        n_refls = len(bin_refls)
        avg_i = flex.mean(bin_refls['intensity.sum.value']) if n_refls > 0 else 0
        avg_i_sigi = flex.mean(bin_refls['intensity.sum.value'] /
                               flex.sqrt(bin_refls['intensity.sum.variance'])) if n_refls > 0 else 0
        acceptable_resolution_bins.append(avg_i_sigi >= self.params.isigi_cutoff)

        bright_refls = bin_refls.select((bin_refls['intensity.sum.value']/flex.sqrt(bin_refls['intensity.sum.variance'])) >= self.params.isigi_cutoff)
        n_bright = len(bright_refls)

        rmsd_obs = 1000*math.sqrt((bright_refls['xyzcal.mm']-bright_refls['xyzobs.mm.value']).sum_sq()/n_bright) if n_bright > 0 else 0

        table_row = []
        table_row.append("%3d"%i)
        table_row.append("%-13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                   show_d_range=True, show_counts=False))
        table_row.append("%13s"%binner.bin_legend(i_bin=i,show_bin_number=False,show_bin_range=False,
                                                  show_d_range=False, show_counts=True))

        table_row.append("%.1f"%(avg_i))
        table_row.append("%.1f"%(avg_i_sigi))
        table_row.append("%3d"%n_bright)
        table_row.append("%.1f"%(rmsd_obs))
        table_data.append(table_row)

      # Throw out bins that go back above the cutoff after the first non-passing bin is found
      acceptable_resolution_bins = [acceptable_resolution_bins[i] for i in range(len(acceptable_resolution_bins))
                                    if False not in acceptable_resolution_bins[:i+1]]

      for b, row in zip(acceptable_resolution_bins, table_data[2:]):
        if b:
          row.append("X")
      print(table_utils.format(table_data,has_header=2,justify='center',delim=" "))

      # Save the results
      if any(acceptable_resolution_bins):
        best_index = acceptable_resolution_bins.count(True)-1
        best_row = table_data[best_index+2]
        d_min = binner.bin_d_range(binner.range_used()[best_index])[1]
        print("best row:", " ".join(best_row))
        if self.params.enable:
          results.extend(refls.select(d >= d_min))
      else:
        print("Data didn't pass cutoff")
    if self.params.enable:
      return results
    else:
      return reflections
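
The bin pruning above keeps only the leading run of resolution shells that pass the I/sigI cutoff; any shell that passes again after the first failure is discarded. A minimal standalone sketch of that rule (hypothetical `passes` list of booleans, not part of the original code):

def leading_passing_bins(passes):
    """Keep only the initial run of True values; bins after the
    first failing bin are discarded even if they pass again."""
    kept = []
    for ok in passes:
        if not ok:
            break
        kept.append(ok)
    return kept

# e.g. leading_passing_bins([True, True, False, True]) -> [True, True]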
Ejemplo n.º 51
0
    def apply_significance_filter(self, experiments, reflections):

        self.logger.log_step_time("SIGNIFICANCE_FILTER")

        # Apply an I/sigma filter ... accept resolution bins only if they
        #   have significant signal; tends to screen out higher resolution observations
        #   if the integration model doesn't quite fit
        unit_cell = self.params.scaling.unit_cell
        if unit_cell is None:
            try:
                unit_cell = self.params.statistics.average_unit_cell
            except AttributeError:
                pass
        target_symm = symmetry(
            unit_cell=unit_cell,
            space_group_info=self.params.scaling.space_group)

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        for expt_id, experiment in enumerate(experiments):
            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)
            if not len(exp_reflections): continue

            N_obs_pre_filter = exp_reflections.size()

            N_bins_small_set = N_obs_pre_filter // self.params.select.significance_filter.min_ct
            N_bins_large_set = N_obs_pre_filter // self.params.select.significance_filter.max_ct

            # Ensure there is at least one bin.
            N_bins = max([
                min([
                    self.params.select.significance_filter.n_bins,
                    N_bins_small_set
                ]), N_bins_large_set, 1
            ])

            #print ("\nN_obs_pre_filter %d"%N_obs_pre_filter)
            #print >> out, "Total obs %d Choose n bins = %d"%(N_obs_pre_filter,N_bins)
            #if indices_to_edge is not None:
            #  print >> out, "Total preds %d to edge of detector"%indices_to_edge.size()

            # Build a miller array for the experiment reflections
            exp_miller_indices = miller.set(target_symm,
                                            exp_reflections['miller_index'],
                                            True)
            exp_observations = miller.array(
                exp_miller_indices, exp_reflections['intensity.sum.value'],
                flex.sqrt(exp_reflections['intensity.sum.variance']))

            assert exp_observations.size() == exp_reflections.size()

            out = StringIO()
            bin_results = show_observations(exp_observations,
                                            out=out,
                                            n_bins=N_bins)

            if self.params.output.log_level == 0:
                self.logger.log(out.getvalue())

            acceptable_resolution_bins = [
                bin.mean_I_sigI > self.params.select.significance_filter.sigma
                for bin in bin_results
            ]

            acceptable_nested_bin_sequences = [
                i for i in range(len(acceptable_resolution_bins))
                if False not in acceptable_resolution_bins[:i + 1]
            ]

            if len(acceptable_nested_bin_sequences) == 0:
                continue
            else:
                N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1

                imposed_res_filter = float(bin_results[N_acceptable_bins -
                                                       1].d_range.split()[2])
                self.logger.log(
                    "Experiment id %d, image index %d, resolution cutoff %f\n"
                    % (expt_id, experiment.imageset.indices()[0],
                       imposed_res_filter))

                imposed_res_sel = exp_observations.resolution_filter_selection(
                    d_min=imposed_res_filter)

                assert imposed_res_sel.size() == exp_reflections.size()

                new_exp_reflections = exp_reflections.select(imposed_res_sel)

                if new_exp_reflections.size() > 0:
                    new_experiments.append(experiment)
                    new_reflections.extend(new_exp_reflections)

                #self.logger.log("N acceptable bins %d"%N_acceptable_bins)
                #self.logger.log("Old n_obs: %d, new n_obs: %d"%(N_obs_pre_filter, exp_observations.size()))
                #if indices_to_edge is not None:
                #  print >> out, "Total preds %d to edge of detector"%indices_to_edge.size()

        removed_reflections = len(reflections) - len(new_reflections)
        removed_experiments = len(experiments) - len(new_experiments)

        self.logger.log(
            "Reflections rejected because of significance filter: %d" %
            removed_reflections)
        self.logger.log(
            "Experiments rejected because of significance filter: %d" %
            removed_experiments)

        # MPI-reduce total counts
        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI
        total_removed_reflections = comm.reduce(removed_reflections, MPI.SUM,
                                                0)
        total_removed_experiments = comm.reduce(removed_experiments, MPI.SUM,
                                                0)

        # rank 0: log total counts
        if self.mpi_helper.rank == 0:
            self.logger.main_log(
                "Total reflections rejected because of significance filter: %d"
                % total_removed_reflections)
            self.logger.main_log(
                "Total experiments rejected because of significance filter: %d"
                % total_removed_experiments)

        self.logger.log_step_time("SIGNIFICANCE_FILTER", True)

        return new_experiments, new_reflections
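
For orientation, the per-shell acceptance test above reduces to a mean I/sigI per resolution bin. A minimal numpy sketch of that statistic, not the cctbx show_observations machinery; d_spacings, i_obs, sig_i and bin_edges are assumed inputs:

import numpy as np

def mean_i_sigi_per_bin(d_spacings, i_obs, sig_i, bin_edges):
    """Mean I/sigI in resolution shells bounded by bin_edges
    (descending d values, len(bin_edges) == n_bins + 1)."""
    means = []
    for d_max, d_min in zip(bin_edges[:-1], bin_edges[1:]):
        sel = (d_spacings <= d_max) & (d_spacings > d_min)
        means.append(float(np.mean(i_obs[sel] / sig_i[sel])) if sel.any() else 0.0)
    return means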
Ejemplo n.º 52
0
    def test_for_reflections(self, refl):
        from dials.algorithms.integration.sum import IntegrationAlgorithm
        from dials.array_family import flex
        from dials.algorithms.statistics import \
          kolmogorov_smirnov_test_standard_normal

        # Get the calculated background and simulated background
        B_sim = refl['background.sim.a'].as_double()
        I_sim = refl['intensity.sim'].as_double()
        I_exp = refl['intensity.exp']

        # Set the background as simulated
        shoebox = refl['shoebox']
        for i in range(len(shoebox)):
            bg = shoebox[i].background
            ms = shoebox[i].mask
            for j in range(len(bg)):
                bg[j] = B_sim[i]

        # Integrate
        integration = IntegrationAlgorithm()
        integration(refl)
        I_cal = refl['intensity.sum.value']
        I_var = refl['intensity.sum.variance']

        # Only select variances greater than zero
        mask = I_var > 0
        I_cal = I_cal.select(mask)
        I_var = I_var.select(mask)
        I_sim = I_sim.select(mask)
        I_exp = I_exp.select(mask)

        # Calculate the z score
        perc = self.mv3n_tolerance_interval(3 * 3)
        Z = (I_cal - I_exp) / flex.sqrt(I_var)
        mv = flex.mean_and_variance(Z)
        Z_mean = mv.mean()
        Z_var = mv.unweighted_sample_variance()
        print "Z: mean: %f, var: %f" % (Z_mean, Z_var)

        # Do the kolmogorov smirnov test
        D, p = kolmogorov_smirnov_test_standard_normal(Z)
        print "KS: D: %f, p-value: %f" % (D, p)

        # FIXME The Z scores should follow a standard normal distribution. When
        # background is the dominant component they do, but when the intensity
        # dominates, the variance of the Z scores decreases, indicating that
        # the variance is overestimated for strong signal.
        assert (abs(Z_mean) <= 3 * Z_var)

        #from matplotlib import pylab
        #pylab.hist(Z, 20)
        #pylab.show()

        #Z_I = sorted(Z)
        ##n = int(0.05 * len(Z_I))
        ##Z_I = Z_I[n:-n]
        ##mv = flex.mean_and_variance(flex.double(Z_I))
        ##print "Mean: %f, Sdev: %f" % (mv.mean(), mv.unweighted_sample_standard_deviation())
        #edf = [float(i+1) / len(Z_I) for i in range(len(Z_I))]
        #cdf = [0.5 * (1.0 + erf(z / sqrt(2.0))) for z in Z_I]

        print('OK')
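
The standard-normal check above can be reproduced outside dials with scipy, if only as a sanity check; a minimal sketch, where z stands in for (I_cal - I_exp) / sqrt(I_var):

import numpy as np
from scipy import stats

z = np.random.normal(size=10000)           # stand-in for the computed z scores
d_stat, p_value = stats.kstest(z, "norm")  # compare against the standard normal CDF
print("KS: D: %f, p-value: %f" % (d_stat, p_value))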
Ejemplo n.º 53
0
def export_sadabs(integrated_data, experiment_list, params):
    """Export data from integrated_data corresponding to experiment_list to a
    file for input to SADABS. FIXME probably need to make a .p4p file as
    well..."""

    from dials.array_family import flex

    # for the moment assume (and assert) that we will convert data from exactly
    # one lattice...

    assert len(experiment_list) == 1
    # select reflections that are assigned to an experiment (i.e. non-negative id)

    integrated_data = integrated_data.select(integrated_data["id"] >= 0)
    assert max(integrated_data["id"]) == 0

    # export for sadabs should only be for non-scaled reflections
    assert any(i in integrated_data
               for i in ["intensity.sum.value", "intensity.prf.value"])

    integrated_data = filter_reflection_table(
        integrated_data,
        intensity_choice=params.intensity,
        partiality_threshold=params.mtz.partiality_threshold,
        combine_partials=params.mtz.combine_partials,
        min_isigi=params.mtz.min_isigi,
        filter_ice_rings=params.mtz.filter_ice_rings,
        d_min=params.mtz.d_min,
    )

    experiment = experiment_list[0]
    assert experiment.scan is not None

    # sort data before output
    nref = len(integrated_data["miller_index"])
    indices = flex.size_t_range(nref)
    perm = sorted(indices, key=lambda k: integrated_data["miller_index"][k])
    integrated_data = integrated_data.select(flex.size_t(perm))

    assert experiment.goniometer is not None

    # Warn of unhelpful SADABS behaviour for certain multi-sequence data sets
    hkl_file_root, _ = os.path.splitext(params.sadabs.hklout)
    if not params.sadabs.run or re.search("_0+$", hkl_file_root):
        logger.warning(
            "It seems SADABS rejects multi-sequence data when the first "
            "filename ends "
            "'_0', '_00', etc., with a cryptic error message:\n"
            "\t'Inconsistent 2theta values in same scan'.\n"
            "You may need to begin the numbering of your SADABS HKL files from 1, "
            "rather than 0, and ensure the SADABS run/batch number is greater than 0."
        )

    axis = matrix.col(experiment.goniometer.get_rotation_axis_datum())

    beam = matrix.col(experiment.beam.get_sample_to_source_direction())
    s0 = matrix.col(experiment.beam.get_s0())

    F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
    S = matrix.sqr(experiment.goniometer.get_setting_rotation())
    unit_cell = experiment.crystal.get_unit_cell()

    if params.debug:
        m_format = "%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f"
        c_format = "%.2f %.2f %.2f %.2f %.2f %.2f"

        logger.info(
            "Unit cell parameters from experiment: %s",
            c_format % unit_cell.parameters(),
        )
        logger.info(
            "Symmetry: %s",
            experiment.crystal.get_space_group().type().lookup_symbol())

        logger.info("Goniometer fixed matrix:\n%s", m_format % F.elems)
        logger.info("Goniometer setting matrix:\n%s", m_format % S.elems)
        logger.info("Goniometer scan axis:\n%6.3f%6.3f%6.3f", axis.elems)

    # detector scaling info
    assert len(experiment.detector) == 1
    panel = experiment.detector[0]
    dims = panel.get_image_size()
    pixel = panel.get_pixel_size()
    fast_axis = matrix.col(panel.get_fast_axis())
    slow_axis = matrix.col(panel.get_slow_axis())
    normal = fast_axis.cross(slow_axis)
    detector2t = s0.angle(normal, deg=True)

    if params.debug:
        logger.info("Detector fast, slow axes:")
        logger.info("%6.3f%6.3f%6.3f", fast_axis.elems)
        logger.info("%6.3f%6.3f%6.3f", slow_axis.elems)
        logger.info("Detector two theta (degrees): %.2f", detector2t)

    scl_x = 512.0 / (dims[0] * pixel[0])
    scl_y = 512.0 / (dims[1] * pixel[1])

    image_range = experiment.scan.get_image_range()

    from cctbx.array_family import flex as cflex  # implicit import # noqa: F401
    from cctbx.miller import map_to_asu_isym  # implicit import # noqa: F401

    # gather the required information for the reflection file

    nref = len(integrated_data["miller_index"])

    miller_index = integrated_data["miller_index"]

    if "intensity.sum.value" in integrated_data:
        I = integrated_data["intensity.sum.value"]
        V = integrated_data["intensity.sum.variance"]
        assert V.all_gt(0)
        sigI = flex.sqrt(V)
    else:
        I = integrated_data["intensity.prf.value"]
        V = integrated_data["intensity.prf.variance"]
        assert V.all_gt(0)
        sigI = flex.sqrt(V)

    # figure out scaling to make sure data fit into format 2F8.2 i.e. Imax < 1e5

    Imax = flex.max(I)

    if params.debug:
        logger.info("Maximum intensity in file: %8.2f", Imax)

    if Imax > 99999.0:
        scale = 99999.0 / Imax
        I = I * scale
        sigI = sigI * scale

    phi_start, phi_range = experiment.scan.get_image_oscillation(
        image_range[0])

    if params.sadabs.predict:
        logger.info("Using scan static predicted spot locations")
        from dials.algorithms.spot_prediction import ScanStaticReflectionPredictor

        predictor = ScanStaticReflectionPredictor(experiment)
        UB = experiment.crystal.get_A()
        predictor.for_reflection_table(integrated_data, UB)

    if not experiment.crystal.num_scan_points:
        logger.info("No scan varying model: use static")
        static = True
    else:
        static = False

    with open(params.sadabs.hklout, "w") as fout:

        for j in range(nref):

            h, k, l = miller_index[j]

            if params.sadabs.predict:
                x_mm, y_mm, z_rad = integrated_data["xyzcal.mm"][j]
            else:
                x_mm, y_mm, z_rad = integrated_data["xyzobs.mm.value"][j]

            z0 = integrated_data["xyzcal.px"][j][2]
            istol = int(round(10000 * unit_cell.stol((h, k, l))))

            if params.sadabs.predict or static:
                # work from a scan static model & assume perfect goniometer
                # FIXME maybe should work back in the option to predict spot positions
                UB = matrix.sqr(experiment.crystal.get_A())
                phi = phi_start + z0 * phi_range
                R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
                RUB = S * R * F * UB
            else:
                # properly compute RUB for every reflection
                UB = matrix.sqr(
                    experiment.crystal.get_A_at_scan_point(int(round(z0))))
                phi = phi_start + z0 * phi_range
                R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
                RUB = S * R * F * UB

            x = RUB * (h, k, l)
            s = (s0 + x).normalize()

            # can also compute s based on centre of mass of spot
            # s = (origin + x_mm * fast_axis + y_mm * slow_axis).normalize()

            astar = (RUB * (1, 0, 0)).normalize()
            bstar = (RUB * (0, 1, 0)).normalize()
            cstar = (RUB * (0, 0, 1)).normalize()

            ix = beam.dot(astar)
            iy = beam.dot(bstar)
            iz = beam.dot(cstar)

            dx = s.dot(astar)
            dy = s.dot(bstar)
            dz = s.dot(cstar)

            x = x_mm * scl_x
            y = y_mm * scl_y
            z = (z_rad * 180 / math.pi - phi_start) / phi_range

            fout.write("%4d%4d%4d%8.2f%8.2f%4d%8.5f%8.5f%8.5f%8.5f%8.5f%8.5f" %
                       (h, k, l, I[j], sigI[j], params.sadabs.run, ix, dx, iy,
                        dy, iz, dz))
            fout.write("%7.2f%7.2f%8.2f%7.2f%5d\n" %
                       (x, y, z, detector2t, istol))

    logger.info("Output %d reflections to %s", nref, params.sadabs.hklout)
Ejemplo n.º 54
0
    def _multiplicity_mean_error_stddev(self,
                                        calculate_variances=False,
                                        keep_singles=False):
        """
        Calculate aggregate properties of grouped symmetry-equivalent reflections.

        Populate the reflection table of observations with the following
        properties:
          * ``multiplicity`` — Multiplicity of observations of a given reflection
          in the asymmetric unit;
          :type: `dials.array_family_flex_ext.int` array
          * ``intensity.mean.value`` — Mean of symmetry-equivalent reflections,
          weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array
          * ``intensity.mean.std_error`` — Standard error on the weighted mean;
          :type: `dials.array_family_flex_ext.double` array
          * (optional) ``intensity.mean.variance`` — variance of
          symmetry-equivalent reflections, weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array

        :param calculate_variances: Elect whether to calculate the weighted
        variances.  Defaults to False, to spare an expensive computation.
        :type calculate_variances: bool
        :param keep_singles: Choose whether to keep single-multiplicity
        reflections.
        :type keep_singles: bool
        """

        for key, rtable in self.rtables.items():
            # Sort the reflection table for speedier iteration.
            rtable.sort("miller_index.asu")
            # Record the positions of any multiplicity-1 reflections.
            if not keep_singles:
                singles = flex.size_t()
            # Record the multiplicities.
            multiplicity = flex.int()
            # For weighted averaging.
            weights = 1 / rtable["intensity.sum.variance"]
            sum_weights = flex.double()
            if calculate_variances:
                sum_square_weights = flex.double()
            # Calculate the weighted mean intensities.
            i_means = flex.double()
            # Calculate the standard deviations from unbiased weighted variances.
            variances = flex.double()

            # Iterate over the reflections, grouping by equivalent Miller index,
            # to calculate multiplicities, weighted mean intensities, etc..
            # Some time can be saved by only calculating variances if necessary.
            # Initial values:
            prev_index = None
            count = 1
            # The following will be set during loop iteration
            i_sum, sum_weight, sum_square_weight = None, None, None
            # One big loop through the entire reflection table:
            for j in range(rtable.size()):
                index = rtable["miller_index.asu"][j]
                weight = weights[j]
                # Aggregate within a symmetry-equivalent group of reflections:
                if index == prev_index:
                    count += 1
                    i_sum += weight * rtable["intensity.sum.value"][j]
                    sum_weight += weight
                    if calculate_variances:
                        sum_square_weight += weight * weight
                # Record the aggregated values for the group:
                elif prev_index:
                    if count == 1 and not keep_singles:
                        singles.append(j - 1)
                    multiplicity.extend(flex.int(count, count))
                    i_means.extend(flex.double(count, i_sum / sum_weight))
                    sum_weights.extend(flex.double(count, sum_weight))
                    if calculate_variances:
                        sum_square_weights.extend(
                            flex.double(count, sum_square_weight))
                    # And reinitialise:
                    prev_index = index
                    count = 1
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
                # Handle the first row:
                else:
                    prev_index = rtable["miller_index.asu"][j]
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
            # Record the aggregated values for the last group:
            if count == 1 and not keep_singles:
                singles.append(rtable.size() - 1)
            multiplicity.extend(flex.int(count, count))
            i_means.extend(flex.double(count, i_sum / sum_weight))
            sum_weights.extend(flex.double(count, sum_weight))
            if calculate_variances:
                sum_square_weights.extend(flex.double(count,
                                                      sum_square_weight))

            # Discard singletons:
            if not keep_singles:
                singles_del = flex.bool(rtable.size(), True)
                singles_del.set_selected(singles, False)
                multiplicity, weights, sum_weights, i_means = [
                    a.select(singles_del)
                    for a in (multiplicity, weights, sum_weights, i_means)
                ]
                rtable.del_selected(singles)
                if calculate_variances:
                    sum_square_weights = sum_square_weights.select(singles_del)

            # Record the multiplicities in the reflection table.
            rtable["multiplicity"] = multiplicity
            # Record the weighted mean intensities in the reflection table.
            rtable["intensity.mean.value"] = i_means
            # Record the standard errors on the means in the reflection table.
            rtable["intensity.mean.std_error"] = flex.sqrt(1 / sum_weights)

            if calculate_variances:
                # Initialise values:
                prev_index = None
                weighted_sum_square_residual = None
                for j in range(rtable.size()):
                    index = rtable["miller_index.asu"][j]
                    weight = weights[j]
                    residual = rtable["intensity.sum.value"][j] - i_means[j]
                    # Aggregate within a symmetry-equivalent group of reflections:
                    if index == prev_index:
                        count += 1
                        weighted_sum_square_residual += weight * residual * residual
                    # Record the aggregated value for the group:
                    elif prev_index:
                        # The weighted variance is undefined for multiplicity=1,
                        # use the measured variance instead in this case.
                        if count == 1:
                            variances.append(
                                rtable["intensity.sum.variance"][j - 1])
                        else:
                            sum_weight = sum_weights[j - 1]
                            var_weight = 1 / (
                                sum_weight -
                                sum_square_weights[j - 1] / sum_weight)
                            variances.extend(
                                flex.double(
                                    count,
                                    weighted_sum_square_residual * var_weight))
                        # Reinitialise:
                        prev_index = index
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                    # Handle the first row:
                    else:
                        prev_index = rtable["miller_index.asu"][j]
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                # Record the aggregated values for the last group:
                # The weighted variance is undefined for multiplicity=1,
                # use the measured variance instead in this case.
                if count == 1:
                    variances.append(rtable["intensity.sum.variance"][-1])
                else:
                    sum_weight = sum_weights[-1]
                    var_weight = 1 / (sum_weight -
                                      sum_square_weights[-1] / sum_weight)
                    variances.extend(
                        flex.double(count,
                                    weighted_sum_square_residual * var_weight))
                # Record the variances in the reflection table.
                rtable["intensity.mean.variance"] = variances

            self.rtables[key] = rtable
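
The accumulators above implement the standard inverse-variance weighted statistics; a numpy sketch of the same three quantities for a single group of symmetry equivalents (assumed arrays i_obs, var_obs):

import numpy as np

def weighted_group_stats(i_obs, var_obs):
    """Weighted mean, its standard error, and the unbiased weighted
    sample variance for one group of equivalent reflections."""
    w = 1.0 / np.asarray(var_obs)
    x = np.asarray(i_obs)
    sum_w = w.sum()
    mean = (w * x).sum() / sum_w
    std_error = np.sqrt(1.0 / sum_w)
    # unbiased weighted variance: sum(w*(x - mean)**2) / (sum_w - sum(w**2)/sum_w)
    var = (w * (x - mean) ** 2).sum() / (sum_w - (w * w).sum() / sum_w)
    return mean, std_error, var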
Ejemplo n.º 55
0
    def write_columns(self, integrated_data):
        """Write the column definitions AND data to the current dataset."""

        # now create the actual data structures - first keep a track of the columns

        # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
        # LP MPART FLAG BGPKRATIOS

        # gather the required information for the reflection file

        nref = len(integrated_data["miller_index"])
        assert nref
        xdet, ydet, _ = [
            flex.double(x) for x in integrated_data["xyzobs.px.value"].parts()
        ]

        # now add column information...

        # FIXME add DIALS_FLAG which can include e.g. was partial etc.

        type_table = {
            "H": "H",
            "K": "H",
            "L": "H",
            "I": "J",
            "SIGI": "Q",
            "IPR": "J",
            "SIGIPR": "Q",
            "BG": "R",
            "SIGBG": "R",
            "XDET": "R",
            "YDET": "R",
            "BATCH": "B",
            "BGPKRATIOS": "R",
            "WIDTH": "R",
            "MPART": "I",
            "M_ISYM": "Y",
            "FLAG": "I",
            "LP": "R",
            "FRACTIONCALC": "R",
            "ROT": "R",
            "QE": "R",
        }

        # derive index columns from original indices with
        #
        # from m.replace_original_index_miller_indices
        #
        # so all that is needed now is to make space for the reflections - fill with
        # zeros...

        self.mtz_file.adjust_column_array_sizes(nref)
        self.mtz_file.set_n_reflections(nref)
        dataset = self.current_dataset

        # assign H, K, L, M_ISYM space
        for column in "H", "K", "L", "M_ISYM":
            dataset.add_column(column, type_table[column]).set_values(
                flex.double(nref, 0.0).as_float())

        self.mtz_file.replace_original_index_miller_indices(
            integrated_data["miller_index"])

        dataset.add_column("BATCH", type_table["BATCH"]).set_values(
            integrated_data["batch"].as_double().as_float())

        # if intensity values used in scaling exist, then just export these as I, SIGI
        if "intensity.scale.value" in integrated_data:
            I_scaling = integrated_data["intensity.scale.value"]
            V_scaling = integrated_data["intensity.scale.variance"]
            # Trap negative variances
            assert V_scaling.all_gt(0)
            dataset.add_column("I", type_table["I"]).set_values(
                I_scaling.as_float())
            dataset.add_column("SIGI", type_table["SIGI"]).set_values(
                flex.sqrt(V_scaling).as_float())
            dataset.add_column("SCALEUSED", "R").set_values(
                integrated_data["inverse_scale_factor"].as_float())
            dataset.add_column("SIGSCALEUSED", "R").set_values(
                flex.sqrt(integrated_data["inverse_scale_factor_variance"]).
                as_float())
        else:
            if "intensity.prf.value" in integrated_data:
                if "intensity.sum.value" in integrated_data:
                    col_names = ("IPR", "SIGIPR")
                else:
                    col_names = ("I", "SIGI")
                I_profile = integrated_data["intensity.prf.value"]
                V_profile = integrated_data["intensity.prf.variance"]
                # Trap negative variances
                assert V_profile.all_gt(0)
                dataset.add_column(col_names[0], type_table["I"]).set_values(
                    I_profile.as_float())
                dataset.add_column(col_names[1],
                                   type_table["SIGI"]).set_values(
                                       flex.sqrt(V_profile).as_float())
            if "intensity.sum.value" in integrated_data:
                I_sum = integrated_data["intensity.sum.value"]
                V_sum = integrated_data["intensity.sum.variance"]
                # Trap negative variances
                assert V_sum.all_gt(0)
                dataset.add_column("I", type_table["I"]).set_values(
                    I_sum.as_float())
                dataset.add_column("SIGI", type_table["SIGI"]).set_values(
                    flex.sqrt(V_sum).as_float())
        if ("background.sum.value" in integrated_data
                and "background.sum.variance" in integrated_data):
            bg = integrated_data["background.sum.value"]
            varbg = integrated_data["background.sum.variance"]
            assert (varbg >= 0).count(False) == 0
            sigbg = flex.sqrt(varbg)
            dataset.add_column("BG",
                               type_table["BG"]).set_values(bg.as_float())
            dataset.add_column("SIGBG", type_table["SIGBG"]).set_values(
                sigbg.as_float())

        dataset.add_column("FRACTIONCALC",
                           type_table["FRACTIONCALC"]).set_values(
                               integrated_data["fractioncalc"].as_float())

        dataset.add_column("XDET",
                           type_table["XDET"]).set_values(xdet.as_float())
        dataset.add_column("YDET",
                           type_table["YDET"]).set_values(ydet.as_float())
        dataset.add_column("ROT", type_table["ROT"]).set_values(
            integrated_data["ROT"].as_float())
        if "lp" in integrated_data:
            dataset.add_column("LP", type_table["LP"]).set_values(
                integrated_data["lp"].as_float())
        if "qe" in integrated_data:
            dataset.add_column("QE", type_table["QE"]).set_values(
                integrated_data["qe"].as_float())
        elif "dqe" in integrated_data:
            dataset.add_column("QE", type_table["QE"]).set_values(
                integrated_data["dqe"].as_float())
        else:
            dataset.add_column("QE", type_table["QE"]).set_values(
                flex.double(nref, 1.0).as_float())
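
Every intensity column written above is paired with a sigma column obtained by guarding the variances and then taking the square root; the same guard-then-sqrt pattern as a trivial standalone sketch:

import math

def sigmas_from_variances(variances):
    """SIG values from variances; refuse non-positive variances outright."""
    assert all(v > 0 for v in variances), "non-positive variances present"
    return [math.sqrt(v) for v in variances]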
Ejemplo n.º 56
0
  def integrate(self, experiments, indexed):
    from time import time

    st = time()

    logger.info('*' * 80)
    logger.info('Integrating Reflections')
    logger.info('*' * 80)


    indexed,_ = self.process_reference(indexed)

    # Get the integrator from the input parameters
    logger.info('Configuring integrator from input parameters')
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.algorithms.integration.integrator import IntegratorFactory
    from dials.array_family import flex

    # Compute the profile model
    # Predict the reflections
    # Match the predictions with the reference
    # Create the integrator
    experiments = ProfileModelFactory.create(self.params, experiments, indexed)
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Predicting reflections")
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=self.params.prediction.d_min,
      dmax=self.params.prediction.d_max,
      margin=self.params.prediction.margin,
      force_static=self.params.prediction.force_static)
    predicted.match_with_reference(indexed)
    logger.info("")
    integrator = IntegratorFactory.create(self.params, experiments, predicted)

    # Integrate the reflections
    integrated = integrator.integrate()

    # Select only those reflections which were integrated
    if 'intensity.prf.variance' in integrated:
      selection = integrated.get_flags(
        integrated.flags.integrated,
        all=True)
    else:
      selection = integrated.get_flags(
        integrated.flags.integrated_sum)
    integrated = integrated.select(selection)

    len_all = len(integrated)
    integrated = integrated.select(~integrated.get_flags(integrated.flags.foreground_includes_bad_pixels))
    print "Filtering %d reflections with at least one bad foreground pixel out of %d"%(len_all-len(integrated), len_all)

    # verify sigmas are sensible
    if 'intensity.prf.value' in integrated:
      if (integrated['intensity.prf.variance'] <= 0).count(True) > 0:
        raise Sorry("Found negative variances")
    if 'intensity.sum.value' in integrated:
      if (integrated['intensity.sum.variance'] <= 0).count(True) > 0:
        raise Sorry("Found negative variances")
      # apply detector gain to summation variances
      integrated['intensity.sum.variance'] *= self.params.integration.summation.detector_gain
    if 'background.sum.value' in integrated:
      if (integrated['background.sum.variance'] < 0).count(True) > 0:
        raise Sorry("Found negative variances")
      if (integrated['background.sum.variance'] == 0).count(True) > 0:
        print "Filtering %d reflections with zero background variance" % ((integrated['background.sum.variance'] == 0).count(True))
        integrated = integrated.select(integrated['background.sum.variance'] > 0)
      # apply detector gain to background summation variances
      integrated['background.sum.variance'] *= self.params.integration.summation.detector_gain

    if self.params.output.integrated_filename:
      # Save the reflections
      self.save_reflections(integrated, self.params.output.integrated_filename)

    self.write_integration_pickles(integrated, experiments)
    from dials.algorithms.indexing.stills_indexer import calc_2D_rmsd_and_displacements

    rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed)
    log_str = "RMSD indexed (px): %f\n"%(rmsd_indexed)
    for i in range(6):
      bright_integrated = integrated.select((integrated['intensity.sum.value']/flex.sqrt(integrated['intensity.sum.variance']))>=i)
      if len(bright_integrated) > 0:
        rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated)
      else:
        rmsd_integrated = 0
      log_str += "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n"%(i, len(bright_integrated), rmsd_integrated)

    crystal_model = experiments.crystals()[0]

    if hasattr(crystal_model, '_ML_domain_size_ang'):
      log_str += ". Final ML model: domain size angstroms: %f, half mosaicity degrees: %f"%(crystal_model._ML_domain_size_ang, crystal_model._ML_half_mosaicity_deg)
    logger.info(log_str)

    logger.info('')
    logger.info('Time Taken = %f seconds' % (time() - st))
    return integrated
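
The closing loop above tabulates how many reflections survive successive I/sigI thresholds; a compact numpy sketch of the same tally (assumed inputs, not the dials API):

import numpy as np

def counts_above_isigi(i_obs, var_obs, thresholds=range(6)):
    """Number of reflections with I/sigI >= t for each threshold t."""
    i_sigi = np.asarray(i_obs) / np.sqrt(np.asarray(var_obs))
    return {t: int((i_sigi >= t).sum()) for t in thresholds}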
Ejemplo n.º 57
0
def export_sadabs(integrated_data, experiment_list, hklout, run=0,
                  summation=False, include_partials=False, keep_partials=False,
                  debug=False, predict=True):
  '''Export data from integrated_data corresponding to experiment_list to a
  file for input to SADABS. FIXME probably need to make a .p4p file as
  well...'''

  from dials.array_family import flex
  from scitbx import matrix
  import math

  # for the moment assume (and assert) that we will convert data from exactly
  # one lattice...

  assert(len(experiment_list) == 1)
  # select reflections that are assigned to an experiment (i.e. non-negative id)

  integrated_data = integrated_data.select(integrated_data['id'] >= 0)
  assert max(integrated_data['id']) == 0

  if not summation:
    assert('intensity.prf.value' in integrated_data)

  # strip out negative variance reflections: these should not really be there
  # FIXME Doing select on summation results. Should do on profile result if
  # present? Yes

  if 'intensity.prf.variance' in integrated_data:
    selection = integrated_data.get_flags(
      integrated_data.flags.integrated,
      all=True)
  else:
    selection = integrated_data.get_flags(
      integrated_data.flags.integrated_sum)
  integrated_data = integrated_data.select(selection)

  selection = integrated_data['intensity.sum.variance'] <= 0
  if selection.count(True) > 0:
    integrated_data.del_selected(selection)
    logger.info('Removing %d reflections with negative variance' % \
          selection.count(True))

  if 'intensity.prf.variance' in integrated_data:
    selection = integrated_data['intensity.prf.variance'] <= 0
    if selection.count(True) > 0:
      integrated_data.del_selected(selection)
      logger.info('Removing %d profile reflections with negative variance' % \
            selection.count(True))

  if include_partials:
    integrated_data = sum_partial_reflections(integrated_data)
    integrated_data = scale_partial_reflections(integrated_data)

  if 'partiality' in integrated_data:
    selection = integrated_data['partiality'] < 0.99
    if selection.count(True) > 0 and not keep_partials:
      integrated_data.del_selected(selection)
      logger.info('Removing %d incomplete reflections' % \
        selection.count(True))

  experiment = experiment_list[0]
  assert experiment.scan is not None

  # sort data before output
  nref = len(integrated_data['miller_index'])
  indices = flex.size_t_range(nref)
  perm = sorted(indices, key=lambda k: integrated_data['miller_index'][k])
  integrated_data = integrated_data.select(flex.size_t(perm))

  assert experiment.goniometer is not None

  axis = matrix.col(experiment.goniometer.get_rotation_axis_datum())

  beam = matrix.col(experiment.beam.get_direction())
  s0 = matrix.col(experiment.beam.get_s0())

  F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
  S = matrix.sqr(experiment.goniometer.get_setting_rotation())
  unit_cell = experiment.crystal.get_unit_cell()

  if debug:
    m_format = '%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f\n%6.3f%6.3f%6.3f'
    c_format = '%.2f %.2f %.2f %.2f %.2f %.2f'

    logger.info('Unit cell parameters from experiment: %s' % (c_format %
         unit_cell.parameters()))
    logger.info('Symmetry: %s' % experiment.crystal.get_space_group().type(
         ).lookup_symbol())

    logger.info('Goniometer fixed matrix:\n%s' % (m_format % F.elems))
    logger.info('Goniometer setting matrix:\n%s' % (m_format % S.elems))
    logger.info('Goniometer scan axis:\n%6.3f%6.3f%6.3f' % (axis.elems))

  # detector scaling info
  assert(len(experiment.detector) == 1)
  panel = experiment.detector[0]
  dims = panel.get_image_size()
  pixel = panel.get_pixel_size()
  fast_axis = matrix.col(panel.get_fast_axis())
  slow_axis = matrix.col(panel.get_slow_axis())
  normal = fast_axis.cross(slow_axis)
  detector2t = s0.angle(normal, deg=True)
  origin = matrix.col(panel.get_origin())

  if debug:
    logger.info('Detector fast, slow axes:')
    logger.info('%6.3f%6.3f%6.3f' % (fast_axis.elems))
    logger.info('%6.3f%6.3f%6.3f' % (slow_axis.elems))
    logger.info('Detector two theta (degrees): %.2f' % detector2t)

  scl_x = 512.0 / (dims[0] * pixel[0])
  scl_y = 512.0 / (dims[1] * pixel[1])

  image_range = experiment.scan.get_image_range()

  from cctbx.array_family import flex as cflex # implicit import
  from cctbx.miller import map_to_asu_isym # implicit import

  # gather the required information for the reflection file

  nref = len(integrated_data['miller_index'])
  zdet = flex.double(integrated_data['xyzcal.px'].parts()[2])

  miller_index = integrated_data['miller_index']

  I = None
  sigI = None

  # export including scale factors

  if 'lp' in integrated_data:
    lp = integrated_data['lp']
  else:
    lp = flex.double(nref, 1.0)
  if 'dqe' in integrated_data:
    dqe = integrated_data['dqe']
  else:
    dqe = flex.double(nref, 1.0)
  scl = lp / dqe

  if summation:
    I = integrated_data['intensity.sum.value'] * scl
    V = integrated_data['intensity.sum.variance'] * scl * scl
    assert V.all_gt(0)
    sigI = flex.sqrt(V)
  else:
    I = integrated_data['intensity.prf.value'] * scl
    V = integrated_data['intensity.prf.variance'] * scl * scl
    assert V.all_gt(0)
    sigI = flex.sqrt(V)

  # figure out scaling to make sure data fit into format 2F8.2 i.e. Imax < 1e5

  Imax = flex.max(I)

  if debug:
    logger.info('Maximum intensity in file: %8.2f' % Imax)

  if Imax > 99999.0:
    scale = 99999.0 / Imax
    I = I * scale
    sigI = sigI * scale

  phi_start, phi_range = experiment.scan.get_image_oscillation(image_range[0])

  if predict:
    logger.info('Using scan static predicted spot locations')
    from dials.algorithms.spot_prediction import ScanStaticReflectionPredictor
    predictor = ScanStaticReflectionPredictor(experiment)
    UB = experiment.crystal.get_A()
    predictor.for_reflection_table(integrated_data, UB)

  if not experiment.crystal.num_scan_points:
    logger.info('No scan varying model: use static')
    static = True
  else:
    static = False

  fout = open(hklout, 'w')

  for j in range(nref):

    h, k, l = miller_index[j]

    if predict:
      x_mm, y_mm, z_rad = integrated_data['xyzcal.mm'][j]
    else:
      x_mm, y_mm, z_rad = integrated_data['xyzobs.mm.value'][j]

    z0 = integrated_data['xyzcal.px'][j][2]
    istol = int(round(10000 * unit_cell.stol((h, k, l))))

    if predict or static:
      # work from a scan static model & assume perfect goniometer
      # FIXME maybe should work back in the option to predict spot positions
      UB = experiment.crystal.get_A()
      phi = phi_start + z0 * phi_range
      R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
      RUB = S * R * F * UB
    else:
      # properly compute RUB for every reflection
      UB = experiment.crystal.get_A_at_scan_point(int(round(z0)))
      phi = phi_start + z0 * phi_range
      R = axis.axis_and_angle_as_r3_rotation_matrix(phi, deg=True)
      RUB = S * R * F * UB

    x = RUB * (h, k, l)
    s = (s0 + x).normalize()

    # can also compute s based on centre of mass of spot
    # s = (origin + x_mm * fast_axis + y_mm * slow_axis).normalize()

    astar = (RUB * (1, 0, 0)).normalize()
    bstar = (RUB * (0, 1, 0)).normalize()
    cstar = (RUB * (0, 0, 1)).normalize()

    ix = beam.dot(astar)
    iy = beam.dot(bstar)
    iz = beam.dot(cstar)

    dx = s.dot(astar)
    dy = s.dot(bstar)
    dz = s.dot(cstar)

    x = x_mm * scl_x
    y = y_mm * scl_y
    z = (z_rad * 180 / math.pi - phi_start) / phi_range

    fout.write('%4d%4d%4d%8.2f%8.2f%4d%8.5f%8.5f%8.5f%8.5f%8.5f%8.5f' % \
               (h, k, l, I[j], sigI[j], run, ix, dx, iy, dy, iz, dz))
    fout.write('%7.2f%7.2f%8.2f%7.2f%5d\n' % (x, y, z, detector2t, istol))

  fout.close()
  logger.info('Output %d reflections to %s' % (nref, hklout))
  return
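
The direction cosines written per reflection come from dotting the incident and diffracted unit vectors with the rotated reciprocal-cell axes, i.e. the normalized columns of RUB; a numpy sketch under the assumption that RUB, beam and s are already available as arrays:

import numpy as np

def direction_cosines(RUB, beam, s):
    """Cosines of the incident (beam) and diffracted (s) unit vectors
    with the rotated reciprocal axes a*, b*, c* (columns of RUB)."""
    axes = [RUB[:, i] / np.linalg.norm(RUB[:, i]) for i in range(3)]
    incident = [float(np.dot(beam, e)) for e in axes]
    diffracted = [float(np.dot(s, e)) for e in axes]
    return incident, diffracted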
Ejemplo n.º 58
0
def run(args):
  import libtbx.load_env
  usage = "%s experiments.json indexed.pickle [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  if len(experiments) == 0:
    parser.print_help()
    return
  elif len(experiments) > 1:
    raise Sorry("More than one experiment present")

  experiment = experiments[0]
  assert(len(reflections) == 1)
  reflections = reflections[0]

  intensities = reflections['intensity.sum.value']
  variances = reflections['intensity.sum.variance']
  if 'intensity.prf.value' in reflections:
    intensities = reflections['intensity.prf.value']
    variances = reflections['intensity.prf.variance']
  sel = (variances > 0)
  intensities = intensities.select(sel)
  variances = variances.select(sel)
  sigmas = flex.sqrt(variances)
  indices = reflections['miller_index'].select(sel)

  from cctbx import crystal, miller
  crystal_symmetry = crystal.symmetry(
    space_group=experiment.crystal.get_space_group(),
    unit_cell=experiment.crystal.get_unit_cell())

  miller_set = miller.set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=True,
    indices=indices)
  miller_array = miller.array(
    miller_set=miller_set,
    data=intensities,
    sigmas=sigmas).set_observation_type_xray_intensity()

  #miller_array.setup_binner(n_bins=50, reflections_per_bin=100)
  miller_array.setup_binner(auto_binning=True, n_bins=20)
  result = miller_array.i_over_sig_i(use_binning=True)
  result.show()

  from cctbx import uctbx
  d_star_sq_centre = result.binner.bin_centers(2)
  i_over_sig_i = flex.double(
    [d if d is not None else 0 for d in result.data[1:-1]])
  sel = (i_over_sig_i > 0)
  d_star_sq_centre = d_star_sq_centre.select(sel)
  i_over_sig_i = i_over_sig_i.select(sel)
  log_i_over_sig_i = flex.log(i_over_sig_i)
  weights = result.binner.counts()[1:-1].as_double().select(sel)
  fit = flex.linear_regression(
    d_star_sq_centre, log_i_over_sig_i, weights=weights)

  m = fit.slope()
  c = fit.y_intercept()

  import math
  y_cutoff = math.log(params.i_sigi_cutoff)
  x_cutoff = (y_cutoff - c)/m

  estimated_d_min = uctbx.d_star_sq_as_d(x_cutoff)
  print "estimated d_min: %.2f" %estimated_d_min

  if params.plot:
    from matplotlib import pyplot
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)

    ax.plot(
      list(d_star_sq_centre),
      list(log_i_over_sig_i),
      label=r"ln(I/sigI)")
    ax.plot(pyplot.xlim(), [(m * x + c) for x in pyplot.xlim()], color='red')
    ax.plot([x_cutoff, x_cutoff], pyplot.ylim(), color='grey', linestyle='dashed')
    ax.plot(pyplot.xlim(), [y_cutoff, y_cutoff], color='grey', linestyle='dashed')
    ax.set_xlabel("d_star_sq")
    ax.set_ylabel("ln(I/sigI)")

    ax_ = ax.twiny() # ax_ provides the top (resolution) axis
    xticks = ax.get_xticks()
    xlim = ax.get_xlim()
    xticks_d = [
      uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks ]
    xticks_ = [ds2/(xlim[1]-xlim[0]) for ds2 in xticks]
    ax_.set_xticks(xticks)
    ax_.set_xlim(ax.get_xlim())
    ax_.set_xlabel(r"Resolution ($\AA$)")
    ax_.set_xticklabels(["%.1f" %d for d in xticks_d])
    pyplot.savefig("estimate_resolution_limit.png")
    pyplot.clf()
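
The resolution estimate above is a weighted straight-line fit of ln(I/sigI) against d*^2, extrapolated to the chosen cutoff; a numpy sketch of the same arithmetic (assumed inputs, with numpy.polyfit standing in for flex.linear_regression):

import math
import numpy as np

def estimate_d_min(d_star_sq, log_i_over_sig_i, weights, i_sigi_cutoff):
    """Fit ln(I/sigI) = m*d*^2 + c and solve for the cutoff crossing."""
    m, c = np.polyfit(d_star_sq, log_i_over_sig_i, 1, w=weights)
    x_cutoff = (math.log(i_sigi_cutoff) - c) / m
    return 1.0 / math.sqrt(x_cutoff)  # d = 1 / sqrt(d*^2)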
Ejemplo n.º 59
0
      frame,
      len(subset),
      average)

    # from matplotlib import pylab
    # pylab.imshow((count > 0).as_numpy_array())
    # pylab.show()

  average = flex.double(len(sum_background))
  variance = flex.double(len(sum_background))
  count_mask = count > 1
  indices = flex.size_t(range(len(mask))).select(count_mask.as_1d())
  from matplotlib import pylab
  pylab.imshow(count_mask.as_numpy_array())
  pylab.show()

  sumb = sum_background.as_1d().select(indices)
  numb = count.as_1d().select(indices).as_double()
  avrb = sumb / numb
  sumsqb = sum_sq_background.as_1d().select(indices)
  varb = (sumsqb - sumb*sumb / numb) / (numb - 1)
  average.set_selected(indices, avrb)
  average.reshape(count_mask.accessor())
  variance.set_selected(indices, varb)
  variance.reshape(count_mask.accessor())

  print "Saving to model.pickle"
  with open("model.pickle", "w") as outfile:
    import cPickle as pickle
    pickle.dump((average, count_mask, flex.sqrt(variance)), outfile)
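
The per-pixel background statistics above rely on the one-pass identity var = (sum(x^2) - sum(x)^2/n) / (n - 1), which is why only the sums and counts need to be accumulated; the scalar version as a sketch:

def mean_and_variance_from_sums(sum_x, sum_x_sq, n):
    """Sample mean and variance recovered from accumulated sums (n > 1)."""
    mean = sum_x / n
    variance = (sum_x_sq - sum_x * sum_x / n) / (n - 1)
    return mean, variance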
Ejemplo n.º 60
0
def export_mtz(
    integrated_data,
    experiment_list,
    hklout,
    ignore_panels=False,
    include_partials=False,
    keep_partials=False,
    min_isigi=None,
    force_static_model=False,
    filter_ice_rings=False,
):
    """Export data from integrated_data corresponding to experiment_list to an
  MTZ file hklout."""

    from dials.array_family import flex

    # for the moment assume (and assert) that we will convert data from exactly
    # one lattice...

    # FIXME allow for more than one experiment in here: this is fine just add
    # multiple MTZ data sets (DIALS1...DIALSN) and multiple batch headers: one
    # range of batches for each experiment

    assert len(experiment_list) == 1
    # select reflections that are assigned to an experiment (i.e. non-negative id)
    integrated_data = integrated_data.select(integrated_data["id"] >= 0)
    assert max(integrated_data["id"]) == 0

    # strip out negative variance reflections: these should not really be there
    # FIXME Doing select on summation results. Should do on profile result if
    # present? Yes

    if "intensity.prf.variance" in integrated_data:
        selection = integrated_data.get_flags(integrated_data.flags.integrated, all=True)
    else:
        selection = integrated_data.get_flags(integrated_data.flags.integrated_sum)
    integrated_data = integrated_data.select(selection)

    selection = integrated_data["intensity.sum.variance"] <= 0
    if selection.count(True) > 0:
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections with negative variance" % selection.count(True))

    if "intensity.prf.variance" in integrated_data:
        selection = integrated_data["intensity.prf.variance"] <= 0
        if selection.count(True) > 0:
            integrated_data.del_selected(selection)
            logger.info("Removing %d profile reflections with negative variance" % selection.count(True))

    if filter_ice_rings:
        selection = integrated_data.get_flags(integrated_data.flags.in_powder_ring)
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections in ice ring resolutions" % selection.count(True))

    if min_isigi is not None:

        selection = (
            integrated_data["intensity.sum.value"] / flex.sqrt(integrated_data["intensity.sum.variance"])
        ) < min_isigi
        integrated_data.del_selected(selection)
        logger.info("Removing %d reflections with I/Sig(I) < %s" % (selection.count(True), min_isigi))

        if "intensity.prf.variance" in integrated_data:
            selection = (
                integrated_data["intensity.prf.value"] / flex.sqrt(integrated_data["intensity.prf.variance"])
            ) < min_isigi
            integrated_data.del_selected(selection)
            logger.info("Removing %d profile reflections with I/Sig(I) < %s" % (selection.count(True), min_isigi))

    # FIXME in here work on including partial reflections => at this stage best
    # to split off the partial reflections into a different selection & handle
    # gracefully... better to work on a short list as will need to "pop" them &
    # find matching parts to combine.

    if include_partials:
        integrated_data = sum_partial_reflections(integrated_data)
        integrated_data = scale_partial_reflections(integrated_data)

    if "partiality" in integrated_data:
        selection = integrated_data["partiality"] < 0.99
        if selection.count(True) > 0 and not keep_partials:
            integrated_data.del_selected(selection)
            logger.info("Removing %d incomplete reflections" % selection.count(True))

    # FIXME TODO for more than one experiment into an MTZ file:
    #
    # - add an epoch (or recover an epoch) from the scan and add this as an extra
    #   column to the MTZ file for scaling, so we know that the two lattices were
    #   integrated at the same time
    # - decide a sensible BATCH increment to apply to the BATCH value between
    #   experiments and add this
    #
    # At the moment this is probably enough to be working on.

    experiment = experiment_list[0]

    # also only work with one panel(for the moment)

    if not ignore_panels:
        assert len(experiment.detector) == 1

    from scitbx import matrix

    if experiment.goniometer:
        axis = matrix.col(experiment.goniometer.get_rotation_axis())
    else:
        axis = 0.0, 0.0, 0.0
    s0 = experiment.beam.get_s0()
    wavelength = experiment.beam.get_wavelength()

    panel = experiment.detector[0]
    origin = matrix.col(panel.get_origin())
    fast = matrix.col(panel.get_fast_axis())
    slow = matrix.col(panel.get_slow_axis())

    pixel_size = panel.get_pixel_size()

    fast *= pixel_size[0]
    slow *= pixel_size[1]

    cb_op_to_ref = experiment.crystal.get_space_group().info().change_of_basis_op_to_reference_setting()

    experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)

    U = experiment.crystal.get_U()
    if experiment.goniometer is not None:
        F = matrix.sqr(experiment.goniometer.get_fixed_rotation())
    else:
        F = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
    unit_cell = experiment.crystal.get_unit_cell()

    from iotbx import mtz

    from scitbx.array_family import flex
    from math import floor, sqrt

    m = mtz.object()
    m.set_title("from dials.export_mtz")
    m.set_space_group_info(experiment.crystal.get_space_group().info())

    if experiment.scan:
        image_range = experiment.scan.get_image_range()
    else:
        image_range = 1, 1

    # pointless (at least) doesn't like batches starting from zero
    b_incr = max(image_range[0], 1)

    for b in range(image_range[0], image_range[1] + 1):
        o = m.add_batch().set_num(b + b_incr).set_nbsetid(1).set_ncryst(1)
        o.set_time1(0.0).set_time2(0.0).set_title("Batch %d" % (b + b_incr))
        o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
        o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
        o.set_divhd(0.0).set_divvd(0.0)

        # FIXME hard-coded assumption on idealized beam vector below... this may be
        # broken when we come to process data from a non-imgCIF frame
        o.set_so(flex.float(s0)).set_source(flex.float((0, 0, -1)))

        # these are probably 0, 1 respectively, also flags for how many are set, sd
        o.set_bbfac(0.0).set_bscale(1.0)
        o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)

        # unit cell (this is fine) and the what-was-refined-flags FIXME hardcoded

        # take time-varying parameters from the *end of the frame* - they are
        # unlikely to differ much by the end, and they only exist if
        # scan-varying refinement was used
        if not force_static_model and experiment.crystal.num_scan_points > 0:
            _unit_cell = experiment.crystal.get_unit_cell_at_scan_point(b - image_range[0])
            _U = experiment.crystal.get_U_at_scan_point(b - image_range[0])
        else:
            _unit_cell = unit_cell
            _U = U

        # apply the fixed rotation to this to unify matrix definitions - F * U
        # was what was used in the actual prediction: U appears to be stored
        # as the transpose?! At least is for Mosflm...
        #
        # FIXME Do we need to apply the setting rotation here somehow? i.e. we have
        # the U.B. matrix assuming that the axis is equal to S * axis_datum but
        # here we are just giving the effective axis so at scan angle 0 this will
        # not be correct... FIXME 2 not even sure we can express the stack of
        # matrices S * R * F * U * B in MTZ format?...
        _U = dials_u_to_mosflm(F * _U, _unit_cell)

        # FIXME need to get what was refined and what was constrained from the
        # crystal model
        o.set_cell(flex.float(_unit_cell.parameters()))
        o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
        o.set_umat(flex.float(_U.transpose().elems))

        # get the mosaic spread though today it may not actually be set
        mosaic = experiment.crystal.get_mosaicity()
        o.set_crydat(flex.float([mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))

        o.set_lcrflg(0)
        o.set_datum(flex.float((0.0, 0.0, 0.0)))

        # detector size, distance
        o.set_detlm(flex.float([0.0, panel.get_image_size()[0], 0.0, panel.get_image_size()[1], 0, 0, 0, 0]))
        o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))

        # goniometer axes and names, and scan axis number, and number of axes, missets
        o.set_e1(flex.float(axis))
        o.set_e2(flex.float((0.0, 0.0, 0.0)))
        o.set_e3(flex.float((0.0, 0.0, 0.0)))
        o.set_gonlab(flex.std_string(("AXIS", "", "")))
        o.set_jsaxs(1)
        o.set_ngonax(1)
        o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))

        # scan ranges, axis
        if experiment.scan:
            phi_start, phi_range = experiment.scan.get_image_oscillation(b)
        else:
            phi_start, phi_range = 0.0, 0.0
        o.set_phistt(phi_start)
        o.set_phirange(phi_range)
        o.set_phiend(phi_start + phi_range)
        o.set_scanax(flex.float(axis))

        # number of misorientation angles
        o.set_misflg(0)

        # crystal axis closest to rotation axis (why do I want this?)
        o.set_jumpax(0)

        # type of data: 1 = 2D, 2 = 3D, 3 = Laue
        o.set_ldtype(2)

    # now create the actual data structures - first keep track of the columns
    # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
    # LP MPART FLAG BGPKRATIOS

    from cctbx.array_family import flex as cflex  # implicit import
    from cctbx.miller import map_to_asu_isym  # implicit import

    # gather the required information for the reflection file

    nref = len(integrated_data["miller_index"])
    x_px, y_px, z_px = integrated_data["xyzcal.px"].parts()

    xdet = flex.double(x_px)
    ydet = flex.double(y_px)
    zdet = flex.double(z_px)

    # compute ROT values
    if experiment.scan:
        rot = flex.double([experiment.scan.get_angle_from_image_index(z) for z in zdet])
    else:
        rot = zdet

    # compute BATCH values
    batch = flex.floor(zdet).iround() + 1 + b_incr
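
    # a worked example of the two mappings above (hypothetical numbers): a
    # centroid at z = 12.3 px lies on zero-based frame 12, so with b_incr = 0
    # it lands in BATCH floor(12.3) + 1 = 13; its ROT value is the scan angle
    # linearly interpolated at frame coordinate 12.3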

    # we're working with full reflections so...
    fractioncalc = flex.double(nref, 1.0)

    # now go for it and make an MTZ file...

    x = m.add_crystal("XTAL", "DIALS", unit_cell.parameters())
    d = x.add_dataset("FROMDIALS", wavelength)

    # now add column information...

    # FIXME add DIALS_FLAG which can include e.g. was partial etc.

    type_table = {
        "H": "H",
        "K": "H",
        "L": "H",
        "I": "J",
        "SIGI": "Q",
        "IPR": "J",
        "SIGIPR": "Q",
        "BG": "R",
        "SIGBG": "R",
        "XDET": "R",
        "YDET": "R",
        "BATCH": "B",
        "BGPKRATIOS": "R",
        "WIDTH": "R",
        "MPART": "I",
        "M_ISYM": "Y",
        "FLAG": "I",
        "LP": "R",
        "FRACTIONCALC": "R",
        "ROT": "R",
        "DQE": "R",
    }
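
    # the single-character codes above are the standard CCP4 MTZ column types:
    # H = Miller index, J = intensity, Q = standard deviation, B = batch
    # number, Y = packed M/ISYM, I = integer, R = general real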

    # the index columns (H, K, L, M/ISYM) are derived from the original indices
    # via m.replace_original_index_miller_indices, so all that is needed now is
    # to make space for the reflections - fill with zeros...

    m.adjust_column_array_sizes(nref)
    m.set_n_reflections(nref)

    # assign H, K, L, M_ISYM space
    for column in "H", "K", "L", "M_ISYM":
        d.add_column(column, type_table[column]).set_values(flex.double(nref, 0.0).as_float())

    m.replace_original_index_miller_indices(cb_op_to_ref.apply(integrated_data["miller_index"]))

    d.add_column("BATCH", type_table["BATCH"]).set_values(batch.as_double().as_float())

    if "lp" in integrated_data:
        lp = integrated_data["lp"]
    else:
        lp = flex.double(nref, 1.0)
    if "dqe" in integrated_data:
        dqe = integrated_data["dqe"]
    else:
        dqe = flex.double(nref, 1.0)
    I_profile = None
    V_profile = None
    I_sum = None
    V_sum = None
    # FIXME errors in e.g. LP correction need to be propagated here
    scl = lp / dqe
    if "intensity.prf.value" in integrated_data:
        I_profile = integrated_data["intensity.prf.value"] * scl
        V_profile = integrated_data["intensity.prf.variance"] * scl * scl
        # Trap negative or zero variances
        assert V_profile.all_gt(0)
        d.add_column("IPR", type_table["I"]).set_values(I_profile.as_float())
        d.add_column("SIGIPR", type_table["SIGI"]).set_values(flex.sqrt(V_profile).as_float())
    if "intensity.sum.value" in integrated_data:
        I_sum = integrated_data["intensity.sum.value"] * scl
        V_sum = integrated_data["intensity.sum.variance"] * scl * scl
        # Trap negative or zero variances
        assert V_sum.all_gt(0)
        d.add_column("I", type_table["I"]).set_values(I_sum.as_float())
        d.add_column("SIGI", type_table["SIGI"]).set_values(flex.sqrt(V_sum).as_float())
    if "background.sum.value" in integrated_data and "background.sum.variance" in integrated_data:
        bg = integrated_data["background.sum.value"]
        varbg = integrated_data["background.sum.variance"]
        assert varbg.all_ge(0)
        sigbg = flex.sqrt(varbg)
        d.add_column("BG", type_table["BG"]).set_values(bg.as_float())
        d.add_column("SIGBG", type_table["SIGBG"]).set_values(sigbg.as_float())

    d.add_column("FRACTIONCALC", type_table["FRACTIONCALC"]).set_values(fractioncalc.as_float())

    d.add_column("XDET", type_table["XDET"]).set_values(xdet.as_float())
    d.add_column("YDET", type_table["YDET"]).set_values(ydet.as_float())
    d.add_column("ROT", type_table["ROT"]).set_values(rot.as_float())
    d.add_column("LP", type_table["LP"]).set_values(lp.as_float())
    d.add_column("DQE", type_table["DQE"]).set_values(dqe.as_float())

    m.write(hklout)

    return m
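

# A quick way to sanity-check the file written above is to read it back with
# iotbx.mtz; a minimal sketch (the helper name is hypothetical):
def _inspect_mtz(hklout):
    from iotbx import mtz

    mtz_obj = mtz.object(hklout)
    # print each column label with its one-character MTZ type code
    for column in mtz_obj.columns():
        print("%s %s" % (column.label(), column.type()))
    # print each batch number with its start / end rotation angles
    for batch in mtz_obj.batches():
        print("%d %.3f %.3f" % (batch.num(), batch.phistt(), batch.phiend()))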