def plot_positions(values, positions, file_name, cmap=pyplot.cm.Reds,
                     vmin=None, vmax=None, invalid='white'):
    """Render one value per (x, y) position as an image grid saved to file_name.

    Positions are snapped onto an integer grid scaled so that the smallest
    non-zero x spacing becomes one cell; cells that receive no value keep the
    sentinel -2 so the plotting backend can colour them as 'invalid'.
    """
    values = values.as_double()
    assert positions.size() >= values.size()
    positions = positions[:values.size()]

    # Colour range defaults to the data range when not supplied.
    if vmin is None:
      vmin = flex.min(values)
    if vmax is None:
      vmax = flex.max(values)

    x, y = positions.parts()
    # Non-zero nearest-neighbour spacings along each axis.
    spacing_x = flex.abs(x[1:] - x[:-1])
    spacing_y = flex.abs(y[1:] - y[:-1])
    spacing_x = spacing_x.select(spacing_x > 0)
    spacing_y = spacing_y.select(spacing_y > 0)

    # Map the finest x spacing to one grid cell, then round coordinates
    # onto integer grid indices.
    scale = 1/flex.min(spacing_x)
    x = (x * scale).iround()
    y = (y * scale).iround()

    from libtbx.math_utils import iceil
    # Initialise every cell to -2, the "no data" marker.
    grid_values = flex.double(
      flex.grid(iceil(flex.max(y))+1, iceil(flex.max(x))+1), -2)
    for col, row, val in zip(x, y, values):
      grid_values[row, col] = val

    plot_grid(grid_values.as_1d(), grid_values.all(), file_name, cmap=cmap,
              vmin=vmin, vmax=vmax, invalid=invalid)
    return
Example #2
0
File: PyChef.py Project: xia2/xia2
  def __init__(self, intensities, dose, n_bins=8,
               range_min=None, range_max=None, range_width=1):
    """Set up dose-binned statistics (PyChef-style) for unmerged intensities.

    intensities: unmerged reflection intensities (miller array).
    dose: per-observation dose values, parallel to intensities.
    n_bins: number of d*^2 resolution bins.
    range_min, range_max: dose window; default to span the observed doses.
    range_width: dose step width; must be positive.
    """

    self.intensities = intensities
    self.dose = dose
    self.n_bins = n_bins
    self.range_min = range_min
    self.range_max = range_max
    self.range_width = range_width
    assert self.range_width > 0

    # Default the dose window to cover all observed doses.
    if self.range_min is None:
      self.range_min = flex.min(self.dose) - self.range_width
    if self.range_max is None:
      self.range_max = flex.max(self.dose)
    self.n_steps = 2 + int((self.range_max - self.range_min) - self.range_width)

    # Keep only observations whose dose falls inside the window.
    sel = (self.dose.as_double() <= self.range_max) & (self.dose.as_double() >= self.range_min)
    self.dose = self.dose.select(sel)

    self.intensities = self.intensities.select(sel)
    self.d_star_sq = self.intensities.d_star_sq().data()

    # Equal-width d*^2 bins; 1e-8 nudges the range so the maximum value
    # falls inside the last bin.
    self.binner = self.intensities.setup_binner_d_star_sq_step(
      d_star_sq_step=(flex.max(self.d_star_sq)-flex.min(self.d_star_sq)+1e-8)/self.n_bins)

    self.observations = unmerged_observations(self.intensities)

    self._calc_completeness_vs_dose()
    self._calc_rcp_scp()
    self._calc_rd()
Example #3
0
    def minimize_kbu(self, n_cycles=10):
        """Alternate k/b-solvent and b_cart minimisation for up to n_cycles.

        Each cycle runs the k/b minimiser without and then with curvatures.
        After each step the move is rejected (saved parameters restored) if
        the R factor worsened and any parameter left its sensible range.
        """
        for use_curvatures in [False, True] * n_cycles:
            start_r = self.kbu.r_factor()
            save_k_sols = self.kbu.k_sols()
            save_b_sols = self.kbu.b_sols()
            save_b_cart = self.kbu.b_cart()
            self.set_use_scale(value=True)
            m = self.minimize_kb_once(use_curvatures=use_curvatures)
            r = self.kbu.r_factor()
            # Reject the k/b step if R worsened and a parameter is out of
            # range: k_sol outside [0, 1] or b_sol outside [0, 100].
            # BUG FIX: the last bound previously re-tested k_sols (> 100.),
            # leaving b_sols unbounded above; it now tests b_sols, matching
            # the flex.min(b_sols) check two lines up.
            if (r > start_r and r > 1.e-2
                    and (flex.min(self.kbu.k_sols()) < 0
                         or flex.max(self.kbu.k_sols()) > 1
                         or flex.min(self.kbu.b_sols()) < 0
                         or flex.max(self.kbu.b_sols()) > 100.)):
                self.kbu.update(k_sols=save_k_sols, b_sols=save_b_sols)
            m = self.minimize_u_once()
            r = self.kbu.r_factor()
            # Isotropic-equivalent magnitudes of the b_cart tensor elements.
            bc = list(flex.abs(flex.double(self.kbu.b_cart())))
            # Reject the u step and stop cycling if R worsened and b_cart
            # grew unreasonably large.
            if (r > start_r and r > 1.e-2 and max(bc) > 100):
                self.kbu.update(b_cart=save_b_cart)
                break
Example #4
0
  def __init__(self, intensities, dose, n_bins=8,
               range_min=None, range_max=None, range_width=1):
    """Set up dose-binned statistics (PyChef-style) for unmerged intensities.

    intensities: unmerged reflection intensities (miller array).
    dose: per-observation dose values, parallel to intensities.
    n_bins: number of d*^2 resolution bins.
    range_min, range_max: dose window; default to span the observed doses.
    range_width: dose step width; must be positive.
    """

    self.intensities = intensities
    self.dose = dose
    self.n_bins = n_bins
    self.range_min = range_min
    self.range_max = range_max
    self.range_width = range_width
    assert self.range_width > 0

    # Default the dose window to cover all observed doses.
    if self.range_min is None:
      self.range_min = flex.min(self.dose) - self.range_width
    if self.range_max is None:
      self.range_max = flex.max(self.dose)
    self.n_steps = 2 + int((self.range_max - self.range_min) - self.range_width)

    # Keep only observations whose dose falls inside the window.
    sel = (self.dose.as_double() <= self.range_max) & (self.dose.as_double() >= self.range_min)
    self.dose = self.dose.select(sel)

    self.intensities = self.intensities.select(sel)
    self.d_star_sq = self.intensities.d_star_sq().data()

    # Equal-width d*^2 bins; 1e-8 nudges the range so the maximum value
    # falls inside the last bin.
    self.binner = self.intensities.setup_binner_d_star_sq_step(
      d_star_sq_step=(flex.max(self.d_star_sq)-flex.min(self.d_star_sq)+1e-8)/self.n_bins)

    self.observations = unmerged_observations(self.intensities)

    self._calc_completeness_vs_dose()
    self._calc_rcp_scp()
    self._calc_rd()
def show_refinement_update(fmodels, selection, da_sel_refinable, prefix):
    """Print a refinement status line plus occupancy/ADP statistics.

    selection flags dummy atoms (DA); statistics are reported separately
    for DA (selection True) and non-DA atoms.  The detail lines are only
    printed when at least one DA is selected.
    """
    fmt1 = "%s Rwork= %8.6f Rfree= %8.6f Number of: non-DA= %d DA= %d all= %d"
    print(fmt1 % (prefix, fmodels.fmodel_xray().r_work(),
                  fmodels.fmodel_xray().r_free(), selection.count(False),
                  selection.count(True),
                  fmodels.fmodel_xray().xray_structure.scatterers().size()))
    occ = fmodels.fmodel_xray().xray_structure.scatterers(
    ).extract_occupancies()
    occ_da = occ.select(selection)
    if (occ_da.size() > 0):
        occ_ma = occ.select(~selection)
        print("         non-da: occ(min,max,mean)= %6.3f %6.3f %6.3f" %
              (flex.min(occ_ma), flex.max(occ_ma), flex.mean(occ_ma)))
        print("             da: occ(min,max,mean)= %6.3f %6.3f %6.3f" %
              (flex.min(occ_da), flex.max(occ_da), flex.mean(occ_da)))
        # Isotropic-equivalent B factors: u values scaled by u->b conversion.
        b = fmodels.fmodel_xray().xray_structure.extract_u_iso_or_u_equiv()*\
          adptbx.u_as_b(1.)
        b_da = b.select(selection)
        b_ma = b.select(~selection)
        print("         non-da: ADP(min,max,mean)= %7.2f %7.2f %7.2f" %
              (flex.min(b_ma), flex.max(b_ma), flex.mean(b_ma)))
        print("             da: ADP(min,max,mean)= %7.2f %7.2f %7.2f" %
              (flex.min(b_da), flex.max(b_da), flex.mean(b_da)))
        print("da_sel_refinable:", da_sel_refinable.size(),
              da_sel_refinable.count(True))
  def plot_positions(values, positions, file_name, cmap=pyplot.cm.Reds,
                     vmin=None, vmax=None, invalid='white'):
    """Render one value per (x, y) position as an image grid saved to file_name.

    Positions are snapped onto an integer grid scaled so the smallest
    non-zero x spacing is one cell; cells without data keep -2 so the
    plotting backend can colour them as 'invalid'.
    """
    values = values.as_double()
    assert positions.size() >= values.size()
    positions = positions[:values.size()]

    # Colour range defaults to the data range when not supplied.
    if vmin is None:
      vmin = flex.min(values)
    if vmax is None:
      vmax = flex.max(values)

    x, y = positions.parts()
    # Non-zero nearest-neighbour spacings along each axis.
    # NOTE(review): dy is computed but never used below.
    dx = flex.abs(x[1:] - x[:-1])
    dy = flex.abs(y[1:] - y[:-1])
    dx = dx.select(dx > 0)
    dy = dy.select(dy > 0)

    # Map the finest x spacing to one grid cell; round onto integer indices.
    scale = 1/flex.min(dx)
    #print scale
    x = (x * scale).iround()
    y = (y * scale).iround()

    from libtbx.math_utils import iceil
    # -2 marks grid cells that receive no value in the loop below.
    z = flex.double(flex.grid(iceil(flex.max(y))+1, iceil(flex.max(x))+1), -2)
    #print z.all()
    for x_, y_, z_ in zip(x, y, values):
      z[y_, x_] = z_

    plot_grid(z.as_1d(), z.all(), file_name, cmap=cmap, vmin=vmin, vmax=vmax,
              invalid=invalid)
    return
Example #7
0
def write_json(filename, x,y1, y2=None):
  """Write an OpenFlashChart-style JSON line plot of log|y| versus x.

  y1 ("model") is always plotted; y2 ("expt") is optional.  Values are
  transformed as log(|y| + 1e-16) to avoid log(0).  Python 2 only
  (uses the print >> file syntax).
  """
  out=open(filename, 'w')

  head = '{"elements":['
  ele1 = '{"type": "line", "dot-style":{"type": "dot", "dot-size": 3, "colour":"#DFC329"}, "width":3, "colour": "DFC329", "text":"model", "font-size":10,'
  print>>out, head, ele1, '"values":',
  y1 = flex.log(flex.abs(y1)+1e-16 )
  # Track the overall y range with a +/-1 margin for the axis setup below.
  y_min=flex.min(y1)-1.0
  y_max=flex.max(y1)+1.0
  print>>out, '[' + ', '.join('%5.6f' % v for v in y1) + ']',
#  for xx, yy in zip( x[:-2], y1[:-2]):
    #print>>out,'{"x":%f, "y":%f},'%(xx,yy),
#  print>>out,'{"x":%f, "y":%f}'%(x[-1],y1[-1]),
  print>>out, '}',  # end of y1
  if(y2 is not None):
    ele2 = ',{"type": "line", "dot-style":{"type": "dot", "dot-size": 1, "colour":"#111111"}, "width":1, "colour": "111111", "text":"expt", "font-size":10,'
    print>>out, ele2, '"values":',
    y2 = flex.log(flex.abs(y2)+1e-16)
    # Widen the axis range to cover both series.
    y_min=min(flex.min(y2)-1.0, y_min)
    y_max=max(flex.max(y2)+1.0, y_max)
    print>>out, '[' + ', '.join('%5.6f' % v for v in y2) + ']',
    print>>out, '}', # end of y2
  print>>out, ']',  # end of elements
  ### now setup axis ###
  print>>out,',"y_axis":{"min":%f, "max":%f}'%(y_min, y_max),
  # Label every ~0.05 in x units, assuming uniform spacing of x.
  steps = int(0.05/(x[1]-x[0]))
  # Build the JSON label array; the slice drops the trailing ',"' before
  # closing the bracket.
  x_labels = '["'
  for xx in x:
    x_labels = x_labels + str(xx) + '","'
  x_labels=x_labels[0:-2]+']'
  print>>out,',"x_axis":{"min":%d, "max":%d, "steps":%d, "labels":{"labels":%s,"steps":%d}}'%(0,x.size(),steps,x_labels, steps),
  print>>out,'}' ##end of the file
  out.close()
Example #8
0
def branch_3_mn():
    """Randomly search for a pair of 6-element u_cart tensors (m, n).

    Retries up to 10000 times for a pair that are both positive definite;
    candidates whose smallest eigenvalues (and associated eigenvectors)
    agree to within `small` terminate the search early.  Optionally applies
    a random rotation to both before returning.  Presumably a test-fixture
    generator -- TODO confirm against callers.
    """
    m = None
    n = None
    small = 1.e-15
    # NOTE(review): the outer loop index i is shadowed by the inner loops
    # and by the enumerate() loops further down.
    for i in range(10000):
        m2 = []
        n2 = []
        for i in range(4):
            r1 = random.random()
            r2 = random.random()
            r3 = random.random()
            # 90% of the time m and n share the same element value.
            if (r3 > 0.1):
                r1 = r2
            m2.append(r1)
            n2.append(r2)
        # Eigenvalues of the 2x2 blocks [[m0, m1], [m2, m3]] via the
        # quadratic formula; both must be positive to proceed.
        p1 = 0.5 * (m2[0] + m2[3] + math.sqrt(4 * m2[1] * m2[2] +
                                              (m2[0] - m2[3])**2))
        p2 = 0.5 * (m2[0] + m2[3] - math.sqrt(4 * m2[1] * m2[2] +
                                              (m2[0] - m2[3])**2))
        q1 = 0.5 * (n2[0] + m2[3] + math.sqrt(4 * n2[1] * n2[2] +
                                              (n2[0] - n2[3])**2))
        q2 = 0.5 * (n2[0] + m2[3] - math.sqrt(4 * n2[1] * n2[2] +
                                              (n2[0] - n2[3])**2))
        if (min(p1, p2) > 0.0 and min(q1, q2) > 0.0):
            r = random.random()
            # NOTE(review): r2 = r2 is a no-op; only r1 is perturbed here.
            if (r > 0.5):
                r1 = r3
                r2 = r2
            m = [m2[0], m2[3], r1, m2[1], 0, 0]
            n = [n2[0], n2[3], r2, n2[1], 0, 0]
            if ([
                    adptbx.is_positive_definite(m),
                    adptbx.is_positive_definite(n)
            ].count(True) == 2):
                esm = adptbx.eigensystem(m)
                esn = adptbx.eigensystem(n)
                vn = esn.values()
                vm = esm.values()
                mmin = flex.min(flex.double(vm))
                nmin = flex.min(flex.double(vn))
                if (abs(abs(mmin) - abs(nmin)) < small and mmin > 0.
                        and nmin > 0.):
                    # Locate the eigenvector belonging to each minimum
                    # eigenvalue; stop searching if they coincide.
                    for i, v in enumerate(vn):
                        if (abs(abs(nmin) - v) < small): break
                    for j, v in enumerate(vm):
                        if (abs(abs(mmin) - v) < small): break
                    vecn = flex.double(esn.vectors(i))
                    vecm = flex.double(esm.vectors(j))
                    if (flex.mean(vecm - vecn) < small): break
                else:
                    m = None
                    n = None
    assert [m, n] != [None, None]
    assert [adptbx.is_positive_definite(m),
            adptbx.is_positive_definite(n)].count(True) == 2
    r = random.random()
    # Half the time, return randomly rotated versions of the pair.
    if (r > 0.5):
        m = adptbx.random_rotate_ellipsoid(u_cart=m)
        n = adptbx.random_rotate_ellipsoid(u_cart=n)
    return m, n
Example #9
0
def miller_array_export_as_shelx_hklf(
      miller_array,
      file_object=None,
      normalise_if_format_overflow=False):
  """\
  If the maximum data value does not fit into the f8.2/f8.0 format:
  normalise_if_format_overflow=False: RuntimeError is thrown
  normalise_if_format_overflow=True: data is normalised to the largest
  number to fit f8.2/f8.0 format
  """
  assert miller_array.is_real_array()
  if (file_object is None): file_object = sys.stdout
  def raise_f8_overflow(v):
    raise RuntimeError("SHELX HKL file F8.2/F8.0 format overflow: %.8g" % v)
  data = miller_array.data()
  sigmas = miller_array.sigmas()
  assert data is not None
  # Overall value range across data and (when present) sigmas determines
  # whether a scale factor is needed to fit the 8-column field.
  min_val = flex.min(data)
  max_val = flex.max(data)
  if (sigmas is not None):
    min_val = min(min_val, flex.min(sigmas))
    max_val = max(max_val, flex.max(sigmas))
  min_sc = 1
  max_sc = 1
  if (min_val < 0):
    s = "%8.0f" % min_val
    if (len(s) > 8):
      # -9999999 is the most negative value representable in 8 characters.
      if (not normalise_if_format_overflow):
        raise_f8_overflow(min_val)
      min_sc = -9999999. / min_val
  if (max_val > 0):
    s = "%8.0f" % max_val
    if (len(s) > 8):
      # 99999999 is the largest value representable in 8 characters.
      if (not normalise_if_format_overflow):
        raise_f8_overflow(max_val)
      max_sc = 99999999. / max_val
  # Use the tighter of the two scales so both extremes fit.
  scale = min(min_sc, max_sc)
  sigmas = miller_array.sigmas()
  s = 0.01
  for i,h in enumerate(miller_array.indices()):
    if (sigmas is not None): s = sigmas[i]
    def fmt_3i4(h):
      # Miller indices in fixed 3I4 columns.
      result = "%4d%4d%4d" % h
      if (len(result) != 12):
        raise RuntimeError(
          "SHELXL HKL file 3I4 format overflow: %s" % result)
      return result
    def fmt_f8(v):
      # Try F8.2, then progressively drop decimals to fit 8 characters.
      result = "%8.2f" % v
      if (len(result) != 8):
        result = "%8.1f" % v
        if (len(result) != 8):
          result = "%8.0f" % v
          assert len(result) == 8
      return result
    line = fmt_3i4(h) + fmt_f8(data[i]*scale) + fmt_f8(s*scale)
    print >> file_object, line
  # SHELX end-of-data record.
  print >> file_object, "   0   0   0    0.00    0.00"
def show_histogram(data, n_slots):
  """Print min/max/mean of data, then a histogram with n_slots bins showing
  each bin's range and its percentage of all values (Python 2 prints)."""
  print flex.min(data), flex.max(data), flex.mean(data)
  hm = flex.histogram(data = data, n_slots = n_slots)
  lc_1 = hm.data_min()
  s_1 = enumerate(hm.slots())
  for (i_1,n_1) in s_1:
    # Upper edge of the current bin; lc_1 carries the lower edge forward.
    hc_1 = hm.data_min() + hm.slot_width() * (i_1+1)
    print "%10.3f - %-10.3f : %10.2f" % (lc_1, hc_1, float(n_1)/(data.size())*100.)
    lc_1 = hc_1
def show_histogram(data, n_slots):
    """Print min/max/mean of data, then a histogram with n_slots bins showing
    each bin's range and its percentage of all values (Python 2 prints)."""
    print flex.min(data), flex.max(data), flex.mean(data)
    hm = flex.histogram(data=data, n_slots=n_slots)
    lc_1 = hm.data_min()
    s_1 = enumerate(hm.slots())
    for (i_1, n_1) in s_1:
        # Upper edge of the current bin; lc_1 carries the lower edge forward.
        hc_1 = hm.data_min() + hm.slot_width() * (i_1 + 1)
        print "%10.3f - %-10.3f : %10.2f" % (lc_1, hc_1, float(n_1) /
                                             (data.size()) * 100.)
        lc_1 = hc_1
Example #12
0
    def resolution_i_mean_over_sigma_mean(self, limit=None):
        """Compute a resolution limit where either <I>/<sigma> = 1.0 (limit if
        set) or the full extent of the data."""

        if limit is None:
            limit = self._params.i_mean_over_sigma_mean

        # Per-bin <I>/<sigI> and 1/d^2, reversed so values run from low to
        # high resolution.
        isigma_s = flex.double([
            b.i_mean_over_sigi_mean for b in self._merging_statistics.bins
        ]).reversed()
        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        # Only bins with positive <I>/<sigI> are usable for the fit.
        sel = isigma_s > 0
        isigma_s = isigma_s.select(sel)
        s_s = s_s.select(sel)

        if flex.min(isigma_s) > limit:
            # The data never drop to the target: use the full data extent.
            r_isigma = 1.0 / math.sqrt(flex.max(s_s))
            isigma_f = None

        else:
            isigma_f = log_fit(s_s, isigma_s, 6)

            logger.debug(
                "isigma: fits\n%s",
                tabulate(
                    [("d*2", "d", "isigma_s", "isigma_f")] +
                    [(s, 1.0 / math.sqrt(s), isigma_s[j], isigma_f[j])
                     for j, s in enumerate(s_s)],
                    headers="firstrow",
                ),
            )

            try:
                r_isigma = 1.0 / math.sqrt(
                    interpolate_value(s_s, isigma_f, limit))
            except Exception:
                # Interpolation failed: fall back to whichever end of the
                # fitted curve lies on the correct side of the target.
                if limit > max(isigma_f):
                    r_isigma = 1.0 / math.sqrt(flex.min(s_s))
                else:
                    r_isigma = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel="Unmerged <I>/<sigma>")
            if isigma_f is not None:
                plot.plot(s_s, isigma_f, label="fit")
            plot.plot(s_s, isigma_s, label="Unmerged <I>/<sigma>")
            plot.plot_resolution_limit(r_isigma)
            plot.savefig("i_mean_over_sigma_mean.png")

        return r_isigma
def branch_3_mn():
  """Randomly search for a pair of 6-element u_cart tensors (m, n).

  Retries up to 10000 times for a pair that are both positive definite;
  candidates whose smallest eigenvalues (and associated eigenvectors)
  agree to within `small` terminate the search early.  Optionally applies
  a random rotation to both before returning.  Presumably a test-fixture
  generator -- TODO confirm against callers.  Python 2 (xrange).
  """
  m = None
  n = None
  small = 1.e-15
  # NOTE(review): the outer loop index i is shadowed by the inner loops
  # and by the enumerate() loops further down.
  for i in xrange(10000):
      m2=[]
      n2=[]
      for i in xrange(4):
          r1 = random.random()
          r2 = random.random()
          r3 = random.random()
          # 90% of the time m and n share the same element value.
          if(r3 > 0.1):
             r1 = r2
          m2.append(r1)
          n2.append(r2)
      # Eigenvalues of the 2x2 blocks via the quadratic formula; both must
      # be positive to proceed.
      p1 = 0.5 * (m2[0]+m2[3] + math.sqrt(4*m2[1]*m2[2]+(m2[0]-m2[3])**2))
      p2 = 0.5 * (m2[0]+m2[3] - math.sqrt(4*m2[1]*m2[2]+(m2[0]-m2[3])**2))
      q1 = 0.5 * (n2[0]+m2[3] + math.sqrt(4*n2[1]*n2[2]+(n2[0]-n2[3])**2))
      q2 = 0.5 * (n2[0]+m2[3] - math.sqrt(4*n2[1]*n2[2]+(n2[0]-n2[3])**2))
      if(min(p1,p2) > 0.0 and min(q1,q2) > 0.0):
          r = random.random()
          # NOTE(review): r2 = r2 is a no-op; only r1 is perturbed here.
          if(r > 0.5):
             r1 = r3
             r2 = r2
          m = [m2[0],m2[3],r1,m2[1],0,0]
          n = [n2[0],n2[3],r2,n2[1],0,0]
          if([adptbx.is_positive_definite(m),
              adptbx.is_positive_definite(n)].count(True)==2):
              esm = adptbx.eigensystem(m)
              esn = adptbx.eigensystem(n)
              vn = esn.values()
              vm = esm.values()
              mmin = flex.min(flex.double(vm))
              nmin = flex.min(flex.double(vn))
              if(abs(abs(mmin) - abs(nmin)) < small and mmin> 0. and nmin> 0.):
                  # Locate the eigenvector belonging to each minimum
                  # eigenvalue; stop searching if they coincide.
                  for i, v in enumerate(vn):
                      if(abs(abs(nmin) - v) < small): break
                  for j, v in enumerate(vm):
                      if(abs(abs(mmin) - v) < small): break
                  vecn = flex.double(esn.vectors(i))
                  vecm = flex.double(esm.vectors(j))
                  if(flex.mean(vecm-vecn) < small): break
              else:
                m = None
                n = None
  assert [m,n] != [None,None]
  assert [adptbx.is_positive_definite(m),
          adptbx.is_positive_definite(n)].count(True)==2
  r = random.random()
  # Half the time, return randomly rotated versions of the pair.
  if(r > 0.5):
     m = adptbx.random_rotate_ellipsoid(u_cart = m)
     n = adptbx.random_rotate_ellipsoid(u_cart = n)
  return m,n
Example #14
0
 def solvent_adjust(self):
   """Add a constant to the solvent region of the map so the protein/solvent
   density contrast matches params.protein_solvent_ratio, then update the
   stored mean solvent density.  No-op unless params.solvent_adjust is set.
   """
   if not self.params.solvent_adjust: return
   # NOTE(review): min_solvent_density is computed but never used below.
   min_solvent_density = flex.min(self.map.select(self.solvent_iselection))
   min_protein_density = flex.min(self.map.select(self.protein_iselection))
   # Offset chosen so (mean protein - min protein) / ratio positions the
   # solvent level relative to the protein density floor.
   self.solvent_add = ((self.mean_protein_density-min_protein_density)
                  /self.params.protein_solvent_ratio) \
               + min_protein_density - self.mean_solvent_density
   # Apply the offset only at solvent grid points.
   self.map.as_1d().copy_selected(
     self.solvent_iselection, (self.map + self.solvent_add).as_1d())
   #self.mean_solvent_density = flex.mean(self.map.select(self.solvent_iselection))
   self.mean_solvent_density = (1-self.params.solvent_fraction) \
       * (self.mean_solvent_density+self.solvent_add-self.mean_protein_density)
Example #15
0
 def solvent_adjust(self):
   """Add a constant to the solvent region of the map so the protein/solvent
   density contrast matches params.protein_solvent_ratio, then update the
   stored mean solvent density.  No-op unless params.solvent_adjust is set.
   """
   if not self.params.solvent_adjust: return
   # NOTE(review): min_solvent_density is computed but never used below.
   min_solvent_density = flex.min(self.map.select(self.solvent_iselection))
   min_protein_density = flex.min(self.map.select(self.protein_iselection))
   # Offset chosen so (mean protein - min protein) / ratio positions the
   # solvent level relative to the protein density floor.
   self.solvent_add = ((self.mean_protein_density-min_protein_density)
                  /self.params.protein_solvent_ratio) \
               + min_protein_density - self.mean_solvent_density
   # Apply the offset only at solvent grid points.
   self.map.as_1d().copy_selected(
     self.solvent_iselection, (self.map + self.solvent_add).as_1d())
   #self.mean_solvent_density = flex.mean(self.map.select(self.solvent_iselection))
   self.mean_solvent_density = (1-self.params.solvent_fraction) \
       * (self.mean_solvent_density+self.solvent_add-self.mean_protein_density)
    def resolution_i_mean_over_sigma_mean(self, limit=None, log=None):
        """Compute a resolution limit where either <I>/<sigma> = 1.0 (limit if
        set) or the full extent of the data.

        limit: target unmerged <I>/<sigI>; defaults to the
            i_mean_over_sigma_mean parameter.
        log: optional file path; when given, per-bin observed and fitted
            values are written there as "s d observed fitted" rows.
        Returns the resolution limit (1/sqrt(s)).
        """

        if limit is None:
            limit = self._params.i_mean_over_sigma_mean

        # Per-bin <I>/<sigI> and 1/d^2, reversed so values run from low to
        # high resolution.
        isigma_s = flex.double([
            b.i_mean_over_sigi_mean for b in self._merging_statistics.bins
        ]).reversed()
        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        # Only bins with positive <I>/<sigI> are usable for the fit.
        sel = isigma_s > 0
        isigma_s = isigma_s.select(sel)
        s_s = s_s.select(sel)

        if flex.min(isigma_s) > limit:
            # The data never drop to the target: use the full data extent.
            r_isigma = 1.0 / math.sqrt(flex.max(s_s))
            isigma_f = None

        else:
            isigma_f = log_fit(s_s, isigma_s, 6)

            if log:
                # FIX: use a context manager so the log file is closed even
                # if a write raises (previously open()/close() with no
                # try/finally leaked the handle on error).
                with open(log, "w") as fout:
                    for j, s in enumerate(s_s):
                        d = 1.0 / math.sqrt(s)
                        o = isigma_s[j]
                        m = isigma_f[j]
                        fout.write("%f %f %f %f\n" % (s, d, o, m))

            try:
                r_isigma = 1.0 / math.sqrt(
                    interpolate_value(s_s, isigma_f, limit))
            except Exception:
                # Interpolation failed: fall back to whichever end of the
                # fitted curve lies on the correct side of the target.
                if limit > max(isigma_f):
                    r_isigma = 1.0 / math.sqrt(flex.min(s_s))
                else:
                    r_isigma = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel="Unmerged <I>/<sigma>")
            if isigma_f is not None:
                plot.plot(s_s, isigma_f, label="fit")
            plot.plot(s_s, isigma_s, label="Unmerged <I>/<sigma>")
            plot.plot_resolution_limit(r_isigma)
            plot.savefig("i_mean_over_sigma_mean.png")

        return r_isigma
 def minimize_kb(self, use_curvatures_options,
                 set_use_scale_options=[True, False], n_cycles=5):
   """Cycle the k_sol/b_sol minimiser, undoing steps that worsen R.

   A step is undone when the R factor increases and any parameter left its
   sensible range (k_sol outside [0, 1], b_sol outside [0, 100]).
   set_use_scale_options is currently unused (kept for API compatibility).
   """
   for use_curvatures in use_curvatures_options*n_cycles:
     start_r = self.kbu.r_factor()
     save_k_sols = self.kbu.k_sols()
     save_b_sols = self.kbu.b_sols()
     self.set_use_scale(value = True)
     m = self.minimize_kb_once(use_curvatures=use_curvatures)
     r = self.kbu.r_factor()
     # BUG FIX: the final bound previously re-tested k_sols (>100.), leaving
     # b_sols unbounded above; it now tests b_sols, matching the
     # flex.min(b_sols) check on the line above.
     if(r>start_r and r>1.e-2 and (flex.min(self.kbu.k_sols())<0 or
        flex.max(self.kbu.k_sols())>1 or flex.min(self.kbu.b_sols())<0 or
        flex.max(self.kbu.b_sols())>100.)):
       self.kbu.update(k_sols = save_k_sols, b_sols = save_b_sols)
Example #18
0
 def minimize_kb(self, use_curvatures_options,
                 set_use_scale_options=[True, False], n_cycles=5):
   """Cycle the k_sol/b_sol minimiser, undoing steps that worsen R.

   A step is undone when the R factor increases and any parameter left its
   sensible range (k_sol outside [0, 1], b_sol outside [0, 100]).
   set_use_scale_options is currently unused (kept for API compatibility).
   """
   for use_curvatures in use_curvatures_options*n_cycles:
     start_r = self.kbu.r_factor()
     save_k_sols = self.kbu.k_sols()
     save_b_sols = self.kbu.b_sols()
     self.set_use_scale(value = True)
     m = self.minimize_kb_once(use_curvatures=use_curvatures)
     r = self.kbu.r_factor()
     # BUG FIX: the final bound previously re-tested k_sols (>100.), leaving
     # b_sols unbounded above; it now tests b_sols, matching the
     # flex.min(b_sols) check on the line above.
     if(r>start_r and r>1.e-2 and (flex.min(self.kbu.k_sols())<0 or
        flex.max(self.kbu.k_sols())>1 or flex.min(self.kbu.b_sols())<0 or
        flex.max(self.kbu.b_sols())>100.)):
       self.kbu.update(k_sols = save_k_sols, b_sols = save_b_sols)
Example #19
0
def show_xray_structure_statistics(xray_structure,
                                   atom_selections,
                                   hd_sel=None):
    """Collect per-selection atom/B-factor/occupancy statistics.

    atom_selections: object whose attributes (all, macromolecule, sidechain,
    solvent, ligand, backbone) are boolean selections into xray_structure.
    hd_sel: optional hydrogen/deuterium selection excluded from all stats.
    Returns a group_args with one statistics group_args per non-empty
    selection (None for empty ones).
    """
    result = group_args(all=None,
                        macromolecule=None,
                        sidechain=None,
                        solvent=None,
                        ligand=None,
                        backbone=None)
    if (hd_sel is not None):
        xray_structure = xray_structure.select(~hd_sel)
    for key in atom_selections.__dict__.keys():
        value = atom_selections.__dict__[key]
        if (value.count(True) > 0):
            # Keep the selection consistent with the H/D-stripped structure.
            if (hd_sel is not None):
                value = value.select(~hd_sel)
            xrs = xray_structure.select(value)
            # Summarise element counts with occupancy sums, e.g. "C:12:11.50".
            atom_counts = xrs.scattering_types_counts_and_occupancy_sums()
            atom_counts_strs = []
            for ac in atom_counts:
                atom_counts_strs.append(
                    "%s:%s:%s" % (ac.scattering_type, str(
                        ac.count), str("%10.2f" % ac.occupancy_sum).strip()))
            atom_counts_str = " ".join(atom_counts_strs)
            b_isos = xrs.extract_u_iso_or_u_equiv()
            n_aniso = xrs.use_u_aniso().count(True)
            n_not_positive_definite = xrs.is_positive_definite_u().count(False)
            b_mean = format_value("%-6.1f", adptbx.u_as_b(flex.mean(b_isos)))
            b_min = format_value("%-6.1f", adptbx.u_as_b(flex.min(b_isos)))
            b_max = format_value("%-6.1f", adptbx.u_as_b(flex.max(b_isos)))
            n_atoms = format_value("%-8d", xrs.scatterers().size()).strip()
            n_npd = format_value("%-8s", n_not_positive_definite).strip()
            occ = xrs.scatterers().extract_occupancies()
            o_mean = format_value("%-6.2f", flex.mean(occ)).strip()
            o_min = format_value("%-6.2f", flex.min(occ)).strip()
            o_max = format_value("%-6.2f", flex.max(occ)).strip()
            tmp_result = group_args(n_atoms=n_atoms,
                                    atom_counts_str=atom_counts_str,
                                    b_min=b_min,
                                    b_max=b_max,
                                    b_mean=b_mean,
                                    o_min=o_min,
                                    o_max=o_max,
                                    o_mean=o_mean,
                                    n_aniso=n_aniso,
                                    n_npd=n_npd)
            setattr(result, key, tmp_result)
    return result
Example #20
0
def i_sig_i_vs_batch(intensities, batches):
  """Return the mean I/sigma(I) for each batch as batch_binned_data.

  Observations with non-positive sigmas are discarded; the remainder are
  sorted by batch number and averaged per batch.
  """
  assert intensities.size() == batches.size()
  assert intensities.sigmas() is not None
  positive = intensities.sigmas() > 0

  i_over_sig = intensities.data().select(positive) / intensities.sigmas().select(positive)
  batches = batches.select(positive)

  bins = []
  data = []

  # Sort observations by batch number so each batch is a contiguous run.
  order = flex.sort_permutation(batches.data())
  batch_nos = batches.data().select(order)
  i_over_sig = i_over_sig.select(order)

  run_start = 0
  current = flex.min(batch_nos)
  total = batch_nos.size()
  # Walk one past the end so the final batch's run is flushed too.
  for idx in range(total + 1):
    if idx == total or batch_nos[idx] != current:
      assert batch_nos[run_start:idx].all_eq(current)
      data.append(flex.mean(i_over_sig[run_start:idx]))
      bins.append(current)
      run_start = idx
      if idx < total:
        current = batch_nos[run_start]

  return batch_binned_data(bins, data)
Example #21
0
def filter_mask(mask_p1,
                volume_cutoff,
                crystal_symmetry,
                for_structure_factors=False):
    """Drop connected regions of a binary mask smaller than volume_cutoff.

    mask_p1: P1 map with values 0/1 (asserted below).
    Returns the filtered mask as a double map (0/1), divided by the space
    group order when for_structure_factors is True.
    """
    co = maptbx.connectivity(map_data=mask_p1,
                             threshold=0.01,
                             preprocess_against_shallow=True,
                             wrapping=True)
    mi, ma = flex.min(mask_p1), flex.max(mask_p1)
    # NOTE(review): looks like a leftover debug print -- consider removing.
    print(mask_p1.size(), (mask_p1 < 0).count(True))
    assert mi == 0, mi
    assert ma == 1, ma
    # Approximate grid step (A) from cell lengths and grid dimensions,
    # used to convert region sizes (voxels) into volumes.
    a, b, c = crystal_symmetry.unit_cell().parameters()[:3]
    na, nb, nc = mask_p1.accessor().all()
    step = flex.mean(flex.double([a / na, b / nb, c / nc]))
    if (crystal_symmetry.space_group_number() != 1):
        co.merge_symmetry_related_regions(
            space_group=crystal_symmetry.space_group())
    conn = co.result().as_double()
    # Regions sorted by size, largest first; region 0 is the macromolecule.
    z = zip(co.regions(), range(0, co.regions().size()))
    sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
    for i_seq, p in enumerate(sorted_by_volume):
        v, i = p
        if (i == 0): continue  # skip macromolecule
        # skip small volume
        volume = v * step**3
        if volume < volume_cutoff:
            conn = conn.set_selected(conn == i, 0)
    # Collapse all surviving region labels back to a binary mask.
    conn = conn.set_selected(conn > 0, 1)
    if for_structure_factors:
        conn = conn / crystal_symmetry.space_group().order_z()
    return conn
def quick_test(file_name):
  """Smoke-test the reader: time the parse, print the first and last three
  entries of every column, then redundancy statistics after merging.
  Python 2 only (print statements)."""
  from libtbx.utils import user_plus_sys_time
  t = user_plus_sys_time()
  s = reader(file_name)
  print "Time read:", t.delta()
  s.show_summary()
  # Head of each parsed column.
  print tuple(s.original_indices[:3])
  print tuple(s.unique_indices[:3])
  print tuple(s.batch_numbers[:3])
  print tuple(s.centric_tags[:3])
  print tuple(s.spindle_flags[:3])
  print tuple(s.asymmetric_unit_indices[:3])
  print tuple(s.i_obs[:3])
  print tuple(s.sigmas[:3])
  # Tail of each parsed column.
  print tuple(s.original_indices[-3:])
  print tuple(s.unique_indices[-3:])
  print tuple(s.batch_numbers[-3:])
  print tuple(s.centric_tags[-3:])
  print tuple(s.spindle_flags[-3:])
  print tuple(s.asymmetric_unit_indices[-3:])
  print tuple(s.i_obs[-3:])
  print tuple(s.sigmas[-3:])
  m = s.as_miller_array(merge_equivalents=False).merge_equivalents()
  print "min redundancies:", flex.min(m.redundancies().data())
  print "max redundancies:", flex.max(m.redundancies().data())
  print "mean redundancies:", flex.mean(m.redundancies().data().as_double())
  s.as_miller_arrays()[0].show_summary()
  print
Example #23
0
  def suggest_likely_candidates( self, acceptable_violations = 1e+90 ):
    """Return rows describing space-group candidates, best score first.

    Each row: [space group, n, <I>, <I/sigI>, absent violations, present
    violations, score delta from the best (lowest) score], all formatted
    as strings.  Returns [] when no scores are available.
    acceptable_violations is currently unused (kept for API compatibility).
    """
    if (len(self.tuple_score) == 0) :
      return []
    # Removed dead locals from the original (used, all_done, count and the
    # initial order = []) -- none affected the result.
    scores = flex.double( [ tt[0] for tt in self.tuple_score ] )
    # Ascending sort: lower score is better.
    order = flex.sort_permutation( scores, False )

    sorted_rows = []
    # The best (lowest) score; deltas below are relative to it.
    best_score = flex.min( scores )
    for ii in order:
      sg             = self.sg_choices[ii]
      tmp_n          = self.n[ii]
      tmp_mean_i     = self.mean_i[ii]
      tmp_mean_isigi = self.mean_isigi[ii]
      tuple_score    = self.tuple_score[ii]

      sorted_rows.append( [str(sg), '%i'%(tmp_n),
                           '%8.2f  '%(tmp_mean_i),
                           '%8.2f  '%(tmp_mean_isigi),
                           ' %i '%(tuple_score[1]),
                           ' %i '%(tuple_score[2]),
                           ' %8.3e '%((tuple_score[0]-best_score))
                          ])

    return sorted_rows
Example #24
0
  def __init__(self, f_obs, ncs_pairs, reflections_per_bin):
    """Set up the tNCS epsilon-factor refinery for f_obs.

    Builds the resolution binner, a per-reflection bin-index array (rbin),
    the space-group rotation matrices, and the C++ target/gradients object,
    then runs an initial update().
    """
    adopt_init_args(self, locals())
    # Create bins
    f_obs.setup_binner(reflections_per_bin = reflections_per_bin)
    self.binner = f_obs.binner()
    n_bins = self.binner.n_bins_used()
    self.n_bins = n_bins
    self.SigmaN = None
    self.update_SigmaN()
    #
    # rbin[i] = 0-based bin index of reflection i; -1 marks "unassigned"
    # and must be gone by the asserts below.
    self.rbin = flex.int(f_obs.data().size(), -1)
    for i_bin in self.binner.range_used():
      for i_seq in self.binner.array_indices(i_bin):
        self.rbin[i_seq] = i_bin-1 # i_bin starts with 1, not 0 !
    assert flex.min(self.rbin)==0
    assert flex.max(self.rbin)==n_bins-1
    # Extract symmetry matrices
    self.sym_matrices = []
    for m_as_string in f_obs.space_group().smx():
      o = sgtbx.rt_mx(symbol=str(m_as_string), t_den=f_obs.space_group().t_den())
      m_as_double = o.r().as_double()
      self.sym_matrices.append(m_as_double)
    self.gradient_evaluator = None
    self.target_and_grads = ext.tncs_eps_factor_refinery(
        tncs_pairs               = self.ncs_pairs,
        f_obs                    = self.f_obs.data(),
        sigma_f_obs              = self.f_obs.sigmas(),
        rbin                     = self.rbin,
        SigmaN                   = self.SigmaN,
        space_group              = self.f_obs.space_group(),
        miller_indices           = self.f_obs.indices(),
        fractionalization_matrix = self.f_obs.unit_cell().fractionalization_matrix(),
        sym_matrices             = self.sym_matrices)

    self.update()
Example #25
0
def set_refinable_parameters(xray_structure, parameters, selections,
                             enforce_positivity=False):
  """Write occupancy parameters back onto xray_structure, one value per
  i_seqs group in each selection.  When enforce_positivity is set and a
  group contains negative occupancies, the group is shifted to a zero
  minimum and renormalised to unit sum before being applied.
  """
  # XXX PVA: terribly inefficient; should be moved into C++
  n_scatterers = xray_structure.scatterers().size()
  param_index = 0
  for group in selections:
    # Gather this group's parameter values and note any negative ones.
    group_values = flex.double()
    negatives = flex.double()
    cursor = param_index
    for i_seqs in group:
      value = parameters[cursor]
      group_values.append(value)
      if value < 0:
        negatives.append(value)
      cursor += 1
    if enforce_positivity and negatives.size() > 0:
      # Shift so the minimum becomes zero, then normalise to unit sum.
      group_values = group_values - flex.min(group_values)
      total = flex.sum(group_values)
      if total != 0:
        group_values = group_values / total
    # Apply the (possibly rescaled) values to the selected scatterers.
    for j, i_seqs in enumerate(group):
      member_mask = flex.bool(n_scatterers, flex.size_t(i_seqs))
      xray_structure.set_occupancies(group_values[j], member_mask)
      param_index += 1
Example #26
0
 def target_and_gradients(self, xray_structure, to_compute_weight=False):
   """Compute local-sphere restraint energies/gradients for ADPs or occupancies.

   xray_structure    : structure whose u_iso or occupancy values are restrained.
   to_compute_weight : when True, work on a deep copy and, if all parameters
                       are (nearly) equal, jitter them around their mean so
                       the restraint target is non-trivial.
   Returns the result of crystal.adp_iso_local_sphere_restraints_energies.
   """
   if(to_compute_weight):
     xrs = xray_structure.deep_copy_scatterers()
     # This may be useful to explore:
     #xrs.shake_adp_if_all_equal(b_iso_tolerance = 1.e-3)
     #xrs.shake_adp(spread=10, keep_anisotropic= False)
   else:
     xrs = xray_structure
   if(self.refine_adp): params = xrs.extract_u_iso_or_u_equiv()
   if(self.refine_occ): params = xrs.scatterers().extract_occupancies()
   if(to_compute_weight):
     pmin = flex.min(params)
     pmax = flex.max(params)
     # Guard abs(pmin+pmax) == 0 before dividing: the unguarded form raised
     # ZeroDivisionError (the other copy of this method already has this check).
     if(abs(pmin+pmax) != 0. and abs(pmin-pmax)/abs(pmin+pmax)*2*100<1.e-3):
       pmean = flex.mean(params)
       n_par = params.size()
       params = flex.double()
       # range (not Python-2-only xrange) keeps this Python-3 compatible.
       for i in range(n_par):
         params.append(pmean + 0.1 * pmean * random.choice([-1,0,1]))
   return crystal.adp_iso_local_sphere_restraints_energies(
     pair_sym_table           = self.pair_sym_table,
     orthogonalization_matrix = self.orthogonalization_matrix,
     sites_frac               = self.sites_frac,
     u_isos                   = params,
     selection                = self.selection,
     use_u_iso                = self.selection,
     grad_u_iso               = self.selection,
     sphere_radius            = self.sphere_radius,
     distance_power           = 2,
     average_power            = 1,
     min_u_sum                = 1.e-6,
     compute_gradients        = True,
     collect                  = False)
Example #27
0
 def target_and_gradients(self, xray_structure, to_compute_weight=False):
     """Evaluate local-sphere restraint energies for the current ADP or
     occupancy parameters.  When computing a weight, operate on a deep
     copy and jitter near-constant parameters about their mean."""
     if to_compute_weight:
         working = xray_structure.deep_copy_scatterers()
         # Possible experiments (kept from the original author):
         #working.shake_adp_if_all_equal(b_iso_tolerance = 1.e-3)
         #working.shake_adp(spread=10, keep_anisotropic= False)
     else:
         working = xray_structure
     if self.refine_adp:
         params = working.extract_u_iso_or_u_equiv()
     if self.refine_occ:
         params = working.scatterers().extract_occupancies()
     if to_compute_weight:
         lo = flex.min(params)
         hi = flex.max(params)
         # Short-circuit keeps the division safe when lo + hi == 0.
         if abs(lo + hi) != 0. and abs(lo - hi) / abs(lo + hi) * 2 * 100 < 1.e-3:
             centre = flex.mean(params)
             jittered = [centre + 0.1 * centre * random.choice([-1, 0, 1])
                         for _ in range(params.size())]
             params = flex.double(jittered)
     return crystal.adp_iso_local_sphere_restraints_energies(
         pair_sym_table=self.pair_sym_table,
         orthogonalization_matrix=self.orthogonalization_matrix,
         sites_frac=self.sites_frac,
         u_isos=params,
         selection=self.selection,
         use_u_iso=self.selection,
         grad_u_iso=self.selection,
         sphere_radius=self.sphere_radius,
         distance_power=2,
         average_power=1,
         min_u_sum=1.e-6,
         compute_gradients=True,
         collect=False)
Example #28
0
def set_refinable_parameters(xray_structure, parameters, selections,
                             enforce_positivity=False):
  """Push occupancy parameters onto xray_structure, one value per i_seqs
  group in each selection; optionally rescale a group containing negative
  occupancies so it becomes non-negative and sums to 1.
  """
  # XXX PVA: Code below is terribly inefficient and MUST be moved into C++
  sz = xray_structure.scatterers().size()
  i = 0  # running index into the flat `parameters` array
  for sel in selections:
    # pre-check for positivity begin
    # spread negative occupancies across i_seqs having positive ones
    par_all = flex.double()
    par_neg = flex.double()
    i_p = i
    for sel_ in sel:
      p = parameters[i_p]
      par_all.append(p)
      if(p<0): par_neg.append(p)
      i_p += 1
    if(enforce_positivity and par_neg.size()>0):
      # shift to a zero minimum, then normalise to unit sum (if non-zero)
      par_all = par_all - flex.min(par_all)
      fs = flex.sum(par_all)
      if(fs != 0):
        par_all = par_all / fs
    # pre-check for positivity end
    for j, sel_ in enumerate(sel):
      sel__b = flex.bool(sz, flex.size_t(sel_))
      xray_structure.set_occupancies(par_all[j], sel__b)
      i+=1
Example #29
0
 def histogram(self):
     """Return a plotly-style dict describing the Delta CC-half histogram."""
     scores = self._normalised_delta_cc_i()
     bin_spec = {
         "start": math.floor(flex.min(scores)),
         "end": math.ceil(flex.max(scores)) + 1,
         "size": 0.1,
     }
     trace = {
         "x": list(scores),
         "xbins": bin_spec,
         "type": "histogram",
         "name": u"Delta CC<sub>½</sub>",
     }
     layout = {
         "title": u"Histogram of Delta CC<sub>½</sub>",
         "xaxis": {"title": u"σ"},
         "yaxis": {"title": "Frequency"},
     }
     return {"delta_cc_half_histogram": {"data": [trace], "layout": layout}}
Example #30
0
 def get_map():
   """Return a 10x20x30 flex.double of random values rescaled to [0, 1]."""
   # range (not the Python-2-only xrange) so this runs under Python 3.
   av = [random.random() for i in range(10*20*30)]
   m = flex.double(av)
   m = m-flex.min(m)  # shift minimum to 0
   m = m/flex.max(m)  # scale maximum to 1
   m.resize(flex.grid((10,20,30)))
   return m
Example #31
0
def quick_test(file_name):
  """Read a scalepack file, print timing, per-array head/tail values and
  redundancy statistics of the merged data."""
  from libtbx.utils import user_plus_sys_time
  timer = user_plus_sys_time()
  s = reader(file_name)
  print("Time read:", timer.delta())
  s.show_summary()
  column_names = ("original_indices", "unique_indices", "batch_numbers",
                  "centric_tags", "spindle_flags", "asymmetric_unit_indices",
                  "i_obs", "sigmas")
  # First three entries of every column, then the last three of each.
  for name in column_names:
    print(tuple(getattr(s, name)[:3]))
  for name in column_names:
    print(tuple(getattr(s, name)[-3:]))
  m = s.as_miller_array(merge_equivalents=False).merge_equivalents()
  print("min redundancies:", flex.min(m.redundancies().data()))
  print("max redundancies:", flex.max(m.redundancies().data()))
  print("mean redundancies:", flex.mean(m.redundancies().data().as_double()))
  s.as_miller_arrays()[0].show_summary()
  print()
Example #32
0
    def suggest_likely_candidates(self, acceptable_violations=1e+90):
        """Build display rows for the space-group candidates, best score first.

        Scores in self.tuple_score are sorted ascending (lower is better);
        each row reports the candidate's score relative to the best one.
        The acceptable_violations argument is retained for interface
        compatibility but is not used here.
        """
        if (len(self.tuple_score) == 0):
            return []
        # Removed dead locals of the original (`used`, `all_done`, `count`,
        # `tmp_violations`) - none of them affected the result.
        scores = flex.double([tt[0] for tt in self.tuple_score])
        # flex.sort_permutation(..., False) sorts ascending.
        order = flex.sort_permutation(scores, False)
        # Best (lowest) score; rows show each score's difference from it.
        best_score = flex.min(scores)

        sorted_rows = []
        for ii in order:
            sg = self.sg_choices[ii]
            tmp_n = self.n[ii]
            tmp_mean_i = self.mean_i[ii]
            tmp_mean_isigi = self.mean_isigi[ii]
            tuple_score = self.tuple_score[ii]

            sorted_rows.append([
                str(sg),
                '%i' % (tmp_n),
                '%8.2f  ' % (tmp_mean_i),
                '%8.2f  ' % (tmp_mean_isigi),
                ' %i ' % (tuple_score[1]),
                ' %i ' % (tuple_score[2]),
                ' %8.3e ' % ((tuple_score[0] - best_score))
            ])

        return sorted_rows
Example #33
0
def show_xray_structure_statistics(xray_structure, atom_selections, hd_sel = None):
  """Collect per-selection model statistics (atom counts, B-factor and
  occupancy ranges, anisotropic/NPD ADP counts).

  xray_structure  : structure to analyse; H/D atoms are stripped first
                    when hd_sel is given.
  atom_selections : object whose attributes (all/macromolecule/sidechain/
                    solvent/ligand/backbone) are boolean selections.
  hd_sel          : optional boolean selection flagging H/D atoms.
  Returns a group_args with one statistics entry per non-empty selection.
  """
  result = group_args(
    all           = None,
    macromolecule = None,
    sidechain     = None,
    solvent       = None,
    ligand        = None,
    backbone      = None)
  if(hd_sel is not None):
    xray_structure = xray_structure.select(~hd_sel)
  for key in atom_selections.__dict__.keys():
    value = atom_selections.__dict__[key]
    if(value.count(True) > 0):
      if(hd_sel is not None):
        # keep the selection consistent with the H/D-stripped structure
        value = value.select(~hd_sel)
      xrs = xray_structure.select(value)
      atom_counts = xrs.scattering_types_counts_and_occupancy_sums()
      # "type:count:occupancy_sum" summary per scattering type
      atom_counts_strs = []
      for ac in atom_counts:
        atom_counts_strs.append("%s:%s:%s"%(ac.scattering_type,str(ac.count),
          str("%10.2f"%ac.occupancy_sum).strip()))
      atom_counts_str = " ".join(atom_counts_strs)
      b_isos = xrs.extract_u_iso_or_u_equiv()
      n_aniso = xrs.use_u_aniso().count(True)
      n_not_positive_definite = xrs.is_positive_definite_u().count(False)
      b_mean = format_value("%-6.1f",adptbx.u_as_b(flex.mean(b_isos)))
      b_min = format_value("%-6.1f",adptbx.u_as_b(flex.min(b_isos)))
      b_max = format_value("%-6.1f",adptbx.u_as_b(flex.max(b_isos)))
      n_atoms = format_value("%-8d",xrs.scatterers().size()).strip()
      n_npd = format_value("%-8s",n_not_positive_definite).strip()
      occ = xrs.scatterers().extract_occupancies()
      o_mean = format_value("%-6.2f",flex.mean(occ)).strip()
      o_min = format_value("%-6.2f",flex.min(occ)).strip()
      o_max = format_value("%-6.2f",flex.max(occ)).strip()
      tmp_result = group_args(
        n_atoms         = n_atoms,
        atom_counts_str = atom_counts_str,
        b_min           = b_min,
        b_max           = b_max,
        b_mean          = b_mean,
        o_min           = o_min,
        o_max           = o_max,
        o_mean          = o_mean,
        n_aniso         = n_aniso,
        n_npd           = n_npd)
      setattr(result,key,tmp_result)
  return result
Example #34
0
def test_4():
   """Regression test for max_lik.fom_and_phase_error on synthetic data."""
   symmetry = crystal.symmetry(unit_cell          = (15.67, 25.37, 35.68, 90, 90, 90),
                               space_group_symbol = "P 21 21 21")
   structure = xray.structure(crystal_symmetry = symmetry)
   ma = structure.structure_factors(d_min          = 1.5,
                                    anomalous_flag = False).f_calc()
   mi = ma.indices()
   # ================================================================= TEST-1
   alpha  = flex.double(mi.size())
   beta   = flex.double(mi.size())
   d_obs  = flex.double(mi.size())
   d_calc = flex.double(mi.size())

   # define test set reflections (every 10th reflection flagged)
   flags=flex.int(beta.size(), 0)
   k=0
   # range (not Python-2-only xrange), matching the other copy of this test
   for i in range(flags.size()):
     k=k+1
     if (k !=10):
       flags[i]=0
     else:
       k=0
       flags[i]=1

   for i in range(1,mi.size()+1):
     d_obs [i-1] = i*1.5
     d_calc[i-1] = i*1.0
     beta  [i-1] = i*500.0
     alpha [i-1] = float(i) / float(i + 1)

   obj = max_lik.fom_and_phase_error(
     f_obs          = d_obs,
     f_model        = d_calc,
     alpha          = alpha,
     beta           = beta,
     epsilons       = ma.epsilons().data().as_double(),
     centric_flags  = ma.centric_flags().data())
   per = obj.phase_error()
   fom = obj.fom()
   assert approx_equal(flex.max(per) ,  89.9325000127     , 1.e-4)
   assert approx_equal(flex.min(per) ,  5.37565067746e-05 , 1.e-4)
   assert approx_equal(flex.mean(per),  20.7942460698     , 1.e-4)
   assert approx_equal(flex.max(fom) ,  0.999999402705    , 1.e-4)
   assert approx_equal(flex.min(fom) ,  0.000749999859375 , 1.e-4)
   assert approx_equal(flex.mean(fom),  0.858269037582    , 1.e-4)
Example #35
0
 def __init__(self, unmerged_intensities, batches):
     """Refine one multiplicative scale factor per batch with L-BFGS,
     starting from unity, using self as the lbfgs target evaluator."""
     self.unmerged_intensities = unmerged_intensities
     self.batches = batches
     batch_values = self.batches.data()
     self.minb = flex.min(batch_values)
     self.maxb = flex.max(batch_values)
     # one parameter per batch in [minb, maxb], all starting at 1.0
     self.x = flex.double(self.maxb - self.minb + 1, 1)
     scitbx.lbfgs.run(target_evaluator=self)
Example #36
0
 def __init__(self, unmerged_intensities, batches):
   """Fit one scale factor per batch to unmerged intensities via L-BFGS.

   Scale factors are initialised to 1 and scitbx.lbfgs drives the
   refinement using self as the target evaluator.
   """
   self.unmerged_intensities = unmerged_intensities
   self.batches = batches
   self.minb = flex.min(self.batches.data())
   self.maxb = flex.max(self.batches.data())
   # one scale factor per batch number in [minb, maxb]
   n_scale_factors = self.maxb-self.minb + 1
   self.x = flex.double(n_scale_factors, 1)
   scitbx.lbfgs.run(target_evaluator=self)
 def functional(self, x):
   if (0):
     print "functional(): x =", list(x)
   if (flex.min(x[:3]) < 1):
     print "FunctionalException: small length"
     raise scitbx.minimizers.FunctionalException
   if (flex.min(x[3:]) < 50):
     print "FunctionalException: small angle"
     raise scitbx.minimizers.FunctionalException
   try:
     result = residual(
       self.two_thetas_obs, self.miller_indices, self.wavelength,
       unit_cell=uctbx.unit_cell(iter(x)))
   except KeyboardInterrupt: raise
   except Exception, e:
     print "FunctionalException:", str(e)
     raise scitbx.minimizers.FunctionalException
Example #38
0
 def __call__(self,
              xray_structure,
              u_iso_refinable_params,
              dp,
              n_parameters,
              verbose=0):
     """Sample d(target)/d(f_calc) gradients onto scatterer parameters.

     Builds an xray.fast_gradients engine for the structure, turns `dp`
     into a real/complex gradient map via self.ft_dp, then runs the
     sampling step.  Returns the fast_gradients result object.
     verbose enables diagnostic printing.
     """
     omptbx.env.num_threads = libtbx.introspection.number_of_processors()
     result = xray.fast_gradients(
         unit_cell=xray_structure.unit_cell(),
         scatterers=xray_structure.scatterers(),
         scattering_type_registry=xray_structure.scattering_type_registry(),
         u_base=self.u_base(),
         wing_cutoff=self.wing_cutoff(),
         exp_table_one_over_step_size=self.exp_table_one_over_step_size(),
         tolerance_positive_definite=1.e-5)
     if (0 or verbose):
         print("u_base:", result.u_base())
         print("u_extra:", result.u_extra())
     gradient_map = self.ft_dp(dp, u_extra=result.u_extra())
     # Real map unless the data are anomalous; complex maps must be unpadded.
     if (not gradient_map.anomalous_flag()):
         gradient_map = gradient_map.real_map()
     else:
         gradient_map = gradient_map.complex_map()
         assert not gradient_map.is_padded()
         if (0 or verbose):
             print("grid:", gradient_map.focus())
             print("ft_dt_map real: %.4g %.4g" %
                   (flex.min(flex.real(gradient_map)),
                    flex.max(flex.real(gradient_map))))
             print("ft_dt_map imag: %.4g %.4g" %
                   (flex.min(flex.imag(gradient_map)),
                    flex.max(flex.imag(gradient_map))))
             print()
     result.sampling(
         scatterers=xray_structure.scatterers(),
         u_iso_refinable_params=u_iso_refinable_params,
         scattering_type_registry=xray_structure.scattering_type_registry(),
         site_symmetry_table=xray_structure.site_symmetry_table(),
         ft_d_target_d_f_calc=gradient_map,
         n_parameters=n_parameters,
         sampled_density_must_be_positive=False)
     if (0 or verbose):
         print("max_sampling_box_edges:", result.max_sampling_box_edges())
         print("exp_table_size:", result.exp_table_size())
         print()
     return result
Example #39
0
def test_4():
   """Regression test for max_lik.fom_and_phase_error on synthetic data."""
   symmetry = crystal.symmetry(unit_cell          = (15.67, 25.37, 35.68, 90, 90, 90),
                               space_group_symbol = "P 21 21 21")
   structure = xray.structure(crystal_symmetry = symmetry)
   ma = structure.structure_factors(d_min          = 1.5,
                                    anomalous_flag = False).f_calc()
   mi = ma.indices()
   # ================================================================= TEST-1
   alpha  = flex.double(mi.size())
   beta   = flex.double(mi.size())
   d_obs  = flex.double(mi.size())
   d_calc = flex.double(mi.size())

   # define test set reflections (every 10th reflection flagged)
   flags=flex.int(beta.size(), 0)
   k=0
   for i in range(flags.size()):
     k=k+1
     if (k !=10):
       flags[i]=0
     else:
       k=0
       flags[i]=1

   # deterministic synthetic data: linear ramps in i
   for i in range(1,mi.size()+1):
     d_obs [i-1] = i*1.5
     d_calc[i-1] = i*1.0
     beta  [i-1] = i*500.0
     alpha [i-1] = float(i) / float(i + 1)

   obj = max_lik.fom_and_phase_error(
     f_obs          = d_obs,
     f_model        = d_calc,
     alpha          = alpha,
     beta           = beta,
     epsilons       = ma.epsilons().data().as_double(),
     centric_flags  = ma.centric_flags().data())
   per = obj.phase_error()
   fom = obj.fom()
   assert approx_equal(flex.max(per) ,  89.9325000127     , 1.e-4)
   assert approx_equal(flex.min(per) ,  5.37565067746e-05 , 1.e-4)
   assert approx_equal(flex.mean(per),  20.7942460698     , 1.e-4)
   assert approx_equal(flex.max(fom) ,  0.999999402705    , 1.e-4)
   assert approx_equal(flex.min(fom) ,  0.000749999859375 , 1.e-4)
   assert approx_equal(flex.mean(fom),  0.858269037582    , 1.e-4)
Example #40
0
def exercise_2():
  """Exercise solvent-mask generation: around_atoms vs mmtbx.masks.bulk_solvent
  (by gridding and by grid step) must agree, as must their structure factors."""
  symmetry = crystal.symmetry(
    unit_cell=(5.67, 10.37, 10.37, 90, 135.49, 90),
    space_group_symbol="C2")
  structure = xray.structure(crystal_symmetry=symmetry)
  atmrad = flex.double()
  xyzf = flex.vec3_double()
  # range (not Python-2-only xrange) keeps this Python-3 compatible.
  for k in range(100):
    scatterer = xray.scatterer(
      site = ((1.+k*abs(math.sin(k)))/1000.0,
              (1.+k*abs(math.cos(k)))/1000.0,
              (1.+ k)/1000.0),
      scattering_type = "C")
    structure.add_scatterer(scatterer)
    atmrad.append(van_der_waals_radii.vdw.table[scatterer.element_symbol()])
    xyzf.append(scatterer.site)
  miller_set = miller.build_set(
    crystal_symmetry=structure,
    d_min=1.0,
    anomalous_flag=False)
  step = 0.5
  crystal_gridding = maptbx.crystal_gridding(
    unit_cell=structure.unit_cell(),
    step=step)
  nxyz = crystal_gridding.n_real()
  shrink_truncation_radius = 1.0
  solvent_radius = 1.0
  m1 = around_atoms(
    structure.unit_cell(),
    structure.space_group().order_z(),
    structure.sites_frac(),
    atmrad,
    nxyz,
    solvent_radius,
    shrink_truncation_radius)
  assert m1.solvent_radius == 1
  assert m1.shrink_truncation_radius == 1
  # mask must be strictly binary (0 = excluded, 1 = solvent)
  assert flex.max(m1.data) == 1
  assert flex.min(m1.data) == 0
  assert m1.data.size() == m1.data.count(1) + m1.data.count(0)
  m2 = mmtbx.masks.bulk_solvent(
    xray_structure=structure,
    gridding_n_real=nxyz,
    ignore_zero_occupancy_atoms = False,
    solvent_radius=solvent_radius,
    shrink_truncation_radius=shrink_truncation_radius)
  assert m2.data.all_eq(m1.data)
  m3 = mmtbx.masks.bulk_solvent(
    xray_structure=structure,
    grid_step=step,
    ignore_zero_occupancy_atoms = False,
    solvent_radius=solvent_radius,
    shrink_truncation_radius=shrink_truncation_radius)
  assert m3.data.all_eq(m1.data)
  f_mask2 = m2.structure_factors(miller_set=miller_set)
  f_mask3 = m3.structure_factors(miller_set=miller_set)
  assert approx_equal(f_mask2.data(), f_mask3.data())
  assert approx_equal(flex.sum(flex.abs(f_mask3.data())), 1095.17999134)
 def __call__(self, xray_structure,
                    u_iso_refinable_params,
                    dp,
                    n_parameters,
                    verbose=0):
   """Sample d(target)/d(f_calc) gradients onto scatterer parameters.

   Python-2 print statements converted to the print() function (the old
   form is a syntax error under Python 3; the other copy of this method
   in this file already uses the py3 form).  Returns the fast_gradients
   result object; verbose enables diagnostic printing.
   """
   omptbx.env.num_threads = libtbx.introspection.number_of_processors()
   result = xray.fast_gradients(
     unit_cell=xray_structure.unit_cell(),
     scatterers=xray_structure.scatterers(),
     scattering_type_registry=xray_structure.scattering_type_registry(),
     u_base=self.u_base(),
     wing_cutoff=self.wing_cutoff(),
     exp_table_one_over_step_size=self.exp_table_one_over_step_size(),
     tolerance_positive_definite=1.e-5)
   if (0 or verbose):
     print("u_base:", result.u_base())
     print("u_extra:", result.u_extra())
   gradient_map = self.ft_dp(dp, u_extra=result.u_extra())
   # Real map unless the data are anomalous; complex maps must be unpadded.
   if (not gradient_map.anomalous_flag()):
     gradient_map = gradient_map.real_map()
   else:
     gradient_map = gradient_map.complex_map()
     assert not gradient_map.is_padded()
     if (0 or verbose):
       print("grid:", gradient_map.focus())
       print("ft_dt_map real: %.4g %.4g" % (
         flex.min(flex.real(gradient_map)),
         flex.max(flex.real(gradient_map))))
       print("ft_dt_map imag: %.4g %.4g" % (
         flex.min(flex.imag(gradient_map)),
         flex.max(flex.imag(gradient_map))))
       print()
   result.sampling(
     scatterers=xray_structure.scatterers(),
     u_iso_refinable_params=u_iso_refinable_params,
     scattering_type_registry=xray_structure.scattering_type_registry(),
     site_symmetry_table=xray_structure.site_symmetry_table(),
     ft_d_target_d_f_calc=gradient_map,
     n_parameters=n_parameters,
     sampled_density_must_be_positive=False)
   if (0 or verbose):
     print("max_sampling_box_edges:", result.max_sampling_box_edges())
     print("exp_table_size:", result.exp_table_size())
     print()
   return result
Example #42
0
def compute(miller_array, step_scale=0.0005):
    """Report effective resolution along the cell axes and, via a grid of
    directions on the unit sphere, its minimum and maximum overall."""
    miller_array.show_comprehensive_summary(prefix="  ")
    step = miller_array.d_min() * step_scale
    p1_array = miller_array.expand_to_p1()
    indices = p1_array.indices()
    # Tally how often each h, k and l value occurs.
    counts_h = {}
    counts_k = {}
    counts_l = {}
    for h, k, l in indices:
        counts_h.setdefault(h, flex.int()).append(1)
        counts_k.setdefault(k, flex.int()).append(1)
        counts_l.setdefault(l, flex.int()).append(1)
    counts_h = {key: val.size() for key, val in counts_h.items()}
    counts_k = {key: val.size() for key, val in counts_k.items()}
    counts_l = {key: val.size() for key, val in counts_l.items()}
    # Effective resolution along each cell axis from 1D images.
    a, b, c = miller_array.unit_cell().parameters()[:3]
    per_axis = []
    for counts, length in ((counts_h, a), (counts_k, b), (counts_l, c)):
        r_axis, rho_axis = one_d_image_along_axis(
            n=counts, step=step, uc_length=length)
        rho2_axis = second_derivatives(rho=rho_axis, delta=step)
        per_axis.append(compute_d_eff(r=r_axis, rho_2nd=rho2_axis))
    d_eff_a, d_eff_b, d_eff_c = per_axis
    print("  Effective resolution along axes a,b,c: %6.3f %6.3f %6.3f" %
          (d_eff_a, d_eff_b, d_eff_c))
    # Scan all directions on a hemisphere grid.
    left = 0.8 * min(d_eff_a / 2.5, d_eff_b / 2.5, d_eff_c / 2.5)
    right = 1.2 * max(d_eff_a / 2.5, d_eff_b / 2.5, d_eff_c / 2.5)
    directions = regular_grid_on_unit_sphere.rosca(m=9, hemisphere=True)
    d_effs = flex.double()
    engine = maptbx.ft_analytical_1d_point_scatterer_at_origin(N=100000)
    for direction in directions:
        engine.compute(miller_indices=indices,
                       step=step,
                       left=left,
                       right=right,
                       u_frac=miller_array.unit_cell().fractionalize(direction))
        rho2 = second_derivatives(rho=engine.rho(), delta=step)
        d_effs.append(compute_d_eff(r=engine.distances(), rho_2nd=rho2))
    print("  Effective resolution (min,max): %8.3f%8.3f" %
          (flex.min(d_effs), flex.max(d_effs)))
Example #43
0
def test_3():
   """Regression test for max_lik.f_star_w_star_mu_nu on synthetic data."""
   symmetry = crystal.symmetry(unit_cell          = (15.67, 25.37, 35.68, 90, 90, 90),
                               space_group_symbol = "P 21 21 21")
   structure = xray.structure(crystal_symmetry = symmetry)
   mi = structure.structure_factors(d_min          = 1.5,
                                    anomalous_flag = False).f_calc().indices()
   # ================================================================= TEST-1
   alpha  = flex.double(mi.size())
   beta   = flex.double(mi.size())
   d_obs  = flex.double(mi.size())
   d_calc = flex.double(mi.size())

   # deterministic synthetic data: linear ramps in i
   for i in range(1,mi.size()+1):
     d_obs [i-1] = i*1.0
     d_calc[i-1] = i*1.0
     beta  [i-1] = i*500.0
     alpha [i-1] = float(i) / float(i + 1)

   obj = max_lik.f_star_w_star_mu_nu(f_obs          = d_obs,
                                     f_model        = d_calc,
                                     alpha          = alpha,
                                     beta           = beta,
                                     space_group    = symmetry.space_group(),
                                     miller_indices = mi)
   f_star = obj.f_star()
   w_star = obj.w_star()
   mu     = obj.mu()
   nu     = obj.nu()
   nzero  = obj.number_of_f_star_zero()

   # pinned reference values (max / min / mean per array)
   assert approx_equal(flex.max(f_star) ,          2505.77677201 , 1.e-4)
   assert approx_equal(flex.min(f_star) ,          0.0           , 1.e-4)
   assert approx_equal(flex.mean(f_star),          1085.99060715 , 1.e-4)
   assert approx_equal(flex.max(w_star) ,          1.0           , 1.e-4)
   assert approx_equal(flex.min(w_star) ,          0.0           , 1.e-4)
   assert approx_equal(flex.mean(w_star),          0.01782658613 , 1.e-4)
   assert approx_equal(flex.max(mu)     ,          2.23810354633 , 1.e-4)
   assert approx_equal(flex.min(mu)     ,          0.0           , 1.e-4)
   assert approx_equal(flex.mean(mu)    ,          1.20159615933 , 1.e-4)
   assert approx_equal(flex.max(nu)     ,          0.999107484116, 1.e-4)
   assert approx_equal(flex.min(nu)     ,          0.0           , 1.e-4)
   assert approx_equal(flex.mean(nu)    ,          0.745699513719, 1.e-4)
   assert approx_equal(nzero            ,          501           )
Example #44
0
def test_3():
   """Regression test for max_lik.f_star_w_star_mu_nu on synthetic data."""
   symmetry = crystal.symmetry(
     unit_cell=(15.67, 25.37, 35.68, 90, 90, 90),
     space_group_symbol="P 21 21 21")
   structure = xray.structure(crystal_symmetry=symmetry)
   mi = structure.structure_factors(
     d_min=1.5, anomalous_flag=False).f_calc().indices()
   # ================================================================= TEST-1
   # Deterministic synthetic inputs: linear ramps over the reflection list.
   n_refl = mi.size()
   d_obs  = flex.double([(i + 1) * 1.0 for i in range(n_refl)])
   d_calc = flex.double([(i + 1) * 1.0 for i in range(n_refl)])
   beta   = flex.double([(i + 1) * 500.0 for i in range(n_refl)])
   alpha  = flex.double([float(i + 1) / float(i + 2) for i in range(n_refl)])

   obj = max_lik.f_star_w_star_mu_nu(
     f_obs=d_obs,
     f_model=d_calc,
     alpha=alpha,
     beta=beta,
     space_group=symmetry.space_group(),
     miller_indices=mi)

   # Each output array is checked against pinned max / min / mean values.
   expectations = (
     (obj.f_star(), 2505.77677201, 0.0, 1085.99060715),
     (obj.w_star(), 1.0, 0.0, 0.01782658613),
     (obj.mu(), 2.23810354633, 0.0, 1.20159615933),
     (obj.nu(), 0.999107484116, 0.0, 0.745699513719),
   )
   for arr, exp_max, exp_min, exp_mean in expectations:
     assert approx_equal(flex.max(arr), exp_max, 1.e-4)
     assert approx_equal(flex.min(arr), exp_min, 1.e-4)
     assert approx_equal(flex.mean(arr), exp_mean, 1.e-4)
   assert approx_equal(obj.number_of_f_star_zero(), 501)
def compute(miller_array, step_scale=0.0005):
  """Report effective resolution along the cell axes and over a hemisphere
  of directions.

  Python-2 print statements converted to the print() function (the old form
  is a syntax error under Python 3; the other copy of this routine in this
  file already uses the py3 form).
  """
  miller_array.show_comprehensive_summary(prefix="  ")
  step = miller_array.d_min()*step_scale
  #
  ma_p1 = miller_array.expand_to_p1()
  #
  n_h = {}
  n_k = {}
  n_l = {}
  indices = ma_p1.indices()
  for ind in indices:
    h,k,l = ind
    n_h.setdefault(h, flex.int()).append(1)
    n_k.setdefault(k, flex.int()).append(1)
    n_l.setdefault(l, flex.int()).append(1)
  def count(d):
    # replace each per-index flex.int with its length (occurrence count)
    for k in d.keys():
      d[k] = d[k].size()
    return d
  n_h = count(n_h)
  n_k = count(n_k)
  n_l = count(n_l)
  # resolutions along axes
  a,b,c = miller_array.unit_cell().parameters()[:3]
  x, rho_x = one_d_image_along_axis(n=n_h, step=step, uc_length=a)
  y, rho_y = one_d_image_along_axis(n=n_k, step=step, uc_length=b)
  z, rho_z = one_d_image_along_axis(n=n_l, step=step, uc_length=c)
  # 2nd derivatives
  r2x = second_derivatives(rho=rho_x, delta=step)
  r2y = second_derivatives(rho=rho_y, delta=step)
  r2z = second_derivatives(rho=rho_z, delta=step)
  # effective resolution along axes
  d_eff_a = compute_d_eff(r=x, rho_2nd=r2x)
  d_eff_b = compute_d_eff(r=y, rho_2nd=r2y)
  d_eff_c = compute_d_eff(r=z, rho_2nd=r2z)
  print("  Effective resolution along axes a,b,c: %6.3f %6.3f %6.3f"%(
    d_eff_a, d_eff_b, d_eff_c))
  # all directions
  l = 0.8 * min(d_eff_a/2.5, d_eff_b/2.5, d_eff_c/2.5)
  r = 1.2 * max(d_eff_a/2.5, d_eff_b/2.5, d_eff_c/2.5)
  us = regular_grid_on_unit_sphere.rosca(m=9, hemisphere=True)
  d_effs = flex.double()
  o = maptbx.ft_analytical_1d_point_scatterer_at_origin(N=100000)
  for i, u in enumerate(us):
    o.compute(
      miller_indices=indices,
      step=step,
      left=l,
      right=r,
      u_frac=miller_array.unit_cell().fractionalize(u))
    dist, rho_ = o.distances(), o.rho()
    rho2 = second_derivatives(rho=rho_, delta=step)
    d_eff = compute_d_eff(r=dist, rho_2nd=rho2)
    d_effs.append(d_eff)
  print("  Effective resolution (min,max): %8.3f%8.3f"%(
    flex.min(d_effs), flex.max(d_effs)))
 def functional(self, x):
     """Compute the unit-cell residual for parameter vector x.

     Raises scitbx.minimizers.FunctionalException for infeasible cells
     (any length < 1, any angle < 50) or when residual() itself fails.
     """
     if (0):  # debug switch
         print("functional(): x =", list(x))
     if (flex.min(x[:3]) < 1):
         print("FunctionalException: small length")
         raise scitbx.minimizers.FunctionalException
     if (flex.min(x[3:]) < 50):
         print("FunctionalException: small angle")
         raise scitbx.minimizers.FunctionalException
     try:
         result = residual(self.two_thetas_obs,
                           self.miller_indices,
                           self.wavelength,
                           unit_cell=uctbx.unit_cell(iter(x)))
     except KeyboardInterrupt:
         raise
     # Python 3: "except Exception, e" is a syntax error; use "as".
     except Exception as e:
         print("FunctionalException:", str(e))
         raise scitbx.minimizers.FunctionalException
     # NOTE(review): 'result' is computed but never returned in this snippet;
     # confirm against upstream whether a trailing 'return result' was lost.
def resolution_fit(d_star_sq, y_obs, model, limit, sel=None):
    """Estimate a resolution limit based on the input merging statistics

    The function defined by `model` will be fit to the input `d_star_sq` and `y_obs`.
    The estimated resolution limit is chosen as the `d_star_sq` value at which the
    fitted function equals `limit`.

    Args:
        d_star_sq (scitbx.array_family.flex.double): The high resolution limits of the
            resolution bins in units 1/d*2
        y_obs (scitbx.array_family.flex.double): The statistic against which to fit the
            function `model`
        model: The function to fit against `y_obs`. Must be callable, taking as input x
            (d_star_sq) and y (the metric to be fitted) values, returning the fitted
            y(x) values.
        limit (float): The resolution limit criterion.
        sel (scitbx.array_family.flex.bool): An optional selection to apply to the
            `d_star_sq` and `y_obs` values.

    Returns: The estimated resolution limit in units of Å^-1

    Raises:
        RuntimeError: Raised if no `y_obs` values remain after application of the
        selection `sel`
    """
    # Explicit None test: the previous `if not sel` truth-tested the flex
    # array itself and would also treat an empty selection as "not provided".
    if sel is None:
        sel = flex.bool(len(d_star_sq), True)
    # Non-mutating combination: `sel &= ...` modified a caller-supplied
    # selection array in place.
    sel = sel & (y_obs > 0)
    y_obs = y_obs.select(sel)
    d_star_sq = d_star_sq.select(sel)

    if not len(y_obs):
        raise RuntimeError("No reflections left for fitting")
    y_fit = model(d_star_sq, y_obs, 6)
    logger.debug(
        tabulate(
            [("d*2", "d", "obs", "fit")]
            + [
                (ds2, uctbx.d_star_sq_as_d(ds2), yo, yf)
                for ds2, yo, yf in zip(d_star_sq, y_obs, y_fit)
            ],
            headers="firstrow",
        )
    )

    # If the statistic never drops to the limit, all data are usable:
    # report the highest resolution present in the input.
    if flex.min(y_obs) > limit:
        d_min = 1.0 / math.sqrt(flex.max(d_star_sq))
    else:
        try:
            d_min = 1.0 / math.sqrt(interpolate_value(d_star_sq, y_fit, limit))
        except RuntimeError as e:
            logger.debug(f"Error interpolating value: {e}")
            d_min = None

    return ResolutionResult(d_star_sq, y_obs, y_fit, d_min)
Example #48
0
def exercise_with_pdb(verbose):
    """Exercise geometry restraints built from a small PDB model.

    Writes the embedded ``enk_pdb`` string to a temporary file, runs
    pdb_interpretation on it, and asserts on nonbonded distances, the
    simple edge list, bond-slack filtering, symmetry discarding and
    rigid clusters.

    Args:
        verbose: When true, interpretation output goes to stdout;
            otherwise it is captured in a StringIO and discarded.
    """
    # Requires both the mmtbx module and the chem_data repository;
    # skip quietly if either is missing so the test suite still runs.
    if (not libtbx.env.has_module(name="mmtbx")):
        print("Skipping exercise_with_pdb():", \
          "mmtbx.monomer_library.pdb_interpretation not available")
        return
    if (libtbx.env.find_in_repositories(relative_path="chem_data") is None):
        print(
            "Skipping exercise_with_pdb(): chem_data directory not available")
        return
    if (verbose):
        out = sys.stdout
    else:
        out = StringIO()
    with open("tmp_cctbx_geometry_restraints.pdb", "w") as f:
        f.write(enk_pdb)
    # Keep input atom order so the index-based assertions below hold.
    pdb_interpretation_params = pdb_interpretation.master_params.extract()
    pdb_interpretation_params.sort_atoms = False
    processed_pdb_file = pdb_interpretation.run(
        args=["tmp_cctbx_geometry_restraints.pdb"],
        strict_conflict_handling=False,
        params=pdb_interpretation_params,
        log=out,
    )
    geo = processed_pdb_file.geometry_restraints_manager()
    site_labels = processed_pdb_file.xray_structure().scatterers() \
      .extract_labels()
    #
    assert approx_equal(flex.min(geo.nonbonded_model_distances()), 0.4777342)
    #
    # Force pair proxies to be regenerated on next use.
    geo._sites_cart_used_for_pair_proxies = None
    #
    sel0 = geo.simple_edge_list()
    assert len(sel0) == 46
    assert sel0[:4] == [(0, 1), (0, 8), (0, 11), (1, 2)]
    assert sel0[-4:] == [(42, 43), (42, 44), (45, 46), (45, 47)]
    # Bonds with non-zero slack drop out of the edge list unless their
    # slack is within omit_slack_greater_than.
    geo.bond_params_table[13][14].slack = 0.1
    geo.bond_params_table[28][30].slack = 0.3
    sel = geo.simple_edge_list()
    assert sorted(set(sel0) - set(sel)) == [(13, 14), (28, 30)]
    sel = geo.simple_edge_list(omit_slack_greater_than=0.2)
    assert sorted(set(sel0) - set(sel)) == [(28, 30)]
    #
    d = geo.discard_symmetry(new_unit_cell=(10, 10, 10, 90, 90, 90))
    assert d.site_symmetry_table.special_position_indices().size() == 0
    #
    clusters = geo.rigid_clusters_due_to_dihedrals_and_planes(
        constrain_dihedrals_with_sigma_less_than=10)
    assert sorted([tuple(sorted(c))
                   for c in clusters]) == [(0, 8, 10, 15), (0, 8, 12, 15),
                                           (1, 2, 3, 4, 5, 6, 7, 9),
                                           (5, 6, 7, 9), (12, 13, 14, 19),
                                           (12, 13, 16, 19), (16, 17, 18, 29),
                                           (16, 17, 20, 29), (20, 28, 30, 37),
                                           (20, 28, 31, 37),
                                           (21, 22, 23, 24, 25, 26, 27)]
Example #49
0
def miller_array_as_phases_phs(self,
                               out,
                               scale_amplitudes=True,
                               phases=None,
                               phases_deg=None,
                               figures_of_merit=None):
    """Write this array to `out` in XtalView .phs format (h k l F fom phi).

    See http://www.sdsc.edu/CCMS/Packages/XTALVIEW/xtalviewfaq.html

    Amplitudes are optionally rescaled so the largest value fits the
    fixed-width columns; phases default to this array's own phases in
    degrees, and the figure of merit defaults to 1.
    """
    if phases is not None:
        # phases_deg must be an explicit boolean when phases are supplied.
        assert phases_deg is True or phases_deg is False
    if self.is_complex_array():
        amps = self.amplitudes().data()
    else:
        amps = self.data()
    if scale_amplitudes:
        peak = flex.max(amps)
        if peak > 0:
            amps = (9999.99 / peak) * amps
    # The fixed-width format must exactly fill its 8-character field.
    assert len(" %7.2f" % flex.min(amps)) == 8
    assert len(" %7.2f" % flex.max(amps)) == 8
    if phases is None:
        phases = self.phases(deg=True).data()
    else:
        if hasattr(phases, "data"):
            phases = phases.data()
        if not phases_deg:
            phases = phases * (180 / math.pi)
    assert len(" %7.2f" % flex.min(phases)) == 8
    assert len(" %7.2f" % flex.max(phases)) == 8

    def emit(hkl, amp, fom, phi):
        # One reflection per line: h k l, amplitude, figure of merit, phase.
        print("%4d%4d%4d" % hkl + " %7.2f" % amp + " %7.2f" % fom
              + " %7.2f" % phi, file=out)

    if figures_of_merit is None:
        for hkl, amp, phi in zip(self.indices(), amps, phases):
            emit(hkl, amp, 1, phi)
    else:
        if hasattr(figures_of_merit, "data"):
            assert figures_of_merit.indices().all_eq(self.indices())
            figures_of_merit = figures_of_merit.data()
        assert len(" %7.2f" % flex.min(figures_of_merit)) == 8
        assert len(" %7.2f" % flex.max(figures_of_merit)) == 8
        for hkl, amp, phi, fom in zip(self.indices(), amps, phases,
                                      figures_of_merit):
            emit(hkl, amp, fom, phi)
Example #50
0
 def _show_each (edges) :
   """Print range/mean/histogram statistics for each edge-length array."""
   # NOTE(review): relies on closure variables ref_edges, labels, n_slots
   # and out from the enclosing scope (not visible here).  Python 2
   # print-statement syntax.
   for edge, ref_edge, label in zip(edges, ref_edges, labels) :
     h = flex.histogram(edge, n_slots=n_slots)
     smin, smax = flex.min(edge), flex.max(edge)
     stats = flex.mean_and_variance(edge)
     print >> out, "  %s edge" % label
     print >> out, "     range:     %6.2f - %.2f" % (smin, smax)
     print >> out, "     mean:      %6.2f +/- %6.2f on N = %d" % (
       stats.mean(), stats.unweighted_sample_standard_deviation(), edge.size())
     # Compare the observed distribution against the expected edge length.
     print >> out, "     reference: %6.2f" % ref_edge
     h.show(f=out, prefix="    ", format_cutoffs="%6.2f")
     print >> out, ""
Example #51
0
    def __init__(
        self,
        intensities,
        params,
        batches=None,
        scales=None,
        dose=None,
        report_dir=None,
        experiments=None,
    ):
        """Store the input arrays, apply resolution limits and set up binning.

        Applies any d_min/d_max filter from `params` to the intensities
        (and to batches/scales when given), derives per-dataset batch
        ranges when none were configured, optionally converts to
        anomalous arrays, and prepares the resolution binner plus merged
        intensities.
        """

        self.params = params

        d_min, d_max = params.d_min, params.d_max
        if d_min or d_max:
            # Keep all arrays on the same resolution range.
            intensities = intensities.resolution_filter(d_min=d_min,
                                                        d_max=d_max)
            if batches:
                batches = batches.resolution_filter(d_min=d_min, d_max=d_max)
            if scales:
                scales = scales.resolution_filter(d_min=d_min, d_max=d_max)

        self.intensities = intensities
        self.experiments = experiments
        self.batches = batches
        self.scales = scales
        self.dose = dose
        self.report_dir = report_dir
        self._xanalysis = None

        assert self.intensities is not None

        # No explicit batch definitions supplied: derive one batch range
        # per dataset from the observed batch numbers.
        if self.batches is not None and len(self.params.batch) == 0:
            separate = separate_unmerged(self.intensities, self.batches)
            scope = libtbx.phil.parse(batch_phil_scope)
            for dataset_id, dataset_batches in separate.batches.items():
                batch_params = scope.extract().batch[0]
                batch_params.id = dataset_id
                batch_params.range = (
                    flex.min(dataset_batches.data()),
                    flex.max(dataset_batches.data()),
                )
                self.params.batch.append(batch_params)

        if self.params.anomalous:
            self.intensities = self.intensities.as_anomalous_array()
            if self.batches is not None:
                self.batches = self.batches.as_anomalous_array()

        self.intensities.setup_binner(n_bins=self.params.resolution_bins)
        self.merged_intensities = self.intensities.merge_equivalents().array()
Example #52
0
def show_refinement_update(fmodels, selection, da_sel_refinable, prefix):
  """Print R factors plus occupancy/ADP statistics for dummy atoms (DA).

  Args:
    fmodels: container whose fmodel_xray() provides R factors and the
      xray structure.
    selection: boolean selection flagging dummy atoms (True = DA).
    da_sel_refinable: boolean selection of refinable dummy atoms.
    prefix: label prepended to the first output line.

  NOTE: Python 2 print-statement syntax.
  """
  fmt1 = "%s Rwork= %8.6f Rfree= %8.6f Number of: non-DA= %d DA= %d all= %d"
  print fmt1%(prefix, fmodels.fmodel_xray().r_work(),
    fmodels.fmodel_xray().r_free(),
    selection.count(False),selection.count(True),
    fmodels.fmodel_xray().xray_structure.scatterers().size())
  occ = fmodels.fmodel_xray().xray_structure.scatterers().extract_occupancies()
  occ_da = occ.select(selection)
  # Only report statistics when dummy atoms are actually present.
  if(occ_da.size()>0):
    occ_ma = occ.select(~selection)
    print "         non-da: occ(min,max,mean)= %6.3f %6.3f %6.3f"%(
      flex.min(occ_ma),flex.max(occ_ma),flex.mean(occ_ma))
    print "             da: occ(min,max,mean)= %6.3f %6.3f %6.3f"%(
      flex.min(occ_da),flex.max(occ_da),flex.mean(occ_da))
    # Convert u_iso (or equivalent) to B factors for reporting.
    b = fmodels.fmodel_xray().xray_structure.extract_u_iso_or_u_equiv()*\
      adptbx.u_as_b(1.)
    b_da = b.select(selection)
    b_ma = b.select(~selection)
    print "         non-da: ADP(min,max,mean)= %7.2f %7.2f %7.2f"%(
      flex.min(b_ma),flex.max(b_ma),flex.mean(b_ma))
    print "             da: ADP(min,max,mean)= %7.2f %7.2f %7.2f"%(
      flex.min(b_da),flex.max(b_da),flex.mean(b_da))
    print "da_sel_refinable:", da_sel_refinable.size(), da_sel_refinable.count(True)
def exercise_with_pdb(verbose):
  if (not libtbx.env.has_module(name="mmtbx")):
    print "Skipping exercise_with_pdb():", \
      "mmtbx.monomer_library.pdb_interpretation not available"
    return
  if (libtbx.env.find_in_repositories(relative_path="chem_data") is None):
    print "Skipping exercise_with_pdb(): chem_data directory not available"
    return
  if (verbose):
    out = sys.stdout
  else:
    out = StringIO()
  open("tmp_cctbx_geometry_restraints.pdb", "w").write(enk_pdb)
  pdb_interpretation_params = pdb_interpretation.master_params.extract()
  pdb_interpretation_params.sort_atoms=False
  processed_pdb_file = pdb_interpretation.run(
    args=["tmp_cctbx_geometry_restraints.pdb"],
    strict_conflict_handling=False,
    params=pdb_interpretation_params,
    log=out,)
  geo = processed_pdb_file.geometry_restraints_manager()
  site_labels = processed_pdb_file.xray_structure().scatterers() \
    .extract_labels()
  #
  assert approx_equal(flex.min(geo.nonbonded_model_distances()), 0.4777342)
  #
  geo._sites_cart_used_for_pair_proxies = None
  #
  sel0 = geo.simple_edge_list()
  assert len(sel0) == 46
  assert sel0[:4] == [(0, 1), (0, 8), (0, 11), (1, 2)]
  assert sel0[-4:] == [(42, 43), (42, 44), (45, 46), (45, 47)]
  geo.bond_params_table[13][14].slack = 0.1
  geo.bond_params_table[28][30].slack = 0.3
  sel = geo.simple_edge_list()
  assert sorted(set(sel0) - set(sel)) == [(13, 14), (28, 30)]
  sel = geo.simple_edge_list(omit_slack_greater_than=0.2)
  assert sorted(set(sel0) - set(sel)) == [(28, 30)]
  #
  d = geo.discard_symmetry(new_unit_cell=(10,10,10,90,90,90))
  assert d.site_symmetry_table.special_position_indices().size()==0
  #
  clusters = geo.rigid_clusters_due_to_dihedrals_and_planes(
    constrain_dihedrals_with_sigma_less_than=10)
  assert sorted([tuple(sorted(c)) for c in clusters]) == [
    (0, 8, 10, 15), (0, 8, 12, 15), (1, 2, 3, 4, 5, 6, 7, 9),
    (5, 6, 7, 9), (12, 13, 14, 19), (12, 13, 16, 19), (16, 17, 18, 29),
    (16, 17, 20, 29), (20, 28, 30, 37), (20, 28, 31, 37),
    (21, 22, 23, 24, 25, 26, 27)]
  def minimize_kbu(self, n_cycles=10):
    """Alternately refine solvent (k, b) parameters and overall b_cart.

    Runs n_cycles pairs of minimization passes (without and with
    curvatures), reverting to the previous parameter values whenever a
    pass makes the R factor worse while pushing parameters outside a
    physically sensible range.

    Args:
      n_cycles: number of (no-curvature, curvature) refinement pairs.
    """
    for use_curvatures in [False, True]*n_cycles:
      start_r = self.kbu.r_factor()
      save_k_sols = self.kbu.k_sols()
      save_b_sols = self.kbu.b_sols()
      save_b_cart = self.kbu.b_cart()
      self.set_use_scale(value = True)
      m = self.minimize_kb_once(use_curvatures=use_curvatures)
      r = self.kbu.r_factor()
      # Revert k/b if refinement worsened R while driving parameters out
      # of range.  Fix: the last test previously re-checked
      # flex.max(k_sols) > 100, which was redundant given the k_sols > 1
      # test above; it is meant to bound b_sols.
      if(r>start_r and r>1.e-2 and (flex.min(self.kbu.k_sols())<0 or
         flex.max(self.kbu.k_sols())>1 or flex.min(self.kbu.b_sols())<0 or
         flex.max(self.kbu.b_sols())>100.)):
        self.kbu.update(k_sols = save_k_sols, b_sols = save_b_sols)
      m = self.minimize_u_once()
      r = self.kbu.r_factor()
      # Bound the anisotropic scale matrix components; revert and stop
      # if refinement blew them up without improving R.
      bc = list(flex.abs(flex.double(self.kbu.b_cart())))
      if(r>start_r and r>1.e-2 and max(bc)>100):
        self.kbu.update(b_cart = save_b_cart)
        break
 def update(O, miller_index_i_seqs):
   """Record newly observed reflections and update completeness history.

   Args:
     miller_index_i_seqs: indices of the reflections observed on the
       latest image.

   NOTE: Python 2 print-statement syntax.
   """
   from cctbx.array_family import flex
   if (O.use_symmetry):
     # Map observations onto the asymmetric unit before counting.
     isel = O.i_calc.asu_iselection.select(miller_index_i_seqs)
   else:
     isel = miller_index_i_seqs
   previously_zero = O.counts.increment_and_track_up_from_zero(
     iselection=isel)
   # Number of reflections still never observed after this image.
   O.new_0 = O.currently_zero - previously_zero
   # NOTE(review): under Python 2, O.new_0/O.n_indices is integer
   # division when both operands are ints, which would pin this term at
   # 0 -- confirm n_indices is a float or true division is in effect.
   O.completeness_history.append(1-O.new_0/O.n_indices)
   O.min_count_history.append(flex.min(O.counts))
   assert O.new_0 >= 0
   if (O.new_0 == 0 and O.currently_zero != 0):
     print "Complete with %d images." % (len(O.completeness_history)-1)
     print
   O.currently_zero = O.new_0
def exercise_icosahedron(max_level=2, verbose=0):
  """Exercise pair_asu_table neighbour finding on icosahedron meshes.

  Builds subdivided icosahedra up to max_level and checks pair counts,
  distances and the fast pair generator against expected values.

  NOTE: Python 2 syntax (print statements, xrange, ``except E, e``).
  """
  for level in xrange(0,max_level+1):
    if (0 or verbose):
      print "level:", level
    icosahedron = scitbx.math.icosahedron(level=level)
    try:
      distance_cutoff = icosahedron.next_neighbors_distance()*(1+1.e-3)
      estimated_distance_cutoff = False
    except RuntimeError, e:
      # Fall back to an estimated cutoff for subdivision levels where
      # the exact next-neighbour distance is not tabulated.
      assert str(e) == "next_neighbors_distance not known."
      distance_cutoff = 0.4/(2**(level-1))
      estimated_distance_cutoff = True
    asu_mappings = crystal.direct_space_asu.non_crystallographic_asu_mappings(
      sites_cart=icosahedron.sites)
    pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
    pair_asu_table.add_all_pairs(distance_cutoff=distance_cutoff)
    if (0 or verbose):
      ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites) \
        .distances_info
      print "level", level, "min", flex.min(ps.distances)
      print "     ", " ",   "max", flex.max(ps.distances)
      assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
      if (level == 0):
        for d in ps.distances:
          assert approx_equal(d, 1.0514622242382672)
    elif (level < 2):
      # Non-verbose path: check the formatted distance listing length.
      s = StringIO()
      ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites, out=s) \
        .distances_info
      assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
      assert len(s.getvalue().splitlines()) == [72,320][level]
      del s
    if (level == 0):
      # Base icosahedron: every vertex has 5 nearest neighbours.
      assert pair_asu_table.pair_counts().all_eq(5)
    else:
      assert pair_asu_table.pair_counts().all_eq(3)
    del pair_asu_table
    max_distance = crystal.neighbors_fast_pair_generator(
      asu_mappings=asu_mappings,
      distance_cutoff=distance_cutoff).max_distance_sq()**.5
    if (0 or verbose):
      print "max_distance:", max_distance
    if (not estimated_distance_cutoff):
      assert approx_equal(max_distance, icosahedron.next_neighbors_distance())
      assert approx_equal(max_distance/icosahedron.next_neighbors_distance(),1)
Example #57
0
 def as_miller_arrays(self,
       crystal_symmetry=None,
       force_symmetry=False,
       merge_equivalents=True,
       base_array_info=None,
       include_unmerged_data=False,
       ):
   """Convert the MTZ column groups to a list of cctbx miller arrays.

   Args:
     crystal_symmetry: optional symmetry combined with that read from
       the file via join_symmetry.
     force_symmetry: passed as ``force`` to join_symmetry.
     merge_equivalents: merge symmetry-equivalent reflections for
       double-valued columns whose sigmas are all positive.
     base_array_info: template miller.array_info for the output arrays.
     include_unmerged_data: must be False; unmerged data unsupported.

   Returns:
     list of miller arrays, one per column group in the file.
   """
   assert not include_unmerged_data, "Unmerged data not supported in MTZ"
   other_symmetry = crystal_symmetry
   if (base_array_info is None):
     base_array_info = miller.array_info(source_type="ccp4_mtz")
   result = []
   for crystal in self.crystals():
     try :
       unit_cell = crystal.unit_cell()
     except ValueError as e:
       # Re-raise as Sorry so the user sees a clean error message.
       raise Sorry(str(e))
     crystal_symmetry_from_file = cctbx.crystal.symmetry(
       unit_cell=unit_cell,
       space_group_info=self.space_group_info(),
       raise_sorry_if_incompatible_unit_cell=True)
     crystal_symmetry = crystal_symmetry_from_file.join_symmetry(
       other_symmetry=other_symmetry,
       force=force_symmetry)
     for dataset in crystal.datasets():
       base_dataset_info = base_array_info.customized_copy(
         wavelength=dataset.wavelength())
       column_groups = self.group_columns(
         crystal_symmetry_from_file=crystal_symmetry_from_file,
         crystal_symmetry=crystal_symmetry,
         base_array_info=base_dataset_info,
         dataset=dataset)
       for column_group in column_groups:
         # Only merge double data with a non-empty, all-positive sigma
         # column; otherwise keep the group as read.
         if (merge_equivalents
             and isinstance(column_group.data(), flex.double)
             and isinstance(column_group.sigmas(), flex.double)
             and column_group.sigmas().size() != 0
             and flex.min(column_group.sigmas()) > 0):
           merged_column_group = column_group.merge_equivalents().array()
           if (merged_column_group.indices().size()
               != column_group.indices().size()):
             merged_column_group.set_info(
               column_group.info().customized_copy(merged=True))
             column_group = merged_column_group
         result.append(column_group)
   # Fix: the accumulated list was previously built but never returned.
   return result
Example #58
0
 def show(self, fmodels, message, log):
   """Print a boxed summary of X-ray (and optional neutron) statistics.

   Args:
     fmodels: container providing fmodel_xray() and fmodel_neutron().
     message: title embedded in the top border of the box.
     log: output stream; nothing is printed when None.

   NOTE: Python 2 print-statement syntax.
   """
   if(log is not None):
     # Top border, padded to a fixed 79-character box width.
     print >> log, "|-"+message+"-"*(79-len("|-"+message+"|"))+"|"
     fm_x, fm_n = fmodels.fmodel_xray(), fmodels.fmodel_neutron()
     if(fm_n is not None):
       # Neutron data present: label the X-ray section explicitly.
       print >> log, "|"+" "*36+"X-ray"+" "*36+"|"
     self.show_helper(fmodel = fm_x, log = log)
     if(fm_n is not None):
       print >> log, "|"+" "*35+"neutron"+" "*35+"|"
       self.show_helper(fmodel = fm_n, log = log)
     occupancies = fm_x.xray_structure.scatterers().extract_occupancies()
     occ_max = format_value("%4.2f", flex.max(occupancies))
     occ_min = format_value("%4.2f", flex.min(occupancies))
     number_small = format_value("%8d", (occupancies < 0.1).count(True))
     print >> log, \
       "| occupancies: max = %s  min = %s   number of occupancies < 0.1: %s |"%(
       occ_max, occ_min, number_small)
     print >> log, "|"+"-"*77+"|"
Example #59
0
  def resolution_merged_isigma(self, limit = None, log = None):
    '''Compute a resolution limit where either Mn(I/sigma) = 1.0 (limit if
    set) or the full extent of the data.

    Args:
      limit: Mn(I/sigma) threshold; defaults to self._params.misigma.
      log: optional filename to write a (1/d^2, d, observed, fit) table.

    Returns:
      The estimated resolution limit in Angstroms.
    '''

    if limit is None:
      limit = self._params.misigma

    # Per-bin Mn(I/sigma) and 1/d^2, ordered low resolution first.
    misigma_s = flex.double(
      [b.i_over_sigma_mean for b in self._merging_statistics.bins]).reversed()
    s_s = flex.double(
      [1/b.d_min**2 for b in self._merging_statistics.bins]).reversed()

    # Drop non-positive bins, which would break the logarithmic fit.
    sel = misigma_s > 0
    misigma_s = misigma_s.select(sel)
    s_s = s_s.select(sel)

    # Every bin is above the limit: the data support their full extent.
    if flex.min(misigma_s) > limit:
      return 1.0 / math.sqrt(flex.max(s_s))

    misigma_f = log_fit(s_s, misigma_s, 6)

    if log:
      # Context manager closes the file deterministically; the previous
      # explicit open/close leaked the handle on write errors.
      with open(log, 'w') as fout:
        for j, s in enumerate(s_s):
          d = 1.0 / math.sqrt(s)
          fout.write('%f %f %f %f\n' % (s, d, misigma_s[j], misigma_f[j]))

    try:
      r_misigma = 1.0 / math.sqrt(
          interpolate_value(s_s, misigma_f, limit))
    except Exception:
      # Narrowed from a bare except (which also swallowed
      # KeyboardInterrupt); fall back to the full extent of the data.
      r_misigma = 1.0 / math.sqrt(flex.max(s_s))

    if self._params.plot:
      plot = resolution_plot(ylabel='Merged I/sigma')
      plot.plot(s_s, misigma_f, label='fit')
      plot.plot(s_s, misigma_s, label='Merged I/sigma')
      plot.plot_resolution_limit(r_misigma)
      plot.savefig('misigma.png')

    return r_misigma
Example #60
0
  def resolution_completeness(self, limit = None, log = None):
    '''Compute a resolution limit where completeness < 0.5 (limit if
    set) or the full extent of the data. N.B. this completeness is
    with respect to the *maximum* completeness in a shell, to reflect
    triclinic cases.

    Args:
      limit: completeness threshold; defaults to
        self._params.completeness.
      log: optional filename to write a (1/d^2, d, observed, fit) table.

    Returns:
      The estimated resolution limit in Angstroms.
    '''

    if limit is None:
      limit = self._params.completeness

    # Per-bin completeness and 1/d^2, ordered low resolution first.
    comp_s = flex.double(
      [b.completeness for b in self._merging_statistics.bins]).reversed()
    s_s = flex.double(
      [1/b.d_min**2 for b in self._merging_statistics.bins]).reversed()

    # Every bin is above the limit: the data support their full extent.
    # NOTE(review): this early return compares the *unscaled* limit,
    # while interpolation below uses the max-scaled rlimit -- confirm
    # the asymmetry is intended.
    if flex.min(comp_s) > limit:
      return 1.0 / math.sqrt(flex.max(s_s))

    comp_f = fit(s_s, comp_s, 6)

    # Scale the cutoff by the best completeness in any shell so that,
    # e.g., triclinic data with inherently sub-100% completeness work.
    rlimit = limit * max(comp_s)

    if log:
      # Context manager closes the file deterministically; the previous
      # explicit open/close leaked the handle on write errors.
      with open(log, 'w') as fout:
        for j, s in enumerate(s_s):
          d = 1.0 / math.sqrt(s)
          fout.write('%f %f %f %f\n' % (s, d, comp_s[j], comp_f[j]))

    try:
      r_comp = 1.0 / math.sqrt(
          interpolate_value(s_s, comp_f, rlimit))
    except Exception:
      # Interpolation failed: fall back to the full extent of the data.
      r_comp = 1.0 / math.sqrt(flex.max(s_s))

    if self._params.plot:
      plot = resolution_plot(ylabel='Completeness')
      plot.plot(s_s, comp_f, label='fit')
      plot.plot(s_s, comp_s, label='Completeness')
      plot.plot_resolution_limit(r_comp)
      plot.savefig('completeness.png')

    return r_comp