Example #1
def r_split(self, other, assume_index_matching=False, use_binning=False):
    # Used in Boutet et al. (2012), which credit it to Owen et al
    # (2006).  See also R_mrgd_I in Diederichs & Karplus (1997)?
    # Barends cites Collaborative Computational Project Number 4. The
    # CCP4 suite: programs for protein crystallography. Acta
    # Crystallogr. Sect. D-Biol. Crystallogr. 50, 760-763 (1994) and
    # White, T. A. et al. CrystFEL: a software suite for snapshot
    # serial crystallography. J. Appl. Cryst. 45, 335–341 (2012).

    if not use_binning:
        assert other.indices().size() == self.indices().size()
        if self.data().size() == 0:
            return None

        if assume_index_matching:
            (o, c) = (self, other)
        else:
            (o, c) = self.common_sets(other=other, assert_no_singles=True)

        # The case where the denominator is less or equal to zero is
        # pathological and should never arise in practice.
        den = flex.sum(flex.abs(o.data() + c.data()))
        assert den > 0
        return math.sqrt(2) * flex.sum(flex.abs(o.data() - c.data())) / den

    assert self.binner() is not None  # a binner must be set up for binned output
    results = []
    for i_bin in self.binner().range_all():
        sel = self.binner().selection(i_bin)
        results.append(
            r_split(self.select(sel), other.select(sel), assume_index_matching=assume_index_matching, use_binning=False)
        )
    return binned_data(binner=self.binner(), data=results, data_fmt="%7.4f")
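A minimal, dependency-free sketch of the same statistic, assuming two already-matched intensity lists (the names i1/i2 are hypothetical stand-ins for the common sets selected above):

import math

def r_split_plain(i1, i2):
    # R_split = sqrt(2) * sum|I1 - I2| / sum|I1 + I2|,
    # matching the unbinned branch above
    den = sum(abs(a + b) for a, b in zip(i1, i2))
    assert den > 0
    return math.sqrt(2) * sum(abs(a - b) for a, b in zip(i1, i2)) / den

print(r_split_plain([10.0, 20.0, 30.0], [11.0, 19.0, 31.0]))  # ~0.035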
Example #2
def r1_factor(self, other, scale_factor=None, assume_index_matching=False, use_binning=False):
    """Get the R1 factor according to this formula

    .. math::
       R1 = \dfrac{\sum{||F| - k|F'||}}{\sum{|F|}}

    where F is self.data() and F' is other.data() and
    k is the factor to put F' on the same scale as F"""
    assert not use_binning or self.binner() is not None
    assert other.indices().size() == self.indices().size()
    if not use_binning:
        if self.data().size() == 0:
            return None
        if assume_index_matching:
            o, c = self, other
        else:
            o, c = self.common_sets(other=other, assert_no_singles=True)
        o = flex.abs(o.data())
        c = flex.abs(c.data())
        if scale_factor is None:
            den = flex.sum(c * c)
            if den != 0:
                c *= flex.sum(o * c) / den
        else:
            c *= scale_factor
        return flex.sum(flex.abs(o - c)) / flex.sum(o)
    results = []
    for i_bin in self.binner().range_all():
        sel = self.binner().selection(i_bin)
        results.append(r1_factor(self.select(sel), other.select(sel), scale_factor.data[i_bin], assume_index_matching))
    return binned_data(binner=self.binner(), data=results, data_fmt="%7.4f")
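The least-squares scale in the scale_factor=None branch is k = Σ|F||F'| / Σ|F'|²; a small self-contained sketch of the whole formula (plain lists, hypothetical names, not the miller-array code path):

def r1_plain(f_obs, f_calc, k=None):
    # the k minimizing sum (|F| - k|F'|)^2 is sum(|F||F'|) / sum(|F'|^2)
    if k is None:
        den = sum(c * c for c in f_calc)
        k = sum(o * c for o, c in zip(f_obs, f_calc)) / den if den else 1.0
    return sum(abs(o - k * c) for o, c in zip(f_obs, f_calc)) / sum(f_obs)

print(r1_plain([100.0, 50.0, 25.0], [90.0, 55.0, 20.0]))  # ~0.09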
Example #3
def r_factor(x,y, use_scale):
  try:
    x = flex.abs(x.data())
    y = flex.abs(y.data())
  except Exception: pass
  sc=1
  if(use_scale): sc = scale(x,y)
  return flex.sum(flex.abs(x-sc*y))/flex.sum(x)
Example #4
def run_00():
  time_aniso_u_scaler = 0
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    #print symbol, "-"*50
    space_group_info = sgtbx.space_group_info(symbol = symbol)
    xrs = random_structure.xray_structure(
      space_group_info  = space_group_info,
      elements          = ["N"]*100,
      volume_per_atom   = 50.0,
      random_u_iso      = True)
    # XXX add a method to adptbx to do this
    point_group = sgtbx.space_group_info(
      symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=point_group,
      reciprocal_space=True)
    u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
      adptbx.random_u_cart(u_scale=1,u_min=0.1))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    b_cart_start=adptbx.u_as_b(adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
    #
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
    b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
           b_cart_start[3],b_cart_start[4],b_cart_start[5]]
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
    #
    #print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
    F = xrs.structure_factors(d_min = 2.0).f_calc()
    u_star = adptbx.u_cart_as_u_star(
      F.unit_cell(), adptbx.b_as_u(b_cart_start))
    fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
    fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
    f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
    t0 = time.time()
    #
    obj = bulk_solvent.aniso_u_scaler(
      f_model_abs    = flex.abs(fc.data()),
      f_obs          = f_obs.data(),
      miller_indices = f_obs.indices(),
      adp_constraint_matrix = adp_constraints.gradient_sum_matrix())
    time_aniso_u_scaler += (time.time()-t0)
    b_cart_final = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
      adp_constraints.all_params(tuple(obj.u_star_independent))))
    #
    obj = bulk_solvent.aniso_u_scaler(
      f_model_abs    = flex.abs(fc.data()),
      f_obs          = f_obs.data(),
      miller_indices = f_obs.indices())
    b_cart_final2 = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
      tuple(obj.u_star)))
    #
    assert approx_equal(b_cart_final, b_cart_final2)
    #print "Output b_cart:", " ".join(["%8.4f"%i for i in b_cart_final])
    assert approx_equal(b_cart_start, b_cart_final, 1.e-4)
  print "Time (aniso_u_scaler only): %6.4f"%time_aniso_u_scaler
Example #5
def scale(x, y):
  assert type(x) == type(y)
  if(type(x) == miller.array):
    x = x.data()
    y = y.data()
  x = flex.abs(x)
  y = flex.abs(y)
  d = flex.sum(y*y)
  if d == 0: return 1
  else:
    return flex.sum(x*y)/d
Example #6
def run(args):
  from scitbx.array_family import flex
  from scitbx import matrix
  from dials.util.command_line import Importer
  from dials.algorithms.reflection_basis import zeta_factor

  importer = Importer(args, check_format=False)
  assert importer.datablocks is not None
  assert len(importer.datablocks) == 1
  datablock = importer.datablocks[0]
  imagesets = datablock.extract_imagesets()
  assert len(imagesets) == 1
  imageset = imagesets[0]

  detector = imageset.get_detector()
  beam = imageset.get_beam()
  goniometer = imageset.get_goniometer()
  assert goniometer is not None
  assert len(detector) == 1
  panel = detector[0]

  lab_coords = flex.vec3_double(flex.grid(panel.get_image_size()))

  for i in range(panel.get_image_size()[0]):
    for j in range(panel.get_image_size()[1]):
      lab_coords[i,j] = panel.get_lab_coord(panel.pixel_to_millimeter((i,j)))

  axis = matrix.col(goniometer.get_rotation_axis())
  s0 = matrix.col(beam.get_s0())
  s1 = (lab_coords.as_1d()/lab_coords.as_1d().norms()) * s0.length()
  s1_cross_s0 = s1.cross(flex.vec3_double(s1.size(), s0.elems))
  p_volume = flex.abs(s1_cross_s0.dot(axis.elems))
  p_volume.reshape(flex.grid(panel.get_image_size()))
  zeta = flex.abs(zeta_factor(axis.elems, s0.elems, s1.as_1d()))
  zeta.reshape(flex.grid(panel.get_image_size()))

  from matplotlib import pyplot
  pyplot.figure()
  pyplot.title('parallelepiped volume')
  CS = pyplot.contour(p_volume.matrix_transpose().as_numpy_array(), 10)
  pyplot.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
  pyplot.axes().set_aspect('equal')
  pyplot.show()
  pyplot.title('zeta factor')
  CS = pyplot.contour(zeta.matrix_transpose().as_numpy_array(), 10)
  pyplot.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
  pyplot.axes().set_aspect('equal')
  pyplot.show()
Example #7
def another_example(np=41,nt=5):
  x = flex.double( range(np) )/(np-1)
  y = 0.99*flex.exp(-x*x*0.5)
  y = -flex.log(1.0/y-1)
  w = y*y/1.0
  d = (flex.random_double(np)-0.5)*w
  y_obs = y+d

  y = 1.0/( 1.0 + flex.exp(-y) )

  fit_w = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
                                              x,
                                              y_obs,
                                              w )
  fit_w_f = chebyshev_polynome(
    nt, fit_w.low_limit, fit_w.high_limit, fit_w.coefs)


  fit_nw = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
                                              x,
                                              y_obs)
  fit_nw_f = chebyshev_polynome(
    nt, fit_nw.low_limit, fit_nw.high_limit, fit_nw.coefs)
  print
  print "Coefficients from weighted lsq"
  print list( fit_w.coefs )
  print "Coefficients from non-weighted lsq"
  print list( fit_nw.coefs )
  assert flex.max( flex.abs(fit_nw.coefs-fit_w.coefs) ) > 0
Example #8
def exercise_complex_to_complex_3d():
  print "complex_to_complex_3d"
  for n_complex,n_repeats in [((100,80,90),2), ((200,160,180),1)]:
    print "  dimensions:", n_complex
    print "  repeats:", n_repeats
    np = n_complex[0]*n_complex[1]*n_complex[2]
    d0 = (flex.random_double(size=np)*2-1) * flex.polar(
      1, flex.random_double(size=np)*2-1)
    d0.reshape(flex.grid(n_complex))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftw3tbx.complex_to_complex_3d_in_place(data=d, exp_sign=-1)
      fftw3tbx.complex_to_complex_3d_in_place(data=d, exp_sign=+1)
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    rw = d / np
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftpack.complex_to_complex_3d(n_complex).forward(d)
      fftpack.complex_to_complex_3d(n_complex).backward(d)
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
    rp = d / np
    #
    assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #9
def r_factor(x, r1, r2, eps=1.e-6):
  sel  = x > -eps
  sel &= x <  eps
  assert sel.count(True) == 1
  r1_ = r1.select(sel)
  r2_ = r2.select(sel)
  return flex.abs( r1_-r2_ )[0]
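The selection picks the single sample where x lies within ±eps of zero and returns the absolute difference of the two curves there; a plain-list sketch of the same idea (hypothetical names):

def value_diff_at_zero(x, r1, r2, eps=1.e-6):
    # find the one sample with |x| < eps, compare r1 and r2 there
    idx = [i for i, xi in enumerate(x) if -eps < xi < eps]
    assert len(idx) == 1
    return abs(r1[idx[0]] - r2[idx[0]])

print(value_diff_at_zero([-0.5, 0.0, 0.5], [1.0, 2.0, 3.0], [1.1, 2.2, 3.3]))  # ~0.2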
Example #10
def _get_sorted (O,
        unit_cell,
        sites_cart,
        pdb_atoms,
        by_value="residual",
        use_segids_in_place_of_chainids=False) :
  assert by_value in ["residual", "delta"]
  if (O.size() == 0): return []
  import cctbx.geometry_restraints
  from scitbx.array_family import flex
  deltas = flex.abs(O.deltas(sites_cart=sites_cart))
  residuals = O.residuals(sites_cart=sites_cart)
  if (by_value == "residual"):
    data_to_sort = residuals
  elif (by_value == "delta"):
    data_to_sort = deltas
  i_proxies_sorted = flex.sort_permutation(data=data_to_sort, reverse=True)
  sorted_table = []
  for i_proxy in i_proxies_sorted:
    proxy = O[i_proxy]
    sigma = cctbx.geometry_restraints.weight_as_sigma(proxy.weight)
    score = sqrt(residuals[i_proxy]) / sigma
    proxy_atoms = get_atoms_info(pdb_atoms, iselection=proxy.i_seqs,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    sorted_table.append((proxy, proxy_atoms))
  return sorted_table
Example #11
  def finite_difference_test(self,g):
    """
    Run basic gradient test. compare numerical estimate gradient to
    the largest calculated one. using t'(x)=(t(x+d)-t(x-d))/(2d)

    Argument:
     g : gradient, flex array
    """
    if(self.fmodel.r_work()>1.e-3):
      g = g.as_double()
      d = 1.e-5
      # find the index of the max gradient value
      i_g_max = flex.max_index(flex.abs(g))
      x_d = self.x.deep_copy()  # copy, so self.x is not modified in place
      # calc t(x+d)
      x_d[i_g_max] = self.x[i_g_max] + d
      self.update_model_sites(x = x_d)
      self.fmodel.update_xray_structure(update_f_calc=True)
      t1,_ = self.compute_functional_and_gradients(compute_gradients=False)
      # calc t(x-d)
      x_d[i_g_max] = self.x[i_g_max] - d
      self.update_model_sites(x = x_d)
      del x_d
      self.fmodel.update_xray_structure(update_f_calc=True)
      t2,_ = self.compute_functional_and_gradients(compute_gradients=False)
      # Return fmodel to the correct coordinates values
      self.update_model_sites(x = self.x)
      self.fmodel.update_xray_structure(update_f_calc=True)
      self.buffer_max_grad.append(g[i_g_max])
      self.buffer_calc_grad.append((t1-t2)/(d*2))
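The same central-difference check on a toy function, free of the fmodel machinery (a sketch; f and grad are hypothetical):

def check_gradient(f, grad, x, d=1.e-5):
    # compare the analytical component of largest magnitude against
    # the central difference (f(x+d) - f(x-d)) / (2d)
    g = grad(x)
    i = max(range(len(g)), key=lambda j: abs(g[j]))
    xp = list(x); xp[i] += d
    xm = list(x); xm[i] -= d
    return g[i], (f(xp) - f(xm)) / (2 * d)

f = lambda x: x[0] ** 2 + 3 * x[1] ** 2
grad = lambda x: [2 * x[0], 6 * x[1]]
print(check_gradient(f, grad, [1.0, 2.0]))  # both values close to 12.0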
Example #12
 def jacobian(self, x):
   analytical = self.jacobian_analytical(x=x)
   if (self.check_with_finite_differences):
     finite = self.jacobian_finite(x=x)
     scale = max(1, flex.max(flex.abs(analytical)))
     assert approx_equal(analytical/scale, finite/scale, 1.e-5)
   return analytical
Example #13
 def load_reflections_file (self, file_name, **kwds) :
   if (isinstance(file_name, unicode)) :
     file_name = str(file_name)
   if (file_name != "") :
     from iotbx.reflection_file_reader import any_reflection_file
     from cctbx import miller
     from scitbx.array_family import flex
     try :
       hkl_file = any_reflection_file(file_name)
     except Exception, e :
       raise Sorry(str(e))
     arrays = hkl_file.as_miller_arrays(merge_equivalents=True)
     f_obs = f_model = None
     for array in arrays :
       labels = array.info().label_string()
       if labels.startswith("F-obs-filtered") :
         f_obs = array
       elif labels.startswith("F-model") :
         f_model = array
     if (f_obs is None) or (f_model is None) :
       raise Sorry("This does not appear to be a phenix.refine output "+
         "file.  The MTZ file should contain data arrays for the filtered "+
         "amplitudes (F-obs) and F-model.")
     f_delta = f_obs.customized_copy(sigmas=None,
       data=flex.abs(f_obs.data()-abs(f_model).data())).set_info(
         miller.array_info(labels=["abs(F_obs - F_model)"]))
     self.set_miller_array(f_delta)
Example #14
def apply_default_filter(database_dict, d_min, max_models_for_default_filter,
                         key = "high_resolution"):
  database_dict = order_by_value(database_dict = database_dict, key = key)
  values = flex.double()
  for v in database_dict[key]: values.append(float(v))
  diff = flex.abs(values-d_min)
  min_val = flex.min(diff)
  i_min_sel = (diff == min_val).iselection()
  assert i_min_sel.size() > 0
  i_min = i_min_sel[i_min_sel.size()//2]
  i_l = max(0, i_min-max_models_for_default_filter//2)
  i_r = min(values.size()-1, i_min+max_models_for_default_filter//2)
  #
  print "apply_default_filter:"
  print "  found data points dmin->higher =", abs(i_l-i_min)
  print "  found data points dmin->lower  =", abs(i_r-i_min)
  imm = min(abs(i_l-i_min), abs(i_r-i_min))
  i_l, i_r = i_min-imm, i_min+imm
  if (imm == 0) :
    if (i_l == 0) :
      i_r = 100
      print "  used data points dmin->higher =", 0
      print "  used data points dmin->lower  =", i_r
    elif (i_l == i_r == len(values) - 1) :
      i_l -= 100
      print "  used data points dmin->higher =", i_l
      print "  used data points dmin->lower  =", 0
  else :
    print "  used data points dmin->higher =", imm
    print "  used data points dmin->lower  =", imm
  #
  selection = flex.bool(values.size(), False)
  for i in xrange(i_l,i_r): selection[i] = True
  return select_dict(database_dict = database_dict, selection = selection)
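Stripped of the printing and dict bookkeeping, the core is: find the entry closest to d_min, then take a window around it, shrunk so it is symmetric. A small sketch (hypothetical names, not the phenix code path):

def nearest_window(values, target, half_width):
    # index of the value closest to target, then a symmetric window around it
    diffs = [abs(v - target) for v in values]
    i_min = diffs.index(min(diffs))
    i_l = max(0, i_min - half_width)
    i_r = min(len(values) - 1, i_min + half_width)
    imm = min(i_min - i_l, i_r - i_min)  # shrink to the symmetric part
    return i_min - imm, i_min + imm

print(nearest_window([1.0, 1.5, 2.1, 2.9, 3.5], 2.0, 2))  # (0, 4)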
Example #15
def solve_a_x_eq_b_min_norm_given_a_sym_b_col(
      a, b,
      relative_min_abs_pivot=1e-12,
      absolute_min_abs_pivot=0,
      back_substitution_epsilon_factor=10):
  """\
Assumes a is symmetric, without checking to avoid overhead.

Special case of
  generalized_inverse(a) * b
taking advantage of the fact that a is real and symmetric.

Opportunistic algorithm: first assumes that a has full rank. If this
is true, solves a*x=b for x using simple back-substitution.

Only if a is rank-deficient:
  To obtain the x with minimum norm, transforms a to a basis formed
  by its eigenvectors, solves a*x=b in this basis, then transforms
  x back to the original basis system.

Returns None if a*x=b has no solution.
"""
  if (isinstance(a, matrix.rec)):
    a = a.as_flex_double_matrix()
  if (isinstance(b, matrix.rec)):
    assert b.n_columns() == 1
    b = flex.double(b)
  if (relative_min_abs_pivot is None):
    min_abs_pivot = 0
  else:
    min_abs_pivot = \
       max(a.all()) \
     * flex.max(flex.abs(a)) \
     * relative_min_abs_pivot
  min_abs_pivot = max(min_abs_pivot, absolute_min_abs_pivot)
  epsilon = min_abs_pivot * back_substitution_epsilon_factor
  aw = a.deep_copy()
  bw = b.deep_copy()
  ef = row_echelon_full_pivoting(
    a_work=aw, b_work=bw, min_abs_pivot=min_abs_pivot)
  if (ef.nullity == 0):
    x = ef.back_substitution(
      free_values=flex.double(ef.nullity), epsilon=epsilon)
  else:
    assert a.is_square_matrix()
    aw = a.deep_copy()
    es = scitbx.linalg.eigensystem.real_symmetric(aw)
    c = es.vectors() # may be left-handed, but that's OK here
    ct = c.matrix_transpose()
    aw = c.matrix_multiply(aw).matrix_multiply(ct)
    bw = c.matrix_multiply(b)
    max_rank = ef.rank
    ef = row_echelon_full_pivoting(a_work=aw, b_work=bw, max_rank=max_rank)
    assert ef.rank == max_rank
    x = ef.back_substitution(
      free_values=flex.double(ef.nullity, 0), epsilon=epsilon)
    if (x is not None):
      x = ct.matrix_multiply(x)
  return x
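For intuition: the minimum-norm solution this function produces for a rank-deficient symmetric a is what the Moore-Penrose pseudo-inverse gives. A numpy sketch of that equivalence (not the scitbx code path):

import numpy as np

a = np.array([[2.0, 0.0],
              [0.0, 0.0]])        # symmetric, rank-deficient
b = np.array([4.0, 0.0])
x = np.linalg.pinv(a) @ b          # minimum-norm solution: [2, 0]
assert np.allclose(a @ x, b)       # consistent system, so a solution exists
print(x)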
Example #16
 def hessian(self, x):
   analytical = self.hessian_analytical(x=x)
   if (self.check_with_finite_differences):
     finite = self.hessian_finite(x=x)
     scale = max(1, flex.max(flex.abs(analytical)))
     assert approx_equal(analytical/scale, finite/scale,
       self.check_hessian_tolerance)
   return analytical
Example #17
 def gradients(self,x, f_x=None):
   analytical = self.gradients_analytical(x=x, f_x=f_x)
   if (self.check_with_finite_differences):
     finite = self.gradients_finite(x=x)
     scale = max(1, flex.max(flex.abs(analytical)))
     assert approx_equal(analytical/scale, finite/scale,
       self.check_gradients_tolerance)
   return analytical
Example #18
 def sort(self):
   perm = flex.sort_permutation(
     data=flex.abs(flex.double(self.array_of_a())),
     reverse=True)
   return sum(
     flex.select(self.array_of_a(), perm),
     flex.select(self.array_of_b(), perm),
     self.c(),
     self.use_c())
Example #19
def zero_test(asu_mask, fc, tolerance = 1.0E-9):
  radii = []
  sites = []
  assert len(radii) == len(sites)
  asu_mask.compute( sites, radii )
  fm_asu = asu_mask.structure_factors( fc.indices() )
  fm_asu = fc.set().array( data = fm_asu )
  max_zero = flex.max( flex.abs(fm_asu.data()) )
  assert isinstance(max_zero, float), max_zero.__class__
  assert max_zero < tolerance, "Maximum deviation from zero = "+str(max_zero)
Example #20
 def do_scale_shifts(self, max_shift_over_esd):
   x = self.non_linear_ls.step()
   esd = self.non_linear_ls.covariance_matrix().matrix_packed_u_diagonal()
   x_over_esd = flex.abs(x/flex.sqrt(esd))
   max_val = flex.max(x_over_esd)
   if max_val < self.convergence_as_shift_over_esd:
     return True
   if max_val > max_shift_over_esd:
     shift_scale = max_shift_over_esd/max_val
     x *= shift_scale
   return False
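The logic in plain Python: convergence is declared when the largest |shift/esd| falls below the threshold, and oversized steps are damped down to max_shift_over_esd (a sketch with hypothetical names; esd holds variances, as in the packed diagonal above):

import math

def scale_shifts(x, esd, max_shift_over_esd, convergence):
    x_over_esd = [abs(xi) / math.sqrt(e) for xi, e in zip(x, esd)]
    max_val = max(x_over_esd)
    if max_val < convergence:
        return x, True                       # converged
    if max_val > max_shift_over_esd:
        s = max_shift_over_esd / max_val     # damp the whole step
        x = [xi * s for xi in x]
    return x, False

print(scale_shifts([0.5, -2.0], [0.01, 0.04], 5.0, 0.01))  # ([0.25, -1.0], False)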
Example #21
def zero_test(asu_mask, fc, tolerance=1.0E-9):
    radii = []
    sites = []
    assert len(radii) == len(sites)
    asu_mask.compute(sites, radii)
    fm_asu = asu_mask.structure_factors(fc.indices())
    fm_asu = fc.set().array(data=fm_asu)
    max_zero = flex.max(flex.abs(fm_asu.data()))
    assert isinstance(max_zero, float), max_zero.__class__
    assert max_zero < tolerance, "Maximum deviation from zero = " + str(
        max_zero)
Example #22
 def do_scale_shifts(self, max_shift_over_esd):
   x = self.non_linear_ls.step()
   esd = self.non_linear_ls.covariance_matrix().matrix_packed_u_diagonal()
   x_over_esd = flex.abs(x/flex.sqrt(esd))
   max_val = flex.max(x_over_esd)
   if max_val < self.convergence_as_shift_over_esd:
     return True
   if max_val > max_shift_over_esd:
     shift_scale = max_shift_over_esd/max_val
     x *= shift_scale
   return False
Example #23
    def __init__(self, crystal, beam, detector, goniometer, scan, reflections):
        """Initialise the algorithm. Calculate the list of tau and zetas.

        Params:
            reflections The list of reflections
            experiment The experiment object

        """
        from dials.array_family import flex

        # Get the oscillation width
        dphi2 = scan.get_oscillation(deg=False)[1] / 2.0

        # Calculate a list of angles and zetas
        tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector,
                                                 goniometer, scan, reflections)

        # Calculate zeta * (tau +- dphi / 2) / math.sqrt(2)
        self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)
        self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)
Example #24
def exercise_real_to_complex_3d():
  print("real_to_complex_3d")
  for n_real,n_repeats in [((100,80,90),8),
                           ((200,160,180),2),
                           ((300,240,320),1)]:
    print("  dimensions:", n_real)
    print("  repeats:", n_repeats)
    fft = fftpack.real_to_complex_3d(n_real)
    m_real = fft.m_real()
    np = n_real[0]*n_real[1]*n_real[2]
    mp = m_real[0]*m_real[1]*m_real[2]
    d0 = flex.random_double(size=mp)*2-1
    d0.reshape(flex.grid(m_real).set_focus(n_real))
    #
    t0 = time.time()
    for i_trial in range(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print("    overhead: %.2f seconds" % overhead)
    #
    t0 = time.time()
    for i_trial in range(n_repeats):
      d = d0.deep_copy()
      c = fftw3tbx.real_to_complex_3d_in_place(data=d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftw3tbx.complex_to_real_3d_in_place(data=c, n=n_real)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print("    fftw:     %.2f seconds" % (time.time()-t0-overhead))
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rw = d / np
    #
    t0 = time.time()
    for i_trial in range(n_repeats):
      d = d0.deep_copy()
      c = fftpack.real_to_complex_3d(n_real).forward(d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftpack.real_to_complex_3d(n_real).backward(c)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print("    fftpack:  %.2f seconds" % (time.time()-t0-overhead))
    sys.stdout.flush()
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rp = d / np
      #
      assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #25
def exercise_real_to_complex_3d():
  print "real_to_complex_3d"
  for n_real,n_repeats in [((100,80,90),8),
                           ((200,160,180),2),
                           ((300,240,320),1)]:
    print "  dimensions:", n_real
    print "  repeats:", n_repeats
    fft = fftpack.real_to_complex_3d(n_real)
    m_real = fft.m_real()
    np = n_real[0]*n_real[1]*n_real[2]
    mp = m_real[0]*m_real[1]*m_real[2]
    d0 = flex.random_double(size=mp)*2-1
    d0.reshape(flex.grid(m_real).set_focus(n_real))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftw3tbx.real_to_complex_3d_in_place(data=d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftw3tbx.complex_to_real_3d_in_place(data=c, n=n_real)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rw = d / np
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftpack.real_to_complex_3d(n_real).forward(d)
      assert c.all() == fft.n_complex()
      assert c.focus() == fft.n_complex()
      assert c.id() == d.id()
      r = fftpack.real_to_complex_3d(n_real).backward(c)
      assert r.all() == fft.m_real()
      assert r.focus() == fft.n_real()
      assert r.id() == d.id()
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
    if (maptbx is not None):
      maptbx.unpad_in_place(map=d)
      rp = d / np
      #
      assert flex.max(flex.abs(rw-rp)) < 1.e-6
Example #26
File: npp.py Project: isikhar/xia2
def npp(hklin):
    reader = any_reflection_file(hklin)
    intensities = [
        ma for ma in reader.as_miller_arrays(merge_equivalents=False)
        if ma.info().labels == ["I", "SIGI"]
    ][0]
    indices = intensities.indices()

    # merging: use external variance i.e. variances derived from SIGI column
    merger = intensities.merge_equivalents(use_internal_variance=False)
    mult = merger.redundancies().data()
    imean = merger.array()
    unique = imean.indices()
    iobs = imean.data()
    # scale up variance to account for sqrt(multiplicity) effective scaling
    variobs = (imean.sigmas()**2) * mult.as_double()

    all = flex.double()
    cen = flex.double()

    for hkl, i, v, m in zip(unique, iobs, variobs, mult):

        # only consider if meaningful number of observations
        if m < 3:
            continue

        sel = indices == hkl
        data = intensities.select(sel).data()

        assert m == len(data)

        _x, _y = npp_ify(data, input_mean_variance=(i, v))

        # perform linreg on (i) all data and (ii) subset between +/- 2 sigma

        sel = flex.abs(_x) < 2
        _x_ = _x.select(sel)
        _y_ = _y.select(sel)

        fit_all = flex.linear_regression(_x, _y)
        fit_cen = flex.linear_regression(_x_, _y_)

        all.append(fit_all.slope())
        cen.append(fit_cen.slope())

        print(
            "%3d %3d %3d" % hkl,
            "%.2f %.2f %.2f" % (i, v, i / math.sqrt(v)),
            "%.2f %.2f" % (fit_all.slope(), fit_cen.slope()),
            "%d" % m,
        )

    sys.stderr.write("Mean gradients: %.2f %.2f\n" %
                     (flex.sum(all) / all.size(), flex.sum(cen) / cen.size()))
Example #27
File: npp.py Project: xia2/xia2
def npp(hklin):
  from iotbx.reflection_file_reader import any_reflection_file
  from xia2.Toolkit.NPP import npp_ify, mean_variance
  from scitbx.array_family import flex
  import math
  import sys
  reader = any_reflection_file(hklin)
  mtz_object = reader.file_content()
  intensities = [ma for ma in reader.as_miller_arrays(merge_equivalents=False)
                 if ma.info().labels == ['I', 'SIGI']][0]
  indices = intensities.indices()

  # merging: use external variance i.e. variances derived from SIGI column
  merger = intensities.merge_equivalents(use_internal_variance=False)
  mult = merger.redundancies().data()
  imean = merger.array()
  unique = imean.indices()
  iobs = imean.data()
  # scale up variance to account for sqrt(multiplicity) effective scaling
  variobs = (imean.sigmas() ** 2) * mult.as_double()

  all = flex.double()
  cen = flex.double()

  for hkl, i, v, m in zip(unique, iobs, variobs, mult):

    # only consider if meaningful number of observations
    if m < 3:
      continue

    sel = indices == hkl
    data = intensities.select(sel).data()

    assert(m == len(data))

    _x, _y = npp_ify(data, input_mean_variance=(i,v))

    # perform linreg on (i) all data and (ii) subset between +/- 2 sigma

    sel = (flex.abs(_x) < 2)
    _x_ = _x.select(sel)
    _y_ = _y.select(sel)

    fit_all = flex.linear_regression(_x, _y)
    fit_cen = flex.linear_regression(_x_, _y_)

    all.append(fit_all.slope())
    cen.append(fit_cen.slope())

    print '%3d %3d %3d' % hkl, '%.2f %.2f %.2f' % (i, v, i/math.sqrt(v)), \
      '%.2f %.2f' % (fit_all.slope(), fit_cen.slope()), '%d' % m

  sys.stderr.write('Mean gradients: %.2f %.2f\n' % (flex.sum(all) / all.size(),
                                                    flex.sum(cen) / cen.size()))
Example #28
def write_sorted_moduli_as_mathematica_plot(f, filename):
  """ To obtain fig. 1 in ref [2] in module charge_flipping """
  abs_f = flex.abs(f.data())
  sorted = abs_f.select(flex.sort_permutation(abs_f))
  sorted /= flex.max(sorted)
  mf = open(os.path.expanduser(filename), 'w')
  print >> mf, 'fp1 = {'
  for f in sorted:
    print >> mf, "%f, " % f
  print >> mf, "1 };"
  print >> mf, "ListPlot[fp1]"
  mf.close()
Example #29
def make_start_gaussian(null_fit,
                        existing_gaussian,
                        i_split,
                        i_x,
                        start_fraction,
                        b_range=1.e-3):
  x_sq = null_fit.table_x()[i_x]**2
  y0_table = null_fit.table_y()[0]
  yx_table = null_fit.table_y()[i_x]
  y0_existing = existing_gaussian.at_x_sq(0)
  yx_existing = existing_gaussian.at_x_sq(x_sq)
  n_terms = existing_gaussian.n_terms() + 1
  if (n_terms == 1):
    a = flex.double([y0_table])
    b = flex.double()
    yx_part = yx_table
  else:
    scale_old = 1 - start_fraction
    b = flex.double(existing_gaussian.array_of_b())
    b_max = flex.max(flex.abs(b))
    b_min = b_max * b_range
    sel = b < b_min
    b.set_selected(sel, flex.double(sel.count(True), b_min))
    if (i_split < 0):
      a = flex.double(existing_gaussian.array_of_a()) * scale_old
      a.append(y0_table - flex.sum(a))
      yx_part = yx_table - yx_existing * scale_old
    else:
      t_split = scitbx.math.gaussian.term(
        existing_gaussian.array_of_a()[i_split],
        existing_gaussian.array_of_b()[i_split])
      a = flex.double(existing_gaussian.array_of_a())
      a.append(a[i_split] * start_fraction)
      a[i_split] *= scale_old
      yx_part = t_split.at_x_sq(x_sq) * start_fraction
  addl_b = 0
  if (a[-1] != 0 and x_sq != 0):
    r = yx_part / a[-1]
    if (0 < r <= 1):
      addl_b = -math.log(r) / x_sq
  b.append(addl_b)
  if (addl_b != 0):
    assert abs(a[-1] * math.exp(-b[-1] * x_sq) - yx_part) < 1.e-6
  result = scitbx.math.gaussian.fit(
    null_fit.table_x(),
    null_fit.table_y(),
    null_fit.table_sigmas(),
    scitbx.math.gaussian.sum(iter(a), iter(b)))
  if (addl_b != 0 and i_split < 0):
    assert abs(result.at_x_sq(0) - y0_table) < 1.e-4
  if (n_terms == 1):
    assert abs(result.at_x_sq(x_sq) - yx_table) < 1.e-4
  return result
Example #30
  def __init__(self, crystal, beam, detector, goniometer, scan, reflections):
    '''Initialise the algorithm. Calculate the list of tau and zetas.

    Params:
        reflections The list of reflections
        experiment The experiment object

    '''
    from dials.array_family import flex
    from math import sqrt

    # Get the oscillation width
    dphi2 = scan.get_oscillation(deg=False)[1] / 2.0

    # Calculate a list of angles and zetas
    tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector,
                                             goniometer, scan, reflections)

    # Calculate zeta * (tau +- dphi / 2) / sqrt(2)
    self.e1 = (tau + dphi2) * flex.abs(zeta) / sqrt(2.0)
    self.e2 = (tau - dphi2) * flex.abs(zeta) / sqrt(2.0)
Example #31
def exercise_real_to_complex():
  print "real_to_complex"
  for n in xrange(1,256+1):
    fft = fftpack.real_to_complex(n)
    dp = flex.random_double(size=n)*2-1
    dp.resize(flex.grid(fft.m_real()).set_focus(n))
    dw = dp.deep_copy()
    cw = fftw3tbx.real_to_complex_in_place(dw)
    cp = fft.forward(dp)
    assert flex.max(flex.abs(cw-cp)) < 1.e-6
    rw = fftw3tbx.complex_to_real_in_place(cw, n)
    rp = fft.backward(cp)
    assert flex.max(flex.abs(rw[:n]-rp[:n])) < 1.e-6
  for n,n_repeats in [(2400,500), (19200,250)]:
    fft = fftpack.real_to_complex(n)
    print "  factors of %d:" % n, list(fft.factors())
    print "  repeats:", n_repeats
    d0 = flex.random_double(size=n)*2-1
    d0.resize(flex.grid(fft.m_real()).set_focus(n))
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftw3tbx.real_to_complex_in_place(data=d)
      fftw3tbx.complex_to_real_in_place(data=c, n=n)
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      c = fftpack.real_to_complex(n).forward(d)
      fftpack.real_to_complex(n).backward(c)
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
Example #32
def run(file_name="tst_tls_as_xyz.pdb"):
    of = open(file_name, "w")
    print(pdb_str, file=of)
    of.close()
    uc = iotbx.pdb.input(file_name=file_name).crystal_symmetry().unit_cell()
    #for n in range(10,100,10)+range(100,1000,100)+range(1000,10001,1000)+[15000,20000]:
    for n in [
            1000,
    ]:
        easy_run.call("phenix.tls_as_xyz %s n_models=%s > tst_tls_as_xyz.log" %
                      (file_name, str(n)))
        for i in [0, 1]:
            u1 = iotbx.pdb.input(
                file_name="tst_tls_as_xyz_u_from_ensemble_%s.pdb" %
                str(i)).xray_structure_simple().scatterers().extract_u_cart(uc)
            u2 = iotbx.pdb.input(
                file_name="tst_tls_as_xyz_u_from_tls_%s.pdb" %
                str(i)).xray_structure_simple().scatterers().extract_u_cart(uc)

        u1, u2 = u1.as_double(), u2.as_double()
        cc = flex.linear_correlation(x=u1, y=u2).coefficient()
        r = flex.sum(flex.abs(flex.abs(u1)-flex.abs(u2)))/\
            flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
        print("%5d %6.4f %6.4f" % (n, cc, r))
    assert cc > 0.99, cc
    assert r < 0.06, r
Example #33
    def _round_of_outlier_rejection(self):
        """
        Calculate normal deviations from the data in the Ih_table.
        """
        Ih_table = self._Ih_table_block
        intensity = Ih_table.intensities
        g = Ih_table.inverse_scale_factors
        w = self.weights
        wgIsum = ((w * g * intensity) *
                  Ih_table.h_index_matrix) * Ih_table.h_expand_matrix
        wg2sum = (
            (w * g * g) * Ih_table.h_index_matrix) * Ih_table.h_expand_matrix
        wgIsum_others = wgIsum - (w * g * intensity)
        wg2sum_others = wg2sum - (w * g * g)
        # Now do the rejection analysis if n_in_group > 2
        nh = Ih_table.calc_nh()
        sel = nh > 2
        wg2sum_others_sel = wg2sum_others.select(sel)
        wgIsum_others_sel = wgIsum_others.select(sel)

        # guard against zero division errors - can happen due to rounding
        # errors or bad data giving g values that are very small
        zero_sel = wg2sum_others_sel == 0.0
        # set to one for now, then mark as an outlier below; this only
        # matters when g is near zero (if w is zero, the assertion below fires)
        wg2sum_others_sel.set_selected(zero_sel, 1.0)
        g_sel = g.select(sel)
        I_sel = intensity.select(sel)
        w_sel = w.select(sel)

        assert w_sel.all_gt(0)  # guard against division by zero
        norm_dev = (I_sel -
                    (g_sel * wgIsum_others_sel / wg2sum_others_sel)) / (
                        flex.sqrt((1.0 / w_sel) +
                                  (flex.pow2(g_sel) / wg2sum_others_sel)))
        norm_dev.set_selected(zero_sel, 1000)  # to trigger rejection
        z_score = flex.abs(norm_dev)
        # Want an array same size as Ih table.
        all_z_scores = flex.double(Ih_table.size, 0.0)
        all_z_scores.set_selected(sel.iselection(), z_score)
        outlier_indices, other_potential_outliers = determine_outlier_indices(
            Ih_table.h_index_matrix, all_z_scores, self._zmax)
        self._outlier_indices.extend(
            self._Ih_table_block.Ih_table["loc_indices"].select(
                outlier_indices))
        self._datasets.extend(
            self._Ih_table_block.Ih_table["dataset_id"].select(
                outlier_indices))
        sel = flex.bool(Ih_table.size, False)
        sel.set_selected(other_potential_outliers, True)
        self._Ih_table_block = self._Ih_table_block.select(sel)
        self.weights = self.weights.select(sel)
Example #34
    def _round_of_outlier_rejection(self):
        """
        Calculate normal deviations from the data in the Ih_table.

        Returns:
            (tuple): tuple containing:
                outlier_indices: A flex.size_t array of outlier indices w.r.t
                    the current Ih_table
                other_potential_outliers: A flex.size_t array of indices from
                    the symmetry groups where outliers were found, excluding the
                    indices of the outliers themselves (indices w.r.t current
                    Ih_table).

        """
        Ih_table = self._Ih_table_block
        I = Ih_table.intensities
        g = Ih_table.inverse_scale_factors
        w = Ih_table.weights
        wgIsum = (
            (w * g * I) * Ih_table.h_index_matrix) * Ih_table.h_expand_matrix
        wg2sum = (
            (w * g * g) * Ih_table.h_index_matrix) * Ih_table.h_expand_matrix
        wgIsum_others = wgIsum - (w * g * I)
        wg2sum_others = wg2sum - (w * g * g)
        # Now do the rejection analysis if n_in_group > 2
        nh = Ih_table.calc_nh()
        sel = nh > 2
        wg2sum_others_sel = wg2sum_others.select(sel)
        wgIsum_others_sel = wgIsum_others.select(sel)

        # guard against zero division errors - can happen due to rounding
        # errors or bad data giving g values that are very small
        zero_sel = wg2sum_others_sel == 0.0
        # set to one for now, then mark as an outlier below; this only
        # matters when g is near zero (if w is zero, the assertion below fires)
        wg2sum_others_sel.set_selected(zero_sel, 1.0)
        g_sel = g.select(sel)
        I_sel = I.select(sel)
        w_sel = w.select(sel)

        assert w_sel.all_gt(0)  # guard against division by zero
        norm_dev = (I_sel -
                    (g_sel * wgIsum_others_sel / wg2sum_others_sel)) / ((
                        (1.0 / w_sel) + (g_sel**2 / wg2sum_others_sel))**0.5)
        norm_dev.set_selected(zero_sel, 1000)  # to trigger rejection
        z_score = flex.abs(norm_dev)
        # Want an array same size as Ih table.
        all_z_scores = flex.double(Ih_table.size, 0.0)
        all_z_scores.set_selected(sel.iselection(), z_score)
        outlier_indices, other_potential_outliers = determine_outlier_indices(
            Ih_table.h_index_matrix, all_z_scores, self._zmax)
        return outlier_indices, other_potential_outliers
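The normalized deviation for a single observation against the rest of its symmetry group, written out as a standalone formula (a sketch; the primed sums run over the other group members, with g the inverse scale and w the weight):

import math

def norm_dev(I, g, w, wgI_others, wg2_others):
    # (I - g * sum'(wgI)/sum'(wg^2)) / sqrt(1/w + g^2/sum'(wg^2))
    return (I - g * wgI_others / wg2_others) / math.sqrt(
        1.0 / w + g * g / wg2_others)

print(norm_dev(I=105.0, g=1.0, w=0.01, wgI_others=2.0, wg2_others=0.02))  # ~0.41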
Example #35
def fft(d):
  from scitbx import fftpack
  f = fftpack.real_to_complex(d.size())
  _d = flex.double(f.m_real(), 0.0)
  for j in range(d.size()):
    _d[j] = d[j]
  t = f.forward(_d)
  p = flex.abs(t) ** 2

  # remove the DC component
  p[0] = 0.0

  return p
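A numpy analogue of this power spectrum, using rfft in place of scitbx fftpack (a sketch, not the project's code path):

import numpy as np

def power_spectrum(d):
    p = np.abs(np.fft.rfft(d)) ** 2
    p[0] = 0.0  # remove the DC component, as above
    return p

print(power_spectrum(np.array([1.0, 2.0, 3.0, 4.0])))  # [0. 8. 4.]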
Example #36
 def self_check(self, show=True):
   if(show):
     print_step("Recover T_M, L_M,S_M from base elements:", self.log)
   r = self.result
   r = tls_from_motions(
     dx=r.dx, dy=r.dy, dz=r.dz,
     l_x=r.l_x, l_y=r.l_y, l_z=r.l_z,
     sx=r.sx, sy=r.sy, sz=r.sz,
     tx=r.tx, ty=r.ty, tz=r.tz,
     v_x=r.v_x, v_y=r.v_y, v_z=r.v_z,
     w_M_lx=r.w_M_lx,
     w_M_ly=r.w_M_ly,
     w_M_lz=r.w_M_lz)
   #
   T_M = r.T_M
   if(show):
     show_matrix(x=self.T_M, title="Input T_M:", log=self.log)
     show_matrix(x=T_M, title="Recovered T_M:", log=self.log)
   if(flex.max(flex.abs(flex.double(T_M - self.T_M))) > self.self_check_eps):
     raise Sorry("Cannot reconstruct T_M")
   # L_M
   L_M = r.L_M
   if(show):
     show_matrix(x=self.L_M, title="Input L_M:", log=self.log)
     show_matrix(x=L_M, title="Recovered L_M:", log=self.log)
   if(flex.max(flex.abs(flex.double(L_M - self.L_M))) > self.self_check_eps):
     raise Sorry("Cannot reconstruct L_M")
   # S_M
   S_M = r.S_M
   if(show):
     show_matrix(x=self.S_M, title="Input S_M:", log=self.log)
     show_matrix(x=S_M, title="Recovered S_M:", log=self.log)
   d = matrix.sqr(self.S_M-S_M)
   # remark: diagonal does not have to match, can be different by a constant
   max_diff = flex.max(flex.abs(
     flex.double([d[1],d[2],d[3],d[5],d[6],d[7],d[0]-d[4],d[0]-d[8]])))
   if(max_diff > self.self_check_eps):
     raise Sorry("Cannot reconstruct S_M")
   return r
Example #37
def validate(pdb_str, threshold_bonds=0.02 * 4, threshold_angles=2.5 * 4):
    pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
    params = mmtbx.model.manager.get_default_pdb_interpretation_params()
    params.pdb_interpretation.use_neutron_distances = True
    params.pdb_interpretation.restraints_library.cdl = False
    model = mmtbx.model.manager(
        model_input=pdb_inp,
        build_grm=True,
        stop_for_unknowns=True,  #False,
        pdb_interpretation_params=params,
        log=null_out())
    grm = model.get_restraints_manager().geometry
    sites_cart = model.get_sites_cart()
    b_deltas = flex.abs(
        grm.get_all_bond_proxies()[0].deltas(sites_cart=sites_cart))
    b_outl = b_deltas.select(b_deltas > threshold_bonds)
    if (b_outl.size() > 0): return None
    a_deltas = flex.abs(
        grm.get_all_angle_proxies().deltas(sites_cart=sites_cart))
    a_outl = a_deltas.select(a_deltas > threshold_angles)
    if (a_outl.size() > 0): return None
    return pdb_str
Example #38
def exercise_complex_to_complex():
  print "complex_to_complex"
  for n in xrange(1,256+1):
    dp = (flex.random_double(size=n)*2-1) * flex.polar(
      1, flex.random_double(size=n)*2-1)
    dw = dp.deep_copy()
    fft = fftpack.complex_to_complex(n)
    fftw3tbx.complex_to_complex_in_place(data=dw, exp_sign=-1)
    fft.forward(dp)
    assert flex.max(flex.abs(dw-dp)) < 1.e-6
    fftw3tbx.complex_to_complex_in_place(data=dw, exp_sign=+1)
    fft.backward(dp)
    assert flex.max(flex.abs(dw-dp)) < 1.e-6
  for n,n_repeats in [(1200,500), (9600,250)]:
    print "  factors of %d:" % n, list(fftpack.complex_to_complex(n).factors())
    print "  repeats:", n_repeats
    d0 = (flex.random_double(size=n)*2-1) * flex.polar(
      1, flex.random_double(size=n)*2-1)
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
    overhead = time.time()-t0
    print "    overhead: %.2f seconds" % overhead
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftw3tbx.complex_to_complex_in_place(data=d, exp_sign=-1)
      fftw3tbx.complex_to_complex_in_place(data=d, exp_sign=+1)
    print "    fftw:     %.2f seconds" % (time.time()-t0-overhead)
    #
    t0 = time.time()
    for i_trial in xrange(n_repeats):
      d = d0.deep_copy()
      fftpack.complex_to_complex(n).forward(d)
      fftpack.complex_to_complex(n).backward(d)
    print "    fftpack:  %.2f seconds" % (time.time()-t0-overhead)
    sys.stdout.flush()
Example #39
def build_scat_pat(data):
  print "DO FFT"
  np,np = data.focus()
  flex_grid=flex.grid(np,np)
  fft_input=flex.complex_double(flex_grid)
  for ii in range( fft_input.size() ):
    fft_input[ii] = complex( data[ii],0 )
  fft_obj = fftpack.complex_to_complex_2d( (np,np) )
  result = fft_obj.forward( fft_input )
  result = flex.abs( result )
  result = result*result
  result = reorder_and_cut_data(result,np,np/10)
  return result
Example #40
def test_resolution_cc_half(merging_stats):
    result = resolution_analysis.resolution_cc_half(merging_stats, limit=0.82)
    assert result.d_min == pytest.approx(1.242, abs=1e-3)
    result = resolution_analysis.resolution_cc_half(
        merging_stats,
        limit=0.82,
        cc_half_method="sigma_tau",
        model=resolution_analysis.polynomial_fit,
    )
    assert result.d_min == pytest.approx(1.233, abs=1e-3)
    assert flex.max(flex.abs(result.y_obs - result.y_fit)) < 0.04
    assert result.critical_values is not None
    assert len(result.critical_values) == len(result.d_star_sq)
Example #41
def exercise_mask_data_2(space_group_info,
                         n_sites=100,
                         d_min=2.0,
                         resolution_factor=1. / 4):
    from cctbx import maptbx
    from cctbx.masks import vdw_radii_from_xray_structure
    for yn2 in [0, 1]:
        for yn in [0, 1]:
            xrs = random_structure.xray_structure(
                space_group_info=space_group_info,
                elements=(("O", "N", "C") * (n_sites // 3 + 1))[:n_sites],
                volume_per_atom=50,
                min_distance=1.5)
            xrs.shake_sites_in_place(mean_distance=10)
            if (yn2): xrs = xrs.expand_to_p1(sites_mod_positive=True)
            atom_radii = vdw_radii_from_xray_structure(xray_structure=xrs)
            asu_mask = masks.atom_mask(unit_cell=xrs.unit_cell(),
                                       group=xrs.space_group(),
                                       resolution=d_min,
                                       grid_step_factor=resolution_factor,
                                       solvent_radius=1.0,
                                       shrink_truncation_radius=1.0)
            asu_mask.compute(xrs.sites_frac(), atom_radii)
            mask_data = asu_mask.mask_data_whole_uc()
            #
            xrs_p1 = xrs.expand_to_p1(sites_mod_positive=True)
            for site_frac in xrs_p1.sites_frac():
                mv = mask_data.value_at_closest_grid_point(site_frac)
                assert mv == 0
            #
            mask_data = mask_data / xrs.space_group().order_z()
            if (yn == 1):
                mask_data = maptbx.copy(mask_data,
                                        flex.grid(mask_data.focus()))
            #
            for site_frac in xrs_p1.sites_frac():
                mv = mask_data.value_at_closest_grid_point(site_frac)
                assert mv == 0
            #
            fc = xrs.structure_factors(d_min=d_min).f_calc()
            f_mask_1 = fc.set().array(
                data=asu_mask.structure_factors(fc.indices()))
            f_mask_2 = f_mask_1.structure_factors_from_map(
                map=mask_data,
                use_scale=True,
                anomalous_flag=False,
                use_sg=True)
            fm1 = abs(f_mask_1).data()
            fm2 = abs(f_mask_2).data()
            r = flex.sum(flex.abs(fm1 - fm2)) / flex.sum(fm1 + fm2)
            assert approx_equal(r, 0.0)
Example #42
 def choose_best(self, use_r_work):
      # Do not include the initial model in decision-making.
     rfs = self.r_frees[1:]
     rws = self.r_works[1:]
     bs = self.bs[1:]
     gaps = rfs - rws
     xrss = self.xrss[1:]
     rewescas = self.restraints_weight_scales[1:]
     # Select all that satisfy bonds and Rfree-Rwork gap criteria
     s = bs < self.max_bond_rmsd
     rfs = rfs.select(s)
     rws = rws.select(s)
     bs = bs.select(s)
     gaps = gaps.select(s)
     rewescas = rewescas.select(s)
     xrss = selxrs(xrss=xrss, s=s)
     # Rfree-Rwork gap
     filtered_by_gap = False
     s = gaps > 0
     s &= flex.abs(gaps) * 100. < self.max_r_work_r_free_gap
     if (s.count(True) > 0):
         rfs = rfs.select(s)
         rws = rws.select(s)
         bs = bs.select(s)
         gaps = gaps.select(s)
         rewescas = rewescas.select(s)
         xrss = selxrs(xrss=xrss, s=s)
         filtered_by_gap = True
     if (rfs.size() == 0):
         return None, None, None, None
     else:
         # Choose the one that has lowest Rfree
         if (use_r_work): rs = rws.deep_copy()
         else: rs = rfs.deep_copy()
         min_r = flex.min(rs)
         min_gap = flex.min(gaps)
         index_best = None
         if (filtered_by_gap):
             for i in xrange(rs.size()):
                 if (abs(rs[i] - min_r) < 1.e-5):
                     index_best = i
                     break
         else:
             for i in xrange(gaps.size()):
                 if (abs(gaps[i] - min_gap) < 1.e-5):
                     index_best = i
                     break
         # This is the result
         self.pdb_hierarchy.adopt_xray_structure(xrss[index_best])
         return xrss[index_best], rws[index_best], rfs[index_best],\
           rewescas[index_best]
Example #43
    def r_merge_per_batch(pairs):
        """Calculate R_merge for the list of (merged-I, I) pairs."""

        merged_indices, unmerged_indices = zip(*pairs)

        unmerged_Ij = intensities.data().select(flex.size_t(unmerged_indices))
        merged_Ij = merged_intensities.data().select(flex.size_t(merged_indices))

        numerator = flex.sum(flex.abs(unmerged_Ij - merged_Ij))
        denominator = flex.sum(unmerged_Ij)

        if denominator > 0:
            return numerator / denominator
        return 0
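The same statistic on plain pairs, independent of the flex machinery (hypothetical input: (merged <I>, unmerged I_j) pairs, as in the function above):

def r_merge_plain(pairs):
    # R_merge = sum |I_j - <I>| / sum I_j over all (merged, unmerged) pairs
    num = sum(abs(ij - im) for im, ij in pairs)
    den = sum(ij for _, ij in pairs)
    return num / den if den > 0 else 0

print(r_merge_plain([(10.0, 9.0), (10.0, 11.0), (5.0, 5.5)]))  # ~0.098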
Example #44
 def set_k_isotropic_exp(self, r_start, verbose, b_lower_limit=-100):
     if (self.verbose):
         print("    set_k_isotropic_exp:", file=self.log)
         print("      r_start: %6.4f (r_low: %6.4f)" %
               (r_start, self._r_low()))
     k_iso = flex.double(self.core.k_isotropic.size(),
                         1)  # Done at start only!
     k_aniso = flex.double(self.core.k_isotropic.size(),
                           1)  # Done at start only!
     arrays = mmtbx.arrays.init(f_calc=self.core.f_calc,
                                f_masks=self.core.f_mask(),
                                k_isotropic=k_iso,
                                k_anisotropic=k_aniso,
                                k_masks=self.core.k_mask())
     sel = self.selection_work.data()
     #
     # At least in one example this gives more accurate answer but higher R than start!
     #
     rf = scitbx.math.gaussian_fit_1d_analytical(
         x=flex.sqrt(self.ss).select(sel),
         y=self.f_obs.data().select(sel),
         z=abs(arrays.f_model).data().select(sel))
     if (rf.b < b_lower_limit): return r_start
     k1 = rf.a * flex.exp(-self.ss * rf.b)
     r1 = self.try_scale(k_isotropic_exp=k1)
     #
     # At least in one example this gives less accurate answer but lower R than start!
     #
     o = bulk_solvent.f_kb_scaled(f1=self.f_obs.data().select(sel),
                                  f2=flex.abs(
                                      arrays.f_model.data()).select(sel),
                                  b_range=flex.double(range(-100, 100, 1)),
                                  ss=self.ss.select(sel))
     k2 = o.k() * flex.exp(-self.ss * o.b())
     r2 = self.try_scale(k_isotropic_exp=k2)
     #
     if (r1 < r2):
         r = r1
         k = k1
     else:
         r = r2
         k = k2
     if (r < r_start):
         self.core = self.core.update(k_isotropic_exp=k)
     r = self.r_factor()
     if (self.verbose):
         print("      r1: %6.4f" % r1)
         print("      r2: %6.4f" % r2)
         print("      r_final: %6.4f (r_low: %6.4f)" % (r, self._r_low()))
     return r
Example #45
def tst_smv(filename):
    from dxtbx.format.image import SMVReader
    from scitbx.array_family import flex

    image = SMVReader(filename).image()
    assert image.n_tiles() == 1
    data1 = image.tile(0).as_int()

    data2 = read_smv_image(filename)

    diff = flex.abs(data1 - data2)
    assert flex.max(diff) < 1e-7

    print 'OK'
Example #46
    def target(self, vector):
        #    self.rbe.rotate_translate( vector[0:3], vector[3:], 1)
        if (self.translate):
            self.rbe.rotate_translate(vector, self.angle, 1)
        else:
            self.rbe.rotate_translate(self.vector, vector, 1)

        calc_pr = self.rbe.get_norm_pr()
        print '&', list(vector)
        #    for cp,ep in zip(calc_pr, self.pr):
        #      print cp,ep
        score = flex.sum(flex.abs(calc_pr - self.pr) * flex.exp(-self.pr))
        print '#', score
        return score
Example #47
 def get_z_scores(self, scale, b_value):
   i_scaled = flex.exp( self.calc_d_star_sq*b_value )*self.mean_calc*scale
   sel = ((self.mean_obs > 0) & (i_scaled > 0)) .iselection()
   ratio  = self.mean_obs.select(sel) / i_scaled.select(sel)
   mean = self.curve( self.calc_d_star_sq ).select(sel)
   assert ratio.all_gt(0) # FIXME need to filter first!
   ratio = flex.log(ratio)
   var = self.std(self.calc_d_star_sq).select(sel)
   d_star_sq = self.calc_d_star_sq.select(sel)
   assert var.all_ne(0)
   z = flex.abs(ratio-mean)/var
   z_ = flex.double(self.mean_obs.size(), -1)
   z_.set_selected(sel, z)
   return z_
Example #48
def run(prefix):
    """
  Exercise gradients match: using clustering vs not using clustering.
  Altlocs.
  """
    for i, pdb_str_in in enumerate([pdb_str_in1, pdb_str_in2]):
        if (i == 0): print("Altlocs present", "-" * 30)
        else: print("No altlocs", "-" * 30)
        pdb_in = "%s.pdb" % prefix
        open(pdb_in, "w").write(pdb_str_in)
        #
        # for fast_interaction in [True, False]:
        for fast_interaction in [True]:
            print("fast_interaction:", fast_interaction)
            for restraints in ["cctbx", "qm"]:
                print("  restraints:", restraints)
                for two_buffers in [False, True]:
                    print("    two_buffers=", two_buffers)
                    for clustering in ["true", "false"]:
                        print("      clustering=", clustering)

                        # clustering is the string "true"/"false"; compare
                        # explicitly (a non-empty string is always truthy)
                        if (clustering == "false"): expansion = True
                        else: expansion = False

                        cmd = " ".join([
                            "qr.refine", pdb_in,
                            "expansion=%s" % str(expansion), "mode=opt",
                            "altloc_method=subtract",
                            "fast_interaction=%s" % fast_interaction,
                            "stpmax=0.2", "gradient_only=true",
                            "clustering=%s" % clustering,
                            "dump_gradients=cluster_%s.pkl" % clustering,
                            "restraints=%s" % restraints,
                            "quantum.engine_name=mopac",
                            "number_of_micro_cycles=1",
                            "max_iterations_refine=5",
                            "two_buffers=%s" % str(two_buffers),
                            "> %s.log" % prefix
                        ])
                        #print cmd
                        assert easy_run.call(cmd) == 0
                    g1 = easy_pickle.load("cluster_false.pkl")
                    g2 = easy_pickle.load("cluster_true.pkl")
                    g1 = g1.as_double()
                    g2 = g2.as_double()
                    diff = flex.abs(g1 - g2)
                    print("        min/max/mean of (gradient1 - gradient2):", \
                        diff.min_max_mean().as_tuple())
                    os.remove("cluster_false.pkl")
                    os.remove("cluster_true.pkl")
Example #49
def tst_cbf_fast(filename):
    from scitbx.array_family import flex
    from dxtbx.format.image import CBFFastReader

    image = CBFFastReader(filename).image()
    assert image.n_tiles() == 1
    data1 = image.tile(0).as_int()

    data2 = read_cbf_image(filename)

    diff = flex.abs(data1 - data2)
    assert flex.max(diff) < 1e-7

    print 'OK'
Example #50
def run(prefix):
    """
  Exercise combined energy and gradients from cluster qm.
  """
    for restraints in ["cctbx", "qm"]:
        if 0:
            print("Using restraints:", restraints)
        result = []
        for clustering in [True, False]:
            if 0:
                print("  clustering", clustering, "-" * 30)
            model = get_model()
            if (restraints == "qm"):
                fq = from_qm(pdb_hierarchy=model.get_hierarchy(),
                             qm_engine_name="mopac",
                             method="PM3",
                             crystal_symmetry=model.crystal_symmetry(),
                             clustering=clustering)
            else:
                fq = from_cctbx(
                    restraints_manager=model.get_restraints_manager())
            if (clustering):
                fm = fragments(
                    working_folder=os.path.split("./ase/tmp_ase.pdb")[0] + "/",
                    clustering_method=betweenness_centrality_clustering,
                    maxnum_residues_in_cluster=8,
                    charge_embedding=False,
                    two_buffers=False,
                    fast_interaction=True,
                    pdb_hierarchy=model.get_hierarchy().deep_copy(
                    ),  # deep copy just in case
                    qm_engine_name="mopac",
                    crystal_symmetry=model.crystal_symmetry())
                fc = from_cluster(restraints_manager=fq,
                                  fragment_manager=fm,
                                  parallel_params=get_master_phil().extract())
            else:
                fc = fq
            energy, gradients = fc.target_and_gradients(
                sites_cart=model.get_sites_cart())
            if (restraints == "qm"):
                energy = energy * (kcal / mol) * (kcal / mol) / Hartree
                gradients = gradients * (kcal / mol) * (kcal / mol) * (Bohr /
                                                                       Hartree)
            gradients = gradients.as_double()
            result.append(gradients.deep_copy())
        #
        diff = flex.abs(result[0] - result[1])
        max_diff = flex.max(diff)
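        # (The snippet is truncated here; the natural continuation would
        # assert agreement between the two gradient sets, e.g.
        # "assert max_diff < 1.e-4"; the tolerance is an assumption.)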
Example No. 51
def exercise_complex_to_complex_3d():
    from scitbx.array_family import flex
    from cudatbx import cufft
    from scitbx import fftpack
    import time
    import sys
    print ""
    print "complex_to_complex_3d"
    for n_complex, n_repeats in [((100, 80, 90), 16), ((200, 160, 180), 16)]:
        print "  dimensions:", n_complex
        print "  repeats:", n_repeats
        np = n_complex[0] * n_complex[1] * n_complex[2]
        d0 = flex.polar(
            flex.random_double(size=np) * 2 - 1,
            flex.random_double(size=np) * 2 - 1)
        d0.reshape(flex.grid(n_complex))
        #
        t0 = time.time()
        for i_trial in xrange(n_repeats):
            d = d0.deep_copy()
        overhead = time.time() - t0
        print "    overhead: %.2f seconds" % overhead
        #
        # XXX extra CuFFT to initialize device - can we avoid this somehow?
        d = d0.deep_copy()
        cufft.complex_to_complex_3d(n_complex).forward(d)
        cufft.complex_to_complex_3d(n_complex).backward(d)
        # benchmarking run
        t0 = time.time()
        for i_trial in xrange(n_repeats):
            d = d0.deep_copy()
            cufft.complex_to_complex_3d(n_complex).forward(d)
            cufft.complex_to_complex_3d(n_complex).backward(d)
        print "    cufft:    %6.2f seconds" % (
            (time.time() - t0 - overhead) / n_repeats)
        rw = d / np
        #
        t0 = time.time()
        for i_trial in xrange(n_repeats):
            d = d0.deep_copy()
            fftpack.complex_to_complex_3d(n_complex).forward(d)
            fftpack.complex_to_complex_3d(n_complex).backward(d)
        print "    fftpack:  %6.2f seconds" % (
            (time.time() - t0 - overhead) / n_repeats)
        sys.stdout.flush()
        rp = d / np
        #
        print ""
        assert flex.max(flex.abs(rw - rp)) < 1.e-6
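        # Both the cuFFT and fftpack transforms here are unnormalized: a
        # forward followed by a backward pass scales every element by
        # np = nx*ny*nz, which is why rw and rp are divided by np before
        # the two round-trip results are compared.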
Example No. 52
def main(filenames,
         map_file,
         npoints=192,
         max_resolution=6,
         reverse_phi=False):
    rec_range = 1.0 / max_resolution  # guard against integer division

    image = ImageFactory(filenames[0])
    panel = image.get_detector()[0]
    beam = image.get_beam()
    s0 = beam.get_s0()
    pixel_size = panel.get_pixel_size()

    xlim, ylim = image.get_raw_data().all()

    xy = recviewer.get_target_pixels(panel, s0, xlim, ylim, max_resolution)

    s1 = panel.get_lab_coord(xy * pixel_size[0])  # FIXME: assumed square pixel
    s1 = s1 / s1.norms() * (1 / beam.get_wavelength())  # / is not supported...
    S = s1 - s0
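    # S = s1 - s0 is the scattering vector for each target pixel;
    # |S| = 2*sin(theta)/lambda, so these voxels sample reciprocal space
    # out to the requested max_resolution.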

    grid = flex.double(flex.grid(npoints, npoints, npoints), 0)
    cnts = flex.int(flex.grid(npoints, npoints, npoints), 0)

    for filename in filenames:
        print("Processing image", filename)
        try:
            fill_voxels(ImageFactory(filename), grid, cnts, S, xy, reverse_phi,
                        rec_range)
        except Exception:
            print(" Failed to process. Skipped this.")

    recviewer.normalize_voxels(grid, cnts)

    uc = uctbx.unit_cell((npoints, npoints, npoints, 90, 90, 90))
    ccp4_map.write_ccp4_map(map_file, uc, sgtbx.space_group("P1"), (0, 0, 0),
                            grid.all(), grid,
                            flex.std_string(["cctbx.miller.fft_map"]))
    return
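    # NOTE: everything below this return is unreachable; it appears to be a
    # deliberately disabled variant that would instead write the amplitudes
    # of the FFT of grid**2 to the map file.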
    from scitbx import fftpack
    fft = fftpack.complex_to_complex_3d(grid.all())
    grid_complex = flex.complex_double(reals=flex.pow2(grid),
                                       imags=flex.double(grid.size(), 0))
    grid_transformed = flex.abs(fft.backward(grid_complex))
    print(flex.max(grid_transformed), flex.min(grid_transformed),
          grid_transformed.all())
    ccp4_map.write_ccp4_map(map_file, uc, sgtbx.space_group("P1"), (0, 0, 0),
                            grid.all(), grid_transformed,
                            flex.std_string(["cctbx.miller.fft_map"]))
Example No. 53
def test_tiff(dials_regression, tiff_image):
    filename = os.path.join(dials_regression, tiff_image)

    image = TIFFReader(filename).image()
    if image.is_double():
        image = image.as_double()
    else:
        image = image.as_int()
    assert image.n_tiles() == 1
    data1 = image.tile(0).data()

    data2 = read_tiff_image(filename)

    diff = flex.abs(data1 - data2)
    assert flex.max(diff) < 1e-7
Example No. 54
def check():
    a, b, c, d = abcd(x=[x1, x2, x3])
    answer = [x1, x2, x3]
    answer.sort()
    answer = flex.double(answer)
    r = scitbx.math.cubic_equation_real(a=a, b=b, c=c, d=d)
    for ri in r.residual():
        assert approx_equal(ri, 0.0)
    solution = list(r.x)
    solution.sort()
    solution = flex.double(solution)
    diff = flex.abs(solution - answer)
    assert approx_equal(diff, [0, 0, 0])
    for x in [x1, x2, x3, r.x[0], r.x[1], r.x[2]]:
        assert approx_equal(residual(a=a, b=b, c=c, d=d, x=x), 0)
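
# A hedged sketch of the abcd() helper assumed above (hypothetical; the
# actual helper is not shown in this snippet). For a monic cubic with
# roots x1, x2, x3, Vieta's formulas give
# (X - x1)(X - x2)(X - x3) = X^3 - (x1+x2+x3)*X^2
#                            + (x1*x2 + x1*x3 + x2*x3)*X - x1*x2*x3.
def abcd(x):
    x1, x2, x3 = x
    a = 1.0                        # leading coefficient (monic)
    b = -(x1 + x2 + x3)            # minus the sum of the roots
    c = x1*x2 + x1*x3 + x2*x3      # sum of pairwise root products
    d = -x1*x2*x3                  # minus the product of the roots
    return a, b, c, d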
Example No. 55
def tst_tiff(filename):
    from scitbx.array_family import flex
    from dxtbx.format.image import TIFFReader

    image = TIFFReader(filename).image()
    assert image.n_tiles() == 1
    data1 = image.tile(0).as_int()

    data2 = read_tiff_image(filename)

    diff = flex.abs(data1 - data2)
    assert flex.max(diff) < 1e-7

    print("OK")