def calc_partiality_anisotropy_set(self, my_uc, rotx, roty, miller_indices,
     ry, rz, r0, re, nu,
     bragg_angle_set, alpha_angle_set, wavelength, crystal_init_orientation,
     spot_pred_x_mm_set, spot_pred_y_mm_set, detector_distance_mm,
     partiality_model, flag_beam_divergence):
   #use III.4 in Winkler et al 1979 (A35; P901) for set of miller indices
   O = sqr(my_uc.orthogonalization_matrix()).transpose()
   R = sqr(crystal_init_orientation.crystal_rotation_matrix()).transpose()
   CO = crystal_orientation(O*R, basis_type.direct)
   CO_rotate = CO.rotate_thru((1,0,0), rotx
                ).rotate_thru((0,1,0), roty)
   A_star = sqr(CO_rotate.reciprocal_matrix())
   S0 = -1*col((0,0,1./wavelength))
    #calculate rs
   rs_set = r0 + (re * flex.tan(bragg_angle_set))
   if flag_beam_divergence:
      rs_set += flex.sqrt((ry * flex.cos(alpha_angle_set))**2 + (rz * flex.sin(alpha_angle_set))**2)
   #calculate rh
   x = A_star.elems * miller_indices.as_vec3_double()
   sd_array = x + S0.elems
   rh_set = sd_array.norms() - (1/wavelength)
   #calculate partiality
   if partiality_model == "Lorentzian":
     partiality_set = ((rs_set**2)/((2*(rh_set**2))+(rs_set**2)))
   elif partiality_model == "Voigt":
     partiality_set = self.voigt(rh_set, rs_set, nu)
   elif partiality_model == "Lognormal":
     partiality_set = self.lognpdf(rh_set, rs_set, nu)
   #calculate delta_xy
   d_ratio = -detector_distance_mm/sd_array.parts()[2]
   calc_xy_array = flex.vec3_double(sd_array.parts()[0]*d_ratio, \
       sd_array.parts()[1]*d_ratio, flex.double([0]*len(d_ratio)))
   pred_xy_array = flex.vec3_double(spot_pred_x_mm_set, spot_pred_y_mm_set, flex.double([0]*len(d_ratio)))
   delta_xy_set = (pred_xy_array - calc_xy_array).norms()
   return partiality_set, delta_xy_set, rs_set, rh_set
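
# A minimal sketch (not part of the original example) of the Lorentzian
# partiality relation used above: p = rs^2 / (2*rh^2 + rs^2), so p -> 1 as the
# Ewald-sphere offset rh -> 0 and decays as |rh| grows.
from scitbx.array_family import flex

rh = flex.double([0.0, 0.0005, 0.001, 0.002])  # offsets from the Ewald sphere
rs = flex.double(4, 0.001)                     # reflecting-range radii
p = rs**2 / (2 * rh**2 + rs**2)
assert p[0] == 1.0 and p[3] < p[1]             # full at rh = 0, then decaying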
Example #2
def fit_rotatable(
      pdb_hierarchy,
      xray_structure,
      map_data,
      rotatable_h_selection):
  unit_cell = xray_structure.unit_cell()
  sites_cart = xray_structure.sites_cart()
  scatterers = xray_structure.scatterers()
  for sel_ in rotatable_h_selection:
    ed_val = -1
    angle = 0.
    angular_step = 1
    axis = sel_[0]
    points_i_seqs = sel_[1]
    sites_frac_best = flex.vec3_double(len(points_i_seqs))
    while angle <= 360:
      sites_frac_tmp  = flex.vec3_double(len(points_i_seqs))
      ed_val_ = 0
      for i_seq, point_i_seq in enumerate(points_i_seqs):
        site_cart_new = rotate_point_around_axis(
          axis_point_1 = sites_cart[axis[0]],
          axis_point_2 = sites_cart[axis[1]],
          point        = sites_cart[point_i_seq],
          angle        = angle,
          deg          = True)
        site_frac_new = unit_cell.fractionalize(site_cart_new)
        ed_val_ += abs(maptbx.eight_point_interpolation(map_data,site_frac_new))
        sites_frac_tmp[i_seq] = site_frac_new
      if(ed_val_ > ed_val):
        ed_val = ed_val_
        sites_frac_best = sites_frac_tmp.deep_copy()
      angle += angular_step
    for i_seq, point_i_seq in enumerate(points_i_seqs):
      scatterers[point_i_seq].site = sites_frac_best[i_seq]
  pdb_hierarchy.adopt_xray_structure(xray_structure)
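
# Illustrative sketch (not from the original source) of the building block
# used above: rotating a point about the axis through two points. The code
# calls scitbx's rotate_point_around_axis; the pure-Python Rodrigues formula
# below is a stand-in to show the geometry, not the library implementation.
import math

def rotate_about_axis(p1, p2, point, angle_deg):
  ax = [b - a for a, b in zip(p1, p2)]
  n = math.sqrt(sum(e * e for e in ax))
  k = [e / n for e in ax]                   # unit vector along the axis
  v = [c - a for a, c in zip(p1, point)]    # point relative to axis origin
  t = math.radians(angle_deg)
  kxv = [k[1]*v[2] - k[2]*v[1], k[2]*v[0] - k[0]*v[2], k[0]*v[1] - k[1]*v[0]]
  kdv = sum(a * b for a, b in zip(k, v))
  # Rodrigues: v' = v cos t + (k x v) sin t + k (k.v)(1 - cos t)
  vr = [v[i]*math.cos(t) + kxv[i]*math.sin(t) + k[i]*kdv*(1 - math.cos(t))
        for i in range(3)]
  return tuple(a + e for a, e in zip(p1, vr))

# a 90 degree turn of (1,0,0) about the z axis lands on (0,1,0)
new_site = rotate_about_axis((0,0,0), (0,0,1), (1,0,0), 90.0)
assert all(abs(a - b) < 1e-9 for a, b in zip(new_site, (0.0, 1.0, 0.0)))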
Example #3
def get_matching_sites_cart_in_both_h(old_h, new_h):
  old_h.reset_atom_i_seqs()
  new_h.reset_atom_i_seqs()
  fixed_sites = flex.vec3_double()
  moving_sites = flex.vec3_double()
  isel_for_old = flex.size_t()
  isel_for_new = flex.size_t()
  if old_h.atoms_size() == new_h.atoms_size():
    good = True
    for a1, a2 in zip(old_h.atoms(), new_h.atoms()):
      if a1.id_str()[:-6] != a2.id_str()[:-6]:
        # print "No match: '%s', '%s'" % (a1.id_str()[:-6], a2.id_str()[:-6])
        good = False
        break
      else:
        fixed_sites.append(a1.xyz)
        moving_sites.append(a2.xyz)
    if good:
      # print "SHORTCUT DONE"
      assert fixed_sites.size() == moving_sites.size()
      return fixed_sites, moving_sites
  fixed_sites = flex.vec3_double()
  moving_sites = flex.vec3_double()
  for old_rg, new_rg in zip(old_h.only_chain().residue_groups(), new_h.only_chain().residue_groups()):
    for old_ag, new_ag in zip(old_rg.atom_groups(), new_rg.atom_groups()):
      for atom in old_ag.atoms():
        a = new_ag.get_atom(atom.name.strip())
        if a is not None:
          fixed_sites.append(atom.xyz)
          moving_sites.append(a.xyz)
  assert fixed_sites.size() == moving_sites.size()
  return fixed_sites, moving_sites
Example #4
 def compute_functional_and_gradients(O):
   if (O.number_of_function_evaluations == 0):
     O.number_of_function_evaluations += 1
     return O.f_start, O.g_start
   O.number_of_function_evaluations += 1
   O.sites_cart_variable = flex.vec3_double(O.x)
   if (O.real_space_target_weight == 0):
     rs_f = 0.
     rs_g = flex.vec3_double(O.sites_cart_variable.size(), (0,0,0))
   else:
     if (O.local_standard_deviations_radius is None):
       rs_f = maptbx.real_space_target_simple(
         unit_cell   = O.unit_cell,
         density_map = O.density_map,
         sites_cart  = O.sites_cart_variable,
         selection   = O.selection_variable_real_space)
       rs_g = maptbx.real_space_gradients_simple(
         unit_cell   = O.unit_cell,
         density_map = O.density_map,
         sites_cart  = O.sites_cart_variable,
         delta       = O.real_space_gradients_delta,
         selection   = O.selection_variable_real_space)
     else:
       rs_f = local_standard_deviations_target(
         unit_cell=O.unit_cell,
         density_map=O.density_map,
         weight_map=O.weight_map,
         weight_map_scale_factor=O.weight_map_scale_factor,
         sites_cart=O.sites_cart_variable,
         site_radii=O.site_radii)
       rs_g = local_standard_deviations_gradients(
         unit_cell=O.unit_cell,
         density_map=O.density_map,
         weight_map=O.weight_map,
         weight_map_scale_factor=O.weight_map_scale_factor,
         sites_cart=O.sites_cart_variable,
         site_radii=O.site_radii,
         delta=O.real_space_gradients_delta)
     rs_f *= -O.real_space_target_weight
     rs_g *= -O.real_space_target_weight
   if (O.geometry_restraints_manager is None):
     f = rs_f
     g = rs_g
   else:
     if (O.selection_variable is None):
       O.sites_cart = O.sites_cart_variable
     else:
       O.sites_cart.set_selected(O.selection_variable, O.sites_cart_variable)
       if(O.states_collector is not None):
         O.states_collector.add(sites_cart = O.sites_cart)
     gr_e = O.geometry_restraints_manager.energies_sites(
       sites_cart=O.sites_cart,
       compute_gradients=True)
     gr_e_gradients = gr_e.gradients
     if (O.selection_variable is not None):
       gr_e_gradients = gr_e.gradients.select(O.selection_variable)
     f = rs_f + gr_e.target
     g = rs_g + gr_e_gradients
   return f, g.as_double()
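
# The method above follows the scitbx.lbfgs target-evaluator protocol: the
# minimizer reads the flat parameter vector O.x and repeatedly calls
# compute_functional_and_gradients(). A minimal self-contained sketch with a
# made-up quadratic target (only the protocol itself is taken from scitbx):
import scitbx.lbfgs
from scitbx.array_family import flex

class toy_minimizer(object):
  def __init__(self, sites_cart):
    self.x = sites_cart.as_double()       # flattened (x,y,z,...) parameters
    scitbx.lbfgs.run(target_evaluator=self)
  def compute_functional_and_gradients(self):
    sites = flex.vec3_double(self.x)
    f = flex.sum(sites.dot())             # sum of |site|^2, minimum at origin
    g = sites * 2.0                       # gradient of |site|^2
    return f, g.as_double()

m = toy_minimizer(flex.vec3_double([(1, 2, 3), (-4, 0, 5)]))
assert flex.max(flex.vec3_double(m.x).dot()) < 1e-4  # all sites pulled to ~0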
Example #5
 def center_of_mass_info(self):
     self.rcm = self.structure.center_of_mass()
     result = dynamics.center_of_mass_info(self.rcm, self.structure.sites_cart(), self.vxyz, self.weights)
     self.vcm = flex.vec3_double()
     self.acm = flex.vec3_double()
     self.vcm.append(result.vcm())
     self.acm.append(result.acm())
     self.ekcm = result.ekcm()
Example #6
    def zipper(self, initial_rms, level):
        matches = []
        for jj in xrange(1, self.n - 1):
            ii = jj - 1
            kk = jj + 1
            # make triplets of sequence related sites
            xi = self.set_a[ii]
            xpi = self.set_b[ii]
            xj = self.set_a[jj]
            xpj = self.set_b[jj]
            xk = self.set_a[kk]
            xpk = self.set_b[kk]
            # get the lsq matrix
            ref = flex.vec3_double([xi, xj, xk])
            mov = flex.vec3_double([xpi, xpj, xpk])

            lsq = superpose.least_squares_fit(ref, mov)
            # here we have the rotation and translation operators
            r = lsq.r
            t = lsq.t
            rmsd = 10.0
            # we would like to know the rmsd on the coords used for superposition
            new_sites = lsq.other_sites_best_fit()
            deltas = ref - new_sites
            rmsd = deltas.rms_length()
            if rmsd < initial_rms:
                # please apply this rotation to the full set
                converged = False
                count = 0
                match_size = 0
                previous_match_size = 0
                tmp_a = None
                tmp_b = None
                select = flex.bool()
                while not converged:
                    previous_match_size = match_size
                    tmp_a, tmp_b, select = self.pair_sites(r, t, level)
                    # print count, tmp_a.size()
                    match_size = tmp_a.size()
                    if match_size <= previous_match_size:
                        converged = True
                        break
                    if count > self.max_iter:
                        converged = True
                        break
                    if tmp_b.size() > 0:
                        lsq = superpose.least_squares_fit(tmp_a, tmp_b)
                        tmp_sites = lsq.other_sites_best_fit()
                        rmsd = tmp_a.rms_difference(tmp_sites)
                        r = lsq.r
                        t = lsq.t
                        count += 1
                if converged:

                    matches.append(
                        [select.deep_copy().iselection(), r, t, rmsd, select.deep_copy().iselection().size()]
                    )
        return matches
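
# Hedged usage sketch (data made up) of the superposition call at the heart
# of zipper(): superpose.least_squares_fit returns a rotation lsq.r and
# translation lsq.t that best map other_sites onto reference_sites.
from scitbx.array_family import flex
from scitbx.math import superpose

ref = flex.vec3_double([(0, 0, 0), (1, 0, 0), (0, 1, 0)])
mov = flex.vec3_double([(5, 5, 5), (5, 6, 5), (4, 5, 5)])  # ref, rigidly moved
lsq = superpose.least_squares_fit(reference_sites=ref, other_sites=mov)
fitted = lsq.other_sites_best_fit()        # applies r and t to mov
assert (ref - fitted).rms_length() < 1e-6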
Example #7
def run():
  for site in ([0,0,0], [0.1,0.1,0.1], [6,6,6], [3,3,3], [-0.1,0.1,-0.1],
                                                           [6,-6,6], [3,-3,3]):
      exercise_1(grid_step    = 0.1,
                 radius       = 1.0,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 site_cart    = flex.vec3_double([site]),
                 buffer_layer = 3.)
  for volume_per_atom in range(100,550,100)*10:
      exercise_2(grid_step    = 0.08,
                 radius       = 1.0,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 volume_per_atom    = volume_per_atom)
  for site in ([0,0,0], [0.1,0.1,0.1], [6,6,6], [3,3,3], [-0.1,0.1,-0.1],
                                                           [6,-6,6], [3,-3,3]):
      exercise_3(grid_step    = 0.1,
                 radius       = 0.9,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 d_min        = 0.08,
                 site_cart    = flex.vec3_double([site]),
                 buffer_layer = 3.)
  for volume_per_atom in range(100,550,100)*5:
      exercise_4(grid_step    = 0.1,
                 radius       = 0.9,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 d_min        = 0.08,
                 volume_per_atom = volume_per_atom,
                 use_weights            = False,
                 optimize_cutoff_radius = True)
  for use_weights, optimize_cutoff_radius in zip([True,False], [True,True]):
      exercise_4(grid_step    = 0.1,
                 radius       = 0.9,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 d_min        = 0.08,
                 volume_per_atom = 300,
                 use_weights            = use_weights,
                 optimize_cutoff_radius = optimize_cutoff_radius)
  for site in ([0,0,0], [0.1,0.1,0.1], [6,6,6], [3,3,3], [-0.1,0.1,-0.1],
                                                           [6,-6,6], [3,-3,3]):
      exercise_5(grid_step    = 0.1,
                 radius       = 0.9,
                 shell        = 0.0,
                 a            = 6.0,
                 b            = 3.0,
                 d_min        = 0.08,
                 site_cart    = flex.vec3_double([site]),
                 buffer_layer = 3.)
Example #8
def main(xparm_file, spot_file):

  import dxtbx
  from dxtbx.serialize.xds import to_crystal
  models = dxtbx.load(xparm_file)
  crystal_model = to_crystal(xparm_file)

  from dxtbx.model.experiment.experiment_list import Experiment, ExperimentList
  experiment = Experiment(beam=models.get_beam(),
                          detector=models.get_detector(),
                          goniometer=models.get_goniometer(),
                          scan=models.get_scan(),
                          crystal=crystal_model)

  detector = experiment.detector
  beam = experiment.beam
  goniometer = experiment.goniometer
  scan = experiment.scan

  from iotbx.xds import spot_xds
  spot_xds_handle = spot_xds.reader()
  spot_xds_handle.read_file(spot_file)

  from cctbx.array_family import flex
  centroids_px = flex.vec3_double(spot_xds_handle.centroid)
  miller_indices = flex.miller_index(spot_xds_handle.miller_index)

  # only those reflections that were actually indexed
  centroids_px = centroids_px.select(miller_indices != (0,0,0))
  miller_indices = miller_indices.select(miller_indices != (0,0,0))

  ub = crystal_model.get_A()

  d_spacings = [1.0 / (ub * mi).length() for mi in miller_indices]

  print max(d_spacings)

  # Convert Pixel coordinate into mm/rad
  x, y, z = centroids_px.parts()
  x_mm, y_mm = detector[0].pixel_to_millimeter(flex.vec2_double(x, y)).parts()
  z_rad = scan.get_angle_from_array_index(z, deg=False)
  centroids_mm = flex.vec3_double(x_mm, y_mm, z_rad)

  # then convert detector position to reciprocal space position

  # based on code in dials/algorithms/indexing/indexer2.py
  s1 = detector[0].get_lab_coord(flex.vec2_double(x_mm, y_mm))
  s1 = s1/s1.norms() * (1/beam.get_wavelength())
  S = s1 - beam.get_s0()
  reciprocal_space_points = S.rotate_around_origin(
    goniometer.get_rotation_axis(),
    -z_rad)

  d_spacings = 1/reciprocal_space_points.norms()
  dmax = flex.max(d_spacings)
  print dmax
Example #9
 def compute_functional_and_gradients(self):
     sites_cart = flex.vec3_double(self.x)
     f = 0
     g = flex.vec3_double(sites_cart.size(), (0, 0, 0))
     for sorted_asu_proxies in [self.conservative_pair_proxies.bond, self.conservative_pair_proxies.angle]:
         if sorted_asu_proxies is None:
             continue
         f += geometry_restraints.bond_residual_sum(
             sites_cart=sites_cart, sorted_asu_proxies=sorted_asu_proxies, gradient_array=g
         )
     return f, g.as_double()
Example #10
def side_chain_placement(ag_to_place, current_reference_ag, rotamer_manager):
  """
  Works with poly_gly truncated hierarchy.
  Also used in fix_rama_outliers.
  """
  resname = current_reference_ag.resname.upper()
  c = one_three.get(resname, None)
  if c is None:
    msg = "Only standard protein residues are currently supported.\n"
    msg += "The residue %s (chain %s, resid %s) is not standard." % (
        resname,
        current_reference_ag.parent().parent().id,
        current_reference_ag.parent().resid())
    raise Sorry(msg)
  ag_to_place.resname = three_one[c]
  if c == 'G':
    return

  # align residue from ideal_res_dict to just placed ALA (ag_to_place)
  # or from pdb_hierarchy_template
  fixed_sites = flex.vec3_double()
  moving_sites = flex.vec3_double()
  reper_atoms = ["C","CA", "N"]
  for (ag, arr) in [(ag_to_place, fixed_sites),
                    (current_reference_ag, moving_sites)]:
    for a in ag.atoms():
      if a.name.strip() in reper_atoms:
        arr.append(a.xyz)
  assert len(fixed_sites) == 3
  if len(moving_sites) < 3:
    error_msg = "C, CA or N atoms are absent in the secondary structure element." +\
        "\nPlease add them to the model and try again."
    raise Sorry(error_msg)
  assert len(moving_sites) == 3
  lsq_fit_obj = superpose.least_squares_fit(reference_sites = fixed_sites,
                                            other_sites = moving_sites)
  ideal_correct_ag = current_reference_ag.detached_copy()
  ideal_correct_ag.atoms().set_xyz(
      lsq_fit_obj.r.elems*ideal_correct_ag.atoms().extract_xyz()+\
      lsq_fit_obj.t.elems)
  ideal_correct_ag.atoms().set_xyz(
      rotamer_manager.nearest_rotamer_sites_cart(ideal_correct_ag))
  if len(ideal_correct_ag.atoms()) > 4:
    ag_to_place.pre_allocate_atoms(number_of_additional_atoms=\
                                                len(ideal_correct_ag.atoms())-4)
    for a in ideal_correct_ag.atoms():
      if a.name.strip() not in ["N","CA","C","O"]:
        at = a.detached_copy()
        at.uij_erase()
        ag_to_place.append_atom(atom=at)
  else:
    # This means something is wrong with the input model, e.g. only 3 atoms in
    # the residue, and they happened to be N, CA, C
    pass
Example #11
def get_cc(mc1, mc2, xrs):
  crystal_gridding = mc1.crystal_gridding(
    d_min             = mc1.d_min(),
    symmetry_flags    = maptbx.use_space_group_symmetry,
    resolution_factor = 0.25)
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = mc1)
  fft_map.apply_sigma_scaling()
  m1 = fft_map.real_map_unpadded()
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = mc2)
  fft_map.apply_sigma_scaling()
  m2 = fft_map.real_map_unpadded()
  assert m1.focus()==m2.focus()
  assert m1.all()==m2.all()
  sel = maptbx.grid_indices_around_sites(
    unit_cell  = mc1.unit_cell(),
    fft_n_real = m1.focus(),
    fft_m_real = m1.all(),
    sites_cart = flex.vec3_double(xrs.sites_cart()),
    site_radii = flex.double([1.5]*xrs.scatterers().size()))
  cc = flex.linear_correlation(x=m1.select(sel), y=m2.select(sel)).coefficient()
  def md(m, xrs):
    r = flex.double()
    for sf in xrs.sites_frac():
      r.append(m.eight_point_interpolation(sf))
    return flex.mean(r)
  return cc, md(m=m1, xrs=xrs), md(m=m2, xrs=xrs)
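
# Hedged sketch (toy data) of the correlation primitive used by get_cc():
# flex.linear_correlation computes the Pearson coefficient between two
# flex.double arrays.
from scitbx.array_family import flex

a = flex.double([0.1, 0.4, 0.9, 1.6])
b = a * 2.0 + 3.0                          # perfectly linearly related
cc = flex.linear_correlation(x=a, y=b).coefficient()
assert abs(cc - 1.0) < 1e-9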
Example #12
 def __init__(O, f_obs, i_obs, i_sig, i_calc=None, f_calc=None, wa=0.1, wb=0):
   assert [i_calc, f_calc].count(None) == 1
   if (i_calc is None):
     from cctbx.array_family import flex
     i_calc = flex.norm(f_calc)
   from cctbx import xray
   raw = xray.targets_shelxl_wght_ls_kwt_b_dv(
     f_obs=f_obs,
     i_obs=i_obs,
     i_sig=i_sig,
     ic=i_calc,
     wa=wa,
     wb=wb)
   assert len(raw) == 5
   O.scale_factor, \
   O.weights, \
   O.target, \
   O.i_gradients, \
   O.i_curvatures = raw
   if (f_calc is None):
     O.f_gradients = None
     O.f_hessians = None
   else:
     g = O.i_gradients
     c = O.i_curvatures
     O.f_gradients = 2 * g * f_calc
     a = flex.real(f_calc)
     b = flex.imag(f_calc)
     aa = 2 * g + 4 * a * a * c
     bb = 2 * g + 4 * b * b * c
     ab =         4 * a * b * c
     O.f_hessians = flex.vec3_double(aa, bb, ab)
Example #13
  def __init__(self,map_data,unit_cell,label,site_cart_1,site_cart_2,step=0.005):
    x1,y1,z1 = site_cart_1
    x2,y2,z2 = site_cart_2
    self.one_dim_point  = None
    self.peak_value     = None
    self.peak_site_cart = None
    self.status = None
    self.bond_length = math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)
    alp = 0
    self.data = flex.double()
    self.dist = flex.double()
    self.peak_sites = flex.vec3_double()
    i_seq = 0
    while alp <= 1.0+1.e-6:
      xp = x1+alp*(x2-x1)
      yp = y1+alp*(y2-y1)
      zp = z1+alp*(z2-z1)
      site_frac = unit_cell.fractionalize((xp,yp,zp))
      ed_ = maptbx.eight_point_interpolation(map_data, site_frac)
      self.dist.append( math.sqrt((x1-xp)**2+(y1-yp)**2+(z1-zp)**2) )
      self.data.append(ed_)
      self.peak_sites.append(unit_cell.orthogonalize(site_frac))
      alp += step
      i_seq += 1
    i_seq_left, i_seq_right, max_peak_i_seq = self.find_peak()
    self.b_estimated, self.q_estimated = None, None
    self.a, self.b = None, None
    self.peak_data, self.peak_dist = None, None
    if([i_seq_left, i_seq_right].count(None) == 0):
      self.one_dim_point  = self.dist[max_peak_i_seq]
      self.peak_value     = self.data[max_peak_i_seq]
      self.peak_site_cart = self.peak_sites[max_peak_i_seq]
      self.peak_data = self.data[i_seq_left:i_seq_right+1]
      self.peak_dist = self.dist[i_seq_left:i_seq_right+1]
      assert (self.peak_data < 0.0).count(True) == 0
      origin = self.dist[max_peak_i_seq]

      dist = (self.peak_dist - origin).deep_copy()
      sel = self.peak_data > 0.0
      data = self.peak_data.select(sel)
      dist = dist.select(sel)
      if(data.size() > 0):
         approx_obj = maptbx.one_gaussian_peak_approximation(
                                                data_at_grid_points    = data,
                                                distances              = dist,
                                                use_weights            = False,
                                                optimize_cutoff_radius = False)
         a_real = approx_obj.a_real_space()
         b_real = approx_obj.b_real_space()
         gof = approx_obj.gof()
         self.a = ias_scattering_dict[label].array_of_a()[0]
         self.b = ias_scattering_dict[label].array_of_b()[0]
         self.b_estimated = approx_obj.b_reciprocal_space()-self.b
         self.q_estimated = approx_obj.a_reciprocal_space()/self.a
         #print "%.2f %.2f"%(self.q_estimated, self.b_estimated)
         if(self.b_estimated <= 0.0):
            self.b_estimated = self.b
         if(self.q_estimated <= 0.0):
            self.q_estimated = self.a
    self.set_status()
Example #14
def finite_differences_site(cartesian_flag, target_ftor, structure,
                            delta=0.00001):
  unit_cell = structure.unit_cell()
  abc = unit_cell.parameters()[:3]
  derivatives = flex.vec3_double()
  for i_scatterer in xrange(structure.scatterers().size()):
    d_target_d_site = [0,0,0]
    for ix in xrange(3):
      target_values = []
      for d_sign in (-1, 1):
        modified_structure = structure.deep_copy_scatterers()
        ms = modified_structure.scatterers()[i_scatterer]
        site = list(ms.site)
        if (not cartesian_flag):
          site[ix] += d_sign * delta / abc[ix]
        else:
          site_cart = list(unit_cell.orthogonalize(site))
          site_cart[ix] += d_sign * delta
          site = unit_cell.fractionalize(site_cart)
        ms.site = site
        f_calc = target_ftor.f_obs().structure_factors_from_scatterers(
            xray_structure=modified_structure,
            algorithm="direct").f_calc()
        target_result = target_ftor(f_calc, compute_derivatives=False)
        target_values.append(target_result.target())
      derivative = (target_values[1] - target_values[0]) / (2 * delta)
      if (not cartesian_flag): derivative *= abc[ix]
      d_target_d_site[ix] = derivative
    derivatives.append(d_target_d_site)
  return derivatives
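
# The loop above applies a central finite difference: for each coordinate,
# d(target)/dx ~ (T(x + delta) - T(x - delta)) / (2 * delta). A tiny
# self-contained check on a function with a known derivative (made up here):
import math

def central_difference(f, x, delta=1e-5):
  return (f(x + delta) - f(x - delta)) / (2 * delta)

g = central_difference(math.sin, 0.3)
assert abs(g - math.cos(0.3)) < 1e-9       # d/dx sin(x) = cos(x)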
Example #15
 def _accumulate_significant(self, site, height, site_symmetry, equiv_sites):
   unit_cell = self.special_position_settings().unit_cell()
   orth = unit_cell.orthogonalize
   frac = unit_cell.fractionalize
   sum_w_sites = matrix.col(orth(site)) * height
   sum_w = height
   height_cutoff = height * self._cluster_height_fraction
   for i in xrange(self._peak_list_index, self._peak_list.size()):
     if (self._is_processed[i]): continue
     other_height = self._peak_list.heights()[i]
     if (other_height < height_cutoff): break
     other_site = self._peak_list.sites()[i]
     other_site_symmetry = self._special_position_settings.site_symmetry(
       other_site)
     if (    self._general_positions_only
         and not other_site_symmetry.is_point_group_1()):
       self._is_processed[i] = True
       continue
     other_site = other_site_symmetry.exact_site()
     dist_info = sgtbx.min_sym_equiv_distance_info(equiv_sites, other_site)
     dist = dist_info.dist()
     if (dist < self._min_cross_distance):
       self._is_processed[i] = True
       close_site = dist_info.apply(flex.vec3_double([other_site]))[0]
       close_site = site_symmetry.special_op() * close_site
       sum_w_sites += matrix.col(orth(close_site)) * other_height
       sum_w += other_height
   return frac(sum_w_sites / sum_w), height
Example #16
 def d_target_d_site_cart(self):
   manager = self.manager
   xray.set_scatterer_grad_flags(
     scatterers=manager.xray_structure.scatterers(),
     site=True)
   return flex.vec3_double(
     self.gradients_wrt_atomic_parameters().packed())
Example #17
def map_peak_filter(sites_frac, obs_map, cutoff):
  result = flex.vec3_double()
  for site_frac in sites_frac:
    val = obs_map.eight_point_interpolation(site_frac)
    if val>cutoff:
      result.append(site_frac)
  return result
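
# Hedged usage sketch for map_peak_filter: build a tiny made-up map, mark one
# grid point, and keep only the candidate sites whose interpolated density
# exceeds the cutoff.
from scitbx.array_family import flex

obs_map = flex.double(flex.grid(4, 4, 4), 0)
obs_map[(1, 1, 1)] = 8.0                             # a single "peak"
sites_frac = flex.vec3_double([(0.25, 0.25, 0.25),   # exactly on the peak
                               (0.75, 0.75, 0.75)])  # in flat density
kept = map_peak_filter(sites_frac, obs_map, cutoff=1.0)
assert kept.size() == 1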
Example #18
 def minimum_covering_sphere(self, epsilon=None):
   if (epsilon is None): epsilon = 1.e-3
   points = flex.vec3_double()
   orth = self.unit_cell.orthogonalize
   for vertex in self.shape_vertices():
     points.append(orth([float(e) for e in vertex]))
   return minimum_covering_sphere(points=points, epsilon=epsilon)
Example #19
 def gradients(self, xray_structure, force_update_mask=False):
   factor = 1.0
   sites_cart = xray_structure.sites_cart()
   if(self.fmodel is not None):
     max_shift = flex.max(flex.sqrt((self.sites_cart - sites_cart).dot()))
     if(max_shift > self.update_gradient_threshold):
       self.fmodel.update_xray_structure(
         xray_structure = xray_structure,
         update_f_calc  = True,
         update_f_mask  = False)
       self.gx = flex.vec3_double(self.x_target_functor(compute_gradients=True).\
         gradients_wrt_atomic_parameters(site=True).packed())
       self.sites_cart = sites_cart
   if(self.restraints_manager is not None):
     c = self.restraints_manager.energies_sites(sites_cart = sites_cart,
       compute_gradients=True)
     self.gc = c.gradients
     factor *= self.wc
     if(c.normalization_factor is not None): factor *= c.normalization_factor
   result = None
   if(self.wx is not None):
     result = self.wx * self.gx
   if(self.wc is not None):
     gcw = self.wc * self.gc
     if(result is None): result = gcw
     else: result = result + gcw
   if(factor != 1.0): result *= 1.0 / factor
   #print "norms:", self.gc.norm(), self.gx.norm(), result.norm()
   return result
Example #20
 def __init__(self, centers_of_mass,
                    sites_cart,
                    target_functor,
                    rot_objs,
                    selections,
                    suppress_gradients):
   t_r = target_functor(compute_gradients=not suppress_gradients)
   self.f = t_r.target_work()
   if (suppress_gradients):
     self.grads_wrt_r = None
     self.grads_wrt_t = None
     return
   target_grads_wrt_xyz = t_r.gradients_wrt_atomic_parameters(site=True)
   self.grads_wrt_r = []
   self.grads_wrt_t = []
   target_grads_wrt_xyz = flex.vec3_double(target_grads_wrt_xyz.packed())
   for sel,rot_obj, cm in zip(selections, rot_objs, centers_of_mass):
       sites_cart_cm = sites_cart.select(sel) - cm
       target_grads_wrt_xyz_sel = target_grads_wrt_xyz.select(sel)
       target_grads_wrt_r = matrix.sqr(
                   sites_cart_cm.transpose_multiply(target_grads_wrt_xyz_sel))
       self.grads_wrt_t.append(flex.double(target_grads_wrt_xyz_sel.sum()))
       g_phi = (rot_obj.r_phi() * target_grads_wrt_r).trace()
       g_psi = (rot_obj.r_psi() * target_grads_wrt_r).trace()
       g_the = (rot_obj.r_the() * target_grads_wrt_r).trace()
       self.grads_wrt_r.append(flex.double([g_phi, g_psi, g_the]))
Example #21
  def compute_zeta_multi(self, experiments):
    '''
    Compute zeta for each reflection.

    :param experiments: The list of experiments
    :return: Zeta for each reflection

    '''
    from dials.algorithms.profile_model.gaussian_rs import zeta_factor
    m2 = flex.vec3_double(len(experiments))
    s0 = flex.vec3_double(len(experiments))
    for i, e in enumerate(experiments):
      m2[i] = e.goniometer.get_rotation_axis()
      s0[i] = e.beam.get_s0()
    self['zeta'] = zeta_factor(m2, s0, self['s1'], self['id'])
    return self['zeta']
Example #22
 def check_f_derivs():
   g_ana = trg.f_gradients
   c_ana = trg.f_hessians
   eps = 1e-6
   g_fin = flex.complex_double()
   c_fin = flex.vec3_double()
   for ih in xrange(i_calc.size()):
     c_orig = f_calc[ih]
     g_fin_ab = []
     c_fin_ab = []
     for iab in [0,1]:
       fs = []
       gs = []
       for signed_eps in [eps, -eps]:
         if (iab == 0):
           f_calc[ih] = complex(c_orig.real + signed_eps, c_orig.imag)
         else:
           f_calc[ih] = complex(c_orig.real, c_orig.imag + signed_eps)
         trg_eps = kwt2(
           f_obs=f_obs, i_obs=i_obs, i_sig=i_sig,
           f_calc=f_calc, i_calc=None, wa=wa, wb=wb)
         fs.append(trg_eps.target)
         gs.append(trg_eps.f_gradients[ih])
       g_fin_ab.append((fs[0]-fs[1])/(2*eps))
       c_fin_ab.append((gs[0]-gs[1])/(2*eps))
     g_fin.append(complex(*g_fin_ab))
     assert approx_equal(c_fin_ab[0].imag, c_fin_ab[1].real)
     c_fin.append((c_fin_ab[0].real, c_fin_ab[1].imag, c_fin_ab[0].imag))
     f_calc[ih] = c_orig
   assert approx_equal(g_ana, g_fin)
   assert approx_equal(c_ana, c_fin)
Example #23
def testing_function_for_rsfit(basic_map,delta_h,xray_structure,out):
  for i_trial in xrange(100):
    sites_cart = flex.vec3_double((flex.random_double(size=3)-0.5)*1)
    tmp_sites_cart = sites_cart.deep_copy()
    for i in xrange(3):
      ref = lbfgs(
        basic_map=basic_map,
        sites_cart=tmp_sites_cart,
        delta_h=delta_h)
      temp = flex.double(ref.sites_cart[0])-flex.double((0,0,0))
      temp = math.sqrt(temp.dot(temp))
      if temp <= 2*delta_h:
        break
      print >> out, "recycling:", ref.sites_cart[0]
      tmp_sites_cart = ref.sites_cart
    for site,sitec in zip(ref.sites_cart,xray_structure.sites_cart()):
      print >> out, i_trial
      print >> out, sitec
      print >> out, sites_cart[0]
      print >> out, site
      temp = flex.double(site)-flex.double(sitec)
      temp = math.sqrt(temp.dot(temp))
      print >> out, temp, delta_h
      assert temp <= delta_h*2
      print >> out
Example #24
  def cluster_analysis_dbscan(self, vectors):
    import numpy as np

    from sklearn.cluster import DBSCAN
    from sklearn.preprocessing import StandardScaler

    vectors = flex.vec3_double(vectors)

    X = np.array(vectors)
    # scale the data - is this necessary/does it help or hinder?
    X = StandardScaler().fit_transform(X)

    # Compute DBSCAN
    params = self.params.multiple_lattice_search.cluster_analysis.dbscan
    db = DBSCAN(eps=params.eps, min_samples=params.min_samples).fit(X)
    core_samples = db.core_sample_indices_
    # core_samples is a list of numpy.int64 objects
    core_samples = flex.int([int(i) for i in core_samples])
    labels = flex.int(db.labels_.astype(np.int32))

    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

    logger.info('Estimated number of clusters: %d' % n_clusters_)

    return labels
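
# Hedged sketch (synthetic 3-D vectors) of the sklearn call used above:
# DBSCAN assigns each sample a cluster label, with -1 meaning noise.
import numpy as np
from sklearn.cluster import DBSCAN

X = np.array([[0, 0, 0], [0, 0, 0.1], [0, 0.1, 0],  # tight cluster
              [5, 5, 5], [5, 5, 5.1],               # second cluster
              [9, 0, 9]], dtype=float)              # isolated point -> noise
labels = DBSCAN(eps=0.5, min_samples=2).fit(X).labels_
assert set(labels) == {0, 1, -1}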
Example #25
 def collect_map_values (map, get_selections=False) :
   values = []
   selections = []
   if (map is None) :
     assert (not get_selections)
     return [ None ] * len(pdb_atoms)
   for i_seq, atom in enumerate(pdb_atoms) :
     if (selection[i_seq]) :
       site_frac = sites_frac[i_seq]
       values.append(map.eight_point_interpolation(site_frac))
       if (get_selections) :
         sel = maptbx.grid_indices_around_sites(
           unit_cell  = unit_cell,
           fft_n_real = map.focus(),
           fft_m_real = map.all(),
           sites_cart = flex.vec3_double([sites_cart[i_seq]]),
           site_radii = flex.double([1.5]))
         selections.append(map.select(sel))
     else :
       values.append(None)
       selections.append(None)
   if (get_selections) :
     return values, selections
   else :
     return values
Example #26
 def change_residue_rotamer_in_place(self,sites_cart, residue,
     m_chis, r_chis, mon_lib_srv):
   assert m_chis.count(None) == 0
   assert r_chis.count(None) == 0
   axis_and_atoms_to_rotate= \
     rotatable_bonds.axes_and_atoms_aa_specific(
         residue=residue,
         mon_lib_srv=mon_lib_srv,
         remove_clusters_with_all_h=True,
         log=None)
   if axis_and_atoms_to_rotate is None:
     return
   assert len(m_chis) == len(axis_and_atoms_to_rotate)
   assert len(r_chis) >= len(m_chis)
   counter = 0
   residue_iselection = residue.atoms().extract_i_seq()
   sites_cart_residue = sites_cart.select(residue_iselection)
   for aa in axis_and_atoms_to_rotate:
     axis = aa[0]
     atoms = aa[1]
     residue.atoms().set_xyz(new_xyz=sites_cart_residue)
     new_xyz = flex.vec3_double()
     angle_deg = r_chis[counter] - m_chis[counter]
     if angle_deg < 0:
       angle_deg += 360.0
     for atom in atoms:
       new_xyz = rotate_point_around_axis(
                   axis_point_1=sites_cart_residue[axis[0]],
                   axis_point_2=sites_cart_residue[axis[1]],
                   point=sites_cart_residue[atom],
                   angle=angle_deg, deg=True)
       sites_cart_residue[atom] = new_xyz
     sites_cart = sites_cart.set_selected(
           residue_iselection, sites_cart_residue)
     counter += 1
Example #27
def exercise_flood_fill():
  uc = uctbx.unit_cell('10 10 10 90 90 90')
  for uc in (uctbx.unit_cell('10 10 10 90 90 90'),
             uctbx.unit_cell('9 10 11 87 91 95')):
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(5,5,5))
    corner_cube = (0,4,20,24,100,104,120,124) # cube across all 8 corners
    channel = (12,37,38,39,42,43,62,63,67,68,87,112)
    data = flex.int(flex.grid(gridding.n_real()))
    for i in (corner_cube + channel): data[i] = 1
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(0) == 105
    for i in corner_cube: assert data[i] == 2
    for i in channel: assert data[i] == 3
    assert approx_equal(flood_fill.centres_of_mass(),
                        ((-0.5, -0.5, -0.5), (-2.5, 7/3, 2.5)))
    assert approx_equal(flood_fill.centres_of_mass_frac(),
                        ((-0.1, -0.1, -0.1), (-0.5, 7/15, 0.5)))
    assert approx_equal(flood_fill.centres_of_mass_cart(),
                        uc.orthogonalize(flood_fill.centres_of_mass_frac()))
    assert flood_fill.n_voids() == 2
    assert approx_equal(flood_fill.grid_points_per_void(), (8, 12))
    if 0:
      from crys3d import wx_map_viewer
      wx_map_viewer.display(raw_map=data.as_double(), unit_cell=uc, wires=False)
    #
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(10,10,10))
    data = flex.int(flex.grid(gridding.n_real()))
    # parallelogram
    points = [(2,4,5),(3,4,5),(4,4,5),(5,4,5),(6,4,5),
              (3,5,5),(4,5,5),(5,5,5),(6,5,5),(7,5,5),
              (4,6,5),(5,6,5),(6,6,5),(7,6,5),(8,6,5)]
    points_frac = flex.vec3_double()
    for p in points:
      data[p] = 1
      points_frac.append([p[i]/gridding.n_real()[i] for i in range(3)])
    points_cart = uc.orthogonalize(points_frac)
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(2) == 15
    assert approx_equal(flood_fill.centres_of_mass_frac(), ((0.5,0.5,0.5),))
    pai_cart = math.principal_axes_of_inertia(
      points=points_cart, weights=flex.double(points_cart.size(),1.0))
    F = matrix.sqr(uc.fractionalization_matrix())
    O = matrix.sqr(uc.orthogonalization_matrix())
    assert approx_equal(
      pai_cart.center_of_mass(), flood_fill.centres_of_mass_cart()[0])
    assert approx_equal(
      flood_fill.covariance_matrices_cart()[0],
      (F.transpose() * matrix.sym(
        sym_mat3=flood_fill.covariance_matrices_frac()[0]) * F).as_sym_mat3())
    assert approx_equal(
      pai_cart.inertia_tensor(), flood_fill.inertia_tensors_cart()[0])
    assert approx_equal(pai_cart.eigensystem().vectors(),
                        flood_fill.eigensystems_cart()[0].vectors())
    assert approx_equal(pai_cart.eigensystem().values(),
                        flood_fill.eigensystems_cart()[0].values())
  return
Example #28
 def compute_functional_and_gradients(O):
   if (O.number_of_function_evaluations == 0):
     O.number_of_function_evaluations += 1
     return O.f_start, O.g_start
   O.number_of_function_evaluations += 1
   O.sites_cart_residue = flex.vec3_double(O.x)
   rs_f = maptbx.real_space_target_simple(
     unit_cell=O.unit_cell,
     density_map=O.density_map,
     sites_cart=O.sites_cart_residue,
     selection=flex.bool(O.sites_cart_residue.size(),True))
   O.real_space_target = rs_f
   rs_g = maptbx.real_space_gradients_simple(
     unit_cell=O.unit_cell,
     density_map=O.density_map,
     sites_cart=O.sites_cart_residue,
     delta=O.real_space_gradients_delta,
     selection=flex.bool(O.sites_cart_residue.size(),True))
   O.rs_f = rs_f
   rs_f *= -O.real_space_target_weight
   rs_g *= -O.real_space_target_weight
   if (O.geometry_restraints_manager is None):
     f = rs_f
     g = rs_g
   else:
     O.sites_cart_all.set_selected(O.residue_i_seqs, O.sites_cart_residue)
     gr_e = O.geometry_restraints_manager.energies_sites(
       sites_cart=O.sites_cart_all, compute_gradients=True)
     f = rs_f + gr_e.target
     g = rs_g + gr_e.gradients.select(indices=O.residue_i_seqs)
   return f, g.as_double()
Example #29
 def compute_functional_and_gradients(self):
   sites_cart = flex.vec3_double(self.x)
   tmp = self.restraints_manager.energies_sites(sites_cart = sites_cart,
     compute_gradients=True)
   f = tmp.target
   g = tmp.gradients
   return f, g.as_double()
Example #30
def exercise_crystallographic():
  crystal_symmetry = crystal.symmetry(
    unit_cell=(10, 10, 10, 90, 90, 90),
    space_group_symbol="P 1 1 2")
  sites_frac = flex.vec3_double([
    (0.1, 0.1, 0.0),
    (0.9, 0.1, 0.0)])
  for distance_cutoff in [1,2]:
    pair_asu_table = \
      crystal_symmetry.special_position_settings().pair_asu_table(
        distance_cutoff=distance_cutoff,
        sites_frac=sites_frac)
    for strictly_in_asu in [True, False]:
      cluster = crystal.asu_clusters(
        pair_asu_table=pair_asu_table,
        strictly_in_asu=strictly_in_asu).sort_index_groups_by_size()
      cluster_sizes = [cluster.size() for cluster in cluster.index_groups]
      if (distance_cutoff == 1 or strictly_in_asu):
        assert cluster_sizes == [1, 1]
      else:
        assert cluster_sizes == [2]
  sites_frac = flex.vec3_double([
    (0.1, 0.1, 0.0),
    (0.2, 0.2, 0.0),
    (0.1, 0.3, 0.0),
    (0.9, 0.1, 0.0),
    (0.8, 0.2, 0.0)])
  for i_trial in xrange(10):
    if (i_trial > 0):
      sites_frac = sites_frac.select(
        flex.random_permutation(size=sites_frac.size()))
    for distance_cutoff in [1.5,2]:
      asu_mappings = crystal_symmetry.asu_mappings(
        buffer_thickness=distance_cutoff).process_sites_frac(
          original_sites=sites_frac)
      pair_asu_table = crystal.pair_asu_table(
        asu_mappings=asu_mappings).add_all_pairs(
          distance_cutoff=distance_cutoff)
      for strictly_in_asu in [True, False]:
        cluster = crystal.asu_clusters(
          pair_asu_table=pair_asu_table,
          strictly_in_asu=strictly_in_asu).sort_index_groups_by_size()
        cluster_sizes = [cluster.size() for cluster in cluster.index_groups]
        if (distance_cutoff == 1.5 or strictly_in_asu):
          assert cluster_sizes == [3, 2]
        else:
          assert cluster_sizes == [5]
Example #31
def exercise(file_name, out=sys.stdout):

    # Set up source data

    if not os.path.isfile(file_name):
        raise Sorry("Missing the file: %s" % (file_name) + "\n")

    print("Reading from %s" % (file_name))
    from iotbx.map_manager import map_manager
    m = map_manager(file_name)

    print("Header information from %s:" % (file_name))
    m.show_summary(out=out)

    map_data = m.map_data().deep_copy()
    crystal_symmetry = m.crystal_symmetry()
    unit_cell_parameters = m.crystal_symmetry().unit_cell().parameters()

    print("\nMap origin: %s Extent %s" % (map_data.origin(), map_data.all()))
    print("Original unit cell, not just unit cell of part in this file): %s" %
          (str(unit_cell_parameters)))

    grid_point = (1, 2, 3)
    if map_data.origin() != (0, 0, 0):  # make sure it is inside
        from scitbx.matrix import col
        grid_point = tuple(col(grid_point) + col(map_data.origin()))
    print("\nValue of map_data at grid point %s: %.3f" %
          (str(grid_point), map_data[grid_point]))
    print("Map data is %s" % (type(map_data)))

    random_position = (10, 5, 7.9)
    point_frac = crystal_symmetry.unit_cell().fractionalize(random_position)
    value_at_point_frac = map_data.eight_point_interpolation(point_frac)
    print("Value of map_data at coordinates %s: %.3f" %
          (str(random_position), value_at_point_frac))

    map_data_as_float = map_data.as_float()
    print("Map data as float is %s" % (type(map_data_as_float)))

    # make a little model
    sites_cart = flex.vec3_double(((8, 10, 12), (14, 15, 16)))
    model = model_manager.from_sites_cart(atom_name=' CA ',
                                          resname='ALA',
                                          chain_id='A',
                                          b_iso=30.,
                                          occ=1.,
                                          scatterer='C',
                                          sites_cart=sites_cart,
                                          crystal_symmetry=crystal_symmetry)

    # Move map and a model to place origin at (0, 0, 0)
    # map data is new copy but model is shifted in place.

    from iotbx.map_model_manager import map_model_manager
    mam = map_model_manager(
        map_manager=m,
        model=model.deep_copy(),
    )
    print("ORIGINALZZ", mam)
    mam.box_all_maps_around_model_and_shift_origin()
    print("boxedORIGINALZZ", mam)

    shifted_crystal_symmetry = mam.model().crystal_symmetry()
    shifted_model = mam.model()
    shifted_map_data = mam.map_data()

    print("\nOriginal map origin (grid units):", map_data.origin())
    print("Original model:\n", model.model_as_pdb())

    print("Shifted map origin:", shifted_map_data.origin())
    print("Shifted model:\n", shifted_model.model_as_pdb())

    # Save the map_model manager
    mam_dc = mam.deep_copy()
    print("dc", mam)
    print("dc mam_dc", mam_dc)

    # Mask map around atoms
    mam = mam_dc.deep_copy()
    print("dc mam_dc dc", mam_dc)
    print(mam)
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   set_outside_to_mean_inside=True,
                                   soft_mask=False)
    print("Mean before masking", mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        -0.0585683621466)
    print("Max before masking", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        -0.0585683621466)

    # Mask map around atoms, with soft mask
    mam = mam_dc.deep_copy()
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   soft_mask=True,
                                   soft_mask_radius=5,
                                   set_outside_to_mean_inside=True)
    print("Mean after first masking",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        -0.00177661714805)
    print("Max after first masking", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        0.236853733659)

    # Mask map around atoms again
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   set_outside_to_mean_inside=True,
                                   soft_mask=False)
    print("Mean after second masking",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        -0.0585683621466)
    print("Max after second masking",
          mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        -0.0585683621466)

    # Mask around edges
    mam = mam_dc.deep_copy()
    mam.mask_all_maps_around_edges(soft_mask_radius=3)
    print("Mean after masking edges",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        0.0155055604192)
    print("Max after masking edges", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        0.249827131629)

    print(
        "\nWriting map_data and model in shifted position (origin at 0, 0, 0)")

    output_file_name = 'shifted_map.ccp4'
    print("Writing to %s" % (output_file_name))
    mrcfile.write_ccp4_map(
        file_name=output_file_name,
        crystal_symmetry=shifted_crystal_symmetry,
        map_data=shifted_map_data,
    )

    output_file_name = 'shifted_model.pdb'
    f = open(output_file_name, 'w')
    print(shifted_model.model_as_pdb(), file=f)
    f.close()

    print("\nWriting map_data and model in original position (origin at %s)" %
          (str(mam.map_manager().origin_shift_grid_units)))

    output_file_name = 'new_map_original_position.ccp4'
    print("Writing to %s" % (output_file_name))
    mrcfile.write_ccp4_map(
        file_name=output_file_name,
        crystal_symmetry=shifted_crystal_symmetry,
        map_data=shifted_map_data,
        origin_shift_grid_units=mam.map_manager().origin_shift_grid_units)
    print(shifted_model.model_as_pdb())
    output_pdb_file_name = 'new_model_original_position.pdb'
    f = open(output_pdb_file_name, 'w')
    print(shifted_model.model_as_pdb(), file=f)
    f.close()

    # Write as mmcif
    output_cif_file_name = 'new_model_original_position.cif'
    f = open(output_cif_file_name, 'w')
    print(shifted_model.model_as_mmcif(), file=f)
    f.close()

    # Read the new map and model
    import iotbx.pdb
    new_model = model_manager(model_input=iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(open(output_pdb_file_name).read())),
                              crystal_symmetry=crystal_symmetry)
    assert new_model.model_as_pdb() == model.model_as_pdb()

    new_model_from_cif = model_manager(model_input=iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(open(output_cif_file_name).read())),
                                       crystal_symmetry=crystal_symmetry)
    assert new_model_from_cif.model_as_pdb() == model.model_as_pdb()

    # Read and box the original file again in case we modified m in any
    #   previous tests
    m = map_manager(file_name)
    mam = map_model_manager(model=model.deep_copy(), map_manager=m)
    mam.box_all_maps_around_model_and_shift_origin()

    file_name = output_file_name
    print("Reading from %s" % (file_name))
    new_map = iotbx.mrcfile.map_reader(file_name=file_name, verbose=False)
    new_map.data = new_map.data.shift_origin()
    print("Header information from %s:" % (file_name))
    new_map.show_summary(out=out)
    assert new_map.map_data().origin() == mam.map_manager().map_data().origin()
    assert new_map.crystal_symmetry().is_similar_symmetry(
        mam.map_manager().crystal_symmetry())

    # make a map_model_manager with lots of maps and model and ncs
    from mmtbx.ncs.ncs import ncs
    ncs_object = ncs()
    ncs_object.set_unit_ncs()
    mam = map_model_manager(
        map_manager=m,
        ncs_object=ncs_object,
        map_manager_1=m.deep_copy(),
        map_manager_2=m.deep_copy(),
        extra_model_list=[model.deep_copy(),
                          model.deep_copy()],
        extra_model_id_list=["model_1", "model_2"],
        extra_map_manager_list=[m.deep_copy(), m.deep_copy()],
        extra_map_manager_id_list=["extra_1", "extra_2"],
        model=model.deep_copy(),
    )

    # make a map_model_manager with lots of maps and model and ncs and run
    # with wrapping and ignore_symmetry_conflicts on
    from mmtbx.ncs.ncs import ncs
    ncs_object = ncs()
    ncs_object.set_unit_ncs()
    m.set_ncs_object(ncs_object.deep_copy())
    mam2 = map_model_manager(
        map_manager=m.deep_copy(),
        ncs_object=ncs_object.deep_copy(),
        map_manager_1=m.deep_copy(),
        map_manager_2=m.deep_copy(),
        extra_model_list=[model.deep_copy(),
                          model.deep_copy()],
        extra_model_id_list=["model_1", "model_2"],
        extra_map_manager_list=[m.deep_copy(), m.deep_copy()],
        extra_map_manager_id_list=["extra_1", "extra_2"],
        model=model.deep_copy(),
        ignore_symmetry_conflicts=True,
        wrapping=m.wrapping(),
    )
    assert mam.map_manager().is_similar(mam2.map_manager())
    assert mam.map_manager().is_similar(mam2.map_manager_1())
    for m in mam2.map_managers():
        assert mam.map_manager().is_similar(m)
    assert mam.model().shift_cart() == mam2.model().shift_cart()
    assert mam.model().shift_cart() == mam2.get_model_by_id(
        'model_2').shift_cart()

    print("OK")
Example #32
def hcp_fill_box(cb_op_original_to_sampling,
                 float_asu,
                 continuous_shift_flags,
                 point_distance,
                 buffer_thickness=-1,
                 all_twelve_neighbors=False,
                 exercise_cpp=True):
    if (exercise_cpp):
        cpp = close_packing.hexagonal_sampling_generator(
            cb_op_original_to_sampling=cb_op_original_to_sampling,
            float_asu=float_asu,
            continuous_shift_flags=continuous_shift_flags,
            point_distance=point_distance,
            buffer_thickness=buffer_thickness,
            all_twelve_neighbors=all_twelve_neighbors)
    assert point_distance > 0
    if (buffer_thickness < 0):
        buffer_thickness = point_distance * (2 / 3. * (.5 * math.sqrt(3)))
    if (exercise_cpp):
        assert cpp.cb_op_original_to_sampling().c(
        ) == cb_op_original_to_sampling.c()
        assert cpp.float_asu().unit_cell().is_similar_to(float_asu.unit_cell())
        assert cpp.continuous_shift_flags() == continuous_shift_flags
        assert approx_equal(cpp.point_distance(), point_distance)
        assert approx_equal(cpp.buffer_thickness(), buffer_thickness)
        assert cpp.all_twelve_neighbors() == all_twelve_neighbors
    float_asu_buffer = float_asu.add_buffer(thickness=buffer_thickness)
    hex_cell = hexagonal_sampling_cell(point_distance=point_distance)
    hex_box = hexagonal_box(
        hex_cell=hex_cell,
        vertices_cart=float_asu.shape_vertices(cartesian=True))
    hex_box_buffer = hexagonal_box(
        hex_cell=hex_cell,
        vertices_cart=float_asu_buffer.shape_vertices(cartesian=True))
    box_lower = []
    box_upper = []
    for i in xrange(3):
        if (continuous_shift_flags[i]):
            box_lower.append(0)
            box_upper.append(0)
        else:
            n = iceil(abs(hex_box.max[i] - hex_box.pivot[i]))
            box_lower.append(
                min(-2, ifloor(hex_box_buffer.min[i] - hex_box.pivot[i])))
            box_upper.append(
                n + max(2, iceil(hex_box_buffer.max[i] - hex_box.max[i])))
    if (exercise_cpp):
        assert list(cpp.box_lower()) == box_lower
        assert list(cpp.box_upper()) == box_upper
    hex_to_frac_matrix = (
        matrix.sqr(float_asu.unit_cell().fractionalization_matrix()) *
        matrix.sqr(hex_cell.orthogonalization_matrix()))
    sites_frac = flex.vec3_double()
    for point in flex.nested_loop(begin=box_lower,
                                  end=box_upper,
                                  open_range=False):
        site_hex = matrix.col(hex_box.pivot) \
                 + matrix.col(hex_indices_as_site(point))
        site_frac = hex_to_frac_matrix * site_hex
        if (float_asu_buffer.is_inside(site_frac)):
            sites_frac.append(site_frac)
        elif (all_twelve_neighbors):
            for offset in [(1, 0, 0), (1, 1, 0), (0, 1, 0), (-1, 0, 0),
                           (-1, -1, 0), (0, -1, 0), (0, 0, 1), (-1, -1, 1),
                           (0, -1, 1), (0, 0, -1), (-1, -1, -1), (0, -1, -1)]:
                offset_hex = hex_indices_as_site(offset, layer=point[2])
                offset_frac = hex_to_frac_matrix * matrix.col(offset_hex)
                other_site_frac = site_frac + offset_frac
                if (float_asu.is_inside(other_site_frac)):
                    sites_frac.append(site_frac)
                    break
    assert sites_frac.size() > 0
    rt = cb_op_original_to_sampling.c_inv().as_double_array()
    sites_frac = rt[:9] * sites_frac
    sites_frac += rt[9:]
    if (exercise_cpp):
        assert not cpp.at_end()
        cpp_sites_frac = cpp.all_sites_frac()
        assert cpp.at_end()
        assert cpp_sites_frac.size() == sites_frac.size()
        assert approx_equal(cpp_sites_frac, sites_frac)
        cpp.restart()
        assert not cpp.at_end()
        assert approx_equal(cpp.next_site_frac(), sites_frac[0])
        assert cpp.count_sites() == sites_frac.size() - 1
        assert cpp.at_end()
        cpp.restart()
        n = 0
        for site in cpp:
            n += 1
        assert n == sites_frac.size()
    return sites_frac
Example #33
def exercise_real_space_gradients_simple(timing):
  uc = uctbx.unit_cell((11,13,17))
  def check():
    map = flex.double(flex.grid(22,26,36).set_focus(22,26,34))
    site_frac = [i/n for i,n in zip(grid_point, map.focus())]
    sites_cart = flex.vec3_double([uc.orthogonalize(site_frac)])
    target = maptbx.real_space_target_simple(
      unit_cell=uc, density_map=map, sites_cart=sites_cart,
      selection=flex.bool(sites_cart.size(), True))
    assert approx_equal(target, 0)
    terms = maptbx.real_space_target_simple_per_site(
      unit_cell=uc, density_map=map, sites_cart=sites_cart)
    assert approx_equal(terms, [0])
    grads = maptbx.real_space_gradients_simple(
      unit_cell=uc, density_map=map, sites_cart=sites_cart, delta=0.1,
      selection=flex.bool(sites_cart.size(), True))
    assert approx_equal(grads, [(0,0,0)])
    grid_point_mod = [i%n for i,n in zip(grid_point, map.focus())]
    map[grid_point_mod] = 1
    target = maptbx.real_space_target_simple(
      unit_cell=uc, density_map=map, sites_cart=sites_cart,
      selection=flex.bool(sites_cart.size(), True))
    assert approx_equal(target, 1)
    terms = maptbx.real_space_target_simple_per_site(
      unit_cell=uc, density_map=map, sites_cart=sites_cart)
    assert approx_equal(terms, [1])
    grads = maptbx.real_space_gradients_simple(
      unit_cell=uc, density_map=map, sites_cart=sites_cart, delta=0.1,
      selection=flex.bool(sites_cart.size(), True))
    assert approx_equal(grads, [(0,0,0)])
    i,j,k = grid_point_mod
    u,v,w = map.focus()
    map[((i+1)%u,j,k)] = 0.3
    map[(i,(j+1)%v,k)] = 0.5
    map[(i,j,(k+1)%w)] = 0.7
    target = maptbx.real_space_target_simple(
      unit_cell=uc, density_map=map, sites_cart=sites_cart,
      selection=flex.bool(sites_cart.size(), True))
    assert approx_equal(target, 1)
    for delta in [0.1, 0.2]:
      grads = maptbx.real_space_gradients_simple(
        unit_cell=uc, density_map=map, sites_cart=sites_cart, delta=delta,
        selection=flex.bool(sites_cart.size(), True))
      assert approx_equal(grads, [(0.3,0.5,0.7)])
  for grid_point in [(0,0,0), (3,4,5), (-3,15,20)]:
    check()
  for i_trial in range(10):
    grid_point = [random.randrange(-100,100) for i in [0,1,2]]
    check()
  if (timing): n = 1000000
  else:        n = 10
  sites_cart = flex.vec3_double(flex.random_double(size=n*3)*40-20)
  map = flex.double(flex.grid(22,26,36).set_focus(22,26,34), 1)
  target = maptbx.real_space_target_simple(
    unit_cell=uc, density_map=map, sites_cart=sites_cart,
    selection=flex.bool(sites_cart.size(), True))
  assert approx_equal(target, n)
  t0 = time.time()
  maptbx.real_space_gradients_simple(
    unit_cell=uc, density_map=map, sites_cart=sites_cart, delta=0.1,
    selection=flex.bool(sites_cart.size(), True))
  tm = time.time() - t0
  msg = "real_space_gradients_simple: %.2f s / %d sites" % (tm, n)
  if (tm >= 0.01): msg += ", %.0f sites / s" % (n / tm)
  if (timing): print(msg)
Example #34
    def find_peaks_clean(self):
        import omptbx
        # doesn't seem to be any benefit to using more than say 8 threads
        num_threads = min(8, omptbx.omp_get_num_procs(), self.params.nproc)
        omptbx.omp_set_num_threads(num_threads)
        d_min = self.params.fft3d.reciprocal_space_grid.d_min
        rlgrid = 2 / (d_min * self.gridding[0])

        frame_number = self.reflections['xyzobs.px.value'].parts()[2]
        # XXX what about multiple imagesets?
        scan_range_min = max(int(math.floor(flex.min(frame_number))),
                             self.imagesets[0].get_array_range()[0])
        scan_range_max = min(int(math.ceil(flex.max(frame_number))),
                             self.imagesets[0].get_array_range()[1])
        scan_range = self.params.scan_range
        if not len(scan_range):
            scan_range = [[scan_range_min, scan_range_max]]

        scan = self.imagesets[0].get_scan()  # XXX what about multiple imagesets?
        angle_ranges = [[
            scan.get_angle_from_array_index(i, deg=False) for i in range_
        ] for range_ in scan_range]

        grid = flex.double(flex.grid(self.gridding), 0)
        sampling_volume_map(
            grid, flex.vec2_double(angle_ranges),
            self.imagesets[0].get_beam().get_s0(),
            self.imagesets[0].get_goniometer().get_rotation_axis(), rlgrid,
            d_min, self.params.b_iso)

        fft = fftpack.complex_to_complex_3d(self.gridding)
        grid_complex = flex.complex_double(reals=grid,
                                           imags=flex.double(grid.size(), 0))
        grid_transformed = fft.forward(grid_complex)
        grid_real = flex.pow2(flex.real(grid_transformed))

        gamma = 1
        peaks = flex.vec3_double()
        #n_peaks = 200
        n_peaks = 100  # XXX how many do we need?

        dirty_beam = grid_real
        dirty_map = self.grid_real.deep_copy()
        import time
        t0 = time.time()
        peaks = clean_3d(dirty_beam, dirty_map, n_peaks, gamma=gamma)
        t1 = time.time()
        #print "clean_3d took %.2f s" %(t1-t0)

        reciprocal_lattice_points = self.reflections['rlp'].select(
            self.reflections_used_for_indexing)

        peaks = self.optimise_peaks(peaks, reciprocal_lattice_points)

        peaks_frac = flex.vec3_double()
        for p in peaks:
            peaks_frac.append(
                (p[0] / self.gridding[0], p[1] / self.gridding[1],
                 p[2] / self.gridding[2]))
            #print p, peaks_frac[-1]

        if self.params.debug:
            self.debug_write_ccp4_map(grid, "sampling_volume.map")
            self.debug_write_ccp4_map(grid_real, "sampling_volume_FFT.map")
            self.debug_write_ccp4_map(dirty_map, "clean.map")

        self.sites = peaks_frac
        # we don't really know the "volume" of the peaks, but this method should
        # find the peaks in order of their intensity (strongest first)
        self.volumes = flex.double(range(len(self.sites), 0, -1))

        return
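
# clean_3d above is invoked in the classic dirty-beam/dirty-map fashion,
# which suggests a Hogbom-style CLEAN loop: repeatedly take the strongest
# residual peak and subtract the beam centred there. A 1-D sketch of that
# loop, illustrating the presumed scheme rather than the actual
# implementation behind clean_3d:
def clean_1d(dirty_beam, dirty_map, n_peaks, gamma=1.0):
    residual = list(dirty_map)
    centre = max(range(len(dirty_beam)), key=dirty_beam.__getitem__)
    beam_peak = dirty_beam[centre]
    peaks = []
    for _ in range(n_peaks):
        p = max(range(len(residual)), key=residual.__getitem__)
        height = residual[p]
        peaks.append(p)  # recorded in order of decreasing intensity
        for i in range(len(residual)):
            j = centre + (i - p)
            if 0 <= j < len(dirty_beam):
                residual[i] -= gamma * height * dirty_beam[j] / beam_peak
    return peaks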
Example #35
    def __call__(self, reflections, experiments, d_min=None):
        from scitbx import matrix
        from libtbx.math_utils import nearest_integer as nint

        reciprocal_lattice_points = reflections["rlp"]
        if "miller_index" not in reflections:
            reflections["miller_index"] = flex.miller_index(len(reflections))
        if d_min is not None:
            d_spacings = 1 / reciprocal_lattice_points.norms()
            inside_resolution_limit = d_spacings > d_min
        else:
            inside_resolution_limit = flex.bool(
                reciprocal_lattice_points.size(), True)
        sel = inside_resolution_limit & (reflections["id"] == -1)
        isel = sel.iselection()
        rlps = reciprocal_lattice_points.select(isel)
        refs = reflections.select(isel)
        phi = refs["xyzobs.mm.value"].parts()[2]

        if len(rlps) <= self._nearest_neighbours:
            raise DialsIndexError(
                "index_assignment.local.nearest_neighbour must be smaller than the number of accepted reflections (%d)"
                % len(rlps))

        UB_matrices = flex.mat3_double(
            [cm.get_A() for cm in experiments.crystals()])

        result = ext.AssignIndicesLocal(
            rlps,
            phi,
            UB_matrices,
            epsilon=self._epsilon,
            delta=self._delta,
            l_min=self._l_min,
            nearest_neighbours=self._nearest_neighbours,
        )
        miller_indices = result.miller_indices()
        crystal_ids = result.crystal_ids()
        hkl = miller_indices.as_vec3_double().iround()

        assert miller_indices.select(crystal_ids < 0).all_eq((0, 0, 0))

        for i_cryst in set(crystal_ids):
            if i_cryst < 0:
                continue

            A = matrix.sqr(experiments[i_cryst].crystal.get_A())
            A_inv = A.inverse()

            cryst_sel = crystal_ids == i_cryst
            rlp_sel = rlps.select(cryst_sel)
            hkl_sel = hkl.select(cryst_sel).as_vec3_double()

            d_sel = 1 / rlp_sel.norms()
            d_perm = flex.sort_permutation(d_sel, reverse=True)

            hf_0 = A_inv * rlp_sel[d_perm[0]]
            h_0 = matrix.col([nint(j) for j in hf_0.elems])
            offset = h_0 - matrix.col(hkl_sel[d_perm[0]])
            # print "offset:", offset.elems

            h = hkl_sel + flex.vec3_double(hkl_sel.size(), offset.elems)

            refs["miller_index"].set_selected(
                cryst_sel, flex.miller_index(list(h.iround())))
            refs["id"].set_selected(cryst_sel, i_cryst)

        crystal_ids.set_selected(crystal_ids < 0, -1)
        refs["id"] = crystal_ids
        refs["miller_index"].set_selected(crystal_ids < 0, (0, 0, 0))

        reflections["miller_index"].set_selected(isel, refs["miller_index"])
        reflections["id"].set_selected(isel, refs["id"])
        reflections.set_flags(reflections["miller_index"] != (0, 0, 0),
                              reflections.flags.indexed)
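
# The offset correction above uses the basic indexing relation
# rlp = A * h: fractional indices come from hf = A_inv * rlp and are
# rounded to the nearest integer. A minimal sketch with illustrative
# numbers (a 10 A cubic cell):
from scitbx import matrix

def assign_index(A, rlp):
    hf = A.inverse() * matrix.col(rlp)
    return tuple(int(round(e)) for e in hf.elems)

A = matrix.sqr((0.1, 0, 0, 0, 0.1, 0, 0, 0, 0.1))
assert assign_index(A, (0.1999, 0.0001, -0.3002)) == (2, 0, -3)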
Example #36
def exercise(file_name=None,
             pdb_file_name=None,
             map_file_name=None,
             split_pdb_file_name=None,
             out=sys.stdout):

    # Set up source data

    if not os.path.isfile(file_name):
        raise Sorry("Missing the file: %s\n" % file_name)

    print("Reading from %s" % (file_name))
    from iotbx.map_manager import map_manager
    m = map_manager(file_name)

    print("Header information from %s:" % (file_name))
    m.show_summary(out=out)

    map_data = m.map_data().deep_copy()
    crystal_symmetry = m.crystal_symmetry()
    unit_cell_parameters = m.crystal_symmetry().unit_cell().parameters()

    print("\nMap origin: %s Extent %s" % (map_data.origin(), map_data.all()))
    print("Original unit cell, not just unit cell of part in this file): %s" %
          (str(unit_cell_parameters)))

    grid_point = (1, 2, 3)
    if map_data.origin() != (0, 0, 0):  # make sure it is inside
        from scitbx.matrix import col
        grid_point = tuple(col(grid_point) + col(map_data.origin()))
    print("\nValue of map_data at grid point %s: %.3f" %
          (str(grid_point), map_data[grid_point]))
    print("Map data is %s" % (type(map_data)))

    random_position = (10, 5, 7.9)
    point_frac = crystal_symmetry.unit_cell().fractionalize(random_position)
    value_at_point_frac = map_data.eight_point_interpolation(point_frac)
    print("Value of map_data at coordinates %s: %.3f" %
          (str(random_position), value_at_point_frac))

    map_data_as_float = map_data.as_float()
    print("Map data as float is %s" % (type(map_data_as_float)))

    # make a little model
    sites_cart = flex.vec3_double(((8, 10, 12), (14, 15, 16)))
    model = model_manager.from_sites_cart(atom_name=' CA ',
                                          resname='ALA',
                                          chain_id='A',
                                          b_iso=30.,
                                          occ=1.,
                                          scatterer='C',
                                          sites_cart=sites_cart,
                                          crystal_symmetry=crystal_symmetry)

    # Move map and a model to place origin at (0, 0, 0)
    # map data is new copy but model is shifted in place.

    from iotbx.map_model_manager import map_model_manager
    mam = map_model_manager(
        map_manager=m,
        model=model.deep_copy(),
    )

    # Read in map and model and split up
    dm = DataManager()
    aa = dm.get_map_model_manager(model_file=pdb_file_name,
                                  map_files=map_file_name)
    bb = dm.get_map_model_manager(model_file=split_pdb_file_name,
                                  map_files=map_file_name)

    for selection_method in [
            'by_chain', 'by_segment', 'supplied_selections', 'boxes'
    ]:
        if selection_method == 'boxes':
            choices = [True, False]
        else:
            choices = [True]
        if selection_method == 'by_chain':
            mask_choices = [True, False]
        else:
            mask_choices = [False]
        for select_final_boxes_based_on_model in choices:
            for skip_empty_boxes in choices:
                for mask_choice in mask_choices:
                    if mask_choice:  # use split model
                        a = bb.deep_copy()
                    else:  # usual
                        a = aa.deep_copy()
                    print("\nRunning split_up_map_and_model with \n" +
                          "select_final_boxes_based_on_model=" +
                          "%s   skip_empty_boxes=%s selection_method=%s" %
                          (select_final_boxes_based_on_model, skip_empty_boxes,
                           selection_method))

                    if selection_method == 'by_chain':
                        print("Mask around unused atoms: %s" % (mask_choice))
                        box_info = a.split_up_map_and_model_by_chain(
                            mask_around_unselected_atoms=mask_choice)
                    elif selection_method == 'by_segment':
                        box_info = a.split_up_map_and_model_by_segment()
                    elif selection_method == 'supplied_selections':
                        selection = a.model().selection('all')
                        box_info = a.split_up_map_and_model_by_supplied_selections(
                            selection_list=[selection])
                    elif selection_method == 'boxes':
                        box_info = a.split_up_map_and_model_by_boxes(
                            skip_empty_boxes=skip_empty_boxes,
                            select_final_boxes_based_on_model=
                            select_final_boxes_based_on_model)
                    print(selection_method, skip_empty_boxes,
                          len(box_info.selection_list),
                          box_info.selection_list[0].count(True))
                    assert (selection_method, skip_empty_boxes,
                            len(box_info.selection_list),
                            box_info.selection_list[0].count(True)) in [
                                ('by_chain', True, 3, 19),
                                ('by_chain', True, 1, 86),
                                ('by_segment', True, 1, 86),
                                ('supplied_selections', True, 1, 86),
                                ('boxes', True, 7, 9),
                                ('boxes', False, 12, 0),
                                ('boxes', True, 13, 1),
                                ('boxes', False, 36, 0),
                            ], 'failed to find %s %s %s %s' % (
                                selection_method, skip_empty_boxes,
                                len(box_info.selection_list),
                                box_info.selection_list[0].count(True))

                    # Change the coordinates in one box
                    small_model = box_info.mmm_list[0].model()
                    small_sites_cart = small_model.get_sites_cart()
                    from scitbx.matrix import col
                    small_sites_cart += col((1, 0, 0))
                    small_model.set_crystal_symmetry_and_sites_cart(
                        sites_cart=small_sites_cart,
                        crystal_symmetry=small_model.crystal_symmetry())
                    # Put everything back together
                    a.merge_split_maps_and_models(box_info=box_info)

    mam.box_all_maps_around_model_and_shift_origin()

    shifted_crystal_symmetry = mam.model().crystal_symmetry()
    shifted_model = mam.model()
    shifted_map_data = mam.map_data()

    print("\nOriginal map origin (grid units):", map_data.origin())
    print("Original model:\n", model.model_as_pdb())

    print("Shifted map origin:", shifted_map_data.origin())
    print("Shifted model:\n", shifted_model.model_as_pdb())

    # Save the map_model manager
    mam_dc = mam.deep_copy()
    print("dc", mam)
    print("dc mam_dc", mam_dc)

    # Mask map around atoms
    mam = mam_dc.deep_copy()
    print("dc mam_dc dc", mam_dc)
    print(mam)
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   set_outside_to_mean_inside=True,
                                   soft_mask=False)
    print("Mean before masking", mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        -0.0585683621466)
    print("Max before masking", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        -0.0585683621466)

    # Mask map around atoms, with soft mask
    mam = mam_dc.deep_copy()
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   soft_mask=True,
                                   soft_mask_radius=5,
                                   set_outside_to_mean_inside=True)
    print("Mean after first masking",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        0,
                        eps=0.05)
    print("Max after first masking", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        0.10,
                        eps=0.05)

    # Mask map around atoms again
    mam.mask_all_maps_around_atoms(mask_atoms_atom_radius=3,
                                   set_outside_to_mean_inside=True,
                                   soft_mask=False)
    print("Mean after second masking",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean, 0, eps=0.1)
    print("Max after second masking",
          mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max, 0, eps=0.1)

    # Mask around edges
    mam = mam_dc.deep_copy()
    mam.mask_all_maps_around_edges(soft_mask_radius=3)
    print("Mean after masking edges",
          mam.map_data().as_1d().min_max_mean().mean)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().mean,
                        0,
                        eps=0.05)
    print("Max after masking edges", mam.map_data().as_1d().min_max_mean().max)
    assert approx_equal(mam.map_data().as_1d().min_max_mean().max,
                        0.20,
                        eps=0.05)

    print(
        "\nWriting map_data and model in shifted position (origin at 0, 0, 0)")

    output_file_name = 'shifted_map.ccp4'
    print("Writing to %s" % (output_file_name))
    mrcfile.write_ccp4_map(
        file_name=output_file_name,
        crystal_symmetry=shifted_crystal_symmetry,
        map_data=shifted_map_data,
    )

    output_file_name = 'shifted_model.pdb'
    f = open(output_file_name, 'w')
    print(shifted_model.model_as_pdb(), file=f)
    f.close()

    print("\nWriting map_data and model in original position (origin at %s)" %
          (str(mam.map_manager().origin_shift_grid_units)))

    output_file_name = 'new_map_original_position.ccp4'
    print("Writing to %s" % (output_file_name))
    mrcfile.write_ccp4_map(
        file_name=output_file_name,
        crystal_symmetry=shifted_crystal_symmetry,
        map_data=shifted_map_data,
        origin_shift_grid_units=mam.map_manager().origin_shift_grid_units)
    print(shifted_model.model_as_pdb())
    output_pdb_file_name = 'new_model_original_position.pdb'
    f = open(output_pdb_file_name, 'w')
    print(shifted_model.model_as_pdb(), file=f)
    f.close()

    # Write as mmcif
    output_cif_file_name = 'new_model_original_position.cif'
    f = open(output_cif_file_name, 'w')
    print(shifted_model.model_as_mmcif(), file=f)
    f.close()

    # Read the new map and model
    import iotbx.pdb
    new_model = model_manager(model_input=iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(open(output_pdb_file_name).read())),
                              crystal_symmetry=crystal_symmetry)
    assert new_model.model_as_pdb() == model.model_as_pdb()

    new_model_from_cif = model_manager(model_input=iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(open(output_cif_file_name).read())),
                                       crystal_symmetry=crystal_symmetry)
    assert new_model_from_cif.model_as_pdb() == model.model_as_pdb()

    # Read and box the original file again in case we modified m in any
    #   previous tests
    m = map_manager(file_name)
    mam = map_model_manager(model=model.deep_copy(), map_manager=m)
    mam.box_all_maps_around_model_and_shift_origin()

    file_name = output_file_name
    print("Reading from %s" % (file_name))
    new_map = iotbx.mrcfile.map_reader(file_name=file_name, verbose=False)
    new_map.data = new_map.data.shift_origin()
    print("Header information from %s:" % (file_name))
    new_map.show_summary(out=out)
    assert new_map.map_data().origin() == mam.map_manager().map_data().origin()
    assert new_map.crystal_symmetry().is_similar_symmetry(
        mam.map_manager().crystal_symmetry())

    # make a map_model_manager with lots of maps and model and ncs
    from mmtbx.ncs.ncs import ncs
    ncs_object = ncs()
    ncs_object.set_unit_ncs()
    mam = map_model_manager(
        map_manager=m,
        ncs_object=ncs_object,
        map_manager_1=m.deep_copy(),
        map_manager_2=m.deep_copy(),
        extra_model_list=[model.deep_copy(),
                          model.deep_copy()],
        extra_model_id_list=["model_1", "model_2"],
        extra_map_manager_list=[m.deep_copy(), m.deep_copy()],
        extra_map_manager_id_list=["extra_1", "extra_2"],
        model=model.deep_copy(),
    )

    # make a map_model_manager with lots of maps and model and ncs and run
    # with wrapping and ignore_symmetry_conflicts on
    from mmtbx.ncs.ncs import ncs
    ncs_object = ncs()
    ncs_object.set_unit_ncs()
    m.set_ncs_object(ncs_object.deep_copy())
    mam2 = map_model_manager(
        map_manager=m.deep_copy(),
        ncs_object=ncs_object.deep_copy(),
        map_manager_1=m.deep_copy(),
        map_manager_2=m.deep_copy(),
        extra_model_list=[model.deep_copy(),
                          model.deep_copy()],
        extra_model_id_list=["model_1", "model_2"],
        extra_map_manager_list=[m.deep_copy(), m.deep_copy()],
        extra_map_manager_id_list=["extra_1", "extra_2"],
        model=model.deep_copy(),
        ignore_symmetry_conflicts=True,
        wrapping=m.wrapping(),
    )
    assert mam.map_manager().is_similar(mam2.map_manager())
    assert mam.map_manager().is_similar(mam2.map_manager_1())
    for m in mam2.map_managers():
        assert mam.map_manager().is_similar(m)
    assert mam.model().shift_cart() == mam2.model().shift_cart()
    assert mam.model().shift_cart() == mam2.get_model_by_id(
        'model_2').shift_cart()

    print("OK")
Example #37
    def find_basis_vector_combinations_cluster_analysis(self):
        self.fft()

        # hijack the xray.structure class to facilitate calculation of distances
        xs = xray.structure(crystal_symmetry=self.crystal_symmetry)
        for i, site in enumerate(self.sites):
            xs.add_scatterer(xray.scatterer("C%i" % i, site=site))

        xs = xs.sites_mod_short()
        xs = xs.select(xs.sites_frac().norms() < 0.45)
        cell_multiplier = 10
        xs1 = xs.customized_copy(unit_cell=uctbx.unit_cell(
            [xs.unit_cell().parameters()[0] * cell_multiplier] * 3))
        xs1.set_sites_cart(xs.sites_cart())
        xs = xs1
        sites_cart = xs.sites_cart()
        lengths = flex.double([matrix.col(sc).length() for sc in sites_cart])
        xs = xs.select(flex.sort_permutation(lengths))
        if self.params.debug:
            with open('peaks.pdb', 'w') as f:
                print(xs.as_pdb_file(), file=f)

        vector_heights = flex.double()

        sites_frac = xs.sites_frac()
        pair_asu_table = xs.pair_asu_table(
            distance_cutoff=self.params.max_cell)
        asu_mappings = pair_asu_table.asu_mappings()
        distances = crystal.calculate_distances(pair_asu_table, sites_frac)
        vectors = []
        difference_vectors = []
        pairs = []
        for di in distances:
            if di.distance < self.params.min_cell: continue
            i_seq, j_seq = di.i_seq, di.j_seq
            if i_seq > j_seq: continue
            pairs.append((i_seq, j_seq))
            rt_mx_ji = di.rt_mx_ji
            site_frac_ji = rt_mx_ji * sites_frac[j_seq]
            site_cart_ji = xs.unit_cell().orthogonalize(site_frac_ji)
            site_cart_i = xs.unit_cell().orthogonalize(sites_frac[i_seq])
            vectors.append(matrix.col(site_cart_ji))
            diff_vec = matrix.col(site_cart_i) - matrix.col(site_cart_ji)
            if diff_vec[0] < 0:
                # only one hemisphere of difference vector space
                diff_vec = -diff_vec
            difference_vectors.append(diff_vec)

        params = self.params.multiple_lattice_search.cluster_analysis
        if params.method == 'dbscan':
            i_cluster = self.cluster_analysis_dbscan(difference_vectors)
            min_cluster_size = 1
        elif params.method == 'hcluster':
            i_cluster = self.cluster_analysis_hcluster(difference_vectors)
            i_cluster -= 1  # hcluster starts counting at 1
            min_cluster_size = params.min_cluster_size

        if self.params.debug_plots:
            self.debug_plot_clusters(difference_vectors,
                                     i_cluster,
                                     min_cluster_size=min_cluster_size)

        clusters = []
        min_cluster_size = params.min_cluster_size
        for i in range(max(i_cluster) + 1):
            isel = (i_cluster == i).iselection()
            if len(isel) < min_cluster_size:
                continue
            clusters.append(isel)

        cluster_point_sets = []
        centroids = []
        cluster_sizes = flex.int()

        difference_vectors = flex.vec3_double(difference_vectors)

        from libtbx.utils import flat_list
        for cluster in clusters:
            points = flat_list([pairs[i] for i in cluster])
            cluster_point_sets.append(set(points))
            d_vectors = difference_vectors.select(cluster)
            cluster_sizes.append(len(d_vectors))
            centroids.append(d_vectors.mean())

        # build a graph where each node is a centroid from the difference vector
        # cluster analysis above, and an edge is defined when there is a
        # significant overlap between the sets of peaks in the FFT map that
        # contributed to the difference vectors in two clusters
        import networkx as nx
        G = nx.Graph()
        G.add_nodes_from(range(len(cluster_point_sets)))

        cutoff_frac = 0.25
        for i in range(len(cluster_point_sets)):
            for j in range(i + 1, len(cluster_point_sets)):
                intersection_ij = cluster_point_sets[i].intersection(
                    cluster_point_sets[j])
                union_ij = cluster_point_sets[i].union(cluster_point_sets[j])
                frac_connected = len(intersection_ij) / len(union_ij)
                if frac_connected > cutoff_frac:
                    G.add_edge(i, j)

        # iteratively find the maximum cliques in the graph
        # break from the loop if there are no cliques remaining or there are
        # fewer than 3 vectors in the remaining maximum clique
        # Allow 1 basis vector to be shared between two cliques, to allow for
        # cases where two lattices share one basis vectors (e.g. two plate
        # crystals exactly aligned in one direction, but not in the other two)
        distinct_cliques = []
        cliques = list(nx.find_cliques(G))
        cliques = sorted(cliques, key=len, reverse=True)
        for i, clique in enumerate(cliques):
            clique = set(clique)
            if len(clique) < 3:
                break
            is_distinct = True
            for c in distinct_cliques:
                if len(c.intersection(clique)) > 1:
                    is_distinct = False
                    break
            if is_distinct:
                distinct_cliques.append(clique)
                this_set = set()
                for i_cluster in clique:
                    this_set = this_set.union(cluster_point_sets[i_cluster])
                logger.info("Clique %i: %i lattice points" %
                            (i + 1, len(this_set)))

        assert len(distinct_cliques) > 0

        logger.info("Estimated number of lattices: %i" % len(distinct_cliques))

        self.candidate_basis_vectors = []
        self.candidate_crystal_models = []

        for clique in distinct_cliques:
            sel = flex.size_t(list(clique))
            vectors = flex.vec3_double(centroids).select(sel)
            perm = flex.sort_permutation(vectors.norms())
            vectors = [matrix.col(vectors[p]) for p in perm]

            # exclude vectors that are (approximately) integer multiples of a shorter
            # vector
            unique_vectors = []
            for v in vectors:
                is_unique = True
                for v_u in unique_vectors:
                    if is_approximate_integer_multiple(v_u,
                                                       v,
                                                       relative_tolerance=0.01,
                                                       angular_tolerance=0.5):
                        is_unique = False
                        break
                if is_unique:
                    unique_vectors.append(v)
            vectors = unique_vectors

            self.candidate_basis_vectors.extend(vectors)
            candidate_orientation_matrices \
              = self.find_candidate_orientation_matrices(vectors)
            if len(candidate_orientation_matrices) == 0:
                continue
            crystal_model, n_indexed = self.choose_best_orientation_matrix(
                candidate_orientation_matrices)
            if crystal_model is None: continue
            # map to minimum reduced cell
            crystal_symmetry = crystal.symmetry(
                unit_cell=crystal_model.get_unit_cell(),
                space_group=crystal_model.get_space_group())
            cb_op = crystal_symmetry.change_of_basis_op_to_minimum_cell()
            crystal_model = crystal_model.change_basis(cb_op)
            self.candidate_crystal_models.append(crystal_model)

        if self.params.debug:
            file_name = "vectors.pdb"
            a = self.params.max_cell
            cs = crystal.symmetry(unit_cell=(a, a, a, 90, 90, 90),
                                  space_group="P1")
            xs = xray.structure(crystal_symmetry=cs)
            for v in difference_vectors:
                v = matrix.col(v)
                xs.add_scatterer(xray.scatterer("C", site=v / (a / 10)))
            xs.sites_mod_short()
            with open(file_name, 'w') as f:
                print(xs.as_pdb_file(), file=f)

        for crystal_model in self.candidate_crystal_models:
            logger.debug(crystal_model)
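
# The uniqueness filter above drops a candidate vector v when it is
# roughly an integer multiple of a shorter vector v_u. A plain-Python
# sketch of such a test, assuming the tolerances carry the meanings
# their names suggest (not the dials implementation):
import math

def is_approx_integer_multiple(v_u, v, relative_tolerance=0.01,
                               angular_tolerance=0.5):
    len_u = math.sqrt(sum(e * e for e in v_u))
    len_v = math.sqrt(sum(e * e for e in v))
    n = len_v / len_u
    if round(n) < 2 or abs(n - round(n)) > relative_tolerance * round(n):
        return False
    cos_angle = sum(a * b for a, b in zip(v_u, v)) / (len_u * len_v)
    angle = math.degrees(math.acos(max(-1.0, min(1.0, cos_angle))))
    return min(angle, 180.0 - angle) < angular_tolerance

assert is_approx_integer_multiple((1, 0, 0), (3.001, 0, 0))
assert not is_approx_integer_multiple((1, 0, 0), (2.5, 0, 0))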
Example #38
    def __init__(self,
                 map_data,
                 unit_cell,
                 label,
                 site_cart_1,
                 site_cart_2,
                 step=0.005):
        x1, y1, z1 = site_cart_1
        x2, y2, z2 = site_cart_2
        self.one_dim_point = None
        self.peak_value = None
        self.peak_site_cart = None
        self.status = None
        self.bond_length = math.sqrt((x1 - x2)**2 + (y1 - y2)**2 +
                                     (z1 - z2)**2)
        alp = 0
        self.data = flex.double()
        self.dist = flex.double()
        self.peak_sites = flex.vec3_double()
        i_seq = 0
        while alp <= 1.0 + 1.e-6:
            xp = x1 + alp * (x2 - x1)
            yp = y1 + alp * (y2 - y1)
            zp = z1 + alp * (z2 - z1)
            site_frac = unit_cell.fractionalize((xp, yp, zp))
            ed_ = maptbx.eight_point_interpolation(map_data, site_frac)
            self.dist.append(
                math.sqrt((x1 - xp)**2 + (y1 - yp)**2 + (z1 - zp)**2))
            self.data.append(ed_)
            self.peak_sites.append(unit_cell.orthogonalize(site_frac))
            alp += step
            i_seq += 1
        i_seq_left, i_seq_right, max_peak_i_seq = self.find_peak()
        self.b_estimated, self.q_estimated = None, None
        self.a, self.b = None, None
        self.peak_data, self.peak_dist = None, None
        if ([i_seq_left, i_seq_right].count(None) == 0):
            self.one_dim_point = self.dist[max_peak_i_seq]
            self.peak_value = self.data[max_peak_i_seq]
            self.peak_site_cart = self.peak_sites[max_peak_i_seq]
            self.peak_data = self.data[i_seq_left:i_seq_right + 1]
            self.peak_dist = self.dist[i_seq_left:i_seq_right + 1]
            assert (self.peak_data < 0.0).count(True) == 0
            origin = self.dist[max_peak_i_seq]

            dist = (self.peak_dist - origin).deep_copy()
            sel = self.peak_data > 0.0
            data = self.peak_data.select(sel)
            dist = dist.select(sel)
            if (data.size() > 0):
                approx_obj = maptbx.one_gaussian_peak_approximation(
                    data_at_grid_points=data,
                    distances=dist,
                    use_weights=False,
                    optimize_cutoff_radius=False)
                a_real = approx_obj.a_real_space()
                b_real = approx_obj.b_real_space()
                gof = approx_obj.gof()
                self.a = ias_scattering_dict[label].array_of_a()[0]
                self.b = ias_scattering_dict[label].array_of_b()[0]
                self.b_estimated = approx_obj.b_reciprocal_space() - self.b
                self.q_estimated = approx_obj.a_reciprocal_space() / self.a
                #print "%.2f %.2f"%(self.q_estimated, self.b_estimated)
                if (self.b_estimated <= 0.0):
                    self.b_estimated = self.b
                if (self.q_estimated <= 0.0):
                    self.q_estimated = self.a
        self.set_status()
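
# one_gaussian_peak_approximation above fits the 1-D density profile to
# rho(r) ~ a * exp(-b * r**2). A minimal sketch of the same fit done as
# linear least squares on log(rho) = log(a) - b * r**2 (illustrative
# only, not the cctbx routine; needs strictly positive data, which the
# sel = self.peak_data > 0.0 selection above guarantees):
import math

def fit_gaussian_peak(distances, values):
    xs = [d * d for d in distances]
    ys = [math.log(v) for v in values]
    mean_x = sum(xs) / len(xs)
    mean_y = sum(ys) / len(ys)
    slope = (sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys)) /
             sum((x - mean_x) ** 2 for x in xs))
    return math.exp(mean_y - slope * mean_x), -slope  # (a, b)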
Example #39
def check_with_grid_tags(inp_symmetry, symmetry_flags, sites_cart,
                         point_distance, strictly_inside, flag_write_pdb,
                         verbose):
    cb_op_inp_ref = inp_symmetry.change_of_basis_op_to_reference_setting()
    if (verbose):
        print "cb_op_inp_ref.c():", cb_op_inp_ref.c()
    ref_symmetry = inp_symmetry.change_basis(cb_op_inp_ref)
    search_symmetry = sgtbx.search_symmetry(
        flags=symmetry_flags,
        space_group_type=ref_symmetry.space_group_info().type(),
        seminvariant=ref_symmetry.space_group_info().structure_seminvariants())
    assert search_symmetry.continuous_shifts_are_principal()
    continuous_shift_flags = search_symmetry.continuous_shift_flags()
    if (flag_write_pdb):
        tag_sites_frac = flex.vec3_double()
    else:
        tag_sites_frac = None
    if (strictly_inside):
        inp_tags = inp_symmetry.gridding(step=point_distance * .7,
                                         symmetry_flags=symmetry_flags).tags()
        if (tag_sites_frac is not None):
            for point in flex.nested_loop(inp_tags.n_real()):
                if (inp_tags.tags().tag_array()[point] < 0):
                    point_frac_inp = [
                        float(n) / d for n, d in zip(point, inp_tags.n_real())
                    ]
                    tag_sites_frac.append(point_frac_inp)
        if (inp_tags.tags().n_independent() < sites_cart.size()):
            print "FAIL:", inp_symmetry.space_group_info(), \
                           inp_tags.tags().n_independent(), sites_cart.size()
            raise AssertionError
    else:
        inp_tags = inp_symmetry.gridding(step=point_distance / 2.,
                                         symmetry_flags=symmetry_flags).tags()
        sites_frac_inp = inp_symmetry.unit_cell().fractionalize(
            sites_cart=sites_cart)
        rt = cb_op_inp_ref.c().as_double_array()
        sites_frac_ref = rt[:9] * sites_frac_inp
        sites_frac_ref += rt[9:]
        max_distance = 2 * ((.5 * math.sqrt(3) * point_distance) * 2 / 3.)
        if (verbose):
            print "max_distance:", max_distance
        for point in flex.nested_loop(inp_tags.n_real()):
            if (inp_tags.tags().tag_array()[point] < 0):
                point_frac_inp = [
                    float(n) / d for n, d in zip(point, inp_tags.n_real())
                ]
                if (tag_sites_frac is not None):
                    tag_sites_frac.append(point_frac_inp)
                point_frac_ref = cb_op_inp_ref.c() * point_frac_inp
                equiv_points = sgtbx.sym_equiv_sites(
                    unit_cell=ref_symmetry.unit_cell(),
                    space_group=search_symmetry.subgroup(),
                    original_site=point_frac_ref,
                    minimum_distance=2.e-6,
                    tolerance=1.e-6)
                min_dist = sgtbx.min_sym_equiv_distance_info(
                    reference_sites=equiv_points,
                    others=sites_frac_ref,
                    principal_continuous_allowed_origin_shift_flags=
                    continuous_shift_flags).dist()
                if (min_dist > max_distance):
                    print "FAIL:", inp_symmetry.space_group_info(), \
                                   point_frac_ref, min_dist
                    raise AssertionError
        if (inp_tags.tags().n_independent() + 10 < sites_cart.size()):
            print "FAIL:", inp_symmetry.space_group_info(), \
                           inp_tags.tags().n_independent(), sites_cart.size()
            raise AssertionError
    if (tag_sites_frac is not None):
        dump_pdb(file_name="tag_sites.pdb",
                 crystal_symmetry=inp_symmetry,
                 sites_cart=inp_symmetry.unit_cell().orthogonalize(
                     sites_frac=tag_sites_frac))
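
# The change-of-basis above is applied to whole coordinate arrays at
# once: rt[:9] holds the row-major 3x3 rotation part and rt[9:] the
# translation, so each reference-setting site is R * x + t. A per-site
# sketch of the same operation:
def apply_rt(rt, site_frac):
    r, t = rt[:9], rt[9:]
    return tuple(sum(r[3 * i + j] * site_frac[j] for j in range(3)) + t[i]
                 for i in range(3))

assert apply_rt((1, 0, 0, 0, 1, 0, 0, 0, 1, 0.5, 0, 0),
                (0.1, 0.2, 0.3)) == (0.6, 0.2, 0.3)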
Example #40
    def __init__(self,
                 sites_cart,
                 unit_cell=None,
                 bond_proxies=None,
                 nonbonded_proxies=None,
                 nonbonded_function=None,
                 angle_proxies=None,
                 dihedral_proxies=None,
                 reference_coordinate_proxies=None,
                 reference_dihedral_manager=None,
                 ncs_dihedral_manager=None,
                 den_manager=None,
                 chirality_proxies=None,
                 planarity_proxies=None,
                 parallelity_proxies=None,
                 bond_similarity_proxies=None,
                 ramachandran_manager=None,
                 external_energy_function=None,
                 compute_gradients=True,
                 gradients=None,
                 disable_asu_cache=False,
                 normalization=False,
                 extension_objects=[]):
        # runsnaked away...
        #adopt_init_args(self, locals())
        #for local in sorted(locals()):
        #  print "    self.%(local)s=%(local)s" % locals()
        #assert 0
        #
        self.angle_proxies = angle_proxies
        self.bond_proxies = bond_proxies
        self.bond_similarity_proxies = bond_similarity_proxies
        self.chirality_proxies = chirality_proxies
        self.compute_gradients = compute_gradients
        self.den_manager = den_manager
        self.dihedral_proxies = dihedral_proxies
        self.disable_asu_cache = disable_asu_cache
        self.extension_objects = extension_objects
        self.external_energy_function = external_energy_function
        self.gradients = gradients
        # self.ncs_dihedral_manager=ncs_dihedral_manager
        self.nonbonded_function = nonbonded_function
        self.nonbonded_proxies = nonbonded_proxies
        self.normalization = normalization
        self.parallelity_proxies = parallelity_proxies
        self.planarity_proxies = planarity_proxies
        self.ramachandran_manager = ramachandran_manager
        self.reference_coordinate_proxies = reference_coordinate_proxies
        self.reference_dihedral_manager = reference_dihedral_manager
        self.sites_cart = sites_cart
        self.unit_cell = unit_cell
        #
        scitbx.restraints.energies.__init__(
            self,
            compute_gradients=compute_gradients,
            gradients=gradients,
            gradients_size=sites_cart.size(),
            gradients_factory=flex.vec3_double,
            normalization=normalization)
        self.n_dihedral_restraints = None
        self.dihedral_restraints_residual_sum = 0
        if (nonbonded_proxies is not None):
            assert nonbonded_function is not None
        if (compute_gradients):
            if (self.gradients is None):
                self.gradients = flex.vec3_double(sites_cart.size(), [0, 0, 0])
            else:
                assert self.gradients.size() == sites_cart.size()

        if (bond_proxies is None):
            self.n_bond_proxies = None
            self.bond_residual_sum = 0
        else:
            self.n_bond_proxies = bond_proxies.n_total()
            self.bond_residual_sum = geometry_restraints.bond_residual_sum(
                sites_cart=sites_cart,
                sorted_asu_proxies=bond_proxies,
                gradient_array=self.gradients,
                disable_cache=disable_asu_cache)
            self.number_of_restraints += self.n_bond_proxies
            self.residual_sum += self.bond_residual_sum
        if (nonbonded_proxies is None):
            self.n_nonbonded_proxies = None
            self.nonbonded_residual_sum = 0
        else:
            self.n_nonbonded_proxies = nonbonded_proxies.n_total()
            self.nonbonded_residual_sum = geometry_restraints.nonbonded_residual_sum(
                sites_cart=sites_cart,
                sorted_asu_proxies=nonbonded_proxies,
                gradient_array=self.gradients,
                function=nonbonded_function,
                disable_cache=False)
            self.number_of_restraints += self.n_nonbonded_proxies
            self.residual_sum += self.nonbonded_residual_sum

        # ====================================================================
        # Unit cell dependent
        # ====================================================================
        # name, parameter, function to call
        for name, proxies, residual_sum_function in [
            ("angle", angle_proxies, geometry_restraints.angle_residual_sum),
            ("dihedral", dihedral_proxies,
             geometry_restraints.dihedral_residual_sum),
            ("planarity", planarity_proxies,
             geometry_restraints.planarity_residual_sum),
            ("parallelity", parallelity_proxies,
             geometry_restraints.parallelity_residual_sum),
            ("bond_similarity", bond_similarity_proxies,
             geometry_restraints.bond_similarity_residual_sum)
        ]:
            setattr(self, "n_%s_proxies" % name, None)
            setattr(self, "%s_residual_sum" % name, 0)
            if proxies is not None:
                n_proxies = proxies.size()
                # setattr(self, "n_%s_proxies" % name, proxies.size())
                if unit_cell is None:
                    res_sum = residual_sum_function(
                        sites_cart=sites_cart,
                        proxies=proxies,
                        gradient_array=self.gradients)
                else:
                    res_sum = residual_sum_function(
                        unit_cell=unit_cell,
                        sites_cart=sites_cart,
                        proxies=proxies,
                        gradient_array=self.gradients)
                self.number_of_restraints += n_proxies
                self.residual_sum += res_sum
                setattr(self, "n_%s_proxies" % name, n_proxies)
                setattr(self, "%s_residual_sum" % name, res_sum)

        # ====================================================================
        # Managers
        # ====================================================================
        for name, manager in [("reference_dihedral",
                               reference_dihedral_manager),
                              ("ncs_dihedral", ncs_dihedral_manager),
                              ("den", den_manager),
                              ("ramachandran", ramachandran_manager)]:
            setattr(self, "n_%s_proxies" % name, None)
            setattr(self, "%s_residual_sum" % name, 0)
            if manager is not None:
                n_proxies = manager.get_n_proxies()
                res_sum = manager.target_and_gradients(
                    unit_cell=unit_cell,
                    sites_cart=sites_cart,
                    gradient_array=self.gradients)
                self.number_of_restraints += n_proxies
                self.residual_sum += res_sum
                setattr(self, "n_%s_proxies" % name, n_proxies)
                setattr(self, "%s_residual_sum" % name, res_sum)

        # ====================================================================
        # The rest (not yet unified)
        # ====================================================================
        if reference_coordinate_proxies is None:
            self.n_reference_coordinate_proxies = None
            self.reference_coordinate_residual_sum = 0
        else:
            import boost.python
            ext = boost.python.import_ext("mmtbx_reference_coordinate_ext")
            self.n_reference_coordinate_proxies = \
                reference_coordinate_proxies.size()
            self.reference_coordinate_residual_sum = \
                ext.reference_coordinate_residual_sum(
                    sites_cart=sites_cart,
                    proxies=reference_coordinate_proxies,
                    gradient_array=self.gradients)
            self.number_of_restraints += self.n_reference_coordinate_proxies
            self.residual_sum += self.reference_coordinate_residual_sum

        if (chirality_proxies is None):
            self.n_chirality_proxies = None
            self.chirality_residual_sum = 0
        else:
            self.n_chirality_proxies = len(chirality_proxies)
            self.chirality_residual_sum = geometry_restraints.chirality_residual_sum(
                sites_cart=sites_cart,
                proxies=chirality_proxies,
                gradient_array=self.gradients)
            self.number_of_restraints += self.n_chirality_proxies
            self.residual_sum += self.chirality_residual_sum

        if (external_energy_function is not None):
            self.external_energy = external_energy_function(
                sites_cart=sites_cart, gradient_array=self.gradients)
            self.residual_sum += self.external_energy
        else:
            self.external_energy = 0
        for extension_obj in self.extension_objects:
            extension_obj.energies_add(energies_obj=self)
        self.finalize_target_and_gradients()
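
# The table-driven loops above replace per-term boilerplate with one
# pattern: initialise n_<name>_proxies / <name>_residual_sum, then fill
# them in and accumulate the totals when the term is present. A
# standalone sketch of that pattern with hypothetical residual functions:
class EnergiesSketch:
    def __init__(self, terms):
        self.number_of_restraints = 0
        self.residual_sum = 0.0
        for name, proxies, residual_fn in terms:
            setattr(self, "n_%s_proxies" % name, None)
            setattr(self, "%s_residual_sum" % name, 0)
            if proxies is not None:
                res_sum = residual_fn(proxies)
                self.number_of_restraints += len(proxies)
                self.residual_sum += res_sum
                setattr(self, "n_%s_proxies" % name, len(proxies))
                setattr(self, "%s_residual_sum" % name, res_sum)

e = EnergiesSketch([("angle", [1.0, 2.0], sum), ("dihedral", None, sum)])
assert e.n_angle_proxies == 2 and e.n_dihedral_proxies is None
assert e.residual_sum == 3.0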
Example #41
def exercise_grid_indices_around_sites():
  unit_cell = uctbx.unit_cell((5,5,5))
  fft_n_real = (5,5,5)
  fft_m_real = (5,5,5)
  site_radii = flex.double([0.5*3**0.5+1e-6])
  def get():
    grid_indices = maptbx.grid_indices_around_sites(
      unit_cell=unit_cell, fft_n_real=fft_n_real, fft_m_real=fft_m_real,
      sites_cart=sites_cart, site_radii=site_radii)
    return list(grid_indices)
  sites_cart = flex.vec3_double([(0.5,0.5,0.5)])
  assert get() == [0, 1, 5, 6, 25, 26, 30, 31]
  sites_cart = flex.vec3_double([(1.5,1.5,1.5)])
  assert get() == [31, 32, 36, 37, 56, 57, 61, 62]
  def sample():
    for i in range(-2,7):
      for j in range(-2,7):
        for k in range(-2,7):
          sites_cart = flex.vec3_double([(i+.5,j+.5,k+.5)])
          assert len(get()) == 8
  sample()
  #
  unit_cell = uctbx.unit_cell((5,6,7))
  fft_n_real = (5,6,7)
  fft_m_real = (5,6,7)
  sites_cart = flex.vec3_double([(0.5,0.5,0.5)])
  assert get() == [0, 1, 7, 8, 42, 43, 49, 50]
  fft_m_real = (5,6,8)
  assert get() == [0, 1, 8, 9, 48, 49, 56, 57]
  fft_m_real = (5,7,8)
  assert get() == [0, 1, 8, 9, 56, 57, 64, 65]
  sample()
  #
  site_radii = flex.double([2])
  assert len(get()) == 8 + 6*4
  site_radii = flex.double([1000])
  assert len(get()) == 5*6*7
  #
  unit_cell = uctbx.unit_cell((18,26,27))
  fft_n_real = (18,26,27)
  fft_m_real = (18,27,28)
  for ish in range(5):
    x = 2*ish+.5
    sites_cart = flex.vec3_double([[x]*3])
    sh = 3**0.5*(ish+0.5)
    site_radii = flex.double([sh-1e-6])
    s1 = set(get())
    site_radii = flex.double([sh+1e-6])
    s2 = set(get())
    for gi in sorted(s2-s1):
      i,j,k = n_dim_index_from_one_dim(gi, fft_m_real)
      assert approx_equal(abs(matrix.col((i-x,j-x,k-x))), sh)
    assert len(s1) == [0, 56, 304, 912, 1904][ish]
    assert len(s2) == [8, 88, 360, 968, 2008][ish]
  #
  unit_cell = uctbx.unit_cell((8,9,7,80,100,110))
  fft_n_real = (11,13,15)
  fft_m_real = (18,26,19)
  sites_cart = flex.vec3_double([(3,11,5)])
  ls = []
  prev = 0
  for r in itertools.count(1):
    site_radii = flex.double([r])
    l = len(get())
    assert l > prev
    ls.append(l)
    if (l == 11*13*15):
      break
    assert r < 7
    prev = l
  assert ls == [18, 155, 524, 1225, 1940, 2139, 2145]
  #
  fft_m_real = (1073741824, 1073741824, 1073741824)
  try:
    maptbx.grid_indices_around_sites(
      unit_cell=unit_cell, fft_n_real=fft_n_real, fft_m_real=fft_m_real,
      sites_cart=sites_cart, site_radii=site_radii)
  except RuntimeError as e:
    assert str(e).startswith("product of fft_m_real")
  else: raise Exception_expected
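
# The padded-map bookkeeping used above flattens a grid point (i, j, k)
# on an fft_m_real = (mx, my, mz) grid as gi = (i * my + j) * mz + k; a
# sketch of the inverse mapping that n_dim_index_from_one_dim is assumed
# to perform (consistent with the expected index lists above):
def one_dim_to_n_dim(gi, m_real):
    mx, my, mz = m_real
    k = gi % mz
    j = (gi // mz) % my
    i = gi // (my * mz)
    assert i < mx
    return (i, j, k)

# e.g. in the first test, site (1.5, 1.5, 1.5) on a 5x5x5 grid covers
# grid point (1, 1, 1), which flattens to (1 * 5 + 1) * 5 + 1 == 31
assert one_dim_to_n_dim(31, (5, 5, 5)) == (1, 1, 1)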