Example #1
def get_matching_sites_cart_in_both_h(old_h, new_h):
  old_h.reset_atom_i_seqs()
  new_h.reset_atom_i_seqs()
  fixed_sites = flex.vec3_double()
  moving_sites = flex.vec3_double()
  isel_for_old = flex.size_t()
  isel_for_new = flex.size_t()
  if old_h.atoms_size() == new_h.atoms_size():
    good = True
    for a1, a2 in zip(old_h.atoms(), new_h.atoms()):
      if a1.id_str()[:-6] != a2.id_str()[:-6]:
        # print "No match: '%s', '%s'" % (a1.id_str()[:-6], a2.id_str()[:-6])
        good = False
        break
      else:
        fixed_sites.append(a1.xyz)
        moving_sites.append(a2.xyz)
    if good:
      # print "SHORTCUT DONE"
      assert fixed_sites.size() == moving_sites.size()
      return fixed_sites, moving_sites
  fixed_sites = flex.vec3_double()
  moving_sites = flex.vec3_double()
  for old_rg, new_rg in zip(old_h.only_chain().residue_groups(), new_h.only_chain().residue_groups()):
    for old_ag, new_ag in zip(old_rg.atom_groups(), new_rg.atom_groups()):
      for atom in old_ag.atoms():
        a = new_ag.get_atom(atom.name.strip())
        if a is not None:
          fixed_sites.append(atom.xyz)
          moving_sites.append(a.xyz)
  assert fixed_sites.size() == moving_sites.size()
  return fixed_sites, moving_sites
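A minimal, self-contained sketch (illustrative coordinates, not taken from the function above) of the per-atom distance idiom that the matched fixed/moving site arrays typically feed into:

from scitbx.array_family import flex
import math

fixed_sites  = flex.vec3_double([(0, 0, 0), (1, 0, 0), (0, 2, 0)])
moving_sites = flex.vec3_double([(0.1, 0, 0), (1, 0.1, 0), (0, 2, 0.1)])
per_atom_dist = flex.sqrt((fixed_sites - moving_sites).dot())  # distance per matched pair
max_shift = flex.max(per_atom_dist)
rmsd = math.sqrt(flex.mean(per_atom_dist * per_atom_dist))
assert abs(max_shift - 0.1) < 1e-6 and abs(rmsd - 0.1) < 1e-6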
Example #2
 def join(self,data_dict):
   """The join() function closes the database.
   """
   # Terminate the consumer process by feeding it a None command and
   # wait for it to finish.
   self._db_commands_queue.put(None)
   self._db_commands_queue.join()
   self._db_results_queue.join()
   self._semaphore.acquire()
   nrows = data_dict["rows"].get_obj()[0]
   print "writing observation pickle with %d rows"%nrows
   kwargs = dict(
     miller_lookup =      flex.size_t(data_dict["miller_proxy"].get_obj()[:nrows]),
     observed_intensity = flex.double(data_dict["intensity_proxy"].get_obj()[:nrows]),
     observed_sigI =      flex.double(data_dict["sigma_proxy"].get_obj()[:nrows]),
     frame_lookup =       flex.size_t(data_dict["frame_proxy"].get_obj()[:nrows]),
     original_H =         flex.int   (data_dict["H_proxy"].get_obj()[:nrows]),
     original_K =         flex.int   (data_dict["K_proxy"].get_obj()[:nrows]),
     original_L =         flex.int   (data_dict["L_proxy"].get_obj()[:nrows]),
   )
   import cPickle as pickle
   pickle.dump(kwargs, open(self.params.output.prefix+"_observation.pickle","wb"),
               pickle.HIGHEST_PROTOCOL)
   pickle.dump(self.miller, open(self.params.output.prefix+"_miller.pickle","wb"),
               pickle.HIGHEST_PROTOCOL)
   pickle.dump(data_dict["xtal_proxy"].get_obj().raw.replace('\0','').strip(),
                       open(self.params.output.prefix+"_frame.pickle","wb"),pickle.HIGHEST_PROTOCOL)
   return kwargs
Example #3
 def _add_to_single_size_t(self, x, next_to_i_seq, squeeze_in, mode=1):
   tmp_x = []
   new_independent_element = None
   added = 0
   for sel in x:
     if(sel < next_to_i_seq):
       tmp_x.append(sel)
     elif(sel == next_to_i_seq):
       tmp_x.append(sel)
       if(squeeze_in):
         if(mode == 1):
           if(x.size() > 1):
             tmp_x.append(next_to_i_seq+1)
             added = 1
           elif(x.size() == 1):
             new_independent_element = [next_to_i_seq+1]
             added = 1
           else: raise RuntimeError
         elif(mode == 2):
           new_independent_element = [next_to_i_seq+1]
           added = 1
         else: raise RuntimeError
     else:
       tmp_x.append(sel+1)
   if(new_independent_element is not None):
     new_independent_element = flex.size_t(new_independent_element)
   return flex.size_t(tmp_x), new_independent_element, added
Example #4
 def discard_symmetry(self):
   assert len(self.table())>0
   site_sym = self.table()[0]
   assert site_sym.is_point_group_1()
   return site_symmetry_table(
     indices=flex.size_t(self.indices().size(), 0),
     table=[site_symmetry_ops(1, site_sym.special_op(), site_sym.matrices())],
     special_position_indices=flex.size_t())
Example #5
 def fit_side_chain(self, clusters):
   rotamer_iterator = \
     mmtbx.refinement.real_space.fit_residue.get_rotamer_iterator(
       mon_lib_srv = self.mon_lib_srv,
       residue     = self.residue)
   if(rotamer_iterator is None): return
   selection = flex.size_t(flatten(clusters[0].vector))
   if(self.target_map is not None):
     start_target_value = self.get_target_value(
       sites_cart = self.residue.atoms().extract_xyz(),
       selection  = selection)
   sites_cart_start = self.residue.atoms().extract_xyz()
   sites_cart_first_rotamer = list(rotamer_iterator)[0][1]
   self.residue.atoms().set_xyz(sites_cart_first_rotamer)
   axes = []
   atr = []
   for i, angle in enumerate(self.chi_angles[0]):
     cl = clusters[i]
     axes.append(flex.size_t(cl.axis))
     atr.append(flex.size_t(cl.atoms_to_rotate))
   if(self.target_map is not None):
     ro = ext.fit(
       target_value             = start_target_value,
       axes                     = axes,
       rotatable_points_indices = atr,
       angles_array             = self.chi_angles,
       density_map              = self.target_map,
       all_points               = self.residue.atoms().extract_xyz(),
       unit_cell                = self.unit_cell,
       selection                = selection,
       sin_table                = self.sin_cos_table.sin_table,
       cos_table                = self.sin_cos_table.cos_table,
       step                     = self.sin_cos_table.step,
       n                        = self.sin_cos_table.n)
   else:
     ro = ext.fit(
       sites_cart_start         = sites_cart_start.deep_copy(),
       axes                     = axes,
       rotatable_points_indices = atr,
       angles_array             = self.chi_angles,
       all_points               = self.residue.atoms().extract_xyz(),
       sin_table                = self.sin_cos_table.sin_table,
       cos_table                = self.sin_cos_table.cos_table,
       step                     = self.sin_cos_table.step,
       n                        = self.sin_cos_table.n)
   sites_cart_result = ro.result()
   if(sites_cart_result.size()>0):
     dist = None
     if(self.accept_only_if_max_shift_is_smaller_than is not None):
       dist = flex.max(flex.sqrt((sites_cart_start - sites_cart_result).dot()))
     if(dist is None):
       self.residue.atoms().set_xyz(sites_cart_result)
     else:
       if(dist is not None and
          dist < self.accept_only_if_max_shift_is_smaller_than):
         self.residue.atoms().set_xyz(sites_cart_result)
       else:
         self.residue.atoms().set_xyz(sites_cart_start)
Example #6
File: flex.py Project: biochem-fan/dials
  def find_overlaps(self, experiments=None, border=0):
    '''
    Check for overlapping reflections.

    :param experiments: The experiment list
    :param border: A positive integer specifying the border around each shoebox
    :return: The overlap list

    '''
    from dials.algorithms.shoebox import OverlapFinder
    from itertools import groupby
    from scitbx.array_family import shared

    # Expand the bbox if necessary
    if border > 0:
      x0, x1, y0, y1, z0, z1 = self['bbox'].parts()
      x0 -= border
      x1 += border
      y0 -= border
      y1 += border
      z0 -= border
      z1 += border
      bbox = int6(x0,x1,y0,y1,z0,z1)
    else:
      bbox = self['bbox']

    # Get the panel and id
    panel = self['panel']
    exp_id = self['id']

    # Group according to imageset
    if experiments is not None:
      groups = groupby(
        range(len(experiments)),
        lambda x: experiments[x].imageset)

      # Get the experiment ids we're to treat together
      lookup = {}
      for j, (key, indices) in enumerate(groups):
        for i in indices:
          lookup[i] = j
      group_id = flex.size_t([lookup[i] for i in self['id']])
    elif "imageset_id" in self:
      imageset_id = self['imageset_id']
      assert imageset_id.all_ge(0)
      group_id = flex.size_t(list(imageset_id))
    else:
      raise RuntimeError('Either need to supply experiments or have imageset_id')

    # Create the overlap finder
    find_overlapping = OverlapFinder()

    # Find the overlaps
    overlaps = find_overlapping(group_id, panel, bbox)
    assert(overlaps.num_vertices() == len(self))

    # Return the overlaps
    return overlaps
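A small illustrative sketch (toy data, not the dials API) of the groupby/lookup idiom used above to collapse experiments that share an imageset into a single group id:

from scitbx.array_family import flex
from itertools import groupby

imagesets = ["a", "a", "b", "b", "b"]        # stand-in for experiments[i].imageset
groups = groupby(range(len(imagesets)), lambda x: imagesets[x])
lookup = {}
for j, (key, indices) in enumerate(groups):
  for i in indices:
    lookup[i] = j
reflection_ids = [0, 2, 4, 1]                # stand-in for self['id']
group_id = flex.size_t([lookup[i] for i in reflection_ids])
assert list(group_id) == [0, 1, 1, 0]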
Example #7
File: PyChef.py Project: xia2/xia2
 def __init__(self, asu_index, is_centric, iplus=None, iminus=None):
   self._asu_index = asu_index
   self._centric = is_centric
   if iplus is None:
     iplus = flex.size_t()
   if iminus is None:
     iminus = flex.size_t()
   self._iplus = iplus
   self._iminus = iminus
Example #8
def exercise_miller_export_as_shelx_hklf():
  s = """\
   1   2  -1   23.34    4.56
   2  -3   9   12.45    6.12
99999999999999999.9999999.99
-999-999-999-9999.99-9999.99
   3   4   5999999.99999999.
   3   4   5-99999.9-999999.
"""
  ma = hklf.reader(file_object=StringIO(s)).as_miller_arrays()[0]
  sio = StringIO()
  ma.export_as_shelx_hklf(file_object=sio)
  ma2 = hklf.reader(file_object=StringIO(sio.getvalue())).as_miller_arrays()[0]
  assert approx_equal(ma.indices(), ma2.indices())
  assert approx_equal(ma.data(), ma2.data())
  assert approx_equal(ma.sigmas(), ma2.sigmas())
  #
  ma = ma.select(flex.size_t([0]))
  def check(d, s, f):
    if (s is not None): s = flex.double([s])
    ma2 = ma.array(data=flex.double([d]), sigmas=s)
    sio = StringIO()
    ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
    assert not show_diff(sio.getvalue(), """\
   1   2  -1%s
   0   0   0    0.00    0.00
""" % f)
    try: ma2.export_as_shelx_hklf(sio)
    except RuntimeError: pass
    else: raise Exception_expected
  check(-12345678, 1, "-999999.    0.08")
  check(-12345678, None, "-999999.    0.00")
  check(2, -12345678, "    0.16-999999.")
  check(123456789, 30, "9999999.    2.43")
  check(123456789, None, "9999999.    0.00")
  check(40, 123456789, "    3.249999999.")
  check(-23456789, 123456789, "-999999.5263153.")
  check(123456789, -23456789, "5263153.-999999.")
  #
  ma = hklf.reader(file_object=StringIO(s)).as_miller_arrays()[0]
  ma = ma.select(flex.size_t([0,1]))
  ma2 = ma.array(data=flex.double([123456789, -23456789]))
  sio = StringIO()
  ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
  assert not show_diff(sio.getvalue(), """\
   1   2  -15263153.    0.00
   2  -3   9-999999.    0.00
   0   0   0    0.00    0.00
""")
  ma2 = ma.array(data=flex.double([-23456789, 823456789]))
  sio = StringIO()
  ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
  assert not show_diff(sio.getvalue(), """\
   1   2  -1-284858.    0.00
   2  -3   99999999.    0.00
   0   0   0    0.00    0.00
""")
Example #9
 def __init__(self,obs_arrays,unique_set,d_max,d_min,n_bins):
   from cctbx import miller
   self.obs_arrays = obs_arrays
   extended_indices = unique_set.indices().select(obs_arrays.miller)
   self.obs_set = unique_set.customized_copy(indices=extended_indices, data=obs_arrays.frame)
   self.obs_set.setup_binner(d_max=d_max,
                             d_min=d_min,
                             n_bins=n_bins)
   self.n_meas_cache = flex.size_t(n_bins+1)
   self.n_neg_cache = flex.size_t(n_bins+1)
Example #10
def generate_evec(selections,
                  selections_1d,
                  xray_structure,
                  pdb_hierarchy,
                  filename,
                  n_modes,
                  zero_mode_input_flag = False,
                  zero_mode_flag = True):
    global time_generate_evec
    t1 = time.time()
    atoms = pdb_hierarchy.atoms()
    nm_init_manager = nm_init(filename = filename,
                              n_modes = n_modes,
                              atoms = atoms,
                              zero_mode_input_flag = zero_mode_input_flag,
                              zero_mode_flag = zero_mode_flag)
    modes = []
    for i in range(n_modes):
        modes.append(nm_init_manager.return_modes(i))
    if zero_mode_flag == True and zero_mode_input_flag == False:
        count = 0
        for selection in selections:
            # selection is a flex.size_t array
            bool_selection = flex.bool(xray_structure.sites_cart().size(), False)
            bool_selection.set_selected(selection, True)
            sites_cart_selected = xray_structure.sites_cart().select(selection)
            atomic_weights_selected = xray_structure.atomic_weights().select(selection)
            nm_init_manager.gen_zero_modes(sites_cart_selected, atomic_weights_selected, bool_selection)
            padd = 6*count
            for i in range(6):
                selected_zero_modes = nm_init_manager.return_zero_modes(i+padd)
                modes[i].set_selected(selection, selected_zero_modes)
            count += 1
        new_sel_st = flex.size_t()
        for i in range(count):
            new_bool_selection = nm_init_manager.return_new_selection(i)
            new_selection = []
            for selection_ in selections[i]:
                if new_bool_selection[selection_] != False:
                    new_selection.append(selection_)
            new_selection = flex.size_t(new_selection)
            new_sel_st.extend(new_selection)
            selections[i] = new_selection
        selections_1d = new_sel_st
    # selections are modified in this step: only atoms with normal modes assigned are kept
    nm_init_manager.normalize(n_modes)
    t2 = time.time()
    time_generate_evec += (t2 - t1)
    return modes
Example #11
def exercise_optimise_shelxl_weights():
  def calc_goof(fo2, fc, w, k, n_params):
    fc2 = fc.as_intensity_array()
    w = w(fo2.data(), fo2.sigmas(), fc2.data(), k)
    return math.sqrt(flex.sum(
      w * flex.pow2(fo2.data() - k*fc2.data()))/(fo2.size() - n_params))
  xs = smtbx.development.sucrose()
  k = 0.05 + 10 * flex.random_double()
  fc = xs.structure_factors(anomalous_flag=False, d_min=0.7).f_calc()
  fo = fc.as_amplitude_array()
  fo = fo.customized_copy(data=fo.data()*math.sqrt(k))
  fo = fo.customized_copy(sigmas=0.03*fo.data())
  sigmas = fo.sigmas()
  for i in range(fo.size()):
    fo.data()[i] += 2 * scitbx.random.variate(
      scitbx.random.normal_distribution(sigma=sigmas[i]))() \
      + 0.5*random.random()
  fo2 = fo.as_intensity_array()
  fc2 = fc.as_intensity_array()
  w = least_squares.mainstream_shelx_weighting(a=0.1)
  s = calc_goof(fo2, fc, w, k, xs.n_parameters())
  w2 = w.optimise_parameters(fo2, fc2, k, xs.n_parameters())
  s2 = calc_goof(fo2, fc, w2, k, xs.n_parameters())
  # sort data and setup binning by fc/fc_max
  fc_sq = fc.as_intensity_array()
  fc_sq_over_fc_sq_max = fc_sq.data()/flex.max(fc_sq.data())
  permutation = flex.sort_permutation(fc_sq_over_fc_sq_max)
  fc_sq_over_fc_sq_max = fc_sq.customized_copy(
    data=fc_sq_over_fc_sq_max).select(permutation)
  fc_sq = fc_sq.select(permutation)
  fo_sq = fo2.select(permutation)
  n_bins = 10
  bin_max = 0
  bin_limits = flex.size_t(1, 0)
  bin_count = flex.size_t()
  for i in range(n_bins):
    bin_limits.append(int(math.ceil((i+1) * fc_sq.size()/n_bins)))
    bin_count.append(bin_limits[i+1] - bin_limits[i])
  goofs_w = flex.double()
  goofs_w2 = flex.double()
  for i_bin in range(n_bins):
    sel = flex.size_t_range(bin_limits[i_bin], bin_limits[i_bin+1])
    goofs_w2.append(calc_goof(fo_sq.select(sel),
                              fc_sq.select(sel),
                              w2, k, xs.n_parameters()))
    goofs_w.append(calc_goof(fo_sq.select(sel),
                              fc_sq.select(sel),
                              w, k, xs.n_parameters()))
  a = flex.mean_and_variance(goofs_w).unweighted_sample_variance()
  b = flex.mean_and_variance(goofs_w2).unweighted_sample_variance()
  assert a > b or abs(1-s) > abs(1-s2)
  assert a > b # flat analysis of variance
  assert abs(1-s) > abs(1-s2) # GooF close to 1
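A self-contained sketch (made-up values) of the sort-then-bin idiom above, using flex.sort_permutation and flex.size_t_range:

from scitbx.array_family import flex

values = flex.double([0.7, 0.1, 0.5, 0.9, 0.3, 0.2])
perm = flex.sort_permutation(values)          # ascending order
sorted_values = values.select(perm)
n_bins = 3
bin_limits = flex.size_t(1, 0)                # start with a single 0
for i in range(n_bins):
  bin_limits.append((i + 1) * values.size() // n_bins)
bin_sizes = []
for i_bin in range(n_bins):
  sel = flex.size_t_range(bin_limits[i_bin], bin_limits[i_bin + 1])
  bin_sizes.append(sorted_values.select(sel).size())
assert bin_sizes == [2, 2, 2]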
Example #12
def exercise_covariance():
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(
      (5.01,5.01,5.47,90,90,120), "P6222"),
    scatterers=flex.xray_scatterer([
      xray.scatterer("Si", (1/2.,1/2.,1/3.)),
      xray.scatterer("O", (0.197,-0.197,0.83333))]))
  uc = xs.unit_cell()
  flags = xs.scatterer_flags()
  for f in flags:
    f.set_grad_site(True)
  xs.set_scatterer_flags(flags)
  cov = flex.double((1e-8,1e-9,2e-9,3e-9,4e-9,5e-9,
                          2e-8,1e-9,2e-9,3e-9,4e-9,
                               3e-8,1e-9,2e-9,3e-9,
                                    2e-8,1e-9,2e-9,
                                         3e-8,1e-9,
                                              4e-8))
  param_map = xs.parameter_map()
  assert approx_equal(cov,
    covariance.extract_covariance_matrix_for_sites(flex.size_t([0,1]), cov, param_map))
  cov_cart = covariance.orthogonalize_covariance_matrix(cov, uc, param_map)
  O = matrix.sqr(uc.orthogonalization_matrix())
  for i in range(param_map.n_scatterers):
    cov_i = covariance.extract_covariance_matrix_for_sites(flex.size_t([i]), cov, param_map)
    cov_i_cart = covariance.extract_covariance_matrix_for_sites(flex.size_t([i]), cov_cart, param_map)
    assert approx_equal(
      O * matrix.sym(sym_mat3=cov_i) * O.transpose(),
      matrix.sym(sym_mat3=cov_i_cart).as_mat3())
  for f in flags: f.set_grads(False)
  flags[0].set_grad_u_aniso(True)
  flags[0].set_use_u_aniso(True)
  flags[1].set_grad_u_iso(True)
  flags[1].set_use_u_iso(True)
  xs.set_scatterer_flags(flags)
  param_map = xs.parameter_map()
  cov = flex.double(7*7, 0)
  cov.reshape(flex.grid(7,7))
  cov.matrix_diagonal_set_in_place(flex.double([i for i in range(7)]))
  cov = cov.matrix_symmetric_as_packed_u()
  assert approx_equal([i for i in range(6)],
                      covariance.extract_covariance_matrix_for_u_aniso(
                        0, cov, param_map).matrix_packed_u_diagonal())
  assert covariance.variance_for_u_iso(1, cov, param_map) == 6
  try: covariance.variance_for_u_iso(0, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  try: covariance.extract_covariance_matrix_for_u_aniso(1, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  approx_equal(covariance.extract_covariance_matrix_for_sites(
    flex.size_t([1]), cov, param_map), (0,0,0,0,0,0))
Example #13
def exercise_miller_array_data_types():
    miller_set = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90), space_group_symbol="P1").miller_set(
        indices=flex.miller_index([(1, 2, 3), (4, 5, 6)]), anomalous_flag=False
    )
    for data in [
        flex.bool([False, True]),
        flex.int([0, 1]),
        flex.size_t([0, 1]),
        flex.double([0, 1]),
        flex.complex_double([0, 1]),
    ]:
        miller_array = miller_set.array(data=data)
        if op.isfile("tmp_iotbx_mtz.mtz"):
            os.remove("tmp_iotbx_mtz.mtz")
        assert not op.isfile("tmp_iotbx_mtz.mtz")
        miller_array.as_mtz_dataset(column_root_label="DATA").mtz_object().write(file_name="tmp_iotbx_mtz.mtz")
        assert op.isfile("tmp_iotbx_mtz.mtz")
        mtz_obj = mtz.object(file_name="tmp_iotbx_mtz.mtz")
        miller_arrays_read_back = mtz_obj.as_miller_arrays()
        assert len(miller_arrays_read_back) == 1
        miller_array_read_back = miller_arrays_read_back[0]
        assert miller_array_read_back.indices().all_eq(miller_array.indices())
        if miller_array.is_integer_array() or miller_array.is_bool_array():
            assert miller_array_read_back.data().all_eq(flex.int([0, 1]))
        elif miller_array.is_real_array():
            assert miller_array_read_back.data().all_eq(flex.double([0, 1]))
        elif miller_array.is_complex_array():
            assert miller_array_read_back.data().all_eq(flex.complex_double([0, 1]))
        else:
            raise RuntimeError("Programming error.")
Example #14
def exercise_zeolite_atlas(distance_cutoff=3.5):
  atlas_file = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/misc/strudat_zeolite_atlas",
    test=os.path.isfile)
  if (atlas_file is None):
    print "Skipping exercise_zeolite_atlas(): input file not available"
    return
  all_entries = strudat.read_all_entries(open(atlas_file))
  for i,entry in enumerate(all_entries.entries):
    structure = entry.as_xray_structure()
    if ("--full" in sys.argv[1:] or i % 20 == 0):
      tst_direct_space_asu.exercise_neighbors_pair_generators(
        structure=structure,
        verbose="--Verbose" in sys.argv[1:])
    asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
    pair_generator = crystal.neighbors_fast_pair_generator(
      asu_mappings=asu_mappings,
      distance_cutoff=distance_cutoff)
    bond_counts = flex.size_t(structure.scatterers().size(), 0)
    for pair in pair_generator:
      bond_counts[pair.i_seq] += 1
      if (pair.j_sym == 0):
        bond_counts[pair.j_seq] += 1
    for atom,bond_count in zip(entry.atoms, bond_counts):
      assert atom.connectivity is not None
      assert atom.connectivity == bond_count
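A tiny sketch (invented pair list) of the per-atom counting idiom above, where flex.size_t(n, 0) acts as an array of counters:

from scitbx.array_family import flex

pairs = [(0, 1), (0, 2), (1, 2), (2, 3)]      # hypothetical bonded pairs
bond_counts = flex.size_t(4, 0)               # one counter per atom
for i, j in pairs:
  bond_counts[i] += 1
  bond_counts[j] += 1
assert list(bond_counts) == [2, 2, 3, 1]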
Example #15
 def _nonbonded_pair_objects(max_bonded_cutoff=3.,
                             i_seqs=None,
                             ):
   if i_seqs is None:
     atoms = self.pdb_hierarchy.atoms()
     i_seqs = flex.size_t()
     for atom in atoms:
       i_seqs.append(atom.i_seq)
   if (self.model_indices is not None):
     model_indices = self.model_indices.select(i_seqs)
   conformer_indices = self.conformer_indices.select(i_seqs)
   sym_excl_indices = self.sym_excl_indices.select(i_seqs)
   donor_acceptor_excl_groups = self.donor_acceptor_excl_groups.select(i_seqs)
   asu_mappings = self.special_position_settings.asu_mappings(
     buffer_thickness=max_bonded_cutoff)
   sites_cart = self.sites_cart.select(i_seqs)
   asu_mappings.process_sites_cart(
     original_sites=sites_cart,
     site_symmetry_table=self.site_symmetry_table().select(i_seqs))
   pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
   nonbonded_proxies = geometry_restraints.nonbonded_sorted_asu_proxies(
     model_indices=model_indices,
     conformer_indices=conformer_indices,
     sym_excl_indices=sym_excl_indices,
     donor_acceptor_excl_groups=donor_acceptor_excl_groups,
     nonbonded_params=geometry_restraints.nonbonded_params(
       default_distance=1),
     nonbonded_types=flex.std_string(conformer_indices.size()),
     nonbonded_charges=flex.int(conformer_indices.size(), 0),
     nonbonded_distance_cutoff_plus_buffer=max_bonded_cutoff,
     min_cubicle_edge=5,
     shell_asu_tables=[pair_asu_table])
   return nonbonded_proxies, sites_cart, pair_asu_table, asu_mappings, i_seqs
Example #16
  def extract_residues(self, model_i, number_previous_scatters, combine = True):
    result = []
    model = self.pdb_hierarchy.models()[model_i]
    rm = []
    for chain in model.chains():
      for rg in chain.residue_groups():
        rg_i_seqs = []
        r_name = None
        for ag in rg.atom_groups():
          if(r_name is None): r_name = ag.resname
          for atom in ag.atoms():
            if(self.selection[atom.i_seq - number_previous_scatters]):
              rg_i_seqs.append(atom.i_seq - number_previous_scatters)
        if(len(rg_i_seqs) != 0):
          rm.append(group_args(
            selection = flex.size_t(rg_i_seqs),
            name      = r_name,
            model_id  = model_i,
            resid     = rg.resid(),
            chain_id  = chain.id))
    result.append(rm)

    if(combine):
      r0 = result[0]
      for r in result[1:]:
        for i, ri in enumerate(r):
          r0[i].selection.extend(ri.selection)
          assert r0[i].name == ri.name
    else:
      r0 = result[0]
      for r in result[1:]:
        r0.extend(r)

    return r0
Example #17
def exclude_outliers_from_reference_restraints_selection(
    pdb_hierarchy,
    restraints_selection):
  from mmtbx.validation.ramalyze import ramalyze
  # the import below is SLOW!!!
  from mmtbx.rotamer.rotamer_eval import RotamerEval
  assert restraints_selection is not None
  # ramachandran plot outliers
  rama_outlier_selection = ramalyze(pdb_hierarchy=pdb_hierarchy,
    outliers_only=False).outlier_selection()
  rama_outlier_selection = flex.bool(restraints_selection.size(),
    rama_outlier_selection)
  # rotamer outliers
  rota_outlier_selection = flex.size_t()
  rotamer_manager = RotamerEval() # SLOW!!!
  for model in pdb_hierarchy.models():
    for chain in model.chains():
      for residue_group in chain.residue_groups():
        conformers = residue_group.conformers()
        if(len(conformers)>1): continue
        for conformer in residue_group.conformers():
          residue = conformer.only_residue()
          if(rotamer_manager.evaluate_residue(residue)=="OUTLIER"):
            rota_outlier_selection.extend(residue.atoms().extract_i_seq())
  rota_outlier_selection = flex.bool(restraints_selection.size(),
    rota_outlier_selection)
  outlier_selection = rama_outlier_selection | rota_outlier_selection
  return restraints_selection & (~outlier_selection)
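A minimal sketch (toy sizes) of the iselection/boolean-selection round trip that the function above relies on:

from scitbx.array_family import flex

n_atoms = 6
isel = flex.size_t([1, 3, 4])                          # selected atom indices
bsel = flex.bool(n_atoms, isel)                        # expand to a boolean mask
assert list(bsel.iselection()) == [1, 3, 4]            # and back again
keep = bsel & ~flex.bool(n_atoms, flex.size_t([3]))    # drop atom 3
assert list(keep.iselection()) == [1, 4]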
Example #18
  def map_to_grid(self, sweep, centroids):
    b_iso = 200
    beam = sweep.get_beam()
    wavelength = beam.get_wavelength()
    d_min = self.d_min

    n_points = self.gridding[0]
    rlgrid = 2 / (d_min * n_points)

    # real space FFT grid dimensions
    cell_lengths = [n_points * d_min/2 for i in range(3)]
    self.fft_cell = uctbx.unit_cell(cell_lengths+[90]*3)
    self.crystal_symmetry = crystal.symmetry(unit_cell=self.fft_cell,
                                             space_group_symbol="P1")

    print "FFT gridding: (%i,%i,%i)" %self.gridding

    grid = flex.double(flex.grid(self.gridding), 0)

    reflections_used_for_indexing = flex.size_t()

    for i_pnt, point in enumerate(centroids):
      point = scitbx.matrix.col(point)
      spot_resolution = 1/point.length()
      if spot_resolution < d_min:
        continue

      grid_coordinates = [int(round(point[i]/rlgrid)+n_points/2) for i in range(3)]
      if max(grid_coordinates) >= n_points: continue # this reflection is outside the grid
      if min(grid_coordinates) < 0: continue # this reflection is outside the grid
      T = math.exp(b_iso * point.length()**2 / 4)
      grid[grid_coordinates] = T

    self.reciprocal_space_grid = grid
Example #19
def show_terms(structure, term_table, coseq_dict=None):
  assert len(term_table) == structure.scatterers().size()
  for scatterer,terms in zip(structure.scatterers(), term_table):
    print scatterer.label, list(terms),
    if (coseq_dict is not None):
      terms_to_match = list(terms[1:])
      have_match = False
      tags = coseq_dict.keys()
      tags.sort()
      for tag in tags:
        for coseq_terms in coseq_dict[tag]:
          n = min(len(coseq_terms), len(terms_to_match))
          if (coseq_terms[:n] == terms_to_match[:n]):
            print tag,
            have_match = True
      if (not have_match):
        print "Unknown",
    print
  sums_terms = flex.double()
  multiplicities = flex.double()
  for scatterer,terms in zip(structure.scatterers(), term_table):
    sums_terms.append(flex.sum(flex.size_t(list(terms))))
    multiplicities.append(scatterer.multiplicity())
  print "TD%d: %.2f" % (
    len(terms)-1, flex.mean_weighted(sums_terms, multiplicities))
Example #20
 def __init__(self,
              structure_monitor,
              rotamer_manager,
              sin_cos_table,
              mon_lib_srv,
              backbone_sample=True,
              log = None):
   adopt_init_args(self, locals())
   self.unit_cell = self.structure_monitor.xray_structure.unit_cell()
   if(self.log is None): self.log = sys.stdout
   assert approx_equal(
     self.structure_monitor.xray_structure.sites_cart(),
     self.structure_monitor.pdb_hierarchy.atoms().extract_xyz())
   self.special_position_indices = \
     self.structure_monitor.xray_structure.special_position_indices()
   self.atom_radius_to_negate_map_within = None
   if(self.structure_monitor.target_map_object.miller_array.d_min()>3.5):
     self.atom_radius_to_negate_map_within = 2.0
   else:
     self.atom_radius_to_negate_map_within = 1.5
   #
   self.selection_good = self.structure_monitor.map_cc_per_atom > 0.8
   self.selection_water = self.structure_monitor.pdb_hierarchy.\
     atom_selection_cache().selection(string = "water")
   self.iselection_backbone=flex.size_t()
   for r in self.structure_monitor.residue_monitors:
     if(r.selection_backbone is not None):
       self.iselection_backbone.extend(r.selection_backbone)
   self.loop_over_residues()
Example #21
 def mask(self, solvent_fraction):
   hist = self.histogram()
   cutoff = hist.get_cutoff(int(self.map.size()*(1-solvent_fraction)))
   mask = flex.size_t()
   mask.resize(self.map.accessor(), 1)
   mask.set_selected(self.map > cutoff, 0)
   return mask
Example #22
def run_call_back(flags, space_group_info):
  d_min = 2.0
  structure = random_structure.xray_structure(
    space_group_info=space_group_info,
    elements=["N", "C", "O", "S"]*3 + ["Fe"]*2,
    volume_per_atom=100)
  if (not space_group_info.group().is_centric()):
    fp_fdp_targets = [(-1,2), (-2,6)]
  else:
    fp_fdp_targets = [(-1,0), (-2,0)]
  anomalous_scatterer_groups = [
    xray.anomalous_scatterer_group(
      iselection=flex.size_t(),
      f_prime=fp,
      f_double_prime=fdp,
      refine=["f_prime", "f_double_prime"]) for fp,fdp in fp_fdp_targets]
  for i_seq,scatterer in enumerate(structure.scatterers()):
    if (scatterer.scattering_type == "S"):
      anomalous_scatterer_groups[0].iselection.append(i_seq)
    if (scatterer.scattering_type == "Fe"):
      anomalous_scatterer_groups[1].iselection.append(i_seq)
  for group in anomalous_scatterer_groups:
    group.copy_to_scatterers_in_place(scatterers=structure.scatterers())
  if (flags.Verbose):
    structure.show_summary().show_scatterers()
  f_obs = abs(structure.structure_factors(
    d_min=2.0, anomalous_flag=True).f_calc())
  if (flags.Verbose):
    f_obs.show_comprehensive_summary()
  #
  for group in anomalous_scatterer_groups:
    group.f_prime = 0
    group.f_double_prime = 0
    group.copy_to_scatterers_in_place(scatterers=structure.scatterers())
  sfg_params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  sfg_params.algorithm = "direct"
  fmodel = mmtbx.f_model.manager(
    xray_structure=structure,
    f_obs=f_obs,
    r_free_flags=f_obs.generate_r_free_flags(),
    sf_and_grads_accuracy_params = sfg_params,
    target_name="ls")
  #
  n_cycles = [0]
  def call_back(minimizer):
    n_cycles[0] += 1
    return True
  minimized = mmtbx.refinement.anomalous_scatterer_groups.minimizer(
    fmodel=fmodel,
    groups=anomalous_scatterer_groups,
    call_back_after_minimizer_cycle=call_back,
    number_of_finite_difference_tests=3)
  assert n_cycles == [3]
  #
  for group,(fp,fdp) in zip(anomalous_scatterer_groups, fp_fdp_targets):
    # Large eps because the minimization doesn't reliably converge.
    # We don't want to exercise the minimizer here, the important
    # test is the finite difference test embedded in the minimizer.
    assert approx_equal(group.f_prime, fp, eps=1)
    assert approx_equal(group.f_double_prime, fdp, eps=1)
Example #23
def set_refinable_parameters(xray_structure, parameters, selections,
                             enforce_positivity=False):
  # XXX PVA: Code below is terribly inefficient and MUST be moved into C++
  sz = xray_structure.scatterers().size()
  i = 0
  for sel in selections:
    # pre-check for positivity begin
    # spread negative occupancies across i_seqs having positive ones
    par_all = flex.double()
    par_neg = flex.double()
    i_p = i
    for sel_ in sel:
      p = parameters[i_p]
      par_all.append(p)
      if(p<0): par_neg.append(p)
      i_p += 1
    if(enforce_positivity and par_neg.size()>0):
      par_all = par_all - flex.min(par_all)
      fs = flex.sum(par_all)
      if(fs != 0):
        par_all = par_all / fs
    # pre-check for positivity end
    for j, sel_ in enumerate(sel):
      sel__b = flex.bool(sz, flex.size_t(sel_))
      xray_structure.set_occupancies(par_all[j], sel__b)
      i+=1
Example #24
 def _add(self, x, next_to_i_seq, squeeze_in):
   if(x is None): return x
   elif(self.is_bool(x)):
     x_new = []
     result = self._add_to_single_size_t(x.iselection(),
       next_to_i_seq, squeeze_in)
     if(result[1] is None): x_new.extend([result[0]])
     else: x_new.extend([result[0], result[1]])
     if(result[2] == 0 and squeeze_in):
       x_new.extend([flex.size_t([next_to_i_seq+1])])
     x_new_new = flex.size_t()
     for i_x_new in x_new: x_new_new.extend(i_x_new)
     return flex.bool(x.size()+1, x_new_new)
   #elif(self.is_size_t(x)): # XXX not testes so disabled
   #  return self._add_to_single_size_t(x, next_to_i_seq, squeeze_in)
   elif(len(x)==0): raise RuntimeError
   elif(self.is_size_t(x[0])):
     x_new = []
     added = 0
     for x_ in x:
       result = self._add_to_single_size_t(x_, next_to_i_seq, squeeze_in)
       added += result[2]
       if(result[1] is None): x_new.extend([result[0]])
       else: x_new.extend([result[0], result[1]])
     if(added == 0 and squeeze_in):
       x_new.extend([flex.size_t([next_to_i_seq+1])])
     return x_new
   elif(self.is_size_t(x[0][0])):
     xx_new = []
     added = 0
     result_1 = None
     for xx in x:
       x_new = []
       for x_ in xx:
         result = self._add_to_single_size_t(x_, next_to_i_seq, squeeze_in, 2)
         added += result[2]
         if(result[1] is None):
           x_new.extend([result[0]])
         else:
           x_new.extend([result[0]])
         if(result[1] is not None):
           xx_new.append([result[1]])
       if(len(x_new) > 0): xx_new.append(x_new)
     if(added == 0 and squeeze_in):
       xx_new.append([flex.size_t([next_to_i_seq+1])])
     return xx_new
   else: raise RuntimeError("Bad selection array type.")
Example #25
 def ca(self, x):
   if(x is None):           return str(0)
   elif(self.is_bool(x)):   return str(x.count(True))
   elif(self.is_size_t(x)): return str(x.size())
   elif(len(x)==0):         return str(0)
   elif(self.is_size_t(x[0])):
     return str(flex.sum(flex.size_t([i.size() for i in x])))
   else: raise RuntimeError("Bad selection array type.")
Example #26
 def __init__(O, n_reserve, i_calc, use_symmetry):
   from cctbx.array_family import flex
   O.i_calc = i_calc
   O.use_symmetry = use_symmetry
   if (use_symmetry):
     O.n_indices = O.i_calc.asu.indices().size()
   else:
     O.n_indices = O.i_calc.p1_anom.indices().size()
   O.completeness_history = flex.double()
   O.completeness_history.reserve(n_reserve)
   O.completeness_history.append(0)
   O.min_count_history = flex.size_t()
   O.min_count_history.reserve(n_reserve)
   O.min_count_history.append(0)
   O.counts = flex.size_t(O.n_indices, 0)
   O.currently_zero = O.n_indices
   O.new_0 = None
Example #27
 def count_occupancies(self, x):
   result = flex.size_t()
   if(x is not None):
     for i in x:
       for j in i:
         for k in j:
           result.append(k)
   return str(result.size())
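An illustrative sketch (toy nesting) of the flattening loop above, collecting nested occupancy selections into a single flex.size_t:

from scitbx.array_family import flex

nested = [[flex.size_t([0, 1]), flex.size_t([2])],
          [flex.size_t([3, 4])]]
result = flex.size_t()
for i in nested:
  for j in i:
    for k in j:
      result.append(k)
assert list(result) == [0, 1, 2, 3, 4]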
Example #28
def expand_model_or_conformer_indices(
      indices,
      x_n_seq,
      related_x_i_seqs):
  result = flex.size_t(x_n_seq, x_n_seq)
  for i_seq,i in enumerate(indices):
    result.set_selected(related_x_i_seqs[i_seq], i)
  assert result.count(x_n_seq) == 0
  return result
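A hedged usage sketch for the function above, with invented toy indices (two original atoms expanding to five new ones):

from scitbx.array_family import flex

indices = flex.size_t([7, 9])                     # index carried by each original atom
related_x_i_seqs = [flex.size_t([0, 2, 4]),       # new i_seqs derived from atom 0
                    flex.size_t([1, 3])]          # new i_seqs derived from atom 1
expanded = expand_model_or_conformer_indices(
  indices=indices, x_n_seq=5, related_x_i_seqs=related_x_i_seqs)
assert list(expanded) == [7, 9, 7, 9, 7]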
Example #29
def map_statistics_for_atom_selection (
    atom_selection,
    fmodel=None,
    resolution_factor=0.25,
    map1=None,
    map2=None,
    xray_structure=None,
    map1_type="2mFo-DFc",
    map2_type="Fmodel",
    atom_radius=1.5,
    exclude_hydrogens=False) :
  """
  Simple-but-flexible function to give the model-to-map CC and mean density
  values (sigma-scaled, unless pre-calculated maps are provided) for any
  arbitrary atom selection.
  """
  assert (atom_selection is not None) and (len(atom_selection) > 0)
  if (fmodel is not None) :
    assert (map1 is None) and (map2 is None) and (xray_structure is None)
    edm = fmodel.electron_density_map()
    map1_coeffs = edm.map_coefficients(map1_type)
    map1 = map1_coeffs.fft_map(
      resolution_factor=resolution_factor).apply_sigma_scaling().real_map()
    map2_coeffs = edm.map_coefficients(map2_type)
    map2 = map2_coeffs.fft_map(
      resolution_factor=resolution_factor).apply_sigma_scaling().real_map()
    xray_structure = fmodel.xray_structure
  else :
    assert (not None in [map1, map2, xray_structure])
    assert isinstance(map1, flex.double) and isinstance(map2, flex.double)
  if (exclude_hydrogens) :
    hd_selection = xray_structure.hd_selection()
    if (type(atom_selection).__name__ == "size_t") :
      atom_selection_new = flex.size_t()
      for i_seq in atom_selection :
        if (not hd_selection[i_seq]) :
          atom_selection_new.append(i_seq)
      atom_selection = atom_selection_new
      assert (len(atom_selection) > 0)
    else :
      assert (type(atom_selection).__name__ == "bool")
      atom_selection &= ~hd_selection
  manager = selection_map_statistics_manager(
    atom_selection=atom_selection,
    xray_structure=xray_structure,
    fft_n_real = map1.focus(),
    fft_m_real = map1.all(),
    exclude_hydrogens=exclude_hydrogens)
  stats = manager.analyze_map(
    map=map1,
    model_map=map2)
  return group_args(
    cc=stats.cc,
    map1_mean=stats.mean,
    map2_mean=stats.model_mean)
Example #30
 def find_similar_matches(self, target, matches, used, overlap_thres):
     tmp_a = flex.double(self.set_a.size(), 0.0).set_selected(target[0], 1.0)
     result = flex.size_t()
     for ii in xrange(len(matches)):
         if not used[ii]:
             match = matches[ii][0]
             tmp_b = flex.double(self.set_a.size(), 0.0).set_selected(match, 1.0)
             similar = flex.sum(tmp_a * tmp_b) / flex.sum(tmp_a)
             if similar > overlap_thres:
                 result.append(ii)
     return result
Example #31
    def compute_d(self, experiments):
        '''
    Compute the resolution for each reflection.

    :param experiments: The experiment list
    :return: The resolution for each reflection

    '''
        from dials.array_family import flex
        uc = flex.unit_cell(len(experiments))
        for i, e in enumerate(experiments):
            uc[i] = e.crystal.get_unit_cell()
        assert self['id'].all_ge(0)
        self['d'] = uc.d(self['miller_index'], flex.size_t(list(self['id'])))
        return self['d']
Example #32
 def __init__(
         self,
         target_map,
         residue,
         vdw_radii,
         xray_structure,
         mon_lib_srv,
         rotamer_manager,
         # This is cctbx.geometry_restraints.manager.manager
         geometry_restraints_manager,
         real_space_gradients_delta,
         selection_radius=5,
         rms_bonds_limit=0.03,  # XXX probably needs to be much lower
         rms_angles_limit=3.0,  # XXX
         backbone_sample_angle=None,
         allow_modified_residues=False):
     adopt_init_args(self, locals())
     # load rotamer manager
     self.rotamer_manager = mmtbx.idealized_aa_residues.rotamer_manager.load(
     )
     # pre-compute sin and cos tables
     self.sin_cos_table = scitbx.math.sin_cos_table(n=10000)
     self.backbone_atom_names = ["N", "CA", "O", "CB", "C"]
     self.residue_iselection = self.residue.atoms().extract_i_seq()
     assert (not self.residue_iselection.all_eq(0))
     self.residue_selection = flex.bool(xray_structure.scatterers().size(),
                                        self.residue_iselection)
     self.residue_backbone_selection = flex.size_t()
     for atom in self.residue.atoms():
         if (atom.name.strip() in self.backbone_atom_names):
             self.residue_backbone_selection.append(atom.i_seq)
     self.residue_backbone_selection = flex.bool(
         xray_structure.scatterers().size(),
         self.residue_backbone_selection)
     self.target_map_work = target_map
     self.target_map_orig = target_map.deep_copy()
     self.fit_backbone()
     negate_selection = mmtbx.refinement.real_space.selection_around_to_negate(
         xray_structure=self.xray_structure,
         selection_within_radius=self.selection_radius,
         iselection=self.residue.atoms().extract_i_seq())
     self.target_map_work = mmtbx.refinement.real_space.\
       negate_map_around_selected_atoms_except_selected_atoms(
         xray_structure   = self.xray_structure,
         map_data         = target_map,
         negate_selection = negate_selection,
         atom_radius      = 1.5)
     self.fit_rotamers()
Example #33
def run_fit_rotatable(fmodel,
                      ref_model,
                      angular_step,
                      log=None,
                      use_h_omit_map=False,
                      map_type="2mFo-DFc"):
    pdb_hierarchy = ref_model.get_hierarchy()
    xrs = fmodel.xray_structure
    rotatable_h_selection = rotatable(
        pdb_hierarchy=pdb_hierarchy,
        mon_lib_srv=ref_model.get_mon_lib_srv(),
        restraints_manager=ref_model.get_restraints_manager(),
        log=log)
    rotatable_h_selection_1d = []
    for s in rotatable_h_selection:
        rotatable_h_selection_1d.extend(s[1])
    rotatable_h_selection_1d = flex.size_t(rotatable_h_selection_1d)
    rotatable_h_selection_1d_bool = flex.bool(xrs.scatterers().size(),
                                              rotatable_h_selection_1d)
    if (log is not None):
        print("Real-space grid search fit H (or D) atoms:", file=log)
        print("  start:  r_work=%6.4f r_free=%6.4f" %
              (fmodel.r_work(), fmodel.r_free()),
              file=log)
    if (use_h_omit_map):
        xrs_omit = fmodel.xray_structure.select(~rotatable_h_selection_1d_bool)
        fmodel.update_xray_structure(xray_structure=xrs_omit,
                                     update_f_calc=True)
        if (log is not None):
            print("  H omit: r_work=%6.4f r_free=%6.4f" %
                  (fmodel.r_work(), fmodel.r_free()),
                  file=log)
    fft_map = fmodel.electron_density_map().fft_map(
        resolution_factor=1. / 4.,
        map_type=map_type,
        symmetry_flags=maptbx.use_space_group_symmetry)
    fft_map.apply_sigma_scaling()
    map_data = fft_map.real_map_unpadded()
    fit_rotatable(pdb_hierarchy=pdb_hierarchy,
                  xray_structure=xrs,
                  rotatable_h_selection=rotatable_h_selection,
                  map_data=map_data)
    fmodel.update_xray_structure(xray_structure=xrs, update_f_calc=True)
    ref_model.xray_structure = xrs
    if (log is not None):
        print("  final:  r_work=%6.4f r_free=%6.4f" %
              (fmodel.r_work(), fmodel.r_free()),
              file=log)
Example #34
def exercise_split_unmerged():
  import random
  random.seed(42)
  flex.set_random_seed(42)

  from cctbx import crystal
  base_set = miller.build_set(
    crystal_symmetry=crystal.symmetry(
      unit_cell=(10,10,10,90,90,90), space_group_symbol="P1"),
    d_min=1.6,
    anomalous_flag=False)
  indices = base_set.indices()
  assert (len(indices) == 510)
  unmerged_hkl = flex.miller_index()
  unmerged_data = flex.double()
  unmerged_sigmas = flex.double()
  redundancies = flex.size_t()
  # XXX grossly overengineered, but I wanted to get a realistic CC to make sure
  # the reflections are being split properly
  for i, hkl in enumerate(indices):
    n_obs = min(8, 1 + i % 12)
    redundancies.append(n_obs)
    intensity_merged = (510 - i) + (510 % 27)
    for j in range(n_obs):
      unmerged_hkl.append(hkl)
      intensity = intensity_merged + 20 * (510 % (7 * (j+1)))
      sigma = max(0.5, i % 10)
      unmerged_data.append(intensity)
      unmerged_sigmas.append(sigma)
  assert (unmerged_hkl.size() == 2877)
  unmerged_array = miller.set(
    crystal_symmetry=base_set,
    indices=unmerged_hkl,
    anomalous_flag=False).array(data=unmerged_data, sigmas=unmerged_sigmas)
  split = miller.split_unmerged(
    unmerged_indices=unmerged_hkl,
    unmerged_data=unmerged_data,
    unmerged_sigmas=unmerged_sigmas)
  assert (split.data_1.size() == split.data_2.size() == 467)
  cc = miller.compute_cc_one_half(unmerged_array)
  assert approx_equal(cc, 0.861, eps=0.001)
  unmerged_array.setup_binner(n_bins=10)
  unmerged_array.set_observation_type_xray_intensity()
  result = unmerged_array.cc_one_half(use_binning=True)
  assert approx_equal(
    result.data[1:-1],
    [0.549, 0.789, 0.843, 0.835, 0.863, 0.860, 0.893, 0.847, 0.875, 0.859],
    eps=0.05)
Example #35
def exercise_1(mon_lib_srv, ener_lib):
    pdb_in = simple_pdb()
    params = pdb_interpretation.master_params.extract()
    processed_pdb_file = pdb_interpretation.process(mon_lib_srv=mon_lib_srv,
                                                    ener_lib=ener_lib,
                                                    params=params,
                                                    pdb_inp=pdb_in,
                                                    log=StringIO())
    grm = processed_pdb_file.geometry_restraints_manager()
    pdb_hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy
    sites_cart = pdb_hierarchy.atoms().extract_xyz()

    proxies = reference.add_coordinate_restraints(sites_cart=sites_cart)
    assert proxies.size() == 29, "expected 29, got %d" % proxies.size()
    import boost.python
    ext = boost.python.import_ext("mmtbx_reference_coordinate_ext")
    grads = flex.vec3_double(sites_cart.size(), (0.0, 0.0, 0.0))
    residual = ext.reference_coordinate_residual_sum(sites_cart=sites_cart,
                                                     proxies=proxies,
                                                     gradient_array=grads)
    assert approx_equal(residual, 0.0)

    #test selection
    ca_selection = pdb_hierarchy.get_peptide_c_alpha_selection()
    ca_sites_cart = sites_cart.select(ca_selection)
    proxies = reference.add_coordinate_restraints(sites_cart=ca_sites_cart,
                                                  selection=ca_selection)
    assert proxies.size() == 3, "expected 3, got %d" % proxies.size()
    tst_iselection = flex.size_t()
    for atom in pdb_hierarchy.atoms():
        if atom.name == " CA " or atom.name == " N  ":
            tst_iselection.append(atom.i_seq)
    tst_sites_cart = sites_cart.select(tst_iselection)
    proxies = reference.add_coordinate_restraints(sites_cart=tst_sites_cart,
                                                  selection=tst_iselection)
    assert proxies.size() == 6, "expected 6, got %d" % proxies.size()

    #test remove
    selection = flex.bool([False] * 29)
    proxies = proxies.proxy_remove(selection=selection)
    assert proxies.size() == 6, "expected 6, got %d" % proxies.size()
    proxies = proxies.proxy_remove(selection=ca_selection)
    assert proxies.size() == 3, "expected 3, got %d" % proxies.size()
    selection = flex.bool([True] * 29)
    proxies = proxies.proxy_remove(selection=selection)
    assert proxies.size() == 0, "expected 0, got %d" % proxies.size()
Example #36
 def formatted_distance(self, i_seq, j_seq, distance, rt_mx_ji):
   if rt_mx_ji is None: rt_mx_ji = unit_mx
   if self.covariance_matrix_cart is not None:
     cov = covariance.extract_covariance_matrix_for_sites(
       flex.size_t((i_seq,j_seq)),
       self.covariance_matrix_cart,
       self.parameter_map)
     if self.cell_covariance_matrix is not None:
       var = distance.variance(
         cov, self.cell_covariance_matrix, self.unit_cell, rt_mx_ji)
     else:
       var = distance.variance(cov, self.unit_cell, rt_mx_ji)
     if var > self.eps and not(self.fixed_distances is not None and
       ((i_seq, j_seq) in self.fixed_distances or
        (j_seq, i_seq) in self.fixed_distances)):
       return format_float_with_su(distance.distance_model, math.sqrt(var))
   return "%.4f" %distance.distance_model
Example #37
 def get_cluster(self):
   axis=[]
   atoms_to_rotate=[]
   use_in_target_selection = flex.size_t()
   counter = 0
   for atom in self.residue.atoms():
     if(atom.name.strip() in ["N", "C"]):
       axis.append(counter)
     else:
       atoms_to_rotate.append(counter)
     if(atom.name.strip() in self.backbone_atom_names):
       use_in_target_selection.append(counter)
     counter += 1
   return mmtbx.refinement.real_space.cluster(
     axis            = axis,
     atoms_to_rotate = atoms_to_rotate,
     selection       = use_in_target_selection)
Example #38
 def formatted_angle(self, i_seq, j_seq, k_seq, angle, rt_mx_ki):
   if rt_mx_ki is None: rt_mx_ki = unit_mx
   if self.covariance_matrix_cart is not None:
     cov = covariance.extract_covariance_matrix_for_sites(
       flex.size_t((i_seq,j_seq,k_seq)),
       self.covariance_matrix_cart,
       self.parameter_map)
     if self.cell_covariance_matrix is not None:
       var = angle.variance(cov, self.cell_covariance_matrix, self.unit_cell,
                            (unit_mx, unit_mx, rt_mx_ki))
     else:
       var = angle.variance(cov, self.unit_cell, (unit_mx, unit_mx, rt_mx_ki))
     if var > self.eps and not(self.fixed_angles is not None and
       ((i_seq, j_seq, k_seq) in self.fixed_angles or
         (k_seq, j_seq, i_seq) in self.fixed_angles)):
       return format_float_with_su(angle.angle_model, math.sqrt(var))
   return "%.1f" %angle.angle_model
Example #39
def expand_planarity_proxies(planarity_proxies, x_n_seq, related_x_i_seqs):
    result = cctbx.geometry_restraints.shared_planarity_proxy()
    planarity_proxy_t = cctbx.geometry_restraints.planarity_proxy
    for proxy in planarity_proxies:
        x_i_seqs_list = [
            tuple(related_x_i_seqs[i_seq]) for i_seq in proxy.i_seqs
        ]
        loop_n = [len(x_i_seqs) for x_i_seqs in x_i_seqs_list]
        sym_ops = proxy.sym_ops
        weights = proxy.weights / matrix.col(loop_n).product()
        for loop_i in nested_loop(loop_n):
            i_seqs = flex.size_t(
                [x_i_seqs[i] for x_i_seqs, i in zip(x_i_seqs_list, loop_i)])
            result.append(
                planarity_proxy_t(i_seqs=i_seqs,
                                  sym_ops=sym_ops,
                                  weights=weights).sort_i_seqs())
    return result
Example #40
  def exclude_H_on_links(self):
    origin_ids = linking_class()
    rm = self.model.get_restraints_manager()
    bond_proxies_simple, asu = rm.geometry.get_all_bond_proxies(
      sites_cart = self.model.get_sites_cart())
    elements = self.model.get_hierarchy().atoms().extract_element()
    exclusion_iseqs = list()
    exclusion_dict = dict()
    all_proxies = [p for p in bond_proxies_simple]
    for proxy in asu:
      all_proxies.append(proxy)
    # Loop through bond proxies to find links (origin_id != 0)
    for proxy in all_proxies:
      if(  isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
      elif(isinstance(proxy, ext.bond_asu_proxy)):    i,j=proxy.i_seq,proxy.j_seq
      else: assert 0 # never goes here
      if proxy.origin_id != 0:
        exclusion_iseqs.extend([i,j])
        exclusion_dict[i] = proxy.origin_id
        exclusion_dict[j] = proxy.origin_id
    sel_remove = flex.size_t()

    # Now find H atoms bound to linked atoms
    removed_dict = dict()
    for proxy in all_proxies:
      if(  isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
      elif(isinstance(proxy, ext.bond_asu_proxy)):    i,j=proxy.i_seq,proxy.j_seq
      else: assert 0 # never goes here
      if(elements[i] in ["H","D"] and j in exclusion_iseqs):
        sel_remove.append(i)
        removed_dict[i] = exclusion_dict[j]
      if(elements[j] in ["H","D"] and i in exclusion_iseqs):
        sel_remove.append(j)
        removed_dict[j] = exclusion_dict[i]
    #

    sl_removed = [(atom.id_str().replace('pdb=','').replace('"',''),
                   origin_ids.get_origin_key(removed_dict[atom.i_seq]))
        for atom in self.model.get_hierarchy().atoms().select(sel_remove)]
#    self.site_labels_removed = list(OrderedDict.fromkeys(sl_removed))
    self.sl_removed = sl_removed
    self.exclusion_iseqs = exclusion_iseqs
    #
    self.model = self.model.select(~flex.bool(self.model.size(), sel_remove))
Example #41
def exclude_h_on_coordinated_S(model): # XXX if edits used it should be like in exclude_h_on_SS
  rm = model.get_restraints_manager().geometry
  els = model.get_hierarchy().atoms().extract_element()
  # Find possibly coordinated S
  exclusion_list = ["H","D","T","S","O","P","N","C","SE"]
  sel_s = []
  for proxy in rm.pair_proxies().nonbonded_proxies.simple:
    i,j = proxy.i_seqs
    if(els[i] == "S" and not els[j] in exclusion_list): sel_s.append(i)
    if(els[j] == "S" and not els[i] in exclusion_list): sel_s.append(j)
  # Find H attached to possibly coordinated S
  bond_proxies_simple, asu = rm.get_all_bond_proxies(
    sites_cart = model.get_sites_cart())
  sel_remove = flex.size_t()
  for proxy in bond_proxies_simple:
    i,j = proxy.i_seqs
    if(els[i] in ["H","D"] and j in sel_s): sel_remove.append(i)
    if(els[j] in ["H","D"] and i in sel_s): sel_remove.append(j)
  return model.select(~flex.bool(model.size(), sel_remove))
Example #42
File: tst_miller.py Project: dials/cctbx
def exercise_match_indices():
  h0 = flex.miller_index(((1,2,3), (-1,-2,-3), (2,3,4), (-2,-3,-4), (3,4,5)))
  d0 = flex.double((1,2,3,4,5))
  h1 = flex.miller_index(((-1,-2,-3), (-2,-3,-4), (1,2,3), (2,3,4)))
  d1 = flex.double((10,20,30,40))
  mi = miller.match_indices(h0, h0)
  assert mi.have_singles() == 0
  assert list(mi.pairs()) == list(zip(range(5), range(5)))
  mi = miller.match_indices(h0, h1)
  assert tuple(mi.singles(0)) == (4,)
  assert tuple(mi.singles(1)) == ()
  assert tuple(mi.pairs()) == ((0,2), (1,0), (2,3), (3,1))
  assert tuple(mi.pair_selection(0)) == (1, 1, 1, 1, 0)
  assert tuple(mi.single_selection(0)) == (0, 0, 0, 0, 1)
  assert tuple(mi.pair_selection(1)) == (1, 1, 1, 1)
  assert tuple(mi.single_selection(1)) == (0, 0, 0, 0)
  assert tuple(mi.paired_miller_indices(0)) \
      == tuple(h0.select(mi.pair_selection(0)))
  l1 = list(mi.paired_miller_indices(1))
  l2 = list(h1.select(mi.pair_selection(1)))
  l1.sort()
  l2.sort()
  assert l1 == l2
  assert approx_equal(tuple(mi.plus(d0, d1)), (31, 12, 43, 24))
  assert approx_equal(tuple(mi.minus(d0, d1)), (-29,-8,-37,-16))
  assert approx_equal(tuple(mi.multiplies(d0, d1)), (30,20,120,80))
  assert approx_equal(tuple(mi.divides(d0, d1)), (1/30.,2/10.,3/40.,4/20.))
  assert approx_equal(tuple(mi.additive_sigmas(d0, d1)), [
    math.sqrt(x*x+y*y) for x,y in ((1,30), (2,10), (3,40), (4,20))])
  q = flex.size_t((3,2,0,4,1))
  h1 = h0.select(q)
  assert tuple(miller.match_indices(h1, h0).permutation()) == tuple(q)
  p = miller.match_indices(h0, h1).permutation()
  assert tuple(p) == (2,4,1,0,3)
  assert tuple(h1.select(p)) == tuple(h0)
  cd0 = [ complex(a,b) for (a,b) in ((1,1),(2,0),(3.5,-1.5),(5, -3),(-8,5.4)) ]
  cd1 = [ complex(a,b) for (a,b) in ((1,-1),(2,1),(0.5,1.5),(-1, -8),(10,0)) ]
  cd2 = flex.complex_double(cd0)
  cd3 = flex.complex_double(cd1)
  mi = miller.match_indices(h0, h0)
  assert approx_equal(tuple(mi.plus(cd2,cd3)),
    ((2+0j), (4+1j), (4+0j), (4-11j), (2+5.4j)))
Example #43
def exercise_1():
  pdb_in = simple_pdb()
  pdb_hierarchy = pdb_in.construct_hierarchy()
  sites_cart = pdb_hierarchy.atoms().extract_xyz()

  proxies = reference.add_coordinate_restraints(sites_cart=sites_cart)
  assert proxies.size() == 29, "expected 29, got %d" % proxies.size()
  import boost.python
  ext = boost.python.import_ext("mmtbx_reference_coordinate_ext")
  grads = flex.vec3_double(sites_cart.size(), (0.0,0.0,0.0))
  residual = ext.reference_coordinate_residual_sum(
      sites_cart=sites_cart,
      proxies=proxies,
      gradient_array=grads)
  assert approx_equal(residual, 0.0)

  #test selection
  ca_selection = pdb_hierarchy.get_peptide_c_alpha_selection()
  ca_sites_cart = sites_cart.select(ca_selection)
  proxies = reference.add_coordinate_restraints(
      sites_cart=ca_sites_cart,
      selection=ca_selection)
  assert proxies.size() == 3, "expected 3, got %d" % proxies.size()
  tst_iselection = flex.size_t()
  for atom in pdb_hierarchy.atoms():
    if atom.name == " CA " or atom.name == " N  ":
      tst_iselection.append(atom.i_seq)
  tst_sites_cart = sites_cart.select(tst_iselection)
  proxies = reference.add_coordinate_restraints(
      sites_cart=tst_sites_cart,
      selection=tst_iselection)
  assert proxies.size() == 6, "expected 6, got %d" % proxies.size()

  #test remove
  selection = flex.bool([False]*29)
  proxies = proxies.proxy_remove(selection=selection)
  assert proxies.size() == 6, "expected 6, got %d" % proxies.size()
  proxies = proxies.proxy_remove(selection=ca_selection)
  assert proxies.size() == 3, "expected 3, got %d" % proxies.size()
  selection = flex.bool([True]*29)
  proxies = proxies.proxy_remove(selection=selection)
  assert proxies.size() == 0, "expected 0, got %d" % proxies.size()
Example #44
 def pack_variables(O, xray_structure=None):
     if (xray_structure is None):
         xray_structure = O.xray_structure
     O.x = flex.double()
     O.x_info = []
     O.gact_indices = flex.size_t()
     O.dynamic_shift_limits = []
     site_limits = [
         0.15 / p for p in xray_structure.unit_cell().parameters()[:3]
     ]
     d_min = O.f_obs.d_min()
     i_all = 0
     sstab = xray_structure.site_symmetry_table()
     for i_sc, sc in enumerate(xray_structure.scatterers()):
         assert sc.flags.use_u_iso()
         assert not sc.flags.use_u_aniso()
         #
         site_symmetry = sstab.get(i_sc)
         if (site_symmetry.is_point_group_1()):
             p = sc.site
             l = site_limits
         else:
             p = site_symmetry.site_constraints().independent_params(
                 all_params=sc.site)
             l = site_symmetry.site_constraints().independent_params(
                 all_params=site_limits)
         O.x.extend(flex.double(p))
         O.x_info.extend([(i_sc, "xyz"[i]) for i in xrange(len(p))])
         O.dynamic_shift_limits.extend(
             [dynamic_shift_limit_site(width=width) for width in l])
         for i in xrange(len(p)):
             O.gact_indices.append(i_all)
             i_all += 1
         #
         O.x.append(sc.u_iso)
         O.x_info.append((i_sc, "u"))
         O.dynamic_shift_limits.append(
             dynamic_shift_limit_u_iso(d_min=d_min))
         O.gact_indices.append(i_all)
         i_all += 1
         #
         i_all += 3  # occ, fp, fdp
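          # i_all walks over every gradient slot of this scatterer (site
          # parameters, u_iso, then occ/fp/fdp); gact_indices records only the
          # refined slots so the packed vector O.x can be mapped back into the
          # full gradient array.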
Example #45
 def compute_step_using_curvs(O):
   if (len(O.xfgc_infos) > 1 and O.params.use_gradient_flips):
     prev = O.xfgc_infos[-2]
   else:
     prev = None
   dests = flex.double()
   approx_quads = flex.size_t()
   if (O.params.try_approx_curvs):
     O.approx_curvs()
   for ix,dsl,g,c in zip(count(), O.dynamic_shift_limits, O.grads, O.curvs):
     limit = dsl.pair(x=O.x[ix]).get(grad=g)
     dest = None
     if (prev is not None):
       prev_g = prev.grads[ix]
       if (sign0(g) != sign0(prev_g)):
         x = O.x[ix]
         prev_x = prev.x[ix]
         xm = (g*prev_x - prev_g*x) / (g - prev_g)
         dest = xm - x
         if   (dest >  limit): dest =  limit
         elif (dest < -limit): dest = -limit
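          # xm is a secant-style estimate of where the gradient changes sign
          # between the previous and current values of this parameter; the step
          # towards it is clamped to the dynamic shift limit.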
     if (dest is None):
       if (c > 0
             and O.params.approx_quad_limit_factor > 0
             and abs(g) < O.params.approx_quad_limit_factor * limit * c):
         dest = -g / c
         approx_quads.append(ix)
       else:
         dest = -delta_estimation_minus_cos(
           limit=limit, grad=g, curv=c)
     dests.append(dest)
   O.xfgc_infos[-1].approx_quads = approx_quads
   O.update_dests_using_bfgs_formula(dests)
   O.x_before_line_search = O.xfgc_infos[-1].x
   if (O.params.use_line_search):
     dest_adj = O.line_search(dests, stpmax=1.0)
     print "dest_adj:", dest_adj
     if (dest_adj is not None and dest_adj < 1):
       dests *= dest_adj
   if (O.params.show_dests): O.show_dests(dests)
   O.x = O.x_before_line_search + dests
   O.update_fgc(is_iterate=True)
Example #46
 def compute_negate_mask(self, residue, radius):
   residue_i_selection = residue.atoms().extract_i_seq()
   residue_b_selection = flex.bool(self.sites_cart.size(), residue_i_selection)
   selection_around_residue = self.special_position_settings.pair_generator(
     sites_cart      = self.sites_cart,
     distance_cutoff = radius
       ).neighbors_of(primary_selection = residue_b_selection).iselection()
   selection_around_residue_minus_residue = flex.size_t(
     list(set(selection_around_residue).difference(
       set(residue_i_selection)).difference(self.selection_water_as_set)))
   sites_cart = self.sites_cart.select(selection_around_residue_minus_residue)
   sites_frac_p1 = self.crystal_symmetry.unit_cell().fractionalize(
     self.crystal_symmetry.expand_to_p1(sites_cart =
       self.sites_cart.select(selection_around_residue_minus_residue)))
   return cctbx_maptbx_ext.mask(
     sites_frac = sites_frac_p1,
     unit_cell  = self.crystal_symmetry.cell_equivalent_p1().unit_cell(),
     n_real     = self.target_map.all(),
     mask_value_inside_molecule = -10,
     mask_value_outside_molecule = 0,
     radii = flex.double(sites_frac_p1.size(), 2.))
Example #47
File: cosym.py  Project: dials/cctbx
    def task_a(params):
        # add an anchor
        sampling_experiments_for_cosym = ExperimentList()
        sampling_reflections_for_cosym = []
        if params.modify.cosym.anchor:
            from xfel.merging.application.model.crystal_model import crystal_model
            #P = Timer("construct the anchor reference model")
            XM = crystal_model(params=params, purpose="cosym")
            model_intensities = XM.run([], [])
            #del P
            from dxtbx.model import Experiment, Crystal
            from scitbx.matrix import sqr
            O = sqr(model_intensities.unit_cell().orthogonalization_matrix()
                    ).transpose().elems
            real_a = (O[0], O[1], O[2])
            real_b = (O[3], O[4], O[5])
            real_c = (O[6], O[7], O[8])
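            # the columns of the orthogonalization matrix are the real-space
            # cell vectors, so after transposing, consecutive triples of O give
            # the a, b and c vectors for the reference Crystal constructed below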
            nc = Crystal(real_a, real_b, real_c,
                         model_intensities.space_group())
            sampling_experiments_for_cosym.append(
                Experiment(crystal=nc)
            )  # prepends the reference model to the cosym E-list
            from dials.array_family import flex

            exp_reflections = flex.reflection_table()
            exp_reflections['intensity.sum.value'] = model_intensities.data()
            exp_reflections['intensity.sum.variance'] = flex.pow(
                model_intensities.sigmas(), 2)
            exp_reflections['miller_index'] = model_intensities.indices()
            exp_reflections[
                'miller_index_asymmetric'] = model_intensities.indices()
            exp_reflections['flags'] = flex.size_t(
                model_intensities.size(),
                flex.reflection_table.flags.integrated_sum)

            # prepare individual reflection tables for each experiment
            cosym.experiment_id_detail(sampling_experiments_for_cosym,
                                       sampling_reflections_for_cosym,
                                       exp_reflections)
        return sampling_experiments_for_cosym, sampling_reflections_for_cosym
Example #48
    def exclude_H_on_disulfides(self):
        rm = self.model.get_restraints_manager()
        bond_proxies_simple, asu = rm.geometry.get_all_bond_proxies(
            sites_cart=self.model.get_sites_cart())
        elements = self.model.get_hierarchy().atoms().extract_element()
        ss_i_seqs = []
        all_proxies = [p for p in bond_proxies_simple]
        for proxy in asu:
            all_proxies.append(proxy)
        for proxy in all_proxies:
            if (isinstance(proxy, ext.bond_simple_proxy)): i, j = proxy.i_seqs
            elif (isinstance(proxy, ext.bond_asu_proxy)):
                i, j = proxy.i_seq, proxy.j_seq
            else:
                assert 0  # never goes here
            if ([
                    elements[i], elements[j]
            ].count("S") == 2):  # XXX may be coordinated if metal edits used
                ss_i_seqs.extend([i, j])
        sel_remove = flex.size_t()
        for proxy in all_proxies:
            if (isinstance(proxy, ext.bond_simple_proxy)): i, j = proxy.i_seqs
            elif (isinstance(proxy, ext.bond_asu_proxy)):
                i, j = proxy.i_seq, proxy.j_seq
            else:
                assert 0  # never goes here
            if (elements[i] in ["H", "D"] and j in ss_i_seqs):
                sel_remove.append(i)
            if (elements[j] in ["H", "D"] and i in ss_i_seqs):
                sel_remove.append(j)
        #
        sl_disulfides = [
            atom.id_str().replace('pdb=', '').replace('"', '')
            for atom in self.model.get_hierarchy().atoms().select(sel_remove)
        ]
        self.site_labels_disulfides = list(OrderedDict.fromkeys(sl_disulfides))

        self.model = self.model.select(
            ~flex.bool(self.model.size(), sel_remove))
Example #49
    def map_to_grid(self, sweep, centroids):
        b_iso = 200
        beam = sweep.get_beam()
        wavelength = beam.get_wavelength()
        d_min = self.d_min

        n_points = self.gridding[0]
        rlgrid = 2 / (d_min * n_points)

        # real space FFT grid dimensions
        cell_lengths = [n_points * d_min / 2 for i in range(3)]
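        # with n_points samples spanning -1/d_min..+1/d_min along each axis the
        # reciprocal-space step is rlgrid = 2/(d_min*n_points), so the matching
        # real-space FFT cell edge is 1/rlgrid = n_points*d_min/2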
        self.fft_cell = uctbx.unit_cell(cell_lengths + [90] * 3)
        self.crystal_symmetry = crystal.symmetry(
            unit_cell=self.fft_cell, space_group_symbol="P1"
        )

        print("FFT gridding: (%i,%i,%i)" % self.gridding)

        grid = flex.double(flex.grid(self.gridding), 0)

        reflections_used_for_indexing = flex.size_t()

        for i_pnt, point in enumerate(centroids):
            point = scitbx.matrix.col(point)
            spot_resolution = 1 / point.length()
            if spot_resolution < d_min:
                continue

            grid_coordinates = [
                int(round(point[i] / rlgrid) + n_points / 2) for i in range(3)
            ]
            if max(grid_coordinates) >= n_points:
                continue  # this reflection is outside the grid
            if min(grid_coordinates) < 0:
                continue  # this reflection is outside the grid
            T = math.exp(b_iso * point.length() ** 2 / 4)
            grid[grid_coordinates] = T

        self.reciprocal_space_grid = grid
Example #50
    def extract_residues(self,
                         model_i,
                         number_previous_scatters,
                         combine=True):
        result = []
        model = self.pdb_hierarchy.models()[model_i]
        rm = []
        for chain in model.chains():
            for rg in chain.residue_groups():
                rg_i_seqs = []
                r_name = None
                for ag in rg.atom_groups():
                    if (r_name is None): r_name = ag.resname
                    for atom in ag.atoms():
                        if (self.selection[atom.i_seq -
                                           number_previous_scatters]):
                            rg_i_seqs.append(atom.i_seq -
                                             number_previous_scatters)
                if (len(rg_i_seqs) != 0):
                    rm.append(
                        group_args(selection=flex.size_t(rg_i_seqs),
                                   name=r_name,
                                   model_id=model_i,
                                   resid=rg.resid(),
                                   chain_id=chain.id))
        result.append(rm)

        if (combine):
            r0 = result[0]
            for r in result[1:]:
                for i, ri in enumerate(r):
                    r0[i].selection.extend(ri.selection)
                    assert r0[i].name == ri.name
        else:
            r0 = result[0]
            for r in result[1:]:
                r0.extend(r)

        return r0
Example #51
 def fit_c_beta(self, c_beta_rotation_cluster):
     selection = flex.size_t(c_beta_rotation_cluster.selection)
     sites_cart = self.residue.atoms().extract_xyz()
     start_target_value = self.get_target_value(
         sites_cart=sites_cart,
         selection=selection,
         target_map=self.target_map_for_cb)
     ro = ext.fit(
         target_value=start_target_value,
         axes=[c_beta_rotation_cluster.axis],
         rotatable_points_indices=[c_beta_rotation_cluster.atoms_to_rotate],
         angles_array=[[i * math.pi / 180] for i in range(-20, 21, 1)],
         density_map=self.target_map_for_cb,
         all_points=sites_cart,
         unit_cell=self.unit_cell,
         selection=selection,
         sin_table=self.sin_cos_table.sin_table,
         cos_table=self.sin_cos_table.cos_table,
         step=self.sin_cos_table.step,
         n=self.sin_cos_table.n)
     sites_cart_result = ro.result()
     if (sites_cart_result.size() > 0):
         self.residue.atoms().set_xyz(sites_cart_result)
Example #52
def exclude_h_on_SS(model):
  rm = model.get_restraints_manager()
  bond_proxies_simple, asu = rm.geometry.get_all_bond_proxies(
    sites_cart = model.get_sites_cart())
  els = model.get_hierarchy().atoms().extract_element()
  ss_i_seqs = []
  all_proxies = [p for p in bond_proxies_simple]
  for proxy in asu:
    all_proxies.append(proxy)
  for proxy in all_proxies:
    if(  isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
    elif(isinstance(proxy, ext.bond_asu_proxy)):    i,j=proxy.i_seq,proxy.j_seq
    else: assert 0 # never goes here
    if([els[i],els[j]].count("S")==2): # XXX may be coordinated if metal edits used
      ss_i_seqs.extend([i,j])
  sel_remove = flex.size_t()
  for proxy in all_proxies:
    if(  isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
    elif(isinstance(proxy, ext.bond_asu_proxy)):    i,j=proxy.i_seq,proxy.j_seq
    else: assert 0 # never goes here
    if(els[i] in ["H","D"] and j in ss_i_seqs): sel_remove.append(i)
    if(els[j] in ["H","D"] and i in ss_i_seqs): sel_remove.append(j)
  return model.select(~flex.bool(model.size(), sel_remove))
Example #53
def exercise_zeolite_atlas(distance_cutoff=3.5):
    atlas_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/misc/strudat_zeolite_atlas",
        test=os.path.isfile)
    if (atlas_file is None):
        print("Skipping exercise_zeolite_atlas(): input file not available")
        return
    all_entries = strudat.read_all_entries(open(atlas_file))
    for i, entry in enumerate(all_entries.entries):
        structure = entry.as_xray_structure()
        if ("--full" in sys.argv[1:] or i % 20 == 0):
            tst_direct_space_asu.exercise_neighbors_pair_generators(
                structure=structure, verbose="--Verbose" in sys.argv[1:])
        asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
        pair_generator = crystal.neighbors_fast_pair_generator(
            asu_mappings=asu_mappings, distance_cutoff=distance_cutoff)
        bond_counts = flex.size_t(structure.scatterers().size(), 0)
        for pair in pair_generator:
            bond_counts[pair.i_seq] += 1
            if (pair.j_sym == 0):
                bond_counts[pair.j_seq] += 1
        for atom, bond_count in zip(entry.atoms, bond_counts):
            assert atom.connectivity is not None
            assert atom.connectivity == bond_count
Example #54
  def optimise_parameters(self, fo_sq, fc_sq,
                          scale_factor, n_independent_params):
    """ Find optimal values of a and b that give a flat analysis of the variance
        when binned by Fc/max(Fc), and a goodness of fit close to 1.

        This is done in a grid search fashion similar to Shelxl.

        self is not modified in place; instead a new instance of the weighting
        scheme is returned.

        It is intended that f_calc should already contain the contribution from
        f_mask (if a solvent mask is used).
    """
    assert fc_sq.is_xray_intensity_array()
    weighting = ext.mainstream_shelx_weighting(a=self.a, b=self.b)

    def compute_chi_sq(fo_sq, fc_sq, a,b):
      weighting.a = a
      weighting.b = b
      weights = weighting(
        fo_sq.data(), fo_sq.sigmas(), fc_sq.data(), scale_factor)
      return (flex.sum(
        weights * flex.pow2(fo_sq.data() - scale_factor * fc_sq.data())))

    fo_sq = fo_sq.deep_copy()
    fo_sq.data().set_selected(fo_sq.data() < 0, 0)

    fo2 = fo_sq.data().deep_copy()
    fo2 /= scale_factor
    sigmas = fo_sq.sigmas() / scale_factor
    sigmas_sq = flex.pow2(sigmas)
    fc2 = fc_sq.data()

    # determine starting values for a and b, formulae taken from shelxl code
    p = (fo2 + 2 * fc2)/3
    p_sq = flex.pow2(p)
    x = flex.sum((flex.pow2(fo2-fc2)-sigmas) * (p_sq/sigmas_sq))
    y = flex.sum( flex.pow2(p_sq)/sigmas_sq)
    z = flex.sum(p)
    start_a = math.sqrt(max(0.0001, 0.64*x/max(1e-8, y)))
    start_b = 0.5 * z * start_a**2 /fo_sq.size()
    a_step = 0.2 * start_a
    b_step = 0.4 * start_b

    # sort data and setup binning by fc/fc_max
    fc_sq_over_fc_sq_max = fc_sq.data()/flex.max(fc_sq.data())
    permutation = flex.sort_permutation(fc_sq_over_fc_sq_max)
    fc_sq_over_fc_sq_max = fc_sq.customized_copy(
      data=fc_sq_over_fc_sq_max).select(permutation)
    fc_sq = fc_sq.select(permutation)
    fo_sq = fo_sq.select(permutation)
    n_bins = 10
    bin_max = 0
    bin_limits = flex.size_t(1, 0)
    bin_count = flex.size_t()
    for i in range(n_bins):
      bin_limits.append(int(math.ceil((i+1) * fc_sq.size()/n_bins)))
      bin_count.append(bin_limits[i+1] - bin_limits[i])

    n = fo_sq.size()//(fo_sq.size()-n_independent_params)

    # search on a 9x9 grid to determine best values of a and b
    gridding = flex.grid(9,9)
    while (a_step > 1e-4 and b_step > 5e-3):
      tmp = flex.double(gridding, 0)
      binned_chi_sq = [tmp.deep_copy() for i in range(n_bins)]
      start_a = max(start_a, 4*a_step) - 4*a_step
      start_b = max(start_b, 4*b_step) - 4*b_step
      for i_bin in range(n_bins):
        sel = flex.size_t_range(bin_limits[i_bin], bin_limits[i_bin+1])
        fc2 = fc_sq.select(sel)
        fo2 = fo_sq.select(sel)
        b = start_b
        for j in range(9):
          a = start_a
          b += b_step
          for k in range(9):
            a += a_step
            binned_chi_sq[i_bin][j,k] += compute_chi_sq(fo2, fc2, a, b)
      min_variance = 9e9
      j_min, k_min = (0, 0)
      for j in range(9):
        for k in range(9):
          variance = 0
          for i_bin in range(n_bins):
            if bin_count[i_bin] == 0: continue
            goof = math.sqrt(binned_chi_sq[i_bin][j,k]*n/bin_count[i_bin])
            variance += (goof-1)**2
          min_variance = min(variance, min_variance)
          if variance == min_variance:
            j_min = j
            k_min = k
      start_a += k_min*a_step
      start_b += j_min*b_step
      if k_min == 8:
        a_step *= 2
        continue
      elif k_min != 0:
        a_step /= 4
      if j_min == 8:
        b_step *= 2
        continue
      elif j_min != 0:
        b_step /=4
      if start_a <= 1e-4: a_step /= 4
      if start_b <= 1e-3: b_step /= 4
    if start_a > 0.2:
      start_a = 0.2
      start_b = 0
    weighting.a = start_a
    weighting.b = start_b
    return weighting
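# A standalone sketch (an illustration, not the ext.mainstream_shelx_weighting
# source) of the SHELX-type weight the grid search above is tuning:
# w = 1 / (sigma^2(Fo^2) + (a*P)^2 + b*P) with P = (max(Fo^2, 0) + 2*Fc^2) / 3,
# the same "p" computed in optimise_parameters. The helper name and the numbers
# below are made up for the example.
def shelx_type_weights(fo_sq, sigmas, fc_sq, a, b):
  from cctbx.array_family import flex
  fo2 = fo_sq.deep_copy()
  fo2.set_selected(fo2 < 0, 0)  # clamp negative intensities, as done above
  p = (fo2 + 2 * fc_sq) / 3
  denom = flex.pow2(sigmas) + flex.pow2(a * p) + b * p
  return flex.double(denom.size(), 1.0) / denom

def exercise_shelx_type_weights():
  from cctbx.array_family import flex
  fo_sq  = flex.double([120.0, 85.0])
  sigmas = flex.double([10.0, 9.0])
  fc_sq  = flex.double([115.0, 90.0])
  w = shelx_type_weights(fo_sq, sigmas, fc_sq, a=0.05, b=0.2)
  # the quantity compute_chi_sq() above sums per bin (scale factor taken as 1)
  chi_sq = flex.sum(w * flex.pow2(fo_sq - fc_sq))
  assert chi_sq > 0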
Example #55
def exercise():
    verbose = "--verbose" in sys.argv[1:]
    quick = "--quick" in sys.argv[1:]
    list_cif = server.mon_lib_list_cif()
    srv = server.server(list_cif=list_cif)
    print("srv.root_path:", srv.root_path)
    default_switch = "--default_off" not in sys.argv[1:]
    if (False or default_switch):
        monomers_with_commas = {}
        atom_id_counts = dicts.with_default_value(0)
        for row_id in list_cif.cif["comp_list"]["_chem_comp.id"]:
            if (quick and random.random() < 0.95): continue
            if (verbose): print("id:", row_id)
            comp_comp_id = srv.get_comp_comp_id_direct(comp_id=row_id)
            if (comp_comp_id is None):
                print("Could not instantiating comp_comp_id(%s)" % row_id)
            else:
                has_primes = False
                has_commas = False
                for atom in comp_comp_id.atom_list:
                    atom_id_counts[atom.atom_id] += 1
                    if (atom.atom_id.find("'") >= 0):
                        has_primes = True
                    if (atom.atom_id.find(",") >= 0):
                        has_commas = True
                if (has_commas):
                    monomers_with_commas[
                        comp_comp_id.chem_comp.id] = has_primes
        print(monomers_with_commas)
        atom_ids = flex.std_string(list(atom_id_counts.keys()))
        counts = flex.size_t(list(atom_id_counts.values()))
        perm = flex.sort_permutation(data=counts, reverse=True)
        atom_ids = atom_ids.select(perm)
        counts = counts.select(perm)
        for atom_id, count in zip(atom_ids, counts):
            print(atom_id, count)
    if (False or default_switch):
        for row in list_cif.cif["comp_list"]["_chem_comp"].iterrows():
            if (quick and random.random() < 0.95): continue
            if (verbose): print("id:", row["_chem_comp.id"])
            comp_comp_id = srv.get_comp_comp_id_direct(
                comp_id=row["_chem_comp.id"])
            check_chem_comp(cif_types.chem_comp(**row), comp_comp_id)
        if ("--pickle" in sys.argv[1:]):
            easy_pickle.dump("mon_lib.pickle", srv)
    if (False or default_switch):
        comp = srv.get_comp_comp_id_direct("GLY")
        comp.show()
        mod = srv.mod_mod_id_dict["COO"]
        comp.apply_mod(mod).show()
    if (False or default_switch):
        comp = srv.get_comp_comp_id_direct("LYS")
        comp.show()
        mod = srv.mod_mod_id_dict["B2C"]
        comp.apply_mod(mod).show()
    if (False or default_switch):
        for row in list_cif.cif["comp_list"]["_chem_comp"].iterrows():
            if (quick and random.random() < 0.95): continue
            comp_comp_id = srv.get_comp_comp_id_direct(row["_chem_comp.id"])
            if (comp_comp_id is not None):
                if (comp_comp_id.classification == "peptide"):
                    print(comp_comp_id.chem_comp.id,
                          comp_comp_id.chem_comp.name,
                          end=' ')
                    print(row["_chem_comp.group"], end=' ')
                    grp = row["_chem_comp.group"].lower().strip()
                    if (grp not in ("l-peptide", "d-peptide", "polymer")):
                        print("LOOK", end=' ')
                        #if (not os.path.isdir("look")): os.makedirs("look")
                        #open("look/%s.cif" % row["_chem_comp.id"], "w").write(
                        #open(comp_comp_id.file_name).read())
                    print()
                elif (row["_chem_comp.group"].lower().find("peptide") >= 0
                      or comp_comp_id.chem_comp.group.lower().find("peptide")
                      >= 0):
                    print(comp_comp_id.chem_comp.id,
                          comp_comp_id.chem_comp.name,
                          end=' ')
                    print(row["_chem_comp.group"], "MISMATCH")
                if (comp_comp_id.classification in ("RNA", "DNA")):
                    print(comp_comp_id.chem_comp.id,
                          comp_comp_id.chem_comp.name,
                          end=' ')
                    print(row["_chem_comp.group"], end=' ')
                    if (comp_comp_id.classification !=
                            row["_chem_comp.group"].strip()):
                        print(comp_comp_id.classification, "MISMATCH", end=' ')
                    print()
                elif (row["_chem_comp.group"].lower().find("NA") >= 0
                      or comp_comp_id.chem_comp.group.lower().find("NA") >= 0):
                    print(comp_comp_id.chem_comp.id,
                          comp_comp_id.chem_comp.name,
                          end=' ')
                    print(row["_chem_comp.group"], "MISMATCH")
    if (False or default_switch):
        for row in list_cif.cif["comp_list"]["_chem_comp"].iterrows():
            if (quick and random.random() < 0.95): continue
            comp_comp_id = srv.get_comp_comp_id_direct(row["_chem_comp.id"])
            if (comp_comp_id is not None):
                planes = comp_comp_id.get_planes()
                for plane in planes:
                    dist_esd_dict = {}
                    for plane_atom in plane.plane_atoms:
                        dist_esd_dict[str(plane_atom.dist_esd)] = 0
                    # FIXME: might break compat for py2/3 because indexing a values call
                    if (len(dist_esd_dict) != 1
                            or list(dist_esd_dict.keys())[0] != "0.02"):
                        print(comp_comp_id.chem_comp.id,
                              plane.plane_id,
                              end=' ')
                        print(list(dist_esd_dict.keys()))
    if (False or default_switch):
        standard_amino_acids = [
            "GLY", "VAL", "ALA", "LEU", "ILE", "PRO", "MET", "PHE", "TRP",
            "SER", "THR", "TYR", "CYS", "ASN", "GLN", "ASP", "GLU", "LYS",
            "ARG", "HIS"
        ]
        for row in list_cif.cif["comp_list"]["_chem_comp"].iterrows():
            is_standard_aa = row["_chem_comp.id"] in standard_amino_acids
            if (1 and not is_standard_aa):
                continue
            comp_comp_id = srv.get_comp_comp_id_direct(row["_chem_comp.id"])
            if (is_standard_aa):
                assert comp_comp_id is not None
                assert comp_comp_id.chem_comp.group.strip() == "L-peptide"
            if (comp_comp_id is not None):
                print(comp_comp_id.chem_comp.id.strip(), end=' ')
                print(comp_comp_id.chem_comp.name.strip(), end=' ')
                print(comp_comp_id.chem_comp.group.strip())
                for tor in comp_comp_id.tor_list:
                    print("  tor:", tor.atom_id_1, tor.atom_id_2, end=' ')
                    print(tor.atom_id_3,
                          tor.atom_id_4,
                          tor.value_angle,
                          end=' ')
                    print(tor.value_angle_esd, tor.period)
                for chir in comp_comp_id.chir_list:
                    print("  chir:",
                          chir.atom_id_centre,
                          chir.atom_id_1,
                          end=' ')
                    print(chir.atom_id_2, chir.atom_id_3, chir.volume_sign)
    if (False or default_switch):
        elib = server.ener_lib()
        if (False or default_switch):
            for syn in elib.lib_synonym.items():
                print(syn)
        if (False or default_switch):
            for vdw in elib.lib_vdw:
                vdw.show()
    print("OK")
Example #56
    def scale_frame_detail(self, result, file_name, db_mgr, out):
        # If the pickled integration file does not contain a wavelength,
        # fall back on the value given on the command line.  XXX The
        # wavelength parameter should probably be removed from master_phil
        # once all pickled integration files contain it.
        if ("wavelength" in result):
            wavelength = result["wavelength"]
        elif (self.params.wavelength is not None):
            wavelength = self.params.wavelength
        else:
            # XXX Give error, or raise exception?
            return None
        assert (wavelength > 0)

        observations = result["observations"][0]
        cos_two_polar_angle = result["cos_two_polar_angle"]

        assert observations.size() == cos_two_polar_angle.size()
        tt_vec = observations.two_theta(wavelength)
        #print "mean tt degrees",180.*flex.mean(tt_vec.data())/math.pi
        cos_tt_vec = flex.cos(tt_vec.data())
        sin_tt_vec = flex.sin(tt_vec.data())
        cos_sq_tt_vec = cos_tt_vec * cos_tt_vec
        sin_sq_tt_vec = sin_tt_vec * sin_tt_vec
        P_nought_vec = 0.5 * (1. + cos_sq_tt_vec)

        F_prime = -1.0  # Hard-coded value defines the incident polarization axis
        P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq_tt_vec
        # XXX added as a diagnostic
        prange = P_nought_vec - P_prime

        other_F_prime = 1.0
        otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec
        otherprange = P_nought_vec - otherP_prime
        diff2 = flex.abs(prange - otherprange)
        print "mean diff is", flex.mean(diff2), "range", flex.min(
            diff2), flex.max(diff2)
        # XXX done
        observations = observations / (P_nought_vec - P_prime)
        # This corrects observations for polarization assuming 100% polarization on
        # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0)
        # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre
        # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15.
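        # In that notation the factor applied above is P0 - P', with
        # P0 = (1 + cos^2(2*theta))/2 and P' = 0.5*F'*cos(2*psi)*sin^2(2*theta),
        # where cos(2*psi) is cos_two_polar_angle and F' = -1 picks the
        # polarization axis; each observation is divided by (P0 - P').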

        print "Step 3. Correct for polarization."
        indexed_cell = observations.unit_cell()

        observations_original_index = observations.deep_copy()
        if result.get(
                "model_partialities", None
        ) is not None and result["model_partialities"][0] is not None:
            # some recordkeeping useful for simulations
            partialities_original_index = observations.customized_copy(
                crystal_symmetry=self.miller_set.crystal_symmetry(),
                data=result["model_partialities"][0]["data"],
                sigmas=flex.double(result["model_partialities"][0]
                                   ["data"].size()),  #dummy value for sigmas
                indices=result["model_partialities"][0]["indices"],
            ).resolution_filter(d_min=self.params.d_min)

        assert len(observations_original_index.indices()) == len(
            observations.indices())

        # Now manipulate the data to conform to unit cell, asu, and space group
        # of reference.  The resolution will be cut later.
        # Only works if there is NOT an indexing ambiguity!
        observations = observations.customized_copy(
            anomalous_flag=not self.params.merge_anomalous,
            crystal_symmetry=self.miller_set.crystal_symmetry()).map_to_asu()

        observations_original_index = observations_original_index.customized_copy(
            anomalous_flag=not self.params.merge_anomalous,
            crystal_symmetry=self.miller_set.crystal_symmetry())
        print "Step 4. Filter on global resolution and map to asu"
        print >> out, "Data in reference setting:"
        #observations.show_summary(f=out, prefix="  ")
        show_observations(observations, out=out)

        #if self.params.significance_filter.apply is True:
        #  raise Exception("significance filter not implemented in samosa")
        if self.params.significance_filter.apply is True:  #------------------------------------
            # Apply an I/sigma filter ... accept resolution bins only if they
            #   have significant signal; tends to screen out higher resolution observations
            #   if the integration model doesn't quite fit
            N_obs_pre_filter = observations.size()
            N_bins_small_set = N_obs_pre_filter // self.params.significance_filter.min_ct
            N_bins_large_set = N_obs_pre_filter // self.params.significance_filter.max_ct

            # Ensure there is at least one bin.
            N_bins = max([
                min([self.params.significance_filter.n_bins,
                     N_bins_small_set]), N_bins_large_set, 1
            ])
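            # i.e. clamp N_bins so each bin holds roughly between min_ct and
            # max_ct observations, and never drops below a single bin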
            print "Total obs %d Choose n bins = %d" % (N_obs_pre_filter,
                                                       N_bins)
            bin_results = show_observations(observations,
                                            out=out,
                                            n_bins=N_bins)
            #show_observations(observations, out=sys.stdout, n_bins=N_bins)
            acceptable_resolution_bins = [
                bin.mean_I_sigI > self.params.significance_filter.sigma
                for bin in bin_results
            ]
            acceptable_nested_bin_sequences = [
                i for i in xrange(len(acceptable_resolution_bins))
                if False not in acceptable_resolution_bins[:i + 1]
            ]
            if len(acceptable_nested_bin_sequences) == 0:
                return null_data(file_name=file_name,
                                 log_out=out.getvalue(),
                                 low_signal=True)
            else:
                N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1
                imposed_res_filter = float(bin_results[N_acceptable_bins -
                                                       1].d_range.split()[2])
                imposed_res_sel = observations.resolution_filter_selection(
                    d_min=imposed_res_filter)
                observations = observations.select(imposed_res_sel)
                observations_original_index = observations_original_index.select(
                    imposed_res_sel)
                print "New resolution filter at %7.2f" % imposed_res_filter, file_name
            print "N acceptable bins", N_acceptable_bins
            print "Old n_obs: %d, new n_obs: %d" % (N_obs_pre_filter,
                                                    observations.size())
            print "Step 5. Frame by frame resolution filter"
            # Finished applying the binwise I/sigma filter---------------------------------------

        if self.params.raw_data.sdfac_auto is True:
            raise Exception("sdfac auto not implemented in samosa.")

        print "Step 6.  Match to reference intensities, filter by correlation, filter out negative intensities."
        assert len(observations_original_index.indices()) \
          ==   len(observations.indices())

        data = frame_data(self.n_refl, file_name)
        data.set_indexed_cell(indexed_cell)
        data.d_min = observations.d_min()

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        if self.i_model is not None:
            assert len(self.i_model.indices()) == len(self.miller_set.indices()) \
              and  (self.i_model.indices() ==
                    self.miller_set.indices()).count(False) == 0

        matches = miller.match_multi_indices(
            miller_indices_unique=self.miller_set.indices(),
            miller_indices=observations.indices())

        use_weights = False  # New facility for getting variance-weighted correlation
        if self.params.scaling.algorithm in ['mark1', 'levmar']:
            # Because no correlation is computed, the correlation
            # coefficient is fixed at zero.  Setting slope = 1 means
            # intensities are added without applying a scale factor.
            sum_x = 0
            sum_y = 0
            for pair in matches.pairs():
                data.n_obs += 1
                if not self.params.include_negatives and observations.data()[
                        pair[1]] <= 0:
                    data.n_rejected += 1
                else:
                    sum_y += observations.data()[pair[1]]
            N = data.n_obs - data.n_rejected

        # Early return if there are no positive reflections on the frame.
        if data.n_obs <= data.n_rejected:
            return null_data(file_name=file_name,
                             log_out=out.getvalue(),
                             low_signal=True)

        # Update the count for each matched reflection.  This counts
        # reflections with non-positive intensities, too.
        data.completeness += matches.number_of_matches(0).as_int()
        data.wavelength = wavelength

        if not self.params.scaling.enable:  # Do not scale anything
            print "Scale factor to an isomorphous reference PDB will NOT be applied."
            slope = 1.0
            offset = 0.0

        observations_original_index_indices = observations_original_index.indices(
        )
        if db_mgr is None:
            return unpack(MINI.x)  # special exit for two-color indexing

        kwargs = {
            'wavelength': wavelength,
            'beam_x': result['xbeam'],
            'beam_y': result['ybeam'],
            'distance': result['distance'],
            'unique_file_name': data.file_name
        }

        ORI = result["current_orientation"][0]
        Astar = matrix.sqr(ORI.reciprocal_matrix())

        kwargs['res_ori_1'] = Astar[0]
        kwargs['res_ori_2'] = Astar[1]
        kwargs['res_ori_3'] = Astar[2]
        kwargs['res_ori_4'] = Astar[3]
        kwargs['res_ori_5'] = Astar[4]
        kwargs['res_ori_6'] = Astar[5]
        kwargs['res_ori_7'] = Astar[6]
        kwargs['res_ori_8'] = Astar[7]
        kwargs['res_ori_9'] = Astar[8]
        assert self.params.scaling.report_ML is True
        kwargs['half_mosaicity_deg'] = result["ML_half_mosaicity_deg"][0]
        kwargs['domain_size_ang'] = result["ML_domain_size_ang"][0]

        frame_id_0_base = db_mgr.insert_frame(**kwargs)

        xypred = result["mapped_predictions"][0]
        indices = flex.size_t([pair[1] for pair in matches.pairs()])

        sel_observations = flex.intersection(size=observations.data().size(),
                                             iselections=[indices])
        set_original_hkl = observations_original_index_indices.select(
            flex.intersection(size=observations_original_index_indices.size(),
                              iselections=[indices]))
        set_xypred = xypred.select(
            flex.intersection(size=xypred.size(), iselections=[indices]))

        kwargs = {
            'hkl_id_0_base': [pair[0] for pair in matches.pairs()],
            'i': observations.data().select(sel_observations),
            'sigi': observations.sigmas().select(sel_observations),
            'detector_x': [xy[0] for xy in set_xypred],
            'detector_y': [xy[1] for xy in set_xypred],
            'frame_id_0_base': [frame_id_0_base] * len(matches.pairs()),
            'overload_flag': [0] * len(matches.pairs()),
            'original_h': [hkl[0] for hkl in set_original_hkl],
            'original_k': [hkl[1] for hkl in set_original_hkl],
            'original_l': [hkl[2] for hkl in set_original_hkl]
        }

        db_mgr.insert_observation(**kwargs)

        print >> out, "Lattice: %d reflections" % (data.n_obs -
                                                   data.n_rejected)
        print >> out, "average obs", sum_y / (data.n_obs - data.n_rejected), \
          "average calc", sum_x / (data.n_obs - data.n_rejected)
        print >> out, "Rejected %d reflections with negative intensities" % \
            data.n_rejected

        data.accept = True
        for pair in matches.pairs():
            if not self.params.include_negatives and (
                    observations.data()[pair[1]] <= 0):
                continue
            Intensity = observations.data()[pair[1]]
            # Super-rare exception. If saved sigmas instead of I/sigmas in the ISIGI dict, this wouldn't be needed.
            if Intensity == 0:
                continue

            # Add the reflection as a two-tuple of intensity and I/sig(I)
            # to the dictionary of observations.
            index = self.miller_set.indices()[pair[0]]
            isigi = (Intensity, observations.data()[pair[1]] /
                     observations.sigmas()[pair[1]], 1.0)
            if index in data.ISIGI:
                data.ISIGI[index].append(isigi)
            else:
                data.ISIGI[index] = [isigi]

            sigma = observations.sigmas()[pair[1]]
            variance = sigma * sigma
            data.summed_N[pair[0]] += 1
            data.summed_wt_I[pair[0]] += Intensity / variance
            data.summed_weight[pair[0]] += 1 / variance

        data.set_log_out(out.getvalue())
        return data
Example #57
def occupancy_selections(
      model,
      add_water                          = False,
      other_individual_selection_strings = None,
      other_constrained_groups           = None,
      remove_selection                   = None,
      as_flex_arrays                     = True,
      constrain_correlated_3d_groups     = False,
      log                                = None):
  # set up defaults
  if(other_individual_selection_strings is not None and
     len(other_individual_selection_strings) == 0):
    other_individual_selection_strings = None
  if(other_constrained_groups is not None and
     len(other_constrained_groups) == 0):
    other_constrained_groups = None
  if(remove_selection is not None and len(remove_selection) == 0):
    remove_selection = None
  result = model.get_hierarchy().occupancy_groups_simple(
    common_residue_name_class_only = None,
    always_group_adjacent          = False,
    ignore_hydrogens               = False)
  exchangable_hd_pairs = mmtbx.utils.combine_hd_exchangable(hierarchy =
    model.get_hierarchy())
  if(len(exchangable_hd_pairs)==0 and result is not None):
    occupancy_regroupping(
      pdb_hierarchy = model.get_hierarchy(),
      cgs           = result)
  result = mmtbx.utils.remove_selections(selection = result,
    other = exchangable_hd_pairs,
    size = model.get_number_of_atoms())
  result.extend(exchangable_hd_pairs)
  # extract group-[0,1]-constrained atoms within a residue
  pogl = extract_partial_occupancy_selections(hierarchy = model.get_hierarchy())
  rm_duplicate_with_pogl = []
  for t_ in pogl:
    for t__ in t_:
      for t___ in t__:
        rm_duplicate_with_pogl.append(t___)
  result = mmtbx.utils.remove_selections(selection = result, other = pogl,
    size = model.get_number_of_atoms())
  result.extend(pogl)
  # add partial occupancies
  occupancies = model.get_xray_structure().scatterers().extract_occupancies()
  sel = (occupancies != 1.) & (occupancies != 0.)
  result = add_occupancy_selection(
    result     = result,
    size       = model.get_number_of_atoms(),
    selection  = sel,
    hd_special = None)
  # check user's input
  all_sel_strgs = []
  if(other_individual_selection_strings is not None):
    all_sel_strgs = all_sel_strgs + other_individual_selection_strings
  if(other_constrained_groups is not None):
    for other_constrained_group in other_constrained_groups:
      if(len(other_constrained_group.selection)>0):
        all_sel_strgs = all_sel_strgs + other_constrained_group.selection
  if(len(all_sel_strgs) > 0):
    for sel_str in all_sel_strgs:
      sel_str_sel = get_atom_selections(
        model               = model,
        selection_strings   = [sel_str],
        iselection          = True,
        one_selection_array = True)
      if(sel_str_sel.size() == 0):
        raise Sorry("Empty selection: %s"%sel_str)
  #
  if([other_individual_selection_strings,
      other_constrained_groups].count(None) == 0):
    sel1 = get_atom_selections(
      model               = model,
      selection_strings   = other_individual_selection_strings,
      iselection          = True,
      one_selection_array = True)
    for other_constrained_group in other_constrained_groups:
      for cg_sel_strs in other_constrained_group.selection:
        sel2 = get_atom_selections(
          model               = model,
          selection_strings   = cg_sel_strs,
          iselection          = True,
          one_selection_array = True)
        if(sel1.intersection(sel2).size() > 0):
          raise Sorry("Duplicate selection: same atoms selected for individual and group occupancy refinement.")
  # check user's input and apply remove_selection to default selection
  if(remove_selection is not None):
    sel1 = get_atom_selections(
      model               = model,
      selection_strings   = remove_selection,
      iselection          = True,
      one_selection_array = True)
    if(sel1.size() == 0): # XXX check all and not total.
      raise Sorry("Empty selection: remove_selection.")
    if(other_individual_selection_strings is not None):
      sel2 = get_atom_selections(
        model               = model,
        selection_strings   = other_individual_selection_strings,
        iselection          = True,
        one_selection_array = True)
      if(sel1.intersection(sel2).size() > 0):
        raise Sorry("Duplicate selection: occupancies of same atoms selected to be fixed and to be refined.")
    if(other_constrained_groups is not None):
      for other_constrained_group in other_constrained_groups:
        for cg_sel_strs in other_constrained_group.selection:
          sel2 = get_atom_selections(
            model               = model,
            selection_strings   = cg_sel_strs,
            iselection          = True,
            one_selection_array = True)
          if(sel1.intersection(sel2).size() > 0):
            raise Sorry("Duplicate selection: occupancies of same atoms selected to be fixed and to be refined.")
    result = mmtbx.utils.remove_selections(selection = result, other = sel1,
      size = model.get_number_of_atoms())
  #
  if(other_individual_selection_strings is not None):
    sel = get_atom_selections(
      model               = model,
      selection_strings   = other_individual_selection_strings,
      iselection          = True,
      one_selection_array = True)
    result = mmtbx.utils.remove_selections(selection = result, other = sel,
      size = model.get_number_of_atoms())
    result = add_occupancy_selection(
      result     = result,
      size       = model.get_number_of_atoms(),
      selection  = sel,
      hd_special = None)
  if(other_constrained_groups is not None):
    for other_constrained_group in other_constrained_groups:
      cg_sel = []
      for cg_sel_strs in other_constrained_group.selection:
        sel = get_atom_selections(
          model               = model,
          selection_strings   = cg_sel_strs,
          iselection          = True,
          one_selection_array = True)
        result = mmtbx.utils.remove_selections(selection = result, other = sel,
          size = model.get_number_of_atoms())
        if(sel.size() > 0):
          cg_sel.append(list(sel))
      if(len(cg_sel) > 0):
        result.append(cg_sel)
  if(add_water):
    water_selection = get_atom_selections(
      model                 = model,
      selection_strings     = ['water'],
      iselection            = True,
      allow_empty_selection = True,
      one_selection_array   = True)
    result = add_occupancy_selection(
      result     = result,
      size       = model.get_number_of_atoms(),
      selection  = water_selection,
      hd_special = None)
  list_3d_as_bool_selection(
    list_3d=result, size=model.get_number_of_atoms())
  if(len(result) == 0): result = None
  if(as_flex_arrays and result is not None):
    result_ = []
    for gsel in result:
      result__ = []
      for sel in gsel:
        result__.append(flex.size_t(sel))
      result_.append(result__)
    result = result_
    if (constrain_correlated_3d_groups) and (len(result) > 0):
      result = assemble_constraint_groups_3d(
        xray_structure=model.get_xray_structure(),
        pdb_atoms=model.get_atoms(),
        constraint_groups=result,
        log=log)
  return result
Example #58
 def __init__(self, fmodels,
                    model,
                    max_number_of_iterations    = 25,
                    number_of_macro_cycles      = 3,
                    occupancy_max               = None,
                    occupancy_min               = None,
                    log                         = None,
                    exclude_hd                  = False):
   self.show(fmodels=fmodels, log= log, message="occupancy refinement: start")
   fmodels.update_xray_structure(xray_structure = model.get_xray_structure(),
                                 update_f_calc  = True)
   selections = model.refinement_flags.s_occupancies
   # exclude H or D from refinement if requested
   if(exclude_hd):
     hd_sel = model.get_hd_selection()
     tmp_sel = []
     for sel in selections:
       tmp_sel_ = []
       for sel_ in sel:
         tmp_sel__ = flex.size_t()
         for sel__ in sel_:
           if(not hd_sel[sel__]):
             tmp_sel__.append(sel__)
         if(tmp_sel__.size()>0):
           tmp_sel_.append(tmp_sel__)
       if(len(tmp_sel_)>0):
         tmp_sel.append(tmp_sel_)
     selections = tmp_sel
   #
   if(len(selections)>0):
     i_selection = flex.size_t()
     for s in selections:
       for ss in s:
         i_selection.extend(ss)
     fmodels.fmodel_xray().xray_structure.scatterers().flags_set_grads(
       state=False)
     fmodels.fmodel_xray().xray_structure.scatterers().flags_set_grad_occupancy(
       iselection = i_selection)
     fmodels.fmodel_xray().xray_structure.adjust_occupancy(
       occ_max   = occupancy_max,
       occ_min   = occupancy_min,
       selection = i_selection)
     xray_structure_dc = fmodels.fmodel_xray().xray_structure.\
       deep_copy_scatterers()
     par_initial = flex.double()
     occupancies = xray_structure_dc.scatterers().extract_occupancies()
     constrained_groups_selections = []
     group_counter = 0
     for sel in selections:
       ss = []
       for sel_ in sel:
         ss.append(group_counter)
         group_counter += 1
         val = flex.mean(occupancies.select(sel_))
         par_initial.append(val)
       constrained_groups_selections.append(ss)
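      # each constrained group contributes one refinable parameter per
      # sub-selection, initialised to that sub-selection's mean occupancy;
      # constrained_groups_selections maps group -> indices into par_initial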
     minimized = None
     for macro_cycle in range(number_of_macro_cycles):
       if(minimized is not None): par_initial = minimized.par_min
       minimized = minimizer(
         fmodels                       = fmodels,
         selections                    = selections,
         constrained_groups_selections = constrained_groups_selections,
         par_initial                   = par_initial,
         max_number_of_iterations      = max_number_of_iterations)
       if(minimized is not None): par_initial = minimized.par_min
       set_refinable_parameters(
         xray_structure     = fmodels.fmodel_xray().xray_structure,
         parameters         = par_initial,
         selections         = selections,
         enforce_positivity = (occupancy_min>=0))
       fmodels.fmodel_xray().xray_structure.adjust_occupancy(
         occ_max   = occupancy_max,
         occ_min   = occupancy_min,
         selection = i_selection)
     xray_structure_final = fmodels.fmodel_xray().xray_structure
     model.set_xray_structure(xray_structure_final)
     fmodels.update_xray_structure(xray_structure = xray_structure_final,
                                   update_f_calc  = True)
     refined_occ = xray_structure_final.scatterers().extract_occupancies().\
       select(i_selection)
     assert flex.min(refined_occ) >= occupancy_min
     assert flex.max(refined_occ) <= occupancy_max
     self.show(fmodels= fmodels, log = log, message="occupancy refinement: end")
Example #59
def run(args):

    from cctbx.array_family import flex
    from dials.util.options import OptionParser
    from dials.util.options import flatten_reflections
    import libtbx.load_env

    usage = "%s [options] reflections_1.pickle reflections_2.pickle" % (
        libtbx.env.dispatcher_name)

    parser = OptionParser(usage=usage,
                          phil=phil_scope,
                          read_reflections=True,
                          epilog=help_message)

    params, options, args = parser.parse_args(show_diff_phil=True,
                                              return_unhandled=True)
    reflections = flatten_reflections(params.input.reflections)

    if flex.max(reflections[0]["id"]) > 0:
        reflections = list(reversed(reflections))
    assert flex.max(reflections[0]["id"]) == 0

    assert len(reflections) == 2
    partialities = []
    intensities = []
    sigmas = []
    ids = []
    xyz = []

    # only want fully-recorded reflections in full dataset
    # reflections[0] = reflections[0].select(reflections[0]['partiality'] > 0.99)
    print(reflections[0].size())
    # only want partial reflections in sliced dataset
    # reflections[1] = reflections[1].select(reflections[1]['partiality'] < 0.99)
    print(reflections[1].size())

    for refl in reflections:
        # sel = refl.get_flags(refl.flags.integrated_sum)
        sel = refl.get_flags(refl.flags.integrated)
        sel &= refl["intensity.sum.value"] > 0
        sel &= refl["intensity.sum.variance"] > 0
        refl = refl.select(sel)
        hkl = refl["miller_index"]
        partiality = refl["partiality"]
        intensity = refl["intensity.sum.value"]
        vari = refl["intensity.sum.variance"]
        assert vari.all_gt(0)
        sigi = flex.sqrt(vari)
        intensities.append(intensity)
        partialities.append(partiality)
        sigmas.append(sigi)
        ids.append(refl["id"])
        xyz.append(refl["xyzcal.px"])

    from annlib_ext import AnnAdaptor as ann_adaptor

    ann = ann_adaptor(xyz[0].as_double().as_1d(), 3)
    ann.query(xyz[1].as_double().as_1d())
    distances = flex.sqrt(ann.distances)
    matches = distances < 2  # pixels
    isel0 = flex.size_t(list(ann.nn.select(matches)))
    isel1 = flex.size_t(list(matches.iselection()))
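    # ann.nn gives, for each query centroid in xyz[1], the index of its nearest
    # neighbour in xyz[0]; keeping pairs closer than 2 px yields matched index
    # sets isel0 (into reflections[0]) and isel1 (into reflections[1])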

    p0 = partialities[0].select(isel0)
    p1 = partialities[1].select(isel1)
    i0 = intensities[0].select(isel0)
    i1 = intensities[1].select(isel1)

    print((p0 > p1).count(True), (p0 < p1).count(True))

    h0 = flex.histogram(p0, data_min=0, data_max=1, n_slots=20)
    h1 = flex.histogram(p1, data_min=0, data_max=1, n_slots=20)
    h0.show()
    h1.show()

    from matplotlib import pyplot

    perm0 = flex.sort_permutation(p0)
    perm1 = flex.sort_permutation(p1)
    fig, axes = pyplot.subplots(nrows=2, sharex=True)
    axes[0].plot(p0.select(perm0), flex.int_range(p0.size()))
    axes[1].plot(p1.select(perm1), flex.int_range(p1.size()))
    axes[1].set_xlabel("Partiality")
    for ax in axes:
        ax.set_ylabel("Cumulative frequency")
    for ax in axes:
        ax.set_yscale("log")
    pyplot.savefig("sorted_partialities.png")
    pyplot.clf()

    blue = "#3498db"
    fig, axes = pyplot.subplots(nrows=2, sharex=True)
    axes[0].bar(
        h0.slot_centers(),
        h0.slots(),
        width=h0.slot_width(),
        align="center",
        color=blue,
        edgecolor=blue,
    )
    axes[1].bar(
        h1.slot_centers(),
        h1.slots(),
        width=h1.slot_width(),
        align="center",
        color=blue,
        edgecolor=blue,
    )
    axes[1].set_xlabel("Partiality")
    for ax in axes:
        ax.set_ylabel("Frequency")
    for ax in axes:
        ax.set_yscale("log")
    pyplot.savefig("partiality_histogram.png")
    # pyplot.show()
    pyplot.clf()

    pyplot.scatter(p0, p1, s=5, alpha=0.3, marker="+")
    pyplot.xlabel("Partiality (full)")
    pyplot.ylabel("Partiality (sliced)")
    pyplot.savefig("partiality_full_vs_sliced.png")
    pyplot.clf()

    pyplot.scatter(i0, i1, s=5, alpha=0.3, marker="+")
    pyplot.xlim(flex.min(i0), flex.max(i0))
    pyplot.ylim(flex.min(i1), flex.max(i1))
    pyplot.xlabel("Intensity (full)")
    pyplot.ylabel("Intensity (sliced)")
    pyplot.xscale("log")
    pyplot.yscale("log")
    pyplot.savefig("intensity_full_vs_sliced.png")
    pyplot.clf()

    i_ratio = i1 / i0
    p_ratio = p1 / p0
    pyplot.scatter(p_ratio, i_ratio, s=5, alpha=0.3, marker="+")
    pyplot.ylim(flex.min(i_ratio), flex.max(i_ratio))
    pyplot.yscale("log")
    pyplot.xlabel("P(full)/P(sliced)")
    pyplot.ylabel("I(full)/I(sliced)")
    pyplot.savefig("partiality_ratio_vs_intensity_ratio.png")
    pyplot.clf()
Example #60
def recycle_one_dano(missing, verbose):
  assert missing in [None, "+", "-"]
  from cctbx import crystal
  cs = crystal.symmetry(
    unit_cell=(13,17,19,85,95,105),
    space_group_symbol="P1")
  from cctbx.array_family import flex
  mi = flex.miller_index([(1,2,3), (-1,-2,-3)])
  fpm = flex.double([2.5, 5.5])
  spm = flex.double([0.1, 0.3])
  from cctbx import miller
  ms = miller.set(crystal_symmetry=cs, indices=mi, anomalous_flag=True)
  ma = ms.array(data=fpm, sigmas=spm)
  mtz_dataset = ma.as_mtz_dataset(column_root_label="X")
  if (missing is not None):
    for col in mtz_dataset.columns():
      if (col.label() in ["X(%s)" % missing, "SIGX(%s)" % missing]):
        col.set_values(
          values=flex.float([0]),
          selection_valid=flex.bool([False]))
    if (missing == "+"): i = 1
    else:                i = 0
    ma = ma.select(flex.size_t([i]))
  mtz_obj = mtz_dataset.mtz_object()
  from cctbx.xray import observation_types
  ma.set_observation_type(observation_types.reconstructed_amplitude())
  mtz_obj_reco = ma.as_mtz_dataset(column_root_label="R").mtz_object()
  sio = StringIO()
  print >> sio, "Resulting mtz from .as_mtz_dataset():"
  mtz_obj_reco.show_column_data_human_readable(out=sio)
  print >> sio
  ma_reco = mtz_obj_reco.as_miller_arrays()[0]
  print >> sio, "mtz_obj_reco.as_miller_arrays result:"
  ma_reco.show_array(f=sio)
  print >> sio
  if (verbose):
    sys.stdout.write(sio.getvalue())
  if (missing is None):
    expected = """\
Resulting mtz from .as_mtz_dataset():
Column data:
-------------------------------------------------------------------------------
                       R            SIGR           DANOR        SIGDANOR
                   ISYMR

 1  2  3               4        0.158114              -3        0.316228
                       0
-------------------------------------------------------------------------------

mtz_obj_reco.as_miller_arrays result:
(1, 2, 3) 2.5 0.223606796247
(-1, -2, -3) 5.5 0.223606796247

"""
  elif (missing == "+"):
    expected = """\
Resulting mtz from .as_mtz_dataset():
Column data:
-------------------------------------------------------------------------------
                       R            SIGR           DANOR        SIGDANOR
                   ISYMR

 1  2  3             5.5             0.3            None            None
                       2
-------------------------------------------------------------------------------

mtz_obj_reco.as_miller_arrays result:
(-1, -2, -3) 5.5 0.300000011921

"""
  elif (missing == "-"):
    expected = """\
Resulting mtz from .as_mtz_dataset():
Column data:
-------------------------------------------------------------------------------
                       R            SIGR           DANOR        SIGDANOR
                   ISYMR

 1  2  3             2.5             0.1            None            None
                       1
-------------------------------------------------------------------------------

mtz_obj_reco.as_miller_arrays result:
(1, 2, 3) 2.5 0.10000000149

"""
  else:
    raise RuntimeError("Unreachable.")
  from libtbx.test_utils import show_diff
  assert not show_diff(sio.getvalue(), expected)