Code example #1
def common_map_values(pdb_hierarchy, unit_cell, map_data):
  d = {}
  for model in pdb_hierarchy.models():
    for chain in model.chains():
      for residue_group in chain.residue_groups():
        conformers = residue_group.conformers()
        for conformer in conformers:
          residue = conformer.only_residue()
          for atom in residue.atoms():
            sf = unit_cell.fractionalize(atom.xyz)
            mv = map_data.eight_point_interpolation(sf)
            key = "%s_%s_%s"%(chain.id, residue.resname, atom.name.strip())
            d.setdefault(key, flex.double()).append(mv)
  def mean_filtered(x):
    me = flex.mean_default(x,0)
    sel  = x < me*3
    sel &= x > me/3
    return sel
  result = {}
  all_vals = flex.double()
  for v in d.values():
    all_vals.extend(v)
  sel = mean_filtered(all_vals)
  overall_mean = flex.mean_default(all_vals.select(sel),0)
  for k, v in d.items():
    sel = mean_filtered(v)
    if(sel.count(True)>10): result[k] = flex.mean_default(v.select(sel),0)
    else:                   result[k] = overall_mean
  return result
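All of these snippets lean on the same flex.mean_default idiom, so a minimal standalone sketch of it may help (assuming a scitbx installation; the values are made up): the second argument is the fallback returned when the array is empty, which is why the code can safely average selections that might select nothing.

# Minimal sketch (hypothetical values) of the flex.mean_default idiom used
# throughout these examples.
from scitbx.array_family import flex

vals = flex.double([1.0, 2.0, 3.0])
print(flex.mean_default(vals, 0))           # 2.0
print(flex.mean_default(flex.double(), 0))  # falls back to 0 for an empty array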
Code example #2
def merge_groups_by_connectivity(pdb_hierarchy,
                                 xray_structure,
                                 selection_strings=None,
                                 selection_arrays=None):
    assert [selection_strings, selection_arrays].count(None) == 1
    if (selection_strings is None): selections = selection_arrays
    else:
        selections = []
        for ss in selection_strings:
            sa = pdb_hierarchy.atom_selection_cache().selection(
                string=ss.replace('"', ""))
            selections.append(sa)
    for i_seq, si in enumerate(selections):
        for j_seq, sj in enumerate(selections):
            if (i_seq < j_seq):
                xi = xray_structure.select(si)
                xj = xray_structure.select(sj)
                if (xi.scatterers().size() > xj.scatterers().size()):
                    distances = xi.closest_distances(
                        xj.sites_frac(), distance_cutoff=6).smallest_distances
                    cnt = ((distances > 0) & (distances < 3)).count(True)
                    assert distances.size() == xj.scatterers().size()
                    distances = distances.select(distances > 0)
                    p = cnt * 100. / xj.scatterers().size()
                    if (p >= 1):
                        print()
                        if (selection_strings is not None):
                            print(sj)
                            print(si)
                        print(i_seq, j_seq, p, flex.min_default(distances, 0),
                              flex.mean_default(distances, 0))
                else:
                    distances = xj.closest_distances(
                        xi.sites_frac(), distance_cutoff=6).smallest_distances
                    cnt = ((distances > 0) & (distances < 3)).count(True)
                    assert distances.size() == xi.scatterers().size()
                    distances = distances.select(distances > 0)
                    p = cnt * 100. / xi.scatterers().size()
                    if (p >= 1):
                        print()
                        if (selection_strings is not None):
                            print(sj)
                            print(si)
                        print(i_seq, j_seq, p, flex.min_default(distances, 0),
                              flex.mean_default(distances, 0))

    #
    print()
Code example #3
def angle_rmsZ(parm, sites_cart, ignore_hd, get_deltas=False):
    if ignore_hd:
        angles = parm.angles_without_h
    else:
        angles = itertools.chain(parm.angles_inc_h, parm.angles_without_h)
    angle_Zs = []
    # save coordinates here since calling parm.coordinates is time consuming
    parm_coordinates = parm.coordinates
    for i, angle in enumerate(angles):
        atom1_idx = angle.atom1.idx
        atom2_idx = angle.atom2.idx
        atom3_idx = angle.atom3.idx
        natoms = len(sites_cart)
        if atom1_idx >= natoms or atom2_idx >= natoms or atom3_idx >= natoms:
            continue
        atom1 = parm_coordinates[atom1_idx]
        atom2 = parm_coordinates[atom2_idx]
        atom3 = parm_coordinates[atom3_idx]
        a = [atom1[0] - atom2[0], atom1[1] - atom2[1], atom1[2] - atom2[2]]
        b = [atom3[0] - atom2[0], atom3[1] - atom2[1], atom3[2] - atom2[2]]
        a = flex.double(a)
        b = flex.double(b)
        Z = sqrt(angle.type.k) * (angle.type.theteq -
                                  acos(a.dot(b) /
                                       (a.norm() * b.norm())) * 180 / pi)
        angle_Zs.append(Z)
    angle_Zs = flex.double(angle_Zs)
    a_sq = angle_Zs * angle_Zs
    a_ave = sqrt(flex.mean_default(a_sq, 0))
    a_max = sqrt(flex.max_default(a_sq, 0))
    a_min = sqrt(flex.min_default(a_sq, 0))
    if not get_deltas:
        return (a_min, a_max, a_ave)
    else:
        return (a_min, a_max, a_ave), angle_Zs
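For reference, the statistic computed by angle_rmsZ (and by bond_rmsZ below) is the root-mean-square of per-restraint Z-scores, Z = sqrt(k) * (ideal - observed). A standalone sketch with a made-up force constant and observed values:

# Standalone sketch (hypothetical k, ideal and observed values) of the
# Z-score aggregation used by angle_rmsZ/bond_rmsZ.
from math import sqrt
from scitbx.array_family import flex

k, ideal = 400.0, 1.53
observed = [1.52, 1.55, 1.50]
Zs = flex.double([sqrt(k) * (ideal - d) for d in observed])
rmsZ = sqrt(flex.mean_default(Zs * Zs, 0))  # the 0 guards the empty case
print(rmsZ)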
Code example #4
def bond_rmsZ(parm, sites_cart, ignore_hd, get_deltas=False):
    if ignore_hd:
        bonds = parm.bonds_without_h
    else:
        bonds = itertools.chain(parm.bonds_inc_h, parm.bonds_without_h)
    bond_Zs = []
    # save coordinates here since calling parm.coordinates is time consuming
    parm_coordinates = parm.coordinates
    for i, bond in enumerate(bonds):
        atom1_idx = bond.atom1.idx
        atom2_idx = bond.atom2.idx
        natoms = len(sites_cart)
        # in non-P1 space groups, amber topology knows entire unit cell bonds
        # only use bonds from 1st ASU
        if atom1_idx >= natoms or atom2_idx >= natoms:
            continue
        atom1 = parm_coordinates[atom1_idx]
        atom2 = parm_coordinates[atom2_idx]
        dx = atom1[0] - atom2[0]
        dy = atom1[1] - atom2[1]
        dz = atom1[2] - atom2[2]
        Z = sqrt(
            bond.type.k) * (bond.type.req - sqrt(dx * dx + dy * dy + dz * dz))
        bond_Zs.append(Z)
    bond_Zs = flex.double(bond_Zs)
    b_sq = bond_Zs * bond_Zs
    b_ave = sqrt(flex.mean_default(b_sq, 0))
    b_max = sqrt(flex.max_default(b_sq, 0))
    b_min = sqrt(flex.min_default(b_sq, 0))
    if not get_deltas:
        return b_min, b_max, b_ave
    else:
        return (b_min, b_max, b_ave), bond_Zs
Code example #5
def macro_cycle(
      xray_structure,
      target_map,
      geometry_restraints,
      max_iterations = 50,
      expload        = False,
      n_expload      = 1,
      log            = None):
  if(log is None): log = sys.stdout
  d_min = maptbx.d_min_from_map(
      map_data  = target_map,
      unit_cell = xray_structure.unit_cell())
  all_selection = flex.bool(xray_structure.scatterers().size(),True)
  rsr_simple_refiner = individual_sites.simple(
    target_map                  = target_map,
    selection                   = all_selection,
    real_space_gradients_delta  = d_min/4,
    max_iterations              = max_iterations,
    geometry_restraints_manager = geometry_restraints)
  xray_structure = shift_to_center_of_mass(xray_structure=xray_structure,
    target_map=target_map)
  cm = xray_structure.center_of_mass()
  sites_cart = xray_structure.sites_cart()
  sc = scorer(
    unit_cell  = xray_structure.unit_cell(),
    sites_frac = xray_structure.sites_frac(),
    target_map = target_map,
    log        = log)
  weights = flex.double()
  sampling_range = [0, 90, 180, 270]
  for the in sampling_range:
    for psi in sampling_range:
      for phi in sampling_range:
        sites_cart_new = apply_rigid_body_shift(
          sites_cart=sites_cart,
          cm=cm, x=0,y=0,z=0, the=the, psi=psi, phi=phi)
        xray_structure = xray_structure.replace_sites_cart(
          new_sites=sites_cart_new)
        w = run_refine(
          rsr_simple_refiner = rsr_simple_refiner,
          xray_structure     = xray_structure,
          scorer             = sc,
          log                = log,
          weight             = flex.mean_default(weights, 1.0))
        weights.append(w)
        if(expload):
          for i in range(n_expload):
            xray_structure_ = xray_structure.deep_copy_scatterers()
            xray_structure_.shake_sites_in_place(mean_distance=1.0)
            run_refine(
              rsr_simple_refiner = rsr_simple_refiner,
              xray_structure     = xray_structure_,
              scorer             = sc,
              log                = log)
  if(log): print("Final target:", sc.target, file=log)
  return xray_structure.replace_sites_frac(new_sites=sc.sites_frac_best)
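One detail worth noting in macro_cycle: weights starts empty, so flex.mean_default(weights, 1.0) hands the first refinement a default weight of 1.0 and every later call the running mean of the weights collected so far. A tiny sketch of that idiom (made-up weights):

# Sketch (hypothetical weights) of the running-weight idiom in macro_cycle.
from scitbx.array_family import flex

weights = flex.double()
print(flex.mean_default(weights, 1.0))  # 1.0 on the first pass
weights.append(0.6)
weights.append(0.8)
print(flex.mean_default(weights, 1.0))  # 0.7 once weights have accumulated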
Code example #6
def macro_cycle(xray_structure,
                target_map,
                geometry_restraints,
                max_iterations=50,
                expload=False,
                n_expload=1,
                log=None):
    if (log is None): log = sys.stdout
    d_min = maptbx.d_min_from_map(map_data=target_map,
                                  unit_cell=xray_structure.unit_cell())
    all_selection = flex.bool(xray_structure.scatterers().size(), True)
    rsr_simple_refiner = individual_sites.simple(
        target_map=target_map,
        selection=all_selection,
        real_space_gradients_delta=d_min / 4,
        max_iterations=max_iterations,
        geometry_restraints_manager=geometry_restraints)
    xray_structure = shift_to_center_of_mass(xray_structure=xray_structure,
                                             target_map=target_map)
    cm = xray_structure.center_of_mass()
    sites_cart = xray_structure.sites_cart()
    sc = scorer(unit_cell=xray_structure.unit_cell(),
                sites_frac=xray_structure.sites_frac(),
                target_map=target_map,
                log=log)
    weights = flex.double()
    sampling_range = [0, 90, 180, 270]
    for the in sampling_range:
        for psi in sampling_range:
            for phi in sampling_range:
                sites_cart_new = apply_rigid_body_shift(sites_cart=sites_cart,
                                                        cm=cm,
                                                        x=0,
                                                        y=0,
                                                        z=0,
                                                        the=the,
                                                        psi=psi,
                                                        phi=phi)
                xray_structure = xray_structure.replace_sites_cart(
                    new_sites=sites_cart_new)
                w = run_refine(rsr_simple_refiner=rsr_simple_refiner,
                               xray_structure=xray_structure,
                               scorer=sc,
                               log=log,
                               weight=flex.mean_default(weights, 1.0))
                weights.append(w)
                if (expload):
                    for i in range(n_expload):
                        xray_structure_ = xray_structure.deep_copy_scatterers()
                        xray_structure_.shake_sites_in_place(mean_distance=1.0)
                        run_refine(rsr_simple_refiner=rsr_simple_refiner,
                                   xray_structure=xray_structure_,
                                   scorer=sc,
                                   log=log)
    if (log): print("Final target:", sc.target, file=log)
    return xray_structure.replace_sites_frac(new_sites=sc.sites_frac_best)
Code example #7
def angle_rmsd(
    parm,
    sites_cart,
    ignore_hd,
    get_deltas=False,
    get_extremes=False,
    verbose=False,
):
    if verbose: print("starting angle_rmsd: %s" % time.strftime("%H:%M:%S"))
    ignore_hd = True  # dac timing test
    if ignore_hd:
        angles = parm.angles_without_h
    else:
        angles = itertools.chain(parm.angles_inc_h, parm.angles_without_h)
    angle_deltas = []
    angle_extremes = extreme()
    angle_extremes.header = '  Angle deltas from Amber ideals\n'
    angle_extremes.header += '    Atoms %s ideal   model   delta\n' % (' ' *
                                                                       59)
    # save coordinates here since calling parm.coordinates is time consuming
    parm_coordinates = parm.coordinates
    for i, angle in enumerate(angles):
        # in non-P1 space groups, amber topology knows entire unit cell angles
        # only use angles from 1st ASU
        atom1_idx = angle.atom1.idx
        atom2_idx = angle.atom2.idx
        atom3_idx = angle.atom3.idx
        natoms = len(sites_cart)
        if atom1_idx >= natoms or atom2_idx >= natoms or atom3_idx >= natoms:
            continue
        atom1 = parm_coordinates[atom1_idx]
        atom2 = parm_coordinates[atom2_idx]
        atom3 = parm_coordinates[atom3_idx]
        a = [atom1[0] - atom2[0], atom1[1] - atom2[1], atom1[2] - atom2[2]]
        b = [atom3[0] - atom2[0], atom3[1] - atom2[1], atom3[2] - atom2[2]]
        a = flex.double(a)
        b = flex.double(b)
        acosarg = a.dot(b) / (a.norm() * b.norm())
        if acosarg >= 1.0: acosarg = 0.9999999
        if acosarg <= -1.0: acosarg = -0.9999999
        delta = angle.type.theteq - acos(acosarg) * 180 / pi
        assert abs(delta) < 360
        if get_extremes:
            angle.model = acos(acosarg) * 180 / pi
            angle_extremes.process(delta, angle)
        angle_deltas.append(delta)
    angle_deltas = flex.double(angle_deltas)
    a_sq = angle_deltas * angle_deltas
    a_ave = sqrt(flex.mean_default(a_sq, 0))
    a_max = sqrt(flex.max_default(a_sq, 0))
    a_min = sqrt(flex.min_default(a_sq, 0))
    if verbose: print("done with angle_rmsd: %s" % time.strftime("%H:%M:%S"))
    if not get_deltas:
        return (a_min, a_max, a_ave)
    else:
        return (a_min, a_max, a_ave), angle_deltas, angle_extremes
Code example #8
def merge_groups_by_connectivity(pdb_hierarchy, xray_structure,
                                 selection_strings=None, selection_arrays=None):
  assert [selection_strings, selection_arrays].count(None)==1
  if(selection_strings is None): selections = selection_arrays
  else:
    selections = []
    for ss in selection_strings:
      sa = pdb_hierarchy.atom_selection_cache().selection(string = ss.replace('"',""))
      selections.append(sa)
  for i_seq, si in enumerate(selections):
    for j_seq, sj in enumerate(selections):
      if(i_seq < j_seq):
        xi = xray_structure.select(si)
        xj = xray_structure.select(sj)
        if(xi.scatterers().size() > xj.scatterers().size()):
          distances = xi.closest_distances(xj.sites_frac(), distance_cutoff=6).smallest_distances
          cnt = ((distances > 0) & (distances < 3)).count(True)
          assert distances.size() == xj.scatterers().size()
          distances = distances.select(distances > 0)
          p = cnt*100./xj.scatterers().size()
          if(p>=1):
            print()
            if(selection_strings is not None):
              print(sj)
              print(si)
            print(i_seq, j_seq, p, flex.min_default(distances,0), flex.mean_default(distances,0))
        else:
          distances = xj.closest_distances(xi.sites_frac(), distance_cutoff=6).smallest_distances
          cnt = ((distances > 0) & (distances < 3)).count(True)
          assert distances.size() == xi.scatterers().size()
          distances = distances.select(distances > 0)
          p = cnt*100./xi.scatterers().size()
          if(p>=1):
            print()
            if(selection_strings is not None):
              print(sj)
              print(si)
            print(i_seq, j_seq, p, flex.min_default(distances,0), flex.mean_default(distances,0))

  #
  print()
Code example #9
    def __init__(self,
                 n_terms,
                 x_obs,
                 y_obs,
                 w_obs=None,
                 free_flags=None,
                 low_limit=None,
                 high_limit=None,
                 randomise=False):
        self.x_obs = x_obs
        self.y_obs = y_obs
        self.free_flags = free_flags
        if self.free_flags is None:
            self.free_flags = flex.bool(x_obs.size(), True)

        self.w_obs = None
        if w_obs is not None:
            self.w_obs = w_obs
        else:
            self.w_obs = flex.double(x_obs.size(), 1.0)

        self.x = flex.double(n_terms, 0)
        if randomise:
            self.x = (flex.random_double(n_terms) - 0.5) * 10.0
        self.low_limit = flex.min_default(self.x_obs, 0)
        self.high_limit = flex.max_default(self.x_obs, 0)
        self.f = None
        if low_limit is not None:
            self.low_limit = low_limit
        if high_limit is not None:
            self.high_limit = high_limit

        ## Set the first term equal to twice mean of the data points.
        ## Although not really needed, seems like a good idea anyway.
        ## It should speed up convergence.
        self.x[0] = flex.mean_default(self.y_obs, 0) * 2.0
        self.lsq_object = chebyshev_lsq(n_terms, self.low_limit,
                                        self.high_limit, self.x_obs,
                                        self.y_obs, self.w_obs,
                                        self.free_flags)
        self.lsq_object.replace(self.x)
        lbfgs_exception_handling_params = lbfgs.exception_handling_parameters(
            ignore_line_search_failed_step_at_lower_bound=True,
            ignore_line_search_failed_step_at_upper_bound=True,
            ignore_line_search_failed_maxfev=True)
        self.minimizer = lbfgs.run(
            target_evaluator=self,
            exception_handling_params=lbfgs_exception_handling_params)
        self.coefs = self.lsq_object.coefs()
        self.f = self.lsq_object.residual()
        self.free_f = self.lsq_object.free_residual()
        del self.x
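The free_flags argument defaults to an all-True array, i.e. every observation contributes to the working residual; a caller wanting a cross-validated fit passes its own split. A hypothetical sketch of building such a split (assuming True marks points used in the fit, as the all-True default suggests, with the remainder scored by free_residual()):

# Hypothetical sketch of a working/free split for the chebyshev_lsq fit above.
# Assumption: True = point used in the working residual (matches the default).
from scitbx.array_family import flex

n = 200
x_obs = flex.double([i / (n - 1.0) for i in range(n)])
y_obs = flex.double([2.0 * x * x - 1.0 for x in x_obs])  # made-up smooth data
free_flags = flex.random_double(n) < 0.9                 # ~90% working set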
Code example #10
def bond_rmsd(
    parm,
    sites_cart,
    ignore_hd,
    get_deltas=False,
    get_extremes=False,
    verbose=False,
):
    if verbose: print("starting bond_rmsd: %s" % time.strftime("%H:%M:%S"))
    ignore_hd = True  # dac timing test
    if ignore_hd:
        bonds = parm.bonds_without_h
    else:
        bonds = itertools.chain(parm.bonds_inc_h, parm.bonds_without_h)
    bond_deltas = []
    bond_extremes = extreme()
    bond_extremes.header = '  Bond deltas from Amber ideals\n'
    bond_extremes.header += '    Atoms %s ideal   model   delta\n' % (' ' * 36)
    # save coordinates here since calling parm.coordinates is time consuming
    parm_coordinates = parm.coordinates
    for i, bond in enumerate(bonds):
        atom1_idx = bond.atom1.idx
        atom2_idx = bond.atom2.idx
        natoms = len(sites_cart)
        # in non-P1 space groups, amber topology knows entire unit cell bonds
        # only use bonds from 1st ASU
        if atom1_idx >= natoms or atom2_idx >= natoms:
            continue
        atom1 = parm_coordinates[atom1_idx]
        atom2 = parm_coordinates[atom2_idx]
        dx = atom1[0] - atom2[0]
        dy = atom1[1] - atom2[1]
        dz = atom1[2] - atom2[2]
        delta = bond.type.req - sqrt(dx * dx + dy * dy + dz * dz)
        #print "bond deltas:  %6d %6d %6d  %7.2f" % ( i, atom1_idx, atom2_idx, delta )
        if get_extremes:
            bond.model = sqrt(dx * dx + dy * dy + dz * dz)
            bond_extremes.process(delta, bond)
        bond_deltas.append(delta)
    bond_deltas = flex.double(bond_deltas)
    b_sq = bond_deltas * bond_deltas
    b_ave = sqrt(flex.mean_default(b_sq, 0))
    b_max = sqrt(flex.max_default(b_sq, 0))
    b_min = sqrt(flex.min_default(b_sq, 0))
    if verbose: print("done with bond_rmsd: %s" % time.strftime("%H:%M:%S"))
    if not get_deltas:
        return b_min, b_max, b_ave
    else:
        return (b_min, b_max, b_ave), bond_deltas, bond_extremes
Code example #11
def get_power_spectrum(mc):

    # Using bins of equal width in d_star_sq matches variation with resolution
    # better, but would have problems with very asymmetric boxes (not used here).
    power_spectrum = flex.double(mc.size(), 1.)
    mc_copy = mc.deep_copy()
    d_star_sq_step = get_d_star_sq_step(mc_copy)
    mc_copy.setup_binner_d_star_sq_step(d_star_sq_step=d_star_sq_step)
    for i_bin in mc_copy.binner().range_used():
        sel = mc_copy.binner().selection(i_bin)
        mcsel = mc_copy.select(sel)
        fsq = flex.pow2(flex.abs(mcsel.data()))
        meanfsq = flex.mean_default(fsq, 0)
        power = math.sqrt(meanfsq)
        power_spectrum.set_selected(sel, power)

    return power_spectrum
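The per-bin quantity here is the root-mean-square structure-factor amplitude, sqrt(mean(|F|^2)). A small self-contained sketch of that statistic with made-up complex coefficients:

# Sketch (hypothetical coefficients) of the per-bin statistic in
# get_power_spectrum: the RMS amplitude sqrt(mean(|F|^2)).
import math
from scitbx.array_family import flex

f = flex.complex_double([complex(3, 4), complex(0, 5)])  # both have |F| = 5
fsq = flex.pow2(flex.abs(f))
print(math.sqrt(flex.mean_default(fsq, 0)))  # 5.0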
Code example #12
    def __init__(
        self, n_terms, x_obs, y_obs, w_obs=None, free_flags=None, low_limit=None, high_limit=None, randomise=False
    ):
        self.x_obs = x_obs
        self.y_obs = y_obs
        self.free_flags = free_flags
        if self.free_flags is None:
            self.free_flags = flex.bool(x_obs.size(), True)

        self.w_obs = None
        if w_obs is not None:
            self.w_obs = w_obs
        else:
            self.w_obs = flex.double(x_obs.size(), 1.0)

        self.x = flex.double(n_terms, 0)
        if randomise:
            self.x = (flex.random_double(n_terms) - 0.5) * 10.0
        self.low_limit = flex.min_default(self.x_obs, 0)
        self.high_limit = flex.max_default(self.x_obs, 0)
        self.f = None
        if low_limit is not None:
            self.low_limit = low_limit
        if high_limit is not None:
            self.high_limit = high_limit

        ## Set the first term equal to twice mean of the data points.
        ## Although not really needed, seems like a good idea anyway.
        ## It should speed up convergence.
        self.x[0] = flex.mean_default(self.y_obs, 0) * 2.0
        self.lsq_object = chebyshev_lsq(
            n_terms, self.low_limit, self.high_limit, self.x_obs, self.y_obs, self.w_obs, self.free_flags
        )
        self.lsq_object.replace(self.x)
        lbfgs_exception_handling_params = lbfgs.exception_handling_parameters(
            ignore_line_search_failed_step_at_lower_bound=True,
            ignore_line_search_failed_step_at_upper_bound=True,
            ignore_line_search_failed_maxfev=True,
        )
        self.minimizer = lbfgs.run(target_evaluator=self, exception_handling_params=lbfgs_exception_handling_params)
        self.coefs = self.lsq_object.coefs()
        self.f = self.lsq_object.residual()
        self.free_f = self.lsq_object.free_residual()
        del self.x
Code example #13
from scitbx.array_family import flex

def mean_filtered(x):
  # Keep values within a factor of three of the (unfiltered) mean.
  me = flex.mean_default(x,0)
  sel  = x < me*3
  sel &= x > me/3
  return sel
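A hypothetical call, to show what the returned selection looks like: values more than a factor of three away from the raw mean come back False.

# Hypothetical usage of mean_filtered with made-up values.
vals = flex.double([2.0, 2.2, 1.8, 2.1, 0.5, 20.0])
sel = mean_filtered(vals)
print(list(sel))  # [True, True, True, True, False, False]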