예제 #1
0
def loadModel(filename):
    """Read a model from *filename* with a DataManager and return it.

    The object returned by DataManager.get_model is a model, not a
    manager, so the local is named `model` (the original `manager` name
    was misleading).
    """
    dm = DataManager()  #   Initialize the DataManager and call it dm
    dm.set_overwrite(
        True)  #   tell the DataManager to overwrite files with the same name
    #print("Reading file")
    model = dm.get_model(filename)
    return model
예제 #2
0
def read_map_and_model(file_name_1, file_name_2):
    '''
    Identify which file is map and which is model (by file extension),
    read both in, and return a map_model_manager combining them.

    Raises Sorry if either the map or the model cannot be identified.
    '''

    map_file_name = None
    model_file_name = None
    # str.endswith accepts a tuple of suffixes, so a single call per
    # category replaces the original inner loops.
    for f in [file_name_1, file_name_2]:
        if f.endswith(('.ccp4', '.mrc', '.map')):
            map_file_name = f
        if f.endswith(('.pdb', '.cif')):
            model_file_name = f
    if not map_file_name or not model_file_name:
        raise Sorry("Unable to identify map and model from %s and %s" %
                    (file_name_1, file_name_2))

    from iotbx.data_manager import DataManager
    from iotbx.map_model_manager import map_model_manager
    dm = DataManager()
    dm.process_real_map_file(map_file_name)
    mm = dm.get_real_map(map_file_name)

    dm.process_model_file(model_file_name)
    model = dm.get_model(model_file_name)
    mam = map_model_manager(model=model, map_manager=mm)
    return mam
예제 #3
0
def read_model(filename):
    """Read a model file via the DataManager, make sure it carries a
    crystal symmetry, and return the resulting model object."""
    from iotbx.data_manager import DataManager

    manager = DataManager()
    manager.set_overwrite(True)
    manager.process_model_file(filename)
    result = manager.get_model(filename)
    # Attach a (box) symmetry when the file did not provide one.
    result.add_crystal_symmetry_if_necessary()
    return result
예제 #4
0
def run(args, log=sys.stdout):
  """Compute and write two map-vs-model difference maps.

  Parses command-line args into a model (exactly one PDB file) and a CCP4
  map, builds a map_model_manager, then runs compdiff twice — once with
  vector_map=False and once with vector_map=True — writing the results to
  map_model_difference_1.ccp4 and map_model_difference_2.ccp4.

  Raises Sorry when the model, the map, or the resolution is missing.
  """
  print("-"*79, file=log)
  print(legend, file=log)
  print("-"*79, file=log)
  inputs = mmtbx.utils.process_command_line_args(args = args,
    master_params = master_params(),
    suppress_symmetry_related_errors = True)
  params = inputs.params.extract()
  # model
  broadcast(m="Input PDB:", log=log)
  file_names = inputs.pdb_file_names
  # Error messages below: fixed grammar ("has to given" -> "has to be given").
  if(len(file_names) != 1): raise Sorry("PDB file has to be given.")
  from iotbx.data_manager import DataManager
  dm = DataManager()
  dm.set_overwrite(True)
  model = dm.get_model(file_names[0])

  # map
  broadcast(m="Input map:", log=log)
  if(inputs.ccp4_map is None): raise Sorry("Map file has to be given.")

  from iotbx.map_model_manager import map_model_manager
  mam = map_model_manager(model = model, map_manager = inputs.ccp4_map,
     wrapping = params.wrapping,
     ignore_symmetry_conflicts = params.ignore_symmetry_conflicts)

  mam.model().setup_scattering_dictionaries(
     scattering_table=params.scattering_table)
  mam.model().get_xray_structure().show_summary(f=log, prefix="  ")
  inputs.ccp4_map.show_summary(prefix="  ")

  # estimate resolution
  d_min = params.resolution
  if(d_min is None):
    raise Sorry("Map resolution must be given.")
  print("  d_min: %6.4f"%d_min, file=log)
  #
  result_obj = compdiff(
    map_data_obs = mam.map_manager().map_data(), # NOTE this will always wrap map
    xrs          = mam.model().get_xray_structure(),
    d_min        = d_min,
    vector_map   = False)

  output_map_manager=mam.map_manager().customized_copy(
      map_data=result_obj.map_result)
  dm.write_real_map_file(output_map_manager, "map_model_difference_1.ccp4")

  # Second pass: same comparison, but as a vector map.
  result_obj = compdiff(
    map_data_obs = mam.map_manager().map_data(),
    xrs          = mam.model().get_xray_structure(),
    d_min        = d_min,
    vector_map   = True)

  output_map_manager=mam.map_manager().customized_copy(
      map_data=result_obj.map_result)
  dm.write_real_map_file(output_map_manager, "map_model_difference_2.ccp4")
def test_01(method='model_sharpen', expected_results=None):
    """Exercise a map_model_manager sharpening method and check that the
    map-model correlation stays above 0.9 after each sharpening variant.

    method: name of the sharpening method to call on the manager
    (model_sharpen, half_map_sharpen or external_sharpen).
    """

    # Source data

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')
    data_ncs_spec = os.path.join(data_dir, 'data',
                                 'non_zero_origin_ncs_spec.ncs_spec')

    # Read in data

    dm = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    dm.set_overwrite(True)

    map_file = data_ccp4
    dm.process_real_map_file(map_file)
    mm = dm.get_real_map(map_file)

    model_file = data_pdb
    dm.process_model_file(model_file)
    model = dm.get_model(model_file)

    ncs_file = data_ncs_spec
    dm.process_ncs_spec_file(ncs_file)
    ncs = dm.get_ncs_spec(ncs_file)

    mmm = map_model_manager(model=model,
                            map_manager_1=mm.deep_copy(),
                            map_manager_2=mm.deep_copy(),
                            ncs_object=ncs,
                            wrapping=False)
    mmm.add_map_manager_by_id(map_id='external_map',
                              map_manager=mmm.map_manager().deep_copy())
    mmm.set_resolution(3)
    mmm.set_log(sys.stdout)

    dc = mmm.deep_copy()

    # sharpen by method (can be model_sharpen, half_map_sharpen or
    #     external_sharpen)

    mmm = dc.deep_copy()
    # Bind the sharpening method AFTER rebinding mmm.  Binding it before
    # the deep_copy (as originally written) left it attached to the old
    # object, so the assertions below measured a copy that was never
    # sharpened.
    sharpen_method = getattr(mmm, method)
    sharpen_method(anisotropic_sharpen=False, n_bins=10)
    assert mmm.map_model_cc() > 0.9
    sharpen_method(anisotropic_sharpen=False, n_bins=10, local_sharpen=True)
    assert mmm.map_model_cc() > 0.9
    sharpen_method(anisotropic_sharpen=True, n_bins=10)
    assert mmm.map_model_cc() > 0.9
    sharpen_method(anisotropic_sharpen=True,
                   n_bins=10,
                   local_sharpen=True,
                   n_boxes=1)
    assert mmm.map_model_cc() > 0.9
예제 #6
0
def loadModel(filename):
    """Load and return a model from *filename* using a DataManager."""
    # Initialize the DataManager and allow it to overwrite files that
    # share a name with an existing one.
    dm = DataManager()
    dm.set_overwrite(True)
    # Read the model.  Restraints processing is intentionally NOT done
    # here: model.process_input_model(make_restraints=True) was removed
    # because the Restraints Manager will not operate on unfamiliar
    # residues (KPB 6/10/2021).
    model = dm.get_model(filename)
    return model
예제 #7
0
def exercise_ss_creation_crash():
  # Regression exercise: write a small 3-residue ASN model to disk, read it
  # back with the DataManager, enable secondary-structure annotation in the
  # pdb_interpretation parameters, and process the model with restraints.
  # NOTE(review): judging by the name, this presumably reproduced a crash in
  # secondary-structure creation — confirm against the original test suite.
  pdb_str = """
CRYST1  145.350  135.090  157.320  90.00  90.00  90.00 P 1
SCALE1      0.006880  0.000000  0.000000        0.00000
SCALE2      0.000000  0.007402  0.000000        0.00000
SCALE3      0.000000  0.000000  0.006356        0.00000
ATOM      1  N   ASN A   1      47.095 160.279  31.220  1.00 30.00           N
ATOM      2  CA  ASN A   1      65.985 120.233  34.727  1.00 30.00           C
ATOM      3  C   ASN A   1      56.657 138.700  33.374  1.00 30.00           C
ATOM      4  O   ASN A   1      56.353 138.977  34.561  1.00 30.00           O
ATOM      5  CB  ASN A   1      65.238 120.133  36.068  1.00 30.00           C
ATOM      6  CG  ASN A   1      66.087 119.360  37.057  1.00 30.00           C
ATOM      7  OD1 ASN A   1      65.746 118.217  37.441  1.00 30.00           O
ATOM      8  ND2 ASN A   1      67.240 119.920  37.395  1.00 30.00           N
ATOM      9  N   ASN A   2      56.939 137.441  33.021  1.00 30.00           N
ATOM     10  CA  ASN A   2      67.135 117.384  35.354  1.00 30.00           C
ATOM     11  C   ASN A   2      74.935 104.398  35.546  1.00 30.00           C
ATOM     12  O   ASN A   2      74.423 104.166  34.444  1.00 30.00           O
ATOM     13  CB  ASN A   2      65.828 116.703  35.809  1.00 30.00           C
ATOM     14  CG  ASN A   2      66.092 115.518  36.718  1.00 30.00           C
ATOM     15  OD1 ASN A   2      66.641 114.515  36.266  1.00 30.00           O
ATOM     16  ND2 ASN A   2      65.744 115.556  38.000  1.00 30.00           N
ATOM     17  N   ASN A   3      76.102 103.886  35.920  1.00 30.00           N
ATOM     18  CA  ASN A   3      68.960 115.076  35.163  1.00 30.00           C
ATOM     19  C   ASN A   3      86.047  90.376  35.591  1.00 30.00           C
ATOM     20  O   ASN A   3      87.134  90.903  35.535  1.00 30.00           O
ATOM     21  CB  ASN A   3      70.251 115.882  34.903  1.00 30.00           C
ATOM     22  CG  ASN A   3      71.023 116.208  36.192  1.00 30.00           C
ATOM     23  OD1 ASN A   3      70.637 117.096  36.957  1.00 30.00           O
ATOM     24  ND2 ASN A   3      72.106 115.481  36.436  1.00 30.00           N
ATOM     25  OXT ASN A   3      85.912  89.104  36.045  1.00 30.00           O
TER
END


"""
  # Write the model so the DataManager can read it back from disk.
  with open("exercise_ss_creation_crash_model.pdb","w") as fo:
    fo.write(pdb_str)
  from iotbx.data_manager import DataManager
  dm=DataManager()
  # Turn on secondary-structure handling; the parameters must be set on the
  # model BEFORE processing with restraints.
  params = mmtbx.model.manager.get_default_pdb_interpretation_params()
  params.pdb_interpretation.secondary_structure.enabled=True
  model = dm.get_model('exercise_ss_creation_crash_model.pdb')
  model.set_pdb_interpretation_params(params)
  model.process_input_model(make_restraints=True)
예제 #8
0
def run(prefix="tst_00_mmtbx_building_ligands"):
  """Write the ligand model to <prefix>.pdb, pair it with the reference
  map from phenix_regression, and run the ligand-building procedure."""
  # Write the ligand coordinates to disk first.
  with open("%s.pdb"%prefix,"w") as out_file:
    out_file.write(pdb_str)
  # Read the map and the freshly written model back in.
  from iotbx.data_manager import DataManager
  data_manager = DataManager()
  map_file = libtbx.env.find_in_repositories(
    relative_path="mmtbx/building/ligands/tst_00_mmtbx_building_ligands.map",
    test=os.path.isfile)
  map_manager = data_manager.get_real_map(map_file)
  model = data_manager.get_model("%s.pdb"%prefix)
  # Give the model the map's symmetry before building restraints.
  model.set_crystal_symmetry(map_manager.crystal_symmetry())
  model.process(make_restraints=True)
  # Combine map and model, then build the ligand at 2.5 A.
  mmm = iotbx.map_model_manager.map_model_manager(map_manager=map_manager,
                                                  model=model)
  o = ligands.lifi.run(map_model_manager = mmm, d_min = 2.5)
예제 #9
0
def read_map_and_model(file_name_1,
                       file_name_2,
                       regression_directory=None,
                       prefix=None):
    '''
    Identify which file is map and which is model (by file extension),
    read both in, and return a map_model_manager.
    If regression_directory is specified, look there for these files,
    assuming a prefix of $PHENIX/modules/phenix_regression/.
    Raises Sorry if either the map or the model cannot be identified.
    '''

    if regression_directory and not prefix:
        import libtbx.load_env
        prefix = libtbx.env.under_dist(module_name="phenix_regression",
                                       path=regression_directory,
                                       test=os.path.isdir)

    if prefix:
        file_name_1 = os.path.join(prefix, file_name_1)
        file_name_2 = os.path.join(prefix, file_name_2)

    map_file_name = None
    model_file_name = None
    # str.endswith accepts a tuple of suffixes, so a single call per
    # category replaces the original inner loops.
    for f in [file_name_1, file_name_2]:
        if f.endswith(('.ccp4', '.mrc', '.map')):
            map_file_name = f
        if f.endswith(('.pdb', '.cif')):
            model_file_name = f
    if not map_file_name or not model_file_name:
        raise Sorry("Unable to guess map and model from %s and %s" %
                    (file_name_1, file_name_2))

    from iotbx.data_manager import DataManager
    from iotbx.map_model_manager import map_model_manager
    dm = DataManager()
    dm.process_real_map_file(map_file_name)
    mm = dm.get_real_map(map_file_name)

    dm.process_model_file(model_file_name)
    model = dm.get_model(model_file_name)
    mam = map_model_manager(model=model, map_manager=mm)
    return mam
예제 #10
0
def main():
    """Check geometry-restraint counts for the 1yjp models, then verify
    that user-supplied bond, angle, and dihedral edits appear in the
    restraints (.geo) output.

    The edits accumulate: the angle section runs with the bond edit still
    in place, and the dihedral section with both — the expected counts
    below depend on that ordering.
    """
    # Baseline: restraint counts for plain 1yjp.
    dm = DataManager()
    dm.process_model_str('testing', model_1yjp)
    model = dm.get_model()
    rc = model.restraints_as_geo(force=True)
    rc = check_geo(rc)
    assert rc == count_1yjp, check_diff(rc, count_1yjp)

    # Baseline: restraint counts for 1yjp with waters.
    dm = DataManager()
    dm.process_model_str('testing', model_1yjp_with_waters)
    model = dm.get_model()
    rc = model.restraints_as_geo(force=True)
    rc = check_geo(rc)
    assert rc == count_1yjp_with_waters, rc

    # All edits below are made on this shared params object, so they
    # accumulate from section to section.
    params = model.get_default_pdb_interpretation_params()
    edits_1yjp = params.geometry_restraints.edits

    # 1) Add a user bond between a water O and ASN 2 ND2.
    edits_1yjp.bond[0].action = 'add'
    edits_1yjp.bond[0].atom_selection_1 = 'resname HOH and resid 10 and name O'
    edits_1yjp.bond[
        0].atom_selection_2 = 'resname ASN and resid 2 and name ND2'
    edits_1yjp.bond[0].distance_ideal = 2.1
    edits_1yjp.bond[0].sigma = 0.1
    model.set_pdb_interpretation_params(params)
    rc = model.restraints_as_geo(force=True)
    rc = check_geo(rc)
    current = count_1yjp_with_waters.copy()
    current['User supplied restraints'] = 1
    # The added bond removes one nonbonded pair from the default count.
    current['Nonbonded interactions'] = 1176
    assert rc == current, check_diff(rc, current)

    # 2) Additionally add a user angle (bond edit still active).
    edits_1yjp.angle[0].action = 'add'
    edits_1yjp.angle[
        0].atom_selection_1 = 'resname HOH and resid 10 and name O'
    edits_1yjp.angle[
        0].atom_selection_2 = 'resname ASN and resid 2 and name ND2'
    edits_1yjp.angle[
        0].atom_selection_3 = 'resname ASN and resid 2 and name CG'
    edits_1yjp.angle[0].angle_ideal = 21.9
    edits_1yjp.angle[0].sigma = 1.1
    model.set_pdb_interpretation_params(params)
    rc = model.restraints_as_geo(force=True)
    rc = check_geo(rc)
    current = count_1yjp_with_waters.copy()
    current['User supplied restraints'] = 1
    current['User supplied angle restraints'] = 1
    current['Nonbonded interactions'] = 1176
    assert rc == current, check_diff(rc, current)

    # 3) Additionally add a user dihedral (bond and angle edits still active).
    edits_1yjp.dihedral[0].action = 'add'
    edits_1yjp.dihedral[
        0].atom_selection_1 = 'resname HOH and resid 10 and name O'
    edits_1yjp.dihedral[
        0].atom_selection_2 = 'resname ASN and resid 2 and name ND2'
    edits_1yjp.dihedral[
        0].atom_selection_3 = 'resname ASN and resid 2 and name CG'
    edits_1yjp.dihedral[
        0].atom_selection_4 = 'resname ASN and resid 2 and name CB'
    edits_1yjp.dihedral[0].angle_ideal = 121.9
    edits_1yjp.dihedral[0].sigma = 1.12
    edits_1yjp.dihedral[0].periodicity = 10
    model.set_pdb_interpretation_params(params)
    rc = model.restraints_as_geo(force=True)
    rc = check_geo(rc)
    current = count_1yjp_with_waters.copy()
    current['User supplied restraints'] = 1
    current['User supplied angle restraints'] = 1
    current['User supplied torsion angle restraints'] = 1
    #current['  sinusoidal'] = 16
    current['Nonbonded interactions'] = 1176
    assert rc == current, check_diff(rc, current)
    print('OK')
예제 #11
0
def RunProbeTests(inFileName):
    """Run the probe C++ self-tests and exercise the Python wrapping.

    inFileName: optional path to a model file.  When given, the model is
    read from it; otherwise a small model is generated with the
    map_model_manager.  Raises an Exception when bonding information
    cannot be derived from the input file.
    """

    #========================================================================
    # Call the test functions for the libraries we test.

    ret = probeext.DotSpheres_test()
    assert len(ret) == 0, "DotSpheres_test() failed: " + ret

    ret = probeext.SpatialQuery_test()
    assert len(ret) == 0, "SpatialQuery_test() failed: " + ret

    ret = probeext.Scoring_test()
    assert len(ret) == 0, "Scoring_test() failed: " + ret

    AtomTypes.Test()
    Helpers.Test()

    #========================================================================
    # Now ensure that we can use the C++-wrapped classes as intended to make sure
    # that the wrapping code or parameters have not changed.

    #========================================================================
    # Make sure we can get at the DotSphere objects and their methods
    # (the values are not used further; this only exercises the linkage).
    cache = probeext.DotSphereCache(10)
    sphere1 = cache.get_sphere(1)
    dots = sphere1.dots()

    #========================================================================
    # Make sure we can fill in an ExtraAtomInfoList and pass it to scoring
    # Generate an example data model with a small molecule in it
    if inFileName is not None and len(inFileName) > 0:
        # Read a model from a file using the DataManager
        dm = DataManager()
        dm.process_model_file(inFileName)
        model = dm.get_model(inFileName)
    else:
        # Generate a small-molecule model using the map model manager
        mmm = map_model_manager(
        )  #   get an initialized instance of the map_model_manager
        mmm.generate_map(
        )  #   get a model from a generated small library model and calculate a map for it
        model = mmm.model()  #   get the model

    # Fix up bogus unit cell when it occurs by checking crystal symmetry.
    cs = model.crystal_symmetry()
    if (cs is None) or (cs.unit_cell() is None):
        model = shift_and_box_model(model=model)

    # Get the list of all atoms in the model
    atoms = model.get_atoms()

    # Get the bonding information we'll need to exclude our bonded neighbors.
    try:
        p = mmtbx.model.manager.get_default_pdb_interpretation_params()
        model.process(make_restraints=True,
                      pdb_interpretation_params=p)  # make restraints
        geometry = model.get_restraints_manager().geometry
        sites_cart = model.get_sites_cart()  # cartesian coordinates
        bond_proxies_simple, asu = \
            geometry.get_all_bond_proxies(sites_cart = sites_cart)
    except Exception as e:
        raise Exception("Could not get bonding information for input file: " +
                        str(e))
    bondedNeighbors = Helpers.getBondedNeighborLists(atoms,
                                                     bond_proxies_simple)

    # Traverse the hierarchy and look up the extra data to be filled in.
    ret = Helpers.getExtraAtomInfo(model)
    extra = ret.extraAtomInfo

    # Construct a SpatialQuery and fill in the atoms.  Ensure that we can make a
    # query within 1000 Angstroms of the origin.
    sq = probeext.SpatialQuery(atoms)
    nb = sq.neighbors((0, 0, 0), 0, 1000)

    # Construct a DotScorer object.
    # Find the radius of each atom in the structure and construct dot spheres for
    # them. Find the atoms that are bonded to them and add them to an excluded list.
    # Then compute the score for each of them and report the summed score over the
    # whole molecule the way that Reduce will.
    ds = probeext.DotScorer(extra)
    total = 0
    badBumpTotal = 0
    for a in atoms:
        rad = extra.getMappingFor(a).vdwRadius
        assert rad > 0, "Invalid radius for atom look-up: " + a.name + " rad = " + str(
            rad)
        sphere = cache.get_sphere(rad)

        # Excluded atoms that are bonded to me or to one of my neightbors.
        # It has the side effect of excluding myself if I have any neighbors.
        # Construct as a set to avoid duplicates.
        exclude = set()
        for n in bondedNeighbors[a]:
            exclude.add(n)
            for n2 in bondedNeighbors[n]:
                exclude.add(n2)
        exclude = list(exclude)

        # Reuse the dots computed above rather than calling sphere.dots()
        # a second time inside the score_dots() call.
        dots = sphere.dots()
        res = ds.score_dots(a, 1.0, sq, rad * 3, 0.25, exclude, dots,
                            sphere.density(), False)
        total += res.totalScore()
        if res.hasBadBump:
            badBumpTotal += 1

    # Test calling the single-dot checking code as will be used by Probe to make sure
    # all of the Python linkage is working
    dotOffset = [1, 0, 0]
    check = ds.check_dot(atoms[0], dotOffset, 1, atoms, [atoms[0]])
    overlapType = check.overlapType

    # Test calling the interaction_type method to be sure Python linkage is working
    interactionType = ds.interaction_type(check.overlapType, check.gap)
예제 #12
0
def test_modified_residues():
  """Round-trip models containing modified residues through the
  DataManager and attach their sequences via model.set_sequences.

  Two cases: a modified amino acid (ALY, from 1e0z) and a modified
  nucleotide-like ligand (A3P, from 4eec).
  """
  # modified amino acid example from 1e0z
  seq_str1 = """\
>pdb|1e0z|A
PTVEYLNYETLDDQGWDMDDDDLFEKAADAGLDGEDYGTMEVAEGEYILEAAEAQGYDWPFSCRAGACANCASIVKEG
EIDMDMQQILSDEEVEEKDVRLTCIGSPAADEVKIVYNAKHLDYLQNRVI
"""
  pdb_str1 = """\
HETATM 1707  OH  ALY A 118       9.253 -11.285  16.293  1.00  0.00           O
HETATM 1708  CH  ALY A 118       8.541 -10.835  15.417  1.00  0.00           C
HETATM 1709  CH3 ALY A 118       8.924  -9.562  14.659  1.00  0.00           C
HETATM 1710  NZ  ALY A 118       7.420 -11.417  15.087  1.00 50.00           N
HETATM 1711  CE  ALY A 118       7.573 -12.895  15.202  1.00 50.00           C
HETATM 1712  CD  ALY A 118       6.273 -13.470  14.634  1.00 12.50           C
HETATM 1713  CG  ALY A 118       5.506 -14.188  15.746  1.00 12.50           C
HETATM 1714  CB  ALY A 118       6.259 -15.458  16.148  1.00 12.50           C
HETATM 1715  CA  ALY A 118       5.885 -15.846  17.580  1.00 12.50           C
HETATM 1716  N   ALY A 118       4.407 -15.666  17.660  1.00 12.50           N
HETATM 1717  C   ALY A 118       6.576 -14.924  18.588  1.00 12.50           C
HETATM 1718  O   ALY A 118       7.674 -15.190  19.036  1.00 12.50           O
HETATM 1719 HH31 ALY A 118       9.665  -9.016  15.224  1.00  0.00           H
HETATM 1720 HH32 ALY A 118       8.047  -8.945  14.525  1.00  0.00           H
HETATM 1721 HH33 ALY A 118       9.329  -9.826  13.694  1.00  0.00           H
HETATM 1722  HE3 ALY A 118       7.690 -13.181  16.237  1.00  0.00           H
HETATM 1723  HE2 ALY A 118       8.419 -13.232  14.620  1.00  0.00           H
HETATM 1724  HD3 ALY A 118       6.503 -14.171  13.845  1.00  0.00           H
HETATM 1725  HD2 ALY A 118       5.667 -12.668  14.239  1.00  0.00           H
HETATM 1726  HG3 ALY A 118       4.520 -14.451  15.392  1.00  0.00           H
HETATM 1727  HG2 ALY A 118       5.419 -13.535  16.602  1.00  0.00           H
HETATM 1728  HB3 ALY A 118       7.323 -15.279  16.091  1.00  0.00           H
HETATM 1729  HB2 ALY A 118       5.994 -16.261  15.477  1.00  0.00           H
HETATM 1730  HCA ALY A 118       6.150 -16.875  17.765  1.00  0.00           H
HETATM 1731  H   ALY A 118       3.878 -15.580  16.839  1.00 99.00           H
"""
  # modified nucleic acid example from 4eec
  seq_str2 = """\
>4EEC_1|Chains A,B|StaL|Streptomyces toyocaensis (55952)
MGSSHHHHHHSSGLVPRGSMCWIASYPKAGGHWLRCMLTSYVTGEPVETWPGIQAGVPHLEGLLRDGEAPSADPDEQV
LLATHFTADRPVLRFYRESTAKVVCLIRNPRDAMLSLMRMKGIPPEDVEACRKIAETFIADEGFSSVRIWAGEGSWPE
NIRSWTDSVHESFPNAAVLAVRYEDLRKDPEGELWKVVDFLELGGRDGVADAVANCTLERMREMEERSKLLGLETTGL
MTRGGKQLPFVGKGGQRKSLKFMGDDIEKAYADLLHGETDFAHYARLYGYAE
>4EEC_2|Chain C|desulfo-A47934|Streptomyces toyocaensis (55952)
GXXGXXX
"""
  pdb_str2 = """\
HETATM 3866  P1  A3P A 301     -32.928   0.112  -1.515  1.00 60.90           P
HETATM 3867  O1P A3P A 301     -32.567   0.971  -2.721  1.00 58.74           O
HETATM 3868  O2P A3P A 301     -33.836  -1.041  -1.926  1.00 60.18           O
HETATM 3869  O3P A3P A 301     -33.351   0.858  -0.247  1.00 59.07           O
HETATM 3870  P2  A3P A 301     -26.843  -0.504   2.943  1.00 61.59           P
HETATM 3871  O4P A3P A 301     -25.377  -0.388   2.609  1.00 57.50           O
HETATM 3872  O5P A3P A 301     -27.164  -1.892   3.467  1.00 62.98           O
HETATM 3873  O6P A3P A 301     -27.445   0.681   3.730  1.00 62.21           O
HETATM 3874  O5' A3P A 301     -27.694  -0.432   1.563  1.00 60.14           O
HETATM 3875  C5' A3P A 301     -29.109  -0.472   1.636  1.00 57.46           C
HETATM 3876  C4' A3P A 301     -29.649  -0.859   0.288  1.00 58.10           C
HETATM 3877  O4' A3P A 301     -29.334  -2.235   0.029  1.00 59.74           O
HETATM 3878  C3' A3P A 301     -31.164  -0.780   0.284  1.00 58.44           C
HETATM 3879  O3' A3P A 301     -31.539  -0.556  -1.069  1.00 59.37           O
HETATM 3880  C2' A3P A 301     -31.538  -2.197   0.626  1.00 58.35           C
HETATM 3881  O2' A3P A 301     -32.890  -2.542   0.343  1.00 58.12           O
HETATM 3882  C1' A3P A 301     -30.561  -2.939  -0.264  1.00 58.48           C
HETATM 3883  N9  A3P A 301     -30.576  -4.401   0.059  1.00 58.16           N
HETATM 3884  C8  A3P A 301     -30.559  -4.926   1.312  1.00 57.04           C
HETATM 3885  N7  A3P A 301     -30.604  -6.285   1.255  1.00 57.35           N
HETATM 3886  C5  A3P A 301     -30.645  -6.664  -0.042  1.00 55.60           C
HETATM 3887  C6  A3P A 301     -30.683  -7.957  -0.767  1.00 54.29           C
HETATM 3888  N6  A3P A 301     -30.686  -9.129  -0.066  1.00 54.08           N
HETATM 3889  N1  A3P A 301     -30.706  -7.913  -2.130  1.00 53.41           N
HETATM 3890  C2  A3P A 301     -30.703  -6.743  -2.815  1.00 52.74           C
HETATM 3891  N3  A3P A 301     -30.659  -5.530  -2.220  1.00 53.74           N
HETATM 3892  C4  A3P A 301     -30.631  -5.420  -0.849  1.00 56.63           C
"""

  for pdb_str, seq_str in [(pdb_str1, seq_str1), (pdb_str2, seq_str2)]:
    # NamedTemporaryFile defaults to binary mode ('w+b'); writing a str to
    # it raises TypeError on Python 3, so open the files in text mode.
    seq_file = tempfile.NamedTemporaryFile(mode='w', suffix='.fasta')
    seq_file.write(seq_str)
    seq_file.flush()
    model_file = tempfile.NamedTemporaryFile(mode='w', suffix='.pdb')
    model_file.write(pdb_str)
    model_file.flush()
    dm = DataManager()
    dm.process_model_file(model_file.name)
    dm.process_sequence_file(seq_file.name)
    model = dm.get_model()
    seq = dm.get_sequence()
    model.set_sequences(seq)
    # Closing the NamedTemporaryFile objects deletes the files on disk.
    model_file.close()
    seq_file.close()
예제 #13
0
def test_01():

    # Source data (map and model)

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')

    # Read in map data with data_manager
    dm = DataManager(['real_map'])
    dm.set_overwrite(True)

    # Next step uses map_manager to do the actual reading
    dm.process_real_map_file(data_ccp4)
    mm = dm.get_real_map()

    # Shift the origin of the map; starts at (100,100,100)
    print(mm.map_data().origin())
    assert mm.map_data().origin() == (100, 100, 100)
    assert mm.origin_shift_grid_units == (0, 0, 0)
    mm.shift_origin()
    assert mm.map_data().origin() == (0, 0, 0)
    assert mm.origin_shift_grid_units == (100, 100, 100)
    mm.show_summary()

    # test cc_to_other_map
    assert mm.cc_to_other_map_manager(mm) == 1

    # test writing and reading file
    dm.write_real_map_file(mm,
                           filename='test_map_manager.ccp4',
                           overwrite=True)
    dm.process_real_map_file('test_map_manager.ccp4')
    new_mm = dm.get_real_map('test_map_manager.ccp4')
    os.remove('test_map_manager.ccp4')
    new_mm.shift_origin()
    # Check whether gridding and crystal_symmetry are similar for mm, new_mm
    assert new_mm.is_similar(mm)
    assert approx_equal(new_mm.map_data()[3125], mm.map_data()[3125])

    # test writing and reading file without shifting origin
    dm = DataManager(['real_map'])
    dm.set_overwrite(True)
    dm.process_real_map_file(data_ccp4)
    mm = dm.get_real_map()
    mm.show_summary()
    dm.write_real_map_file(mm,
                           filename='test_map_manager.ccp4',
                           overwrite=True)
    new_mm = map_manager('test_map_manager.ccp4')
    assert (new_mm.is_similar(mm))
    new_mm.shift_origin()
    assert (not new_mm.is_similar(mm))

    # get map_data
    dm = DataManager(['real_map'])
    dm.set_overwrite(True)
    dm.process_real_map_file(data_ccp4)
    mm = dm.get_real_map()
    mm.shift_origin()
    map_data = mm.map_data()
    assert approx_equal(map_data[15, 10, 19], 0.38, eps=0.01)

    # get crystal_symmetry
    cs = mm.crystal_symmetry()
    assert approx_equal(cs.unit_cell().parameters()[0], 22.41, eps=0.01)

    # and full cell symmetry
    full_cs = mm.unit_cell_crystal_symmetry()
    assert approx_equal(full_cs.unit_cell().parameters()[0],
                        149.4066,
                        eps=0.01)

    # write map directly:
    mm.write_map('test_direct.ccp4')

    # read back directly
    new_mm = map_manager('test_direct.ccp4')
    assert (not new_mm.is_similar(mm))

    new_mm.shift_origin()
    assert mm.is_similar(new_mm)
    assert approx_equal(new_mm.map_data()[3125], mm.map_data()[3125])

    # deep_copy
    new_mm = mm.deep_copy()
    assert new_mm.is_similar(mm)
    assert approx_equal(new_mm.map_data()[3125], mm.map_data()[3125])

    # deep_copy a map without shifting origin
    # Make a DataManager that can write a map coeffs file too
    dm = DataManager(['miller_array', 'real_map'])
    dm.set_overwrite(True)
    dm.process_real_map_file(data_ccp4)
    omm = dm.get_real_map()
    omm.show_summary()
    new_omm = omm.deep_copy()
    assert new_omm.is_similar(omm)
    assert (not new_omm.is_similar(mm))

    # customized_copy
    new_mm = mm.customized_copy(map_data=mm.map_data().deep_copy())
    assert new_mm.is_similar(mm)

    # Initialize with parameters
    mm_para = map_manager(
        unit_cell_grid=mm.unit_cell_grid,
        unit_cell_crystal_symmetry=mm.unit_cell_crystal_symmetry(),
        origin_shift_grid_units=mm.origin_shift_grid_units,
        map_data=mm.map_data(),
        wrapping=False)
    assert mm_para.is_similar(mm)

    # Adjust origin and gridding:
    mm_read = map_manager(data_ccp4)
    mm_read.shift_origin()
    mm.show_summary()
    mm_read.show_summary()
    mm_read.set_original_origin_and_gridding((10, 10, 10),
                                             gridding=(100, 100, 100))
    mm_read.show_summary()
    assert (not mm_read.is_similar(mm))
    assert (mm_read.origin_is_zero())

    # Set program name
    mm_read.set_program_name('test program')
    assert mm_read.program_name == 'test program'

    # Set limitation
    mm_read.add_limitation('map_is_sharpened')
    assert mm_read.limitations == ['map_is_sharpened']

    # Add a label
    mm_read.add_label('TEST LABEL')
    assert mm_read.labels[0] == 'TEST LABEL'
    mm_read.write_map('map_with_labels.mrc')
    new_mm = map_manager('map_with_labels.mrc')
    assert 'TEST LABEL' in new_mm.labels
    assert new_mm.is_in_limitations('map_is_sharpened')
    assert new_mm.labels[0].find('test program') > -1

    # change the cell dimensions
    mm_read = map_manager(data_ccp4)
    mm_read.shift_origin()
    assert mm_read.is_similar(mm)
    assert approx_equal(mm_read.pixel_sizes(), (0.7470, 0.7231, 0.7374),
                        eps=0.001)
    from cctbx import crystal
    new_uc_params = list(
        mm_read.unit_cell_crystal_symmetry().unit_cell().parameters())
    new_uc_params[0] += 10
    new_cs = crystal.symmetry(new_uc_params, 1)
    mm_read.set_unit_cell_crystal_symmetry(new_cs)
    assert not mm_read.crystal_symmetry().is_similar_symmetry(
        mm.crystal_symmetry())
    assert not mm_read.is_similar(mm)
    mm_read.show_summary()
    assert approx_equal(mm_read.pixel_sizes(), (0.7970, 0.7231, 0.7374),
                        eps=0.001)

    # Read a map directly
    mm_read = map_manager(data_ccp4)
    mm_read.shift_origin()
    assert mm_read.is_similar(mm)

    # Set log
    import sys
    mm.set_log(sys.stdout)

    # Add map_data
    new_mm = mm_read.customized_copy(map_data=mm.map_data().deep_copy())
    assert new_mm.is_similar(mm)

    # replace data
    new_mm.set_map_data(map_data=mm.map_data().deep_copy())
    assert new_mm.is_similar(mm)

    # create a full-sized map from this one
    mm_full_size = mm_read.deep_copy().as_full_size_map()
    assert not mm_full_size.is_similar(mm_read)
    print(mm_full_size.map_data().origin(), mm_read.map_data().origin())
    print(mm_full_size.map_data().all(), mm_read.map_data().all())

    # Apply a mask to edges of a map
    assert approx_equal(new_mm.map_data().as_1d().min_max_mean().max,
                        mm.map_data().as_1d().min_max_mean().max)
    assert approx_equal((new_mm.map_data()[0], mm.map_data()[0]), (0.0, 0.0))
    new_mm.create_mask_around_edges(soft_mask_radius=3)
    new_mm.soft_mask(soft_mask_radius=3)
    assert approx_equal(new_mm.map_data().as_1d().min_max_mean().max,
                        mm.map_data().as_1d().min_max_mean().max)
    new_mm.apply_mask(set_outside_to_mean_inside=True)
    assert approx_equal((new_mm.map_data()[0], mm.map_data()[0]),
                        (0.0116267086024, 0.0))

    dm.process_real_map_file('test_map_manager.ccp4')
    new_mm = dm.get_real_map('test_map_manager.ccp4')
    new_mm.show_summary()
    assert (not new_mm.is_similar(mm))
    new_mm.shift_origin()
    new_mm.show_summary()
    assert new_mm.is_similar(mm)
    os.remove('test_map_manager.ccp4')

    # Check origin_shifts
    print(new_mm.origin_shift_grid_units)
    print(new_mm.shift_cart())
    assert approx_equal(new_mm.origin_shift_grid_units, (100, 100, 100))
    assert approx_equal(
        new_mm.shift_cart(),
        (-74.70333099365234, -72.30750274658205, -73.7437515258789))

    # Convert to map coeffs, write out, read back, convert back to map

    map_coeffs = mm.map_as_fourier_coefficients(d_min=3)
    mtz_dataset = map_coeffs.as_mtz_dataset(column_root_label='F')
    mtz_object = mtz_dataset.mtz_object()
    dm.write_miller_array_file(mtz_object, filename="map_coeffs.mtz")
    # Note these Fourier coeffs correspond to working map (not original position)

    array_labels = dm.get_miller_array_labels("map_coeffs.mtz")
    labels = array_labels[0]
    dm.get_reflection_file_server(filenames=["map_coeffs.mtz"],
                                  labels=[labels])
    miller_arrays = dm.get_miller_arrays()
    new_map_coeffs = miller_arrays[0]
    mm_from_map_coeffs = mm.fourier_coefficients_as_map_manager(
        map_coeffs=new_map_coeffs)

    assert mm_from_map_coeffs.is_similar(mm)

    # Find map symmetry in a map
    data_d7 = os.path.join(data_dir, 'data', 'D7.ccp4')
    dm = DataManager(['real_map', 'model'])
    dm.process_real_map_file(data_d7)
    dm.process_model_file(data_pdb)
    mm = dm.get_real_map(data_d7)
    model = dm.get_model(data_pdb)
    mm.shift_origin()
    mm.set_original_origin_and_gridding(original_origin=(0, 0, 0))

    # Box it so it is not so easy to find symmetry
    from cctbx.maptbx.box import with_bounds
    box = with_bounds(mm, lower_bounds=(2, 2, 2), upper_bounds=(43, 43, 43))
    new_mm = box.map_manager()
    new_mm.find_map_symmetry(symmetry='d7',
                             min_ncs_cc=0.8,
                             include_helical_symmetry=False)
    ncs_obj = new_mm.ncs_object()
    assert ncs_obj is not None
    print("NCS: ", new_mm.ncs_object().as_ncs_spec_string())
    another_mm = map_manager(
        unit_cell_grid=new_mm.unit_cell_grid,
        unit_cell_crystal_symmetry=new_mm.unit_cell_crystal_symmetry(),
        origin_shift_grid_units=new_mm.origin_shift_grid_units,
        map_data=new_mm.map_data(),
        ncs_object=ncs_obj,
        wrapping=False)
    assert another_mm.is_similar(new_mm)
    assert ncs_obj.is_similar_ncs_object(another_mm.ncs_object())
    assert new_mm.is_similar(another_mm)

    # Adjust model and ncs symmetry to match this map
    assert model.shift_cart() is None
    new_mm.set_model_symmetries_and_shift_cart_to_match_map(model)
    assert approx_equal(
        model.shift_cart(),
        (-0.888888888888889, -0.8888888888888891, -0.888888888888889))

    assert new_mm.is_compatible_ncs_object(ncs_obj)
    ncs_obj.set_shift_cart((0, 0, 0))
    assert not new_mm.is_compatible_ncs_object(ncs_obj)

    new_mm.set_ncs_object_shift_cart_to_match_map(ncs_obj)
    new_mm.set_ncs_object(ncs_obj)
    assert new_mm.is_compatible_ncs_object(new_mm.ncs_object())
    new_mm.show_summary()

    new_mm.shift_origin(desired_origin=(11, 1, 1))
    print(new_mm.shift_cart(), new_mm.ncs_object().shift_cart())
    assert new_mm.is_compatible_ncs_object(new_mm.ncs_object())
    new_mm.shift_origin()
    assert new_mm.is_compatible_ncs_object(new_mm.ncs_object())

    # filter a map
    dm = DataManager()
    mm = dm.get_real_map(data_d7)

    low_pass_filtered = mm.deep_copy()
    low_pass_filtered.resolution_filter(d_min=2.5)

    high_pass_filtered = mm.deep_copy()
    high_pass_filtered.resolution_filter(d_max=2.5)

    gaussian = mm.deep_copy()
    gaussian.gaussian_filter(smoothing_radius=1)

    binary = mm.deep_copy()
    binary.binary_filter(threshold=0.5)

    assert approx_equal(
        (mm.map_data().as_1d()[1073],
         low_pass_filtered.map_data().as_1d()[1073],
         high_pass_filtered.map_data().as_1d()[1073],
         gaussian.map_data().as_1d()[1073], binary.map_data().as_1d()[1073]),
        (0.0171344596893, 0.0227163900537, -0.0072717454565, 0.0149086679298,
         0.0))

    info = mm.get_density_along_line((5, 5, 5), (10, 10, 10))
    assert approx_equal([info.along_density_values[4]] +
                        list(info.along_sites[4]),
                        [-0.562231123447, 8.0, 8.0, 8.0])
    from iotbx.map_model_manager import map_model_manager
    extra_map_manager_id_list = [
        "low_pass_filtered", "high_pass_filtered", "gaussian", "binary"
    ]

    expected_cc = [
        0.999920243317, 0.0129365545729, 0.971491994253, 0.733986499746
    ]
    mam = map_model_manager(
        map_manager=mm,
        extra_map_manager_list=[
            low_pass_filtered, high_pass_filtered, gaussian, binary
        ],
        extra_map_manager_id_list=extra_map_manager_id_list,
    )
    for other_id, cc in zip(extra_map_manager_id_list, expected_cc):
        assert approx_equal(
            cc, mam.map_map_cc(map_id='map_manager', other_map_id=other_id))
예제 #14
0
def create_model_from_file(path_to_pdb_file):
  """Read a model file (PDB/mmCIF) with the DataManager and return the model."""
  from iotbx.data_manager import DataManager
  data_manager = DataManager()
  return data_manager.get_model(path_to_pdb_file)
예제 #15
0
def test_01():
  '''Exercise match_map_model_ncs and map_model_manager: reading map/model/ncs,
  origin shifting, model shifting round-trips, map generation, wrapping checks,
  boxing, and half-map averaging.  Asserted values are fixtures for the test
  data files, not general results.'''

  # Source data

  data_dir = os.path.dirname(os.path.abspath(__file__))
  data_ccp4 = os.path.join(data_dir, 'data',
                          'non_zero_origin_map.ccp4')
  data_pdb = os.path.join(data_dir, 'data',
                          'non_zero_origin_model.pdb')
  data_ncs_spec = os.path.join(data_dir, 'data',
                          'non_zero_origin_ncs_spec.ncs_spec')

  # DataManager

  dm = DataManager(['ncs_spec','model', 'real_map', 'phil'])
  dm.set_overwrite(True)

  # Read in map and model and ncs

  map_file=data_ccp4
  dm.process_real_map_file(map_file)
  mm = dm.get_real_map(map_file)

  model_file=data_pdb
  dm.process_model_file(model_file)
  model = dm.get_model(model_file)

  ncs_file=data_ncs_spec
  dm.process_ncs_spec_file(ncs_file)
  ncs = dm.get_ncs_spec(ncs_file)

  # Keep an unshifted copy so we can test adding it again later
  ncs_dc = ncs.deep_copy()

  mmmn = match_map_model_ncs()
  mmmn.add_map_manager(mm)
  mmmn.add_model(model)
  mmmn.add_ncs_object(ncs)

  # Save it
  mmmn_dc=mmmn.deep_copy()

  # Make sure we can add an ncs object that is either shifted or not
  mmmn_dcdc=mmmn.deep_copy()
  new_mmmn = match_map_model_ncs()
  new_mmmn.add_map_manager(mmmn_dcdc.map_manager())
  new_mmmn.add_model(mmmn_dcdc.model())
  new_mmmn.add_ncs_object(mmmn_dcdc.ncs_object())
  assert new_mmmn.ncs_object().shift_cart() == new_mmmn.map_manager().shift_cart()

  mmmn_dcdc=mmmn.deep_copy()
  new_mmmn = match_map_model_ncs()
  new_mmmn.add_map_manager(mmmn_dcdc.map_manager())
  new_mmmn.add_model(mmmn_dcdc.model())
  # this ncs copy was never shifted; it should be brought to match the map
  new_mmmn.add_ncs_object(ncs_dc)
  assert new_mmmn.ncs_object().shift_cart() == new_mmmn.map_manager().shift_cart()


  original_ncs=mmmn.ncs_object()
  assert approx_equal((24.0528, 11.5833, 20.0004),
     tuple(original_ncs.ncs_groups()[0].translations_orth()[-1]),
     eps=0.1)

  assert tuple(mmmn._map_manager.origin_shift_grid_units) == (0,0,0)

  # Shift origin to (0,0,0)
  mmmn=mmmn_dc.deep_copy()  # fresh version of match_map_model_ncs
  mmmn.shift_origin()
  new_ncs=mmmn.ncs_object()
  assert tuple(mmmn._map_manager.origin_shift_grid_units) == (100,100,100)

  mmmn.write_model('s.pdb')
  mmmn.write_map('s.mrc')

  # The ncs translations should have moved along with the origin shift
  shifted_ncs=mmmn.ncs_object()
  assert approx_equal((-153.758, -74.044, -127.487),
      tuple(shifted_ncs.ncs_groups()[0].translations_orth()[-1]),eps=0.1)


  # Shift a model and shift it back

  mmmn=mmmn_dc.deep_copy()  # fresh version of match_map_model_ncs
  model=mmmn.model()
  shifted_model=mmmn.shift_model_to_match_working_map(model=model)
  model_in_original_position=mmmn.shift_model_to_match_original_map(
      model=shifted_model)
  assert (approx_equal(model.get_sites_cart(), # not a copy
                      shifted_model.get_sites_cart()))
  assert approx_equal(model.get_sites_cart(),
                      model_in_original_position.get_sites_cart())


  # Generate a map and model

  import sys
  mmm=map_model_manager(log=sys.stdout)
  mmm.generate_map()
  model=mmm.model()
  mm=mmm.map_manager()
  assert approx_equal(
     model.get_sites_cart()[0], (14.476, 10.57, 8.34) ,eps=0.01)
  assert approx_equal(mm.map_data()[10,10,10],-0.0195,eps=0.001)
  # Save it
  mmm_dc=mmm.deep_copy()

  # Check on wrapping
  assert not mm.wrapping()  # this one should not wrap because it is zero at edges

  # Make a new one with no buffer so it is not zero at edges
  mmm=map_model_manager()
  mmm.generate_map(box_cushion=0)
  mm=mmm.map_manager()
  # check its compatibility with wrapping
  assert mm.is_consistent_with_wrapping()
  mmm.show_summary()

  # now box it
  sel=mmm.model().selection("resseq 221:221")
  new_model=mmm.model().deep_copy().select(sel)
  new_mmm=map_model_manager(model=new_model,map_manager=mm.deep_copy())
  new_mmm.box_all_maps_around_model_and_shift_origin()
  new_mm=new_mmm.map_manager()

  # boxing destroys wrapping compatibility
  assert not new_mm.wrapping()
  assert not new_mm.is_consistent_with_wrapping()

  # now box it with selection
  new_mmm_1=map_model_manager(
      model=mmm.model().deep_copy(),map_manager=mm.deep_copy())
  new_mmm_1.box_all_maps_around_model_and_shift_origin(
      selection_string="resseq 221:221")
  new_mm_1=new_mmm_1.map_manager()

  assert not new_mm_1.wrapping()
  assert not new_mm_1.is_consistent_with_wrapping()
  # boxing by selection string should yield the same gridding as above
  assert new_mm_1.map_data().all()== new_mm.map_data().all()

  # create map_model_manager with just half-maps
  mm1=mm.deep_copy()
  mm2=mm.deep_copy()
  map_data=mm2.map_data()
  map_data+=1.
  new_mmm=map_model_manager(model=mmm.model().deep_copy(),
    map_manager_1=mm1,
    map_manager_2=mm2)
  assert new_mmm._map_dict.get('map_manager') is None # should not be any yet
  # accessing map_manager() averages the two half-maps (hence the +0.5)
  assert approx_equal(new_mmm.map_manager().map_data()[232],
     mm.deep_copy().map_data()[232]+0.5)
  assert new_mmm._map_dict.get('map_manager') is not None # now should be there

  # generate map data from a model
  mm1=mm.deep_copy()
  mm2=mm.deep_copy()
  new_mmm=map_model_manager(model=mmm.model().deep_copy(), map_manager=mm1)
  mmm.generate_map(model=mmm.model())
  mm=mmm.map_manager()
  mmm.show_summary()
예제 #16
0
def generate_model(file_name=None,
                   n_residues=None,
                   start_res=None,
                   b_iso=30,
                   box_cushion=5,
                   space_group_number=1,
                   output_model_file_name=None,
                   shake=None,
                   random_seed=None,
                   log=sys.stdout):
    '''
    generate_model: Simple utility for generating a model for testing purposes.

    This function typically accessed and tested through map_model_manager

     Summary
    -------

    Generate a model from a user-specified file or from some examples available
    in the cctbx.  Cut out specified number of residues, shift to place on
    positive side of origin, optionally set b values to b_iso,
    place in box with buffering of box_cushion on all
    edges, optionally randomly shift (shake) atomic positions by rms of shake A,
    and write out to output_model_file_name and return model object.

    Parameters:

      file_name (string, None):  File containing model (PDB, CIF format)
      n_residues (int, 10):      Number of residues to include
      start_res (int, None):     Starting residue number
      b_iso (float, 30):         B-value (ADP) to use for all atoms
      box_cushion (float, 5):    Buffer (A) around model
      space_group_number (int, 1):  Space group to use
      output_model_file_name (string, None):  File for output model
      shake (float, None):       RMS variation to add (A) in shake
      random_seed (int, None):   Random seed for shake

    Returns:
      model.manager object (model) in a box defined by a crystal_symmetry object
    '''

    # Get the parameters (values may arrive as strings, e.g. from a command
    # line, so convert explicitly)

    space_group_number = int(space_group_number)
    if n_residues is not None:
        n_residues = int(n_residues)
    box_cushion = float(box_cushion)
    if start_res is not None:  # 'is not None' so a string "0" is converted too
        start_res = int(start_res)
    if shake:
        shake = float(shake)
    if random_seed:
        # Seed python's random, then draw a fresh seed for the flex random
        # generator so both streams are reproducible for a given random_seed
        random_seed = int(random_seed)
        import random
        random.seed(random_seed)
        random_seed = random.randint(1, 714717)
        flex.set_random_seed(random_seed)

    # Choose file with coordinates

    if not file_name:
        if not n_residues:
            n_residues = 10  # default
        import libtbx.load_env
        iotbx_regression = os.path.join(
            libtbx.env.find_in_repositories("iotbx"), 'regression')
        # Pick the smallest bundled example that can supply n_residues residues
        if n_residues < 25:
            file_name = os.path.join(iotbx_regression, 'secondary_structure',
                                     '5a63_chainBp.pdb')  # starts at 219
            if not start_res: start_res = 219
        elif n_residues < 167:
            file_name = os.path.join(iotbx_regression, 'secondary_structure',
                                     '3jd6_noh.pdb')  # starts at 58
            if not start_res: start_res = 58
        else:
            file_name = os.path.join(iotbx_regression, 'secondary_structure',
                                     '4a7h_chainC.pdb')  # starts at 9
            if not start_res: start_res = 9
    else:  # have file_name
        if start_res is None:
            start_res = 1
        if not n_residues:
            n_residues = 100000  #  a big number

    # Read in coordinates and cut out the part of the model we want

    from iotbx.data_manager import DataManager
    from cctbx.maptbx.box import shift_and_box_model

    dm = DataManager(['model'])
    dm.process_model_file(file_name)
    model = dm.get_model(file_name)

    # A usable crystal symmetry is required for the selection/boxing below
    if not model.crystal_symmetry() or not model.crystal_symmetry().unit_cell(
    ):
        model = shift_and_box_model(model=model, box_cushion=box_cushion)

    selection = model.selection('resseq %s:%s' %
                                (start_res, start_res + n_residues - 1))
    model = model.select(selection)

    # shift the model and return it with new crystal_symmetry
    model = shift_and_box_model(model=model, box_cushion=box_cushion)

    if b_iso is not None:
        # Overwrite every atomic B-value with the single requested value
        b_values = flex.double(model.get_sites_cart().size(), b_iso)
        ph = model.get_hierarchy()
        ph.atoms().set_b(b_values)

    # Optionally shake model
    if shake:
        model = shake_model(model, shake=shake)

    if output_model_file_name:
        # Context manager guarantees the file is closed even on error
        with open(output_model_file_name, 'w') as f:
            print("%s" % (model.model_as_pdb()), file=f)
        print("Writing model with %s residues and b_iso=%s from %s to %s" %
              (n_residues, b_iso, file_name, output_model_file_name),
              file=log)
    else:
        print("Generated model with %s residues and b_iso=%s from %s " %
              (n_residues, b_iso, file_name),
              file=log)
    return model
예제 #17
0
def test_model_and_restraint():
    '''Check that a model with a non-standard ligand (URE) cannot build a
    restraints manager until the matching restraint CIF has been processed
    by the DataManager.

    Fix over the original: the expected-failure check now raises if
    get_restraints_manager() unexpectedly succeeds, instead of passing
    silently.
    '''

    # from 3tpj
    model_str = '''
CRYST1  104.428  128.690   76.662  90.00  90.00  90.00 C 2 2 21
ATOM   5877  O   URE A 403     -37.796 -38.296   5.693  1.00 15.43           O
ATOM   5878  C   URE A 403     -36.624 -38.509   5.800  1.00 20.53           C
ATOM   5879  N2  URE A 403     -36.191 -39.836   6.120  1.00 27.82           N
ATOM   5880  N1  URE A 403     -35.679 -37.450   5.644  1.00 21.36           N
ATOM   5881 HN11 URE A 403     -34.792 -37.617   5.732  1.00 25.63           H
ATOM   5882 HN12 URE A 403     -35.965 -36.613   5.445  1.00 25.63           H
ATOM   5883 HN21 URE A 403     -35.307 -40.015   6.211  1.00 33.38           H
ATOM   5884 HN22 URE A 403     -36.801 -40.499   6.221  1.00 33.38           H
'''

    restraint_str = '''
#
data_comp_list
loop_
_chem_comp.id
_chem_comp.three_letter_code
_chem_comp.name
_chem_comp.group
_chem_comp.number_atoms_all
_chem_comp.number_atoms_nh
_chem_comp.desc_level
URE URE Unknown                   ligand 8 4 .
#
data_comp_URE
#
loop_
_chem_comp_atom.comp_id
_chem_comp_atom.atom_id
_chem_comp_atom.type_symbol
_chem_comp_atom.type_energy
_chem_comp_atom.partial_charge
_chem_comp_atom.x
_chem_comp_atom.y
_chem_comp_atom.z
URE        C       C   C     .          0.4968   -0.0000   -0.0000
URE        O       O   O     .          1.7184   -0.0000   -0.0000
URE        N1      N   NH2   .         -0.2180   -0.0000    1.2381
URE        N2      N   NH2   .         -0.2180    0.0000   -1.2381
URE        HN11    H   HNH2  .          0.2355   -0.0000    2.0237
URE        HN12    H   HNH2  .         -1.1251    0.0000    1.2382
URE        HN21    H   HNH2  .          0.2355    0.0000   -2.0237
URE        HN22    H   HNH2  .         -1.1251   -0.0000   -1.2382
#
loop_
_chem_comp_bond.comp_id
_chem_comp_bond.atom_id_1
_chem_comp_bond.atom_id_2
_chem_comp_bond.type
_chem_comp_bond.value_dist
_chem_comp_bond.value_dist_esd
URE  C       O      double        1.222 0.020
URE  C       N1     single        1.430 0.020
URE  C       N2     single        1.430 0.020
URE  N1      HN11   single        0.907 0.020
URE  N1      HN12   single        0.907 0.020
URE  N2      HN21   single        0.907 0.020
URE  N2      HN22   single        0.907 0.020
#
loop_
_chem_comp_angle.comp_id
_chem_comp_angle.atom_id_1
_chem_comp_angle.atom_id_2
_chem_comp_angle.atom_id_3
_chem_comp_angle.value_angle
_chem_comp_angle.value_angle_esd
URE  N2      C       N1           120.00 3.000
URE  N2      C       O            120.00 3.000
URE  N1      C       O            120.00 3.000
URE  HN12    N1      HN11         120.00 3.000
URE  HN12    N1      C            120.00 3.000
URE  HN11    N1      C            120.00 3.000
URE  HN22    N2      HN21         120.00 3.000
URE  HN22    N2      C            120.00 3.000
URE  HN21    N2      C            120.00 3.000
#
loop_
_chem_comp_tor.comp_id
_chem_comp_tor.id
_chem_comp_tor.atom_id_1
_chem_comp_tor.atom_id_2
_chem_comp_tor.atom_id_3
_chem_comp_tor.atom_id_4
_chem_comp_tor.value_angle
_chem_comp_tor.value_angle_esd
_chem_comp_tor.period
URE CONST_01      HN11    N1      C       O              0.00   0.0 0
URE CONST_02      HN12    N1      C       O            180.00   0.0 0
URE CONST_03      HN21    N2      C       O             -0.00   0.0 0
URE CONST_04      HN22    N2      C       O            180.00   0.0 0
URE CONST_05      HN21    N2      C       N1           180.00   0.0 0
URE CONST_06      HN22    N2      C       N1            -0.00   0.0 0
URE CONST_07      HN11    N1      C       N2          -180.00   0.0 0
URE CONST_08      HN12    N1      C       N2            -0.00   0.0 0
#
loop_
_chem_comp_plane_atom.comp_id
_chem_comp_plane_atom.plane_id
_chem_comp_plane_atom.atom_id
_chem_comp_plane_atom.dist_esd
URE plan-1  C      0.020
URE plan-1  O      0.020
URE plan-1  N1     0.020
URE plan-1  N2     0.020
URE plan-1  HN11   0.020
URE plan-1  HN12   0.020
URE plan-1  HN21   0.020
URE plan-1  HN22   0.020
'''

    model_filename = 'ure.pdb'
    restraint_filename = 'ure.cif'

    # Write the fixture files so the DataManager can read them back
    dm = DataManager(['model', 'restraint'])
    dm.write_model_file(model_str, filename=model_filename, overwrite=True)
    dm.write_restraint_file(restraint_str,
                            filename=restraint_filename,
                            overwrite=True)

    # must fail because no restraints are loaded
    dm.process_model_file(model_filename)
    model = dm.get_model()
    try:
        model.get_restraints_manager()
    except Sorry:
        pass
    else:
        # Original version passed silently here; make the expectation explicit
        raise AssertionError(
            'get_restraints_manager should raise Sorry without restraints')

    # automatically add restraints
    dm.process_restraint_file(restraint_filename)
    model = dm.get_model()
    model.get_restraints_manager()

    # Clean up the fixture files
    os.remove(model_filename)
    os.remove(restraint_filename)
예제 #18
0
def tst_01(log=sys.stdout):

    # Check calculations of conversion between rmsd, lddt , and B values
    print("\nChecking conversions between rmsd, lddt and B-values", file=log)
    for maximum_rmsd, minimum_lddt, target_b in [
        (1.5, None, 59.2175263686),
        (None, 0.7, 59.2175263686),
        (1.5, 0.7, 59.2175263686),
        (1.0, None, 26.3189006083),
        (None, 0.5, 293.306328196),
    ]:
        print()
        cutoff_b = get_cutoff_b_value(maximum_rmsd, minimum_lddt, log=log)
        print("maximum_rmsd: %s min lddt %s Cutoff B:  %.2f" %
              (maximum_rmsd, minimum_lddt, cutoff_b),
              file=log)
        assert approx_equal(cutoff_b, target_b)

    # Read in alphafold model and get LDDT from B-value field
    print("\nReading in alphafold model with lddt values in B-value field",
          file=log)

    dm = DataManager()
    dm.set_overwrite(True)
    m = dm.get_model(model_file)
    pae_m = dm.get_model(pae_model_file)
    pae_matrix = parse_pae_file(pae_file)

    lddt_values = m.get_hierarchy().atoms().extract_b().deep_copy()
    print("\nLDDT mean:", lddt_values.min_max_mean().mean)
    assert approx_equal(lddt_values.min_max_mean().mean, 82.5931111111)

    # Multiply lddt_values by 0.01 (fractional)
    fractional_lddt = lddt_values * 0.01

    #  Convert lddt to b
    b_values = get_b_values_from_lddt(lddt_values)
    print("B-value mean:", b_values.min_max_mean().mean)
    assert approx_equal(b_values.min_max_mean().mean, 24.7254093338)

    # Convert b to lddt
    lddt = get_lddt_from_b(b_values)
    assert approx_equal(lddt, fractional_lddt)
    lddt = get_lddt_from_b(b_values, input_lddt_is_fractional=False)
    assert approx_equal(lddt, lddt_values)

    # Convert  lddt to rmsd
    rmsd_values = get_rmsd_from_lddt(lddt_values)
    print("RMSD mean:", rmsd_values.min_max_mean().mean)
    assert approx_equal(rmsd_values.min_max_mean().mean, 0.93559254135)

    # use process_predicted_model to convert lddt or rmsd to B return with
    #  mark_atoms_to_ignore_with_occ_zero

    print(
        "\nConverting lddt to B values and using mark_atoms_to_ignore_with_occ_zero",
        file=log)
    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'lddt'
    params.process_predicted_model.remove_low_confidence_residues = True
    params.process_predicted_model.maximum_rmsd = 1.5
    params.process_predicted_model.split_model_by_compact_regions = True
    params.process_predicted_model.maximum_domains = 3

    model_info = process_predicted_model(m,
                                         params,
                                         mark_atoms_to_keep_with_occ_one=True)
    models = model_info.model_list
    for mm, n1, n2 in zip(models, [84, 88], [88, 84]):
        model_occ_values = mm.get_hierarchy().atoms().extract_occ()
        assert model_occ_values.count(1) == n1
        assert model_occ_values.count(0) == n2

    # use process_predicted_model to convert lddt or rmsd to B

    print("\nConverting lddt to B values", file=log)
    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'lddt'
    params.process_predicted_model.remove_low_confidence_residues = False
    params.process_predicted_model.split_model_by_compact_regions = False
    params.process_predicted_model.input_lddt_is_fractional = None

    model_info = process_predicted_model(m, params)
    model = model_info.model
    model_b_values = model.get_hierarchy().atoms().extract_b()
    assert approx_equal(b_values, model_b_values,
                        eps=0.02)  # come back rounded

    print("\nConverting fractional lddt to B values", file=log)
    ph = model.get_hierarchy().deep_copy()
    ph.atoms().set_b(fractional_lddt)
    test_model = model.as_map_model_manager().model_from_hierarchy(
        ph, return_as_model=True)
    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'lddt'
    params.process_predicted_model.remove_low_confidence_residues = False
    params.process_predicted_model.split_model_by_compact_regions = False
    params.process_predicted_model.input_lddt_is_fractional = None
    model_info = process_predicted_model(test_model, params)
    model = model_info.model
    model_b_values = model.get_hierarchy().atoms().extract_b()
    assert approx_equal(b_values, model_b_values,
                        eps=3)  # come back very rounded

    ph = model.get_hierarchy().deep_copy()
    ph.atoms().set_b(rmsd_values)
    test_model = model.as_map_model_manager().model_from_hierarchy(
        ph, return_as_model=True)

    print("\nConverting rmsd to B values", file=log)
    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'rmsd'
    params.process_predicted_model.remove_low_confidence_residues = False
    params.process_predicted_model.split_model_by_compact_regions = False
    params.process_predicted_model.input_lddt_is_fractional = None
    model_info = process_predicted_model(test_model, params)
    model = model_info.model
    model_b_values = model.get_hierarchy().atoms().extract_b()
    assert approx_equal(b_values, model_b_values, eps=0.5)  # come back rounded

    print("B-values > 59: %s of %s" %
          ((model_b_values > 59).count(True), model_b_values.size()),
          file=log)

    print("\nConverting rmsd to B values and selecting rmsd < 1.5", file=log)
    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'rmsd'
    params.process_predicted_model.remove_low_confidence_residues = True
    params.process_predicted_model.maximum_rmsd = 1.5
    params.process_predicted_model.split_model_by_compact_regions = False
    params.process_predicted_model.input_lddt_is_fractional = None

    model_info = process_predicted_model(test_model, params)
    model = model_info.model
    print("Residues before: %s   After: %s " % (
        test_model.get_hierarchy().overall_counts().n_residues,
        model.get_hierarchy().overall_counts().n_residues,
    ),
          file=log)

    # Check splitting model into domains
    print("\nSplitting model into domains", file=log)
    model_info = split_model_into_compact_units(model,
                                                maximum_fraction_close=0.5,
                                                log=log)

    chainid_list = model_info.chainid_list
    print("Segments found: %s" % (" ".join(chainid_list)), file=log)
    assert len(chainid_list) == 2

    # Check processing and splitting model into domains
    print("\nProcessing and splitting model into domains", file=log)

    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'lddt'
    params.process_predicted_model.remove_low_confidence_residues = True
    params.process_predicted_model.maximum_rmsd = 1.5
    params.process_predicted_model.split_model_by_compact_regions = True
    params.process_predicted_model.maximum_domains = 3
    model_info = process_predicted_model(m, params, log=log)

    chainid_list = model_info.chainid_list
    print("Segments found: %s" % (" ".join(chainid_list)), file=log)
    assert len(chainid_list) == 2

    mmm = model_info.model.as_map_model_manager()
    mmm.write_model('model_with_groupings.pdb')
    residue_count = []
    expected_residue_count = [84, 88]
    for chainid in chainid_list:
        selection_string = "chain %s" % (chainid)
        ph = model_info.model.get_hierarchy()
        asc1 = ph.atom_selection_cache()
        sel = asc1.selection(selection_string)
        m1 = model_info.model.select(sel)
        n = m1.get_hierarchy().overall_counts().n_residues
        print("Residues in %s: %s" % (selection_string, n), file=log)
        residue_count.append(n)
    assert expected_residue_count == residue_count

    # Now process and use pae model and pae model file
    print("\nProcessing and splitting model into domains with pae", file=log)

    params.process_predicted_model.maximum_fraction_close = 0.5
    params.process_predicted_model.b_value_field_is = 'lddt'
    params.process_predicted_model.remove_low_confidence_residues = True
    params.process_predicted_model.maximum_rmsd = 0.7
    params.process_predicted_model.split_model_by_compact_regions = True
    params.process_predicted_model.maximum_domains = 3
    params.process_predicted_model.pae_power = 2
    model_info = process_predicted_model(pae_m,
                                         params,
                                         pae_matrix=pae_matrix,
                                         log=log)
        print("libtbx.python process_predicted_model.py input.pdb output.pdb")
    else:
        input_file_name = args[0]
        output_file_name = args[1]
        if len(args) > 2:
            p.b_value_field_is = args[2]
        else:
            p.b_value_field_is = 'lddt'
        if len(args) > 3:
            p.domain_size = float(args[3])
        else:
            p.domain_size = 15
        from iotbx.data_manager import DataManager
        dm = DataManager()
        dm.set_overwrite(True)
        m = dm.get_model(input_file_name)

        p.remove_low_confidence_residues = True
        p.maximum_rmsd = 1.5
        p.split_model_by_compact_regions = True

        print("\nProcessing and splitting model into domains")
        model_info = process_predicted_model(m, params)

        chainid_list = model_info.chainid_list
        print("Segments found: %s" % (" ".join(chainid_list)))

        mmm = model_info.model.as_map_model_manager()
        mmm.write_model(output_file_name)
        for chainid in chainid_list:
            selection_string = "chain %s" % (chainid)
def test_01():
    '''Exercise map_model_manager.tls_from_map: build a manager from half-maps
    plus model and ncs, derive TLS parameters from the map, and compare them
    against fixture values for the bundled test data.'''

    # Source data

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')
    data_ncs_spec = os.path.join(data_dir, 'data',
                                 'non_zero_origin_ncs_spec.ncs_spec')

    # Read in data

    dm = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    dm.set_overwrite(True)

    map_file = data_ccp4
    dm.process_real_map_file(map_file)
    mm = dm.get_real_map(map_file)

    model_file = data_pdb
    dm.process_model_file(model_file)
    model = dm.get_model(model_file)

    ncs_file = data_ncs_spec
    dm.process_ncs_spec_file(ncs_file)
    ncs = dm.get_ncs_spec(ncs_file)

    # Same map used as both half-maps here; this is a fixture, not real data
    mmm = map_model_manager(model=model,
                            map_manager_1=mm.deep_copy(),
                            map_manager_2=mm.deep_copy(),
                            ncs_object=ncs,
                            wrapping=False)
    mmm.add_map_manager_by_id(map_id='external_map',
                              map_manager=mmm.map_manager().deep_copy())
    mmm.set_resolution(3)
    mmm.set_log(sys.stdout)

    # Keep a pristine copy so later sections start from the same state
    dc = mmm.deep_copy()

    # Model sharpening
    mmm = dc.deep_copy()
    tls_info = mmm.tls_from_map(
        n_bins=10,
        model_id='model',
        map_id='map_manager',
        iterations=1,
    )
    tlso = tls_info.tlso_list[0]
    print("t:", tlso.t)
    print("l:", tlso.l)
    print("s:", tlso.s)
    print("origin:", tlso.origin)

    # Fixture values for this data set; approx_equal allows rounding noise
    assert approx_equal(
        tlso.t,
        (1.1665511122614693, 1.2026392186971397, 1.1654187623738737,
         -0.08474662045683597, -0.02260930304525043, 0.06492095346560478))
    assert approx_equal(tlso.l,
                        (-0.002162154945537812, -0.0023776908642138776,
                         0.0009748174775374614, -5.9732257180723945e-05,
                         -0.0001342760165428358, -9.055411066345411e-05))
    assert approx_equal(
        tlso.s,
        (3.409944886438518e-08, 6.0542707156228405e-09, -8.938076172958137e-09,
         4.8771411705994806e-09, -2.6247834187732072e-08,
         4.605012474599143e-09, 6.090471572948155e-10, 2.1790753409285795e-09,
         -7.851614684653208e-09))
    assert approx_equal(
        tlso.origin,
        (-64.70331931297399, -62.30573551948903, -63.743687240164604))

    print("TLS: ", tlso.t, tlso.l, tlso.s, tlso.origin)
예제 #21
0
ncs_groups = ncs_obj.ncs_groups()
assert (len(ncs_groups) == 1)
ncs_group = ncs_groups[0]

# or if reading from file...
# ncs_object = ncs()
# ncs_object.read_ncs("../MapSymmetry_4/symmetry_from_map.ncs_spec")

# this already exists
print(ncs_group.format_for_biomt())

# Build cif model from ncs_obj

from iotbx import cif

model = dm.get_model()
h = model.get_hierarchy()
chains = [c.id for c in h.chains()]

n_oper = ncs_group.n_ncs_oper()

# start cif building
builder = cif.builders.cif_model_builder()
builder.add_data_block("assembly_information")

# add pdbx_struct_assembly loop
headers = [
    '_pdbx_struct_assembly.id', '_pdbx_struct_assembly.details',
    '_pdbx_struct_assembly.method_details',
    '_pdbx_struct_assembly.oligomeric_details',
    '_pdbx_struct_assembly.oligomeric_count'
예제 #22
0
def test_01():
    """Round-trip tests for match_map_model_ncs and map_model_manager.

    Reads a map/model/ncs_spec triple from the local ``data`` directory,
    then checks: origin shifting (and that the ncs object shifts with the
    map), shifting a model to/from the working frame, map/model generation,
    model_from_sites_cart / model_from_hierarchy, boxing around a model,
    half-map averaging, and DataManager.get_map_model_manager (including
    its PHIL and error paths).
    """

    # Source data

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')
    data_ncs_spec = os.path.join(data_dir, 'data',
                                 'non_zero_origin_ncs_spec.ncs_spec')

    # DataManager

    dm = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    dm.set_overwrite(True)

    # Read in map and model and ncs

    map_file = data_ccp4
    dm.process_real_map_file(map_file)
    mm = dm.get_real_map(map_file)

    model_file = data_pdb
    dm.process_model_file(model_file)
    model = dm.get_model(model_file)

    ncs_file = data_ncs_spec
    dm.process_ncs_spec_file(ncs_file)
    ncs = dm.get_ncs_spec(ncs_file)

    # keep an unshifted copy of the ncs object for the second add test below
    ncs_dc = ncs.deep_copy()

    mmmn = match_map_model_ncs()
    mmmn.add_map_manager(mm)
    mmmn.add_model(model)
    mmmn.add_ncs_object(ncs)

    # Save it
    mmmn_dc = mmmn.deep_copy()

    # Make sure we can add an ncs object that is either shifted or not
    mmmn_dcdc = mmmn.deep_copy()
    new_mmmn = match_map_model_ncs()
    new_mmmn.add_map_manager(mmmn_dcdc.map_manager())
    new_mmmn.add_model(mmmn_dcdc.model())
    new_mmmn.add_ncs_object(mmmn_dcdc.ncs_object())
    assert new_mmmn.ncs_object().shift_cart() == new_mmmn.map_manager(
    ).shift_cart()

    mmmn_dcdc = mmmn.deep_copy()
    new_mmmn = match_map_model_ncs()
    new_mmmn.add_map_manager(mmmn_dcdc.map_manager())
    new_mmmn.add_model(mmmn_dcdc.model())
    new_mmmn.add_ncs_object(ncs_dc)
    assert new_mmmn.ncs_object().shift_cart() == new_mmmn.map_manager(
    ).shift_cart()

    original_ncs = mmmn.ncs_object()
    assert approx_equal(
        (24.0528, 11.5833, 20.0004),
        tuple(original_ncs.ncs_groups()[0].translations_orth()[-1]),
        eps=0.1)

    # Map origin has not been shifted yet
    assert tuple(mmmn._map_manager.origin_shift_grid_units) == (0, 0, 0)

    # Shift origin to (0,0,0)
    mmmn = mmmn_dc.deep_copy()  # fresh version of match_map_model_ncs
    mmmn.shift_origin()
    new_ncs = mmmn.ncs_object()
    assert tuple(mmmn._map_manager.origin_shift_grid_units) == (100, 100, 100)

    # Side-effect files; s.mrc is reused by the PHIL test at the bottom
    mmmn.write_model('s.pdb')
    mmmn.write_map('s.mrc')

    shifted_ncs = mmmn.ncs_object()
    assert approx_equal(
        (-153.758, -74.044, -127.487),
        tuple(shifted_ncs.ncs_groups()[0].translations_orth()[-1]),
        eps=0.1)

    # Shift a model and shift it back

    mmmn = mmmn_dc.deep_copy()  # fresh version of match_map_model_ncs
    model = mmmn.model()
    shifted_model = mmmn.shift_model_to_match_working_map(model=model)
    model_in_original_position = mmmn.shift_model_to_match_original_map(
        model=shifted_model)
    assert (approx_equal(
        model.get_sites_cart(),  # not a copy
        shifted_model.get_sites_cart()))
    assert approx_equal(model.get_sites_cart(),
                        model_in_original_position.get_sites_cart())

    # test data_manager map_model_manager
    generated_mmm = dm.get_map_model_manager()
    print(generated_mmm)
    assert (isinstance(generated_mmm, map_model_manager))

    # Generate a map and model

    import sys
    mmm = map_model_manager(log=sys.stdout)
    mmm.generate_map()
    model = mmm.model()
    mm = mmm.map_manager()
    assert approx_equal(model.get_sites_cart()[0], (14.476, 10.57, 8.34),
                        eps=0.01)
    assert approx_equal(mm.map_data()[10, 10, 10], -0.0506, eps=0.001)
    # Save it
    mmm_dc = mmm.deep_copy()

    # Create model from sites
    mmm_sites = mmm_dc.deep_copy()
    from scitbx.array_family import flex
    sites_cart = flex.vec3_double()
    sites_cart.append((3, 4, 5))
    mmm_sites.model_from_sites_cart(sites_cart=sites_cart,
                                    model_id='new_model')
    assert mmm_sites.get_model_by_id('new_model').get_sites_cart()[0] == (3, 4,
                                                                          5)

    # Set crystal_symmetry and unit_cell_crystal_symmetry and shift_cart
    # Box and shift the map_model_manager so we have new coordinate system
    mmm_sites.box_all_maps_around_model_and_shift_origin()
    new_model = mmm_sites.get_model_by_id('new_model')
    assert approx_equal(
        (3.747033333333334, 4.723075000000001, 5.0),
        mmm_sites.get_model_by_id('new_model').get_sites_cart()[0])

    # arbitrarily set unit_cell crystal symmetry of model to
    #  match crystal_symmetry. First have to set shift_cart to None
    new_model.set_shift_cart(shift_cart=None)
    new_model.set_unit_cell_crystal_symmetry_and_shift_cart()
    assert new_model.crystal_symmetry() != mmm_sites.crystal_symmetry()

    # now set crystal symmetries and shift cart of model to match the manager
    mmm_sites.set_model_symmetries_and_shift_cart_to_match_map(new_model)
    assert new_model.crystal_symmetry().is_similar_symmetry(
        mmm_sites.crystal_symmetry())
    assert new_model.unit_cell_crystal_symmetry().is_similar_symmetry(
        mmm_sites.unit_cell_crystal_symmetry())
    assert new_model.shift_cart() == mmm_sites.shift_cart()

    # Import hierarchy into a model and set symmetries and shift to match
    mmm_sites.model_from_hierarchy(hierarchy=mmm_sites.model().get_hierarchy(),
                                   model_id='model_from_hierarchy')
    assert mmm_sites.get_model_by_id('model_from_hierarchy').model_as_pdb() \
       == mmm_sites.get_model_by_id('model').model_as_pdb()

    # Check on wrapping
    assert not mm.wrapping(
    )  # this one should not wrap because it is zero at edges

    # Make a new one with no buffer so it is not zero at edges
    mmm = map_model_manager()
    mmm.generate_map(box_cushion=0)
    mm = mmm.map_manager()
    # check its compatibility with wrapping
    assert mm.is_consistent_with_wrapping()
    mmm.show_summary()

    # now box it
    sel = mmm.model().selection("resseq 221:221")
    new_model = mmm.model().deep_copy().select(sel)
    new_mmm = map_model_manager(model=new_model, map_manager=mm.deep_copy())
    new_mmm.box_all_maps_around_model_and_shift_origin()
    new_mm = new_mmm.map_manager()

    assert not new_mm.wrapping()
    assert not new_mm.is_consistent_with_wrapping()

    # now box it with selection
    new_mmm_1 = map_model_manager(model=mmm.model().deep_copy(),
                                  map_manager=mm.deep_copy())
    new_mmm_1.box_all_maps_around_model_and_shift_origin(
        selection_string="resseq 221:221")
    new_mm_1 = new_mmm_1.map_manager()

    assert not new_mm_1.wrapping()
    assert not new_mm_1.is_consistent_with_wrapping()
    assert new_mm_1.map_data().all() == new_mm.map_data().all()

    # create map_model_manager with just half-maps
    mm1 = mm.deep_copy()
    mm2 = mm.deep_copy()
    map_data = mm2.map_data()
    map_data += 1.
    new_mmm = map_model_manager(model=mmm.model().deep_copy(),
                                map_manager_1=mm1,
                                map_manager_2=mm2)
    # full map is created lazily, on first map_manager() access
    assert new_mmm._map_dict.get(
        'map_manager') is None  # should not be any yet
    assert approx_equal(new_mmm.map_manager().map_data()[232],
                        mm.deep_copy().map_data()[232] + 0.5)
    assert new_mmm._map_dict.get(
        'map_manager') is not None  # now should be there

    # generate map data from a model
    mm1 = mm.deep_copy()
    mm2 = mm.deep_copy()
    new_mmm = map_model_manager(model=mmm.model().deep_copy(), map_manager=mm1)
    mmm.generate_map(model=mmm.model())
    mm = mmm.map_manager()
    mmm.show_summary()

    # check get_map_model_manager function: only present when both
    # 'model' and 'real_map' datatypes are requested
    dm = DataManager(['model'])
    assert not hasattr(dm, 'get_map_model_manager')
    dm = DataManager(['real_map'])
    assert not hasattr(dm, 'get_map_model_manager')
    dm = DataManager(['sequence'])
    assert not hasattr(dm, 'get_map_model_manager')
    dm = DataManager(['model', 'real_map'])
    assert hasattr(dm, 'get_map_model_manager')

    # usage
    dm.get_map_model_manager(model_file=data_pdb, map_files=data_ccp4)
    dm.get_map_model_manager(model_file=data_pdb, map_files=[data_ccp4])
    dm.get_map_model_manager(model_file=data_pdb,
                             map_files=[data_ccp4, data_ccp4, data_ccp4])
    dm.get_map_model_manager(model_file=data_pdb,
                             map_files=data_ccp4,
                             ignore_symmetry_conflicts=True)

    # errors
    try:
        dm.get_map_model_manager(model_file=data_pdb,
                                 map_files=data_ccp4,
                                 from_phil=True)
    except Sorry as e:
        assert 'from_phil is set to True' in str(e)
    try:
        dm.get_map_model_manager(model_file=data_pdb,
                                 map_files=data_ccp4,
                                 abc=123)
    except TypeError as e:
        assert 'unexpected keyword argument' in str(e)
    try:
        dm.get_map_model_manager(model_file=data_pdb,
                                 map_files=[data_ccp4, data_ccp4])
    except Sorry as e:
        assert '1 full map and 2 half maps' in str(e)

    # PHIL
    class test_program(ProgramTemplate):
        master_phil_str = '''
include scope iotbx.map_model_manager.map_model_phil_str
'''

    working_phil_str = '''
  map_model {
    full_map = %s
    half_map = %s
    half_map = s.mrc
    model = %s
  }
''' % (data_ccp4, data_ccp4, data_pdb)

    master_phil = parse(test_program.master_phil_str, process_includes=True)
    working_phil = master_phil.fetch(parse(working_phil_str))
    tp = test_program(dm, working_phil.extract())

    try:
        dm.get_map_model_manager(from_phil=True)
    except Exception as e:
        assert 'ignore_symmetry_conflicts' in str(e)
    try:
        dm.get_map_model_manager(from_phil=True,
                                 ignore_symmetry_conflicts=True)
    except AssertionError:
        pass
예제 #23
0
def test_model_datatype():
    """Exercise the DataManager model datatype.

    Covers reading/writing PDB and mmCIF (including format conversion),
    model-type tracking (x_ray/neutron/electron) with PHIL export/import,
    and update_pdb_interpretation_for_model. Skips itself when the
    monomer library server cannot be initialized.
    """
    import mmtbx.monomer_library.server
    try:
        mon_lib_srv = mmtbx.monomer_library.server.server()
    except mmtbx.monomer_library.server.MonomerLibraryServerError:
        print(
            "Can not initialize monomer_library, skipping test_model_datatype."
        )
        return

    # 1yjp
    model_str = '''
CRYST1   21.937    4.866   23.477  90.00 107.08  90.00 P 1 21 1      2
ORIGX1      1.000000  0.000000  0.000000        0.00000
ORIGX2      0.000000  1.000000  0.000000        0.00000
ORIGX3      0.000000  0.000000  1.000000        0.00000
SCALE1      0.045585  0.000000  0.014006        0.00000
SCALE2      0.000000  0.205508  0.000000        0.00000
SCALE3      0.000000  0.000000  0.044560        0.00000
ATOM      1  N   GLY A   1      -9.009   4.612   6.102  1.00 16.77           N
ATOM      2  CA  GLY A   1      -9.052   4.207   4.651  1.00 16.57           C
ATOM      3  C   GLY A   1      -8.015   3.140   4.419  1.00 16.16           C
ATOM      4  O   GLY A   1      -7.523   2.521   5.381  1.00 16.78           O
ATOM      5  N   ASN A   2      -7.656   2.923   3.155  1.00 15.02           N
ATOM      6  CA  ASN A   2      -6.522   2.038   2.831  1.00 14.10           C
ATOM      7  C   ASN A   2      -5.241   2.537   3.427  1.00 13.13           C
ATOM      8  O   ASN A   2      -4.978   3.742   3.426  1.00 11.91           O
ATOM      9  CB  ASN A   2      -6.346   1.881   1.341  1.00 15.38           C
ATOM     10  CG  ASN A   2      -7.584   1.342   0.692  1.00 14.08           C
ATOM     11  OD1 ASN A   2      -8.025   0.227   1.016  1.00 17.46           O
ATOM     12  ND2 ASN A   2      -8.204   2.155  -0.169  1.00 11.72           N
ATOM     13  N   ASN A   3      -4.438   1.590   3.905  1.00 12.26           N
ATOM     14  CA  ASN A   3      -3.193   1.904   4.589  1.00 11.74           C
ATOM     15  C   ASN A   3      -1.955   1.332   3.895  1.00 11.10           C
ATOM     16  O   ASN A   3      -1.872   0.119   3.648  1.00 10.42           O
ATOM     17  CB  ASN A   3      -3.259   1.378   6.042  1.00 12.15           C
ATOM     18  CG  ASN A   3      -2.006   1.739   6.861  1.00 12.82           C
ATOM     19  OD1 ASN A   3      -1.702   2.925   7.072  1.00 15.05           O
ATOM     20  ND2 ASN A   3      -1.271   0.715   7.306  1.00 13.48           N
ATOM     21  N   GLN A   4      -1.005   2.228   3.598  1.00 10.29           N
ATOM     22  CA  GLN A   4       0.384   1.888   3.199  1.00 10.53           C
ATOM     23  C   GLN A   4       1.435   2.606   4.088  1.00 10.24           C
ATOM     24  O   GLN A   4       1.547   3.843   4.115  1.00  8.86           O
ATOM     25  CB  GLN A   4       0.656   2.148   1.711  1.00  9.80           C
ATOM     26  CG  GLN A   4       1.944   1.458   1.213  1.00 10.25           C
ATOM     27  CD  GLN A   4       2.504   2.044  -0.089  1.00 12.43           C
ATOM     28  OE1 GLN A   4       2.744   3.268  -0.190  1.00 14.62           O
ATOM     29  NE2 GLN A   4       2.750   1.161  -1.091  1.00  9.05           N
ATOM     30  N   GLN A   5       2.154   1.821   4.871  1.00 10.38           N
ATOM     31  CA  GLN A   5       3.270   2.361   5.640  1.00 11.39           C
ATOM     32  C   GLN A   5       4.594   1.768   5.172  1.00 11.52           C
ATOM     33  O   GLN A   5       4.768   0.546   5.054  1.00 12.05           O
ATOM     34  CB  GLN A   5       3.056   2.183   7.147  1.00 11.96           C
ATOM     35  CG  GLN A   5       1.829   2.950   7.647  1.00 10.81           C
ATOM     36  CD  GLN A   5       1.344   2.414   8.954  1.00 13.10           C
ATOM     37  OE1 GLN A   5       0.774   1.325   9.002  1.00 10.65           O
ATOM     38  NE2 GLN A   5       1.549   3.187  10.039  1.00 12.30           N
ATOM     39  N   ASN A   6       5.514   2.664   4.856  1.00 11.99           N
ATOM     40  CA  ASN A   6       6.831   2.310   4.318  1.00 12.30           C
ATOM     41  C   ASN A   6       7.854   2.761   5.324  1.00 13.40           C
ATOM     42  O   ASN A   6       8.219   3.943   5.374  1.00 13.92           O
ATOM     43  CB  ASN A   6       7.065   3.016   2.993  1.00 12.13           C
ATOM     44  CG  ASN A   6       5.961   2.735   2.003  1.00 12.77           C
ATOM     45  OD1 ASN A   6       5.798   1.604   1.551  1.00 14.27           O
ATOM     46  ND2 ASN A   6       5.195   3.747   1.679  1.00 10.07           N
ATOM     47  N   TYR A   7       8.292   1.817   6.147  1.00 14.70           N
ATOM     48  CA  TYR A   7       9.159   2.144   7.299  1.00 15.18           C
ATOM     49  C   TYR A   7      10.603   2.331   6.885  1.00 15.91           C
ATOM     50  O   TYR A   7      11.041   1.811   5.855  1.00 15.76           O
ATOM     51  CB  TYR A   7       9.061   1.065   8.369  1.00 15.35           C
ATOM     52  CG  TYR A   7       7.665   0.929   8.902  1.00 14.45           C
ATOM     53  CD1 TYR A   7       6.771   0.021   8.327  1.00 15.68           C
ATOM     54  CD2 TYR A   7       7.210   1.756   9.920  1.00 14.80           C
ATOM     55  CE1 TYR A   7       5.480  -0.094   8.796  1.00 13.46           C
ATOM     56  CE2 TYR A   7       5.904   1.649  10.416  1.00 14.33           C
ATOM     57  CZ  TYR A   7       5.047   0.729   9.831  1.00 15.09           C
ATOM     58  OH  TYR A   7       3.766   0.589  10.291  1.00 14.39           O
ATOM     59  OXT TYR A   7      11.358   2.999   7.612  1.00 17.49           O
TER      60      TYR A   7
HETATM   61  O   HOH A   8      -6.471   5.227   7.124  1.00 22.62           O
HETATM   62  O   HOH A   9      10.431   1.858   3.216  1.00 19.71           O
HETATM   63  O   HOH A  10     -11.286   1.756  -1.468  1.00 17.08           O
HETATM   64  O   HOH A  11      11.808   4.179   9.970  1.00 23.99           O
HETATM   65  O   HOH A  12      13.605   1.327   9.198  1.00 26.17           O
HETATM   66  O   HOH A  13      -2.749   3.429  10.024  1.00 39.15           O
HETATM   67  O   HOH A  14      -1.500   0.682  10.967  1.00 43.49           O
MASTER      238    0    0    0    0    0    0    6   66    1    0    1
END
'''

    # test reading/writing PDB
    test_filename = 'test_model.pdb'
    test_output_filename = 'test_model_output.pdb'
    test_eff = 'model.eff'
    dm = DataManager(['model'])
    dm.process_model_str(test_filename, model_str)
    # NOTE(review): write_model_file is handed the raw string here (not a
    # model object) — presumably the DataManager accepts both; confirm.
    dm.write_model_file(model_str,
                        filename=test_output_filename,
                        overwrite=True)
    m = dm.get_model(test_output_filename)
    assert test_output_filename in dm.get_model_names()
    # writing without a filename falls back to the default output name
    dm.write_model_file(m, overwrite=True)
    pdb_filename = 'cctbx_program.pdb'
    assert os.path.exists(pdb_filename)
    dm.process_model_file(pdb_filename)
    assert not dm.get_model(pdb_filename).input_model_format_cif()
    dm.write_model_file(m, test_filename, overwrite=True)

    # test reading PDB writing CIF
    test_filename = 'test_model.pdb'
    test_output_filename = 'test_model.cif'
    dm = DataManager(['model'])
    dm.process_model_str(test_filename, model_str)
    m = dm.get_model(test_filename)
    dm.write_model_file(m,
                        filename=test_output_filename,
                        format='cif',
                        overwrite=True)
    m = dm.get_model(test_output_filename)
    assert test_output_filename in dm.get_model_names()
    dm.write_model_file(m, overwrite=True)
    cif_filename = 'cctbx_program.cif'
    assert os.path.exists(cif_filename)
    dm.process_model_file(cif_filename)
    assert dm.get_model(cif_filename).input_model_format_cif()

    # test type
    assert dm.get_model_type() == 'x_ray'
    dm.set_model_type(test_filename, 'neutron')
    assert dm.get_model_type() == 'neutron'
    phil_scope = dm.export_phil_scope()
    extract = phil_scope.extract()
    assert extract.data_manager.model[0].type == 'neutron'
    with open(test_eff, 'w') as f:
        f.write(phil_scope.as_str())
    new_phil_scope = iotbx.phil.parse(file_name=test_eff)
    new_dm = DataManager(['model'])
    new_dm.load_phil_scope(new_phil_scope)
    assert new_dm.get_model_type(test_filename) == 'neutron'
    new_dm = DataManager(['model'])
    try:
        new_dm.set_default_model_type('nonsense')
    except Sorry:
        pass
    new_dm.set_default_model_type('electron')
    new_dm.process_model_file(test_filename)
    assert new_dm.get_model_type() == 'electron'
    assert len(new_dm.get_model_names()) == 1
    assert len(new_dm.get_model_names(model_type='electron')) == 1
    assert len(new_dm.get_model_names(model_type='neutron')) == 0

    os.remove(test_eff)
    os.remove(test_filename)

    # test reading/writing CIF
    test_filename = 'test_model_datatype.cif'
    dm.write_model_file(dm.get_model().model_as_mmcif(),
                        filename=test_filename,
                        overwrite=True)
    dm.process_model_file(test_filename)
    # file removed on disk, but the name stays registered in the DataManager
    os.remove(test_filename)
    assert test_filename in dm.get_model_names()
    m = dm.get_model(test_filename)
    dm.write_model_file(m, overwrite=True)
    cif_filename = 'cctbx_program.cif'
    assert os.path.exists(cif_filename)
    dm.process_model_file(cif_filename)
    assert dm.get_model(cif_filename).input_model_format_cif()
    os.remove(pdb_filename)
    os.remove(cif_filename)

    # test pdb_interpretation
    extract = mmtbx.model.manager.get_default_pdb_interpretation_params()
    extract.pdb_interpretation.use_neutron_distances = True
    dm.update_pdb_interpretation_for_model(test_filename, extract)
    # updating interpretation params resets any cached restraints manager
    assert dm.get_model(test_filename).restraints_manager is None
예제 #24
0
def test_01():
  """Regression test for map_model_manager.tls_from_map.

  Reads a map/model/ncs_spec triple from the local ``data`` directory,
  builds a map_model_manager with two identical half-maps, runs one
  iteration of tls_from_map, and pins the resulting TLS matrices and
  origin against previously recorded values.
  """

  # Source data

  data_dir = os.path.dirname(os.path.abspath(__file__))
  data_ccp4 = os.path.join(data_dir, 'data',
                          'non_zero_origin_map.ccp4')
  data_pdb = os.path.join(data_dir, 'data',
                          'non_zero_origin_model.pdb')
  data_ncs_spec = os.path.join(data_dir, 'data',
                          'non_zero_origin_ncs_spec.ncs_spec')

  # Read in data

  dm = DataManager(['ncs_spec','model', 'real_map', 'phil'])
  dm.set_overwrite(True)

  map_file=data_ccp4
  dm.process_real_map_file(map_file)
  mm = dm.get_real_map(map_file)

  model_file=data_pdb
  dm.process_model_file(model_file)
  model = dm.get_model(model_file)

  ncs_file=data_ncs_spec
  dm.process_ncs_spec_file(ncs_file)
  ncs = dm.get_ncs_spec(ncs_file)

  mmm=map_model_manager(
    model = model,
    map_manager_1 = mm.deep_copy(),
    map_manager_2 = mm.deep_copy(),
    ncs_object = ncs,
    wrapping = False)
  mmm.add_map_manager_by_id(
     map_id='external_map',map_manager=mmm.map_manager().deep_copy())
  mmm.set_resolution(3)
  mmm.set_log(sys.stdout)

  dc = mmm.deep_copy()


  # Model sharpening
  mmm = dc.deep_copy()
  tls_info=mmm.tls_from_map(n_bins=10,
    model_id = 'model',
    map_id = 'map_manager',
    iterations = 1,
   )
  tlso = tls_info.tlso_list[0]
  print ("t:", tlso.t)
  print ("l:", tlso.l)
  print ("s:", tlso.s)
  print ("origin:", tlso.origin)

  # expected values are data-dependent reference values recorded from a
  # previous run of this test
  assert approx_equal(tlso.t,
(0.7920199173476214, 0.742281514408794, 0.7103008342583756, -0.05199687072329786, 0.04301326317889638, -0.0032498605215769945))
  assert approx_equal(tlso.l,
(-0.0004103603606922303, -0.00042929108338180655, 0.0001519656028327732, -2.8489076942333132e-06, -6.23198622708519e-05, 3.694504506269563e-05))
  assert approx_equal(tlso.s,
(5.562000065270741e-09, -5.108813278707348e-10, -4.132731326301999e-09, 7.925572910527853e-10, -2.7018794222798323e-09, 1.0742616538181975e-09, 7.501166703517675e-10, -1.0661760561475498e-09, -2.860120638319051e-09))
  assert approx_equal(tlso.origin,
(-64.703319312974, -62.30575036040377, -63.74368724016462))


  print("TLS: ",tlso.t,tlso.l,tlso.s,tlso.origin)
예제 #25
0
def test_data_manager():
    """Exercise basic DataManager bookkeeping.

    Covers add/get/remove of models, default-model selection, phil
    export/import round trips, Sorry-raising error paths, and the
    write_*_file helpers.
    """
    manager = DataManager(['model'])

    manager.add_model('a', 'b')
    manager.add_model('c', 'd')
    # the first model added becomes the default
    assert manager.get_model() == 'b'
    assert manager.get_model('a') == 'b'
    assert manager.get_model('c') == 'd'
    assert manager.get_model_names() == ['a', 'c']

    assert manager.has_models()
    assert manager.has_models(exact_count=True, expected_n=2)
    assert not manager.has_models(expected_n=3, raise_sorry=False)

    # exporting phil
    exported = manager.export_phil_scope()
    assert len(exported.extract().data_manager.model) == 2

    # data tracking: mismatched counts raise Sorry when requested
    for kwargs in ({'expected_n': 3, 'raise_sorry': True},
                   {'exact_count': True, 'raise_sorry': True}):
        try:
            manager.has_models(**kwargs)
        except Sorry:
            pass

    manager.set_default_model('c')
    assert manager.get_model() == 'd'

    assert manager.get_model_names() in (['a', 'c'], ['c', 'a'])

    manager.remove_model('c')
    # every lookup of a missing/removed model raises Sorry
    for attempt in (lambda: manager.get_model(),
                    lambda: manager.get_model('missing'),
                    lambda: manager.set_default_model('missing')):
        try:
            attempt()
        except Sorry:
            pass

    manager = DataManager(datatypes=['sequence', 'phil'])
    assert manager.get_sequence_names() == []
    assert not hasattr(manager, 'get_model')

    # phil functions
    test_phil_str = '''
data_manager {
  phil_files = data_manager_test.eff
}
'''
    with open('data_manager_test.eff', 'w') as f:
        f.write(test_phil_str)

    # loading file with get function
    assert len(manager.get_phil_names()) == 0
    loaded = manager.get_phil('data_manager_test.eff')
    assert type(loaded) == libtbx.phil.scope
    assert 'data_manager_test.eff' in manager.get_phil_names()

    # loading file with phil
    manager = DataManager(datatypes=['phil'])
    manager.load_phil_scope(iotbx.phil.parse(test_phil_str))

    assert 'data_manager_test.eff' in manager.get_phil_names()
    assert manager.get_default_phil_name() == 'data_manager_test.eff'

    os.remove('data_manager_test.eff')

    # writing
    manager = DataManager(datatypes=['model', 'phil', 'sequence'])
    manager.add_model('a', 'b')
    manager.add_phil('c', 'd')
    manager.add_sequence('e', 'f')

    manager.write_model_file(manager.get_model(),
                             filename='a.dat', overwrite=True)
    manager.write_phil_file(manager.get_phil(),
                            filename='c.dat', overwrite=True)
    manager.write_sequence_file(manager.get_sequence(),
                                filename='e.dat', overwrite=True)

    with open('a.dat', 'r') as f:
        written = f.readlines()
    assert written[0] == 'b'

    for leftover in ('a.dat', 'c.dat', 'e.dat'):
        os.remove(leftover)
def test_01():
    """Basic map_model_manager origin-shift and map-generation test.

    Reads a map/model/ncs_spec triple from the local ``data`` directory,
    shifts the origin to (0,0,0) and checks the ncs object follows, shifts
    a model to and from the working frame, then generates a fresh map and
    model and pins a couple of reference values.
    """

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')
    data_ncs_spec = os.path.join(data_dir, 'data',
                                 'non_zero_origin_ncs_spec.ncs_spec')

    dm = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    dm.set_overwrite(True)

    # Read in map and model

    map_file = data_ccp4
    dm.process_real_map_file(map_file)
    mm = dm.get_real_map(map_file)

    model_file = data_pdb
    dm.process_model_file(model_file)
    model = dm.get_model(model_file)

    ncs_file = data_ncs_spec
    dm.process_ncs_spec_file(ncs_file)
    ncs = dm.get_ncs_spec(ncs_file)

    mmm = map_model_manager()
    mmm.add_map_manager(mm)
    mmm.add_model(model)
    mmm.add_ncs_object(ncs)

    original_ncs = mmm.ncs_object()
    assert approx_equal(
        (24.0528, 11.5833, 20.0004),
        tuple(original_ncs.ncs_groups()[0].translations_orth()[-1]),
        eps=0.1)

    # map origin not shifted yet
    assert tuple(mmm._map_manager.origin_shift_grid_units) == (0, 0, 0)

    # Shift origin to (0,0,0)
    mmm.shift_origin()
    assert tuple(mmm._map_manager.origin_shift_grid_units) == (100, 100, 100)

    mmm.write_model('s.pdb')
    mmm.write_map('s.mrc')

    # the ncs object must shift along with the map
    shifted_ncs = mmm.ncs_object()
    assert approx_equal(
        (-153.758, -74.044, -127.487),
        tuple(shifted_ncs.ncs_groups()[0].translations_orth()[-1]),
        eps=0.1)

    # Shift a model and shift it back

    model = mmm.model()
    shifted_model = mmm.shift_model_to_match_working_map(model=model)
    model_in_original_position = mmm.shift_model_to_match_original_map(
        model=shifted_model)
    assert (not approx_equal(
        model.get_sites_cart(), shifted_model.get_sites_cart(), out=None))
    assert approx_equal(model.get_sites_cart(),
                        model_in_original_position.get_sites_cart())

    # Generate a map and model

    mmm.generate_map()
    model = mmm.model()
    mm = mmm.map_manager()
    assert approx_equal(model.get_sites_cart()[0], (14.476, 10.57, 8.34),
                        eps=0.01)
    assert approx_equal(mm.map_data()[10, 10, 10], -0.0195, eps=0.001)
예제 #27
0
def Test(inFileName = None):
  """Self-test for mmtbx.probe AtomTypes lookups.

  :param inFileName: optional model file to read via the DataManager;
    when None (or empty), a small-molecule model is generated with
    map_model_manager instead.
  :raises AssertionError: on any failed check.
  """
  #========================================================================
  # Make sure we can fill in mmtbx.probe.ExtraAtomInfoList info.
  # Generate an example data model with a small molecule in it unless we
  # were given a file name on the command line.
  if inFileName is not None and len(inFileName) > 0:
    # Read a model from a file using the DataManager
    dm = DataManager()
    dm.process_model_file(inFileName)
    model = dm.get_model(inFileName)
  else:
    # Generate a small-molecule model using the map model manager
    mmm = map_model_manager()       #   get an initialized instance of the map_model_manager
    mmm.generate_map()              #   get a model from a generated small library model and calculate a map for it
    model = mmm.model()             #   get the model

  # Fill in an ExtraAtomInfoList with an entry for each atom in the hierarchy.
  # We reserve one slot per i_seq (up to the largest i_seq present) so we can
  # always index the entry for an atom directly.
  atoms = model.get_atoms()
  maxI = max(a.i_seq for a in atoms)
  extra = [probe.ExtraAtomInfo() for _ in range(maxI + 1)]

  # Traverse the hierarchy and look up the extra data to be filled in.
  at = AtomTypes()
  ph = model.get_hierarchy()
  for m in ph.models():
    for chain in m.chains():
      for rg in chain.residue_groups():
        for ag in rg.atom_groups():
          for a in ag.atoms():
            ei, warn = at.FindProbeExtraAtomInfo(a)
            extra[a.i_seq] = ei
            # User code should test for and print warnings
            #if len(warn) > 0:
            #  print(warn)

  #========================================================================
  # Find an Oxygen atom and ask for its radii with explicit Hydrogen, implicit Hydrogen,
  # and Nuclear radii.
  o = None
  ph = model.get_hierarchy()
  for a in ph.models()[0].atoms():
    if a.element.strip() == 'O':
      o = a
  assert o is not None, "AtomTypes.Test(): Could not find Oxygen (internal test failure)"
  explicitH = AtomTypes(useNeutronDistances = False,
                        useImplicitHydrogenDistances = False).FindProbeExtraAtomInfo(o)[0].vdwRadius
  implicitH = AtomTypes(useNeutronDistances = False,
                        useImplicitHydrogenDistances = True).FindProbeExtraAtomInfo(o)[0].vdwRadius
  neutronH = AtomTypes(useNeutronDistances = True,
                        useImplicitHydrogenDistances = False).FindProbeExtraAtomInfo(o)[0].vdwRadius
  assert explicitH != implicitH, "AtomTypes.Test(): Implicit and explicit Oxygen radii did not differ as expected"

  #========================================================================
  # Check MaximumVDWRadius, calling it twice to make sure both the cached and non-cached
  # results work.
  # Bug fix: the failure message previously called the bare name
  # MaximumVDWRadius(), which would raise NameError instead of reporting.
  for i in range(2):
    assert at.MaximumVDWRadius() == 2.5, "AtomTypes.Test(): Unexpected MaximumVDWRadius(): got "+str(at.MaximumVDWRadius())+", expected 2.5"

  #========================================================================
  # Check IsAromatic() to ensure it gives results when expected and not when not.
  aromaticChecks = [
      ['PHE', 'CE2', True],
      ['  U', 'HN3', True],
      ['ASN',   'O', False]
    ]
  for a in aromaticChecks:
    assert IsAromatic(a[0],a[1]) == a[2], "AtomTypes.Test(): {} {} not marked as aromatic {}".format(a[0],a[1],a[2])
예제 #28
0
def test_01():
    """Regression test for map_model_manager.tls_from_map (variant run).

    Reads a map/model/ncs_spec triple from the local ``data`` directory,
    builds a map_model_manager with two identical half-maps, runs one
    iteration of tls_from_map, and pins the resulting TLS matrices and
    origin against previously recorded values.
    """

    # Source data

    data_dir = os.path.dirname(os.path.abspath(__file__))
    data_ccp4 = os.path.join(data_dir, 'data', 'non_zero_origin_map.ccp4')
    data_pdb = os.path.join(data_dir, 'data', 'non_zero_origin_model.pdb')
    data_ncs_spec = os.path.join(data_dir, 'data',
                                 'non_zero_origin_ncs_spec.ncs_spec')

    # Read in data

    dm = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    dm.set_overwrite(True)

    map_file = data_ccp4
    dm.process_real_map_file(map_file)
    mm = dm.get_real_map(map_file)

    model_file = data_pdb
    dm.process_model_file(model_file)
    model = dm.get_model(model_file)

    ncs_file = data_ncs_spec
    dm.process_ncs_spec_file(ncs_file)
    ncs = dm.get_ncs_spec(ncs_file)

    mmm = map_model_manager(model=model,
                            map_manager_1=mm.deep_copy(),
                            map_manager_2=mm.deep_copy(),
                            ncs_object=ncs,
                            wrapping=False)
    mmm.add_map_manager_by_id(map_id='external_map',
                              map_manager=mmm.map_manager().deep_copy())
    mmm.set_resolution(3)
    mmm.set_log(sys.stdout)

    dc = mmm.deep_copy()

    # Model sharpening
    mmm = dc.deep_copy()
    tls_info = mmm.tls_from_map(
        n_bins=10,
        model_id='model',
        map_id='map_manager',
        iterations=1,
    )
    tlso = tls_info.tlso_list[0]
    print("t:", tlso.t)
    print("l:", tlso.l)
    print("s:", tlso.s)
    print("origin:", tlso.origin)
    # expected values are data-dependent reference values recorded from a
    # previous run of this test
    assert approx_equal(
        tlso.t,
        (0.6518723599417712, 0.6807846368236251, 0.6161941485135081,
         -0.04791178588048965, -0.0014794039180157132, 0.032774655367019095))
    assert approx_equal(tlso.l,
                        (-0.00020092054127279798, -9.557441568256003e-05,
                         -1.711699526358822e-05, -4.8893794663274e-05,
                         -2.026091444762368e-05, -2.194054393244713e-05))
    assert approx_equal(
        tlso.s,
        (1.3393493443427932e-09, 3.660975429526433e-10, -6.07051859483934e-10,
         4.1665859321329886e-10, -1.35856164931005e-09, 5.409502867516901e-10,
         -5.4005830782848e-10, -1.2017586805521653e-09,
         1.9212302485258283e-11))
    assert approx_equal(
        tlso.origin,
        (-64.70331931297407, -62.305747062422725, -63.74368724016457))

    print("TLS: ", tlso.t, tlso.l, tlso.s, tlso.origin)
예제 #29
0
def test_01():
    """Regression test: fit TLS parameters from a map and verify the values.

    Builds a map_model_manager from the bundled non-zero-origin map, model
    and NCS-spec files, runs tls_from_map once, and asserts that the fitted
    T, L, S tensors and the origin match previously recorded values.
    """

    # Locate the regression inputs that sit next to this script.
    here = os.path.dirname(os.path.abspath(__file__))
    map_path = os.path.join(here, 'data', 'non_zero_origin_map.ccp4')
    model_path = os.path.join(here, 'data', 'non_zero_origin_model.pdb')
    ncs_path = os.path.join(here, 'data',
                            'non_zero_origin_ncs_spec.ncs_spec')

    # One DataManager handles every input type needed below.
    data_manager = DataManager(['ncs_spec', 'model', 'real_map', 'phil'])
    data_manager.set_overwrite(True)

    data_manager.process_real_map_file(map_path)
    map_mgr = data_manager.get_real_map(map_path)

    data_manager.process_model_file(model_path)
    model = data_manager.get_model(model_path)

    data_manager.process_ncs_spec_file(ncs_path)
    ncs_obj = data_manager.get_ncs_spec(ncs_path)

    # Two deep copies of the same map serve as half-maps; one more copy is
    # registered under its own map id.
    mmm = map_model_manager(model=model,
                            map_manager_1=map_mgr.deep_copy(),
                            map_manager_2=map_mgr.deep_copy(),
                            ncs_object=ncs_obj,
                            wrapping=False)
    mmm.add_map_manager_by_id(map_id='external_map',
                              map_manager=mmm.map_manager().deep_copy())
    mmm.set_resolution(3)
    mmm.set_log(sys.stdout)

    pristine = mmm.deep_copy()

    # Model sharpening
    working = pristine.deep_copy()
    tls_info = working.tls_from_map(
        n_bins=10,
        model_id='model',
        map_id='map_manager',
        iterations=1,
    )
    tlso = tls_info.tlso_list[0]
    for label, value in (("t:", tlso.t), ("l:", tlso.l),
                         ("s:", tlso.s), ("origin:", tlso.origin)):
        print(label, value)

    # Reference values recorded from a trusted run; approx_equal tolerates
    # small floating-point drift.
    assert approx_equal(
        tlso.t,
        (1.180418902258779, 1.1747521845606608, 1.178996799712174,
         -0.08474662674769494, -0.022609295693646402, 0.0649209491344932))
    assert approx_equal(
        tlso.l,
        (-0.002159404807991249, -0.002107964765763024, 0.0008301439376854558,
         -5.973347993775719e-05, -0.000134276871934738, -9.05515898670584e-05))
    assert approx_equal(
        tlso.s,
        (2.9348223335616302e-08, 5.52441087256425e-09, -5.382459681103171e-09,
         4.3530347434547015e-09, -2.3559464233595e-08, 4.217968590464982e-09,
         -4.380707049750269e-09, 1.9232725033868253e-09,
         -5.788759082799497e-09))
    assert approx_equal(
        tlso.origin,
        (-64.70331931297399, -62.30573551948903, -63.743687240164604))

    print("TLS: ", tlso.t, tlso.l, tlso.s, tlso.origin)
예제 #30
0
def run_comparison(fileName1: str, fileName2: str, thresh: float, verbosity: int = 10):
  """Compare two model files atom by atom and print any differences.

  Reads a model from each file with a DataManager, walks the two
  hierarchies in lockstep (models -> chains -> residue groups -> atom
  groups -> atoms), and reports atoms present in only one file or whose
  positions differ by more than *thresh*.

  Mismatched counts of models, chains, residue groups, or atom groups are
  fatal: an error is printed and the process exits with status 2.

  :param fileName1: path of the first model file.
  :param fileName2: path of the second model file.
  :param thresh: distance above which a matched atom pair is reported
    (in the models' coordinate units, presumably Angstroms -- confirm
    with callers).
  :param verbosity: progressively finer progress output (>=1 files,
    >=2 models, >=3 chains, >=4 residue groups, >=5 atom groups).
  """
  # Read the models from each file.
  dm = DataManager()
  dm.set_overwrite(True)

  if verbosity >= 1:
    print('Reading model from',fileName1)
  dm.process_model_file(fileName1)
  m1 = dm.get_model(fileName1)

  if verbosity >= 1:
    print('Reading model from',fileName2)
  dm.process_model_file(fileName2)
  m2 = dm.get_model(fileName2)

  # Compare the two models and report on any differences.
  # Differences in numbers of models, chains, or residues are fatal.
  # Differences in atoms are what is reported.
  # NOTE(review): containers are paired by index, so both files must list
  # models/chains/residues in the same order -- confirm that assumption.
  models1 = m1.get_hierarchy().models()
  models2 = m2.get_hierarchy().models()
  if len(models1) != len(models2):
    print('Error: Different number of models:',len(models1),'vs.',len(models2))
    sys.exit(2)
  for i in range(len(models1)):
    if verbosity >= 2:
      print('Model',models1[i].id)
    chains1 = models1[i].chains()
    chains2 = models2[i].chains()
    if len(chains1) != len(chains2):
      print('Error: Different number of chains:',len(chains1),'vs.',len(chains2),'in model',models1[i].id)
      sys.exit(2)
    for j in range(len(chains1)):
      if verbosity >= 3:
        print('  Chain',chains1[j].id)
      r1 = chains1[j].residue_groups()
      r2 = chains2[j].residue_groups()
      if len(r1) != len(r2):
        print('Error: Different number of residues:',len(r1),'vs.',len(r2),'in chain',chains1[j].id)
        sys.exit(2)
      for k in range(len(r1)):
        if verbosity >= 4:
          print('    Residue group',r1[k].resseq)
        ag1 = r1[k].atom_groups()
        ag2 = r2[k].atom_groups()
        if len(ag1) != len(ag2):
          print('Error: Different number of atom groups:',len(ag1),'vs.',len(ag2),'in residue group',r1[k].resseq)
          sys.exit(2)
        for m in range(len(ag1)):
          if verbosity >= 5:
            print('      Atom group',ag1[m].resname)
          as1 = ag1[m].atoms()
          as2 = ag2[m].atoms()

          # Atom names are unique within residues, so we can use that to
          # match atoms in one file from those in the other and print out
          # missing atoms and differences in positions.
          # We only check differences in positions for matches in the first
          # list because they will be duplicated by a check for the second.
          for a1 in as1:
            match = ag2[m].get_atom(a1.name.strip())
            if match is None:
              print(ag1[m].resname+' '+chains1[j].id+r1[k].resseq,'Only in first:',a1.name.strip())
              continue
            dist = a1.distance(match)
            if dist > thresh:
              print(ag1[m].resname+' '+chains1[j].id+r1[k].resseq,'Distance between',a1.name.strip(),"{:.2f}".format(dist))
          for a2 in as2:
            match = ag1[m].get_atom(a2.name.strip())
            if match is None:
              # NOTE(review): the label uses the first file's atom group /
              # chain / residue ids even for second-file-only atoms.
              print(ag1[m].resname+' '+chains1[j].id+r1[k].resseq,'Only in second:',a2.name.strip())