Example #1
 def testPdbWriteXtal(self):
     """ Test PDB file writing from a Xtal structure """
     pdbfile = read_PDB(self.pdb)
     self._check4lzt(pdbfile)
     output = StringIO()
     pdbfile.write_pdb(output, renumber=False)
     output.seek(0)
     pdbfile2 = read_PDB(output)
     self._check4lzt(pdbfile2, check_meta=False)
     self._compareInputOutputPDBs(pdbfile, pdbfile2)
     output = reset_stringio(output)
     write_PDB(pdbfile, output)
     output.seek(0)
     pdbfile3 = read_PDB(output)
     self._check4lzt(pdbfile3, check_meta=False)
     self._compareInputOutputPDBs(pdbfile, pdbfile3, True)
     # Now check that renumbering is done correctly. 4lzt skips residues 130
     # through 200
     for res1, res2 in zip(pdbfile.residues, pdbfile3.residues):
         if res1.idx < 129:
             self.assertEqual(res1.number, res2.number)
         elif res1.idx < 135:
             self.assertEqual(res1.number, res2.number + 71)
         else:
             # Some residue numbers are skipped in the water numbering
             self.assertGreaterEqual(res1.number, res2.number + 71 + 794)
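
The tests in this listing call a reset_stringio() helper that is not shown here. A minimal sketch of what such a helper presumably does (an assumption, not the project's actual utility) is to rewind and empty the buffer so it can be reused:

def reset_stringio(buf):
    # Rewind and empty a StringIO so the next write starts from a clean buffer
    buf.seek(0)
    buf.truncate()
    return buf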
Example #2
 def testPdbWriteAltlocOptions(self):
     """ Test PDB file writing with different altloc options """
     pdbfile = read_PDB(self.pdb)
     self._check4lzt(pdbfile)
     output = StringIO()
     pdbfile.write_pdb(output, renumber=False, altlocs='all')
     output.seek(0)
     pdbfile2 = read_PDB(output)
     self._check4lzt(pdbfile2, check_meta=False)
     self._compareInputOutputPDBs(pdbfile, pdbfile2)
     # Check that 'first' option works
     output = reset_stringio(output)
     pdbfile.write_pdb(output, renumber=False, altlocs='first')
     output.seek(0)
     pdbfile3 = read_PDB(output)
     self._check4lzt(pdbfile3, check_meta=False, has_altloc=False)
     self._compareInputOutputPDBs(pdbfile, pdbfile3, altloc_option='first')
     # Check that the 'occupancy' option works
     output = reset_stringio(output)
     write_PDB(pdbfile, output, renumber=False, altlocs='occupancy')
     output.seek(0)
     pdbfile4 = read_PDB(output)
     self._check4lzt(pdbfile4, check_meta=False, has_altloc=False)
     self._compareInputOutputPDBs(pdbfile, pdbfile4, altloc_option='occupancy')
     # Double-check 'first' vs. 'occupancy'. Residue 85 (SER) has a conformer
     # A that has an occupancy of 0.37 and conformer B with occupancy 0.63
     self.assertEqual(pdbfile3.residues[84][4].xx, -4.162)
     self.assertEqual(pdbfile4.residues[84][4].xx, -4.157)
Example #3
 def testSegidHandling(self):
     """ Test handling of CHARMM-specific SEGID identifier (r/w) """
     pdbfile = read_PDB(self.overflow2)
     allsegids = set(['PROA', 'PROB', 'CARA', 'CARE', 'CARC', 'CARD', 'CARB',
                      'MEMB', 'TIP3', 'POT', 'CLA'])
     foundsegids = set()
     for atom in pdbfile.atoms:
         self.assertTrue(hasattr(atom, 'segid'))
         foundsegids.add(atom.segid)
     self.assertEqual(foundsegids, allsegids)
     self.assertEqual(pdbfile.atoms[0].segid, 'PROA')
     self.assertEqual(pdbfile.atoms[5161].segid, 'PROA')
     self.assertEqual(pdbfile.atoms[5162].segid, 'PROB')
     self.assertEqual(pdbfile.atoms[-1].segid, 'CLA')
     f = get_fn('pdb_segid_test1.pdb', written=True)
     f2 = get_fn('pdb_segid_test2.pdb', written=True)
     pdbfile.write_pdb(f)
     pdbfile2 = read_PDB(f)
     for atom in pdbfile2.atoms:
         self.assertFalse(hasattr(atom, 'segid'))
     pdbfile.write_pdb(f2, charmm=True)
     pdbfile3 = read_PDB(f2)
     for atom in pdbfile3.atoms:
         self.assertTrue(hasattr(atom, 'segid'))
         self.assertEqual(atom.segid, pdbfile.atoms[atom.idx].segid)
Example #4
 def testAscii(self):
     """ Test PDB file parsing """
     self._check4lzt(read_PDB(self.pdb))
     # The PDB file with multiple models
     pdbfile = read_PDB(open(self.models))
     all_crds = pdbfile.get_coordinates('all')
     self.assertEqual(all_crds.shape[0], 20)
     np.testing.assert_allclose(all_crds[0][0], [-8.886, -5.163, 9.647])
     np.testing.assert_allclose(all_crds[19][-1], [-12.051, 5.205, -2.146])
Example #5
 def testPdbWriteSimple(self):
     """ Test PDB file writing on a very simple input structure """
     pdbfile = read_PDB(self.simple)
     self.assertEqual(len(pdbfile.atoms), 33)
     self.assertEqual(len(pdbfile.residues), 3)
     output = StringIO()
     pdbfile.write_pdb(output)
     output.seek(0)
     pdbfile2 = read_PDB(output)
     self.assertEqual(len(pdbfile2.atoms), 33)
     self.assertEqual(len(pdbfile2.residues), 3)
     self._compareInputOutputPDBs(pdbfile, pdbfile2)
Example #6
 def testPdbWriteModels(self):
     """ Test PDB file writing from NMR structure with models """
     pdbfile = read_PDB(self.models)
     self.assertEqual(pdbfile.get_coordinates('all').shape, (20, 451, 3))
     self.assertEqual(len(pdbfile.atoms), 451)
     output = StringIO()
     write_PDB(pdbfile, output)
     output.seek(0)
     pdbfile2 = read_PDB(output)
     self.assertEqual(len(pdbfile2.atoms), 451)
     self.assertEqual(pdbfile2.get_coordinates('all').shape, (20, 451, 3))
     self._compareInputOutputPDBs(pdbfile, pdbfile2)
Example #7
 def testPositions(self):
     """ Tests that positions are Vec3's with units """
     from parmed import unit as u
     from parmed import Vec3
     pdbfile = read_PDB(open(self.models))
     self.assertIsInstance(pdbfile.positions[0], u.Quantity)
     self.assertIsInstance(pdbfile.positions[0].value_in_unit(u.angstroms), Vec3)
Example #8
def com2str(com_pdb_str):
    import parmed
    import tempfile
    fp = tempfile.TemporaryFile(mode='w+t')
    fp.writelines(com_pdb_str)
    fp.seek(0)
    return parmed.read_PDB(fp)
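
A hedged usage sketch for com2str() above: it assumes parmed is importable and feeds the helper an in-memory PDB string (here a single hard-coded ATOM record for brevity).

minimal_pdb = (
    "ATOM      1  N   ALA A   1      11.104   6.134  -6.504  1.00  0.00           N\n"
    "END\n"
)
structure = com2str(minimal_pdb)   # returns a parmed.Structure
print(len(structure.atoms))        # expect 1 atom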
Example #9
 def testMol2FileWithNoTypeNames(self):
     """ Tests writing a Mol2 without types uses names instead """
     struct = read_PDB(get_fn('2koc.pdb'))
     output = StringIO()
     formats.Mol2File.write(struct, output)
     output.seek(0)
     mol2 = formats.Mol2File.parse(output, structure=True)
Example #10
 def testAnisouWrite(self):
     """ Tests that write_PDB properly writes ANISOU records """
     def check_aniso(pdbfile):
         aniso1 = [2066, 1204, 1269, 44, 126, 191]
         aniso2 = [2090, 1182, 921, 46, 64, 60]
         aniso3 = [3057, 3932, 5304, 126, -937, -661]
         self.assertEqual(len(aniso1), len(pdbfile.atoms[0].anisou))
         for x, y in zip(aniso1, pdbfile.atoms[0].anisou):
             self.assertEqual(x/10000, y)
         self.assertEqual(len(aniso2), len(pdbfile.atoms[1].anisou))
         for x, y in zip(aniso2, pdbfile.atoms[1].anisou):
             self.assertEqual(x/10000, y)
         self.assertEqual(len(aniso3), len(pdbfile.atoms[-1].anisou))
         for x, y in zip(aniso3, pdbfile.atoms[-1].anisou):
             self.assertEqual(x/10000, y)
     pdbfile = read_PDB(self.pdb)
     check_aniso(pdbfile)
     output = StringIO()
     pdbfile.write_pdb(output)
     output.seek(0)
     pdbfile2 = read_PDB(output)
     # Should have no anisou records, since by default they are not written
     for atom in pdbfile2.atoms:
         self.assertIs(atom.anisou, None)
     output = reset_stringio(output)
     pdbfile.write_pdb(output, renumber=False, write_anisou=True)
     output.seek(0)
     # This one should have anisou records
     pdbfile3 = read_PDB(output)
     self._compareInputOutputPDBs(pdbfile, pdbfile3)
     for a1, a2 in zip(pdbfile.atoms, pdbfile3.atoms):
         if has_numpy():
             self.assertEqual(a1.anisou.shape, a2.anisou.shape)
         else:
             self.assertEqual(len(a1.anisou), len(a2.anisou))
         for x, y in zip(a1.anisou, a2.anisou):
             self.assertAlmostEqual(x, y, delta=1e-4)
         self.assertEqual(len(a1.other_locations), len(a2.other_locations))
         for key in sorted(a1.other_locations.keys()):
             oa1 = a1.other_locations[key]
             oa2 = a2.other_locations[key]
             if has_numpy():
                 self.assertEqual(oa1.anisou.shape, oa2.anisou.shape)
             else:
                 self.assertEqual(len(oa1.anisou), len(oa2.anisou))
             for x, y in zip(oa1.anisou, oa2.anisou):
                 self.assertAlmostEqual(x, y, delta=1e-4)
Example #11
 def label_receptor(self):
     structure = parmed.read_PDB(self.rec_path)
     self.addResLabels({'resi': [n.number for n in structure.residues]}, {
         'font': 'Arial',
         'fontColor': 'white',
         'showBackground': 'false',
         'fontSize': 10
     })
Example #12
 def testRegularOverflow(self):
     """ Test PDB file where atom number goes to ***** after 99999 """
     pdbfile = read_PDB(self.overflow2)
     self.assertEqual(len(pdbfile.atoms), 114277)
     self.assertEqual(len(pdbfile.residues), 25042)
     for i, atom in enumerate(pdbfile.atoms):
         self.assertEqual(atom.number, i+1)
         self.assertEqual(atom.idx, i)
Example #13
    def add_hydrogen(self, no_reduce_db=False):
        ''' Use the reduce program to add hydrogens

        Parameters
        ----------
        no_reduce_db : bool, optional
            If True, run reduce with an empty dummy database ('-DB ./dummydb')

        Returns
        -------
        parm : parmed.Structure

        Requires
        --------
        reduce
        '''
        def touch(fname, times=None):
            with open(fname, 'a'):
                os.utime(fname, times)

        try:
            if no_reduce_db:
                touch('./dummydb')
            fileobj = StringIO()
            self.write_pdb(fileobj)
            fileobj.seek(0)
            reduce = os.path.join(os.getenv('AMBERHOME', ''), 'bin', 'reduce')
            if not os.path.exists(reduce):
                reduce = 'reduce'
            if no_reduce_db:
                process = subprocess.Popen([
                    reduce, '-BUILD', '-NUC', '-NOFLIP', '-DB ./dummydb', '-'
                ],
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
            else:
                process = subprocess.Popen(
                    [reduce, '-BUILD', '-NUC', '-NOFLIP', '-'],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            out, err = process.communicate(str.encode(fileobj.read()))
            out = out.decode()
            err = err.decode()
            if process.wait():
                logger.error("REDUCE returned non-zero exit status: "
                             "See reduce_info.log for more details")
            # print out the reduce log even if it worked
            with open('reduce_info.log', 'w') as fh:
                fh.write(err)
            pdbh = StringIO(out)
            # not using load_file since it does not read StringIO
            self.parm = parmed.read_PDB(pdbh)
        finally:
            fileobj.close()
            if no_reduce_db:
                os.unlink('./dummydb')
        return self
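
Example #29 below shows how pdb4amber's run() drives this method; a hedged stand-alone sketch (assuming pdb4amber exposes AmberPDBFixer at the package level and the reduce binary is on PATH or under $AMBERHOME/bin, with placeholder file names) would be:

import parmed
from pdb4amber import AmberPDBFixer   # assumption: top-level import works in your pdb4amber version

parm = parmed.read_PDB('input.pdb')               # 'input.pdb' is a placeholder input file
fixer = AmberPDBFixer(parm)
fixer.add_hydrogen(no_reduce_db=False)            # shells out to reduce; see reduce_info.log on errors
fixer.parm.save('input_h.pdb', overwrite=True)    # write the protonated structure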
Example #14
def check_str(structure, ref=False, skip=False):
    if isinstance(structure, str):
        refstr = parmed.read_PDB(structure)
    else:
        refstr = structure

    previous = 0
    ind = 1
    res_dict = {}
    duplicates = []
    for res in refstr.residues:
        if 'LP' in res.name:
            GMXMMPBSA_ERROR(
                'The LP pseudo-atom is not supported. Please remove them following these instructions: '
                'https://valdes-tresanco-ms.github.io/gmx_MMPBSA/examples/Protein_ligand_LPH_atoms_CHARMMff/'
            )
        if res.chain == '':
            if ref:
                GMXMMPBSA_ERROR(
                    'The reference structure used is inconsistent. The following residue does not have a '
                    f'chain ID: {res.number}:{res.name}')
            elif not previous:
                res_dict[ind] = [[res.number, res.name, res.insertion_code]]
            elif res.number - previous in [0, 1]:
                res_dict[ind].append(
                    [res.number, res.name, res.insertion_code])
            else:
                ind += 1
                res_dict[ind] = [[res.number, res.name, res.insertion_code]]
            previous = res.number
        elif res.chain not in res_dict:
            res_dict[res.chain] = [[res.number, res.name, res.insertion_code]]
        else:
            res_dict[res.chain].append(
                [res.number, res.name, res.insertion_code])

    for chain, resl in res_dict.items():
        res_id_list = [[x, x2] for x, x1, x2 in resl]
        duplicates.extend(f'{chain}:{resl[c][0]}:{resl[c][1]}:{resl[c][2]}'
                          for c, x in enumerate(res_id_list)
                          if res_id_list.count(x) > 1)

    if ref:
        if duplicates:
            GMXMMPBSA_ERROR(
                f'The reference structure used is inconsistent. The following residues are duplicates:\n'
                f' {", ".join(duplicates)}')
    elif skip:
        if duplicates:
            return refstr
    elif duplicates:
        logging.warning(
            f'The complex structure used is inconsistent. The following residues are duplicates:\n'
            f' {", ".join(duplicates)}')
    return refstr
Example #15
    def testLoadStruct(self):
        """ Test load_rosetta against read_PDB"""

        init()
        pose = pose_from_sequence(3*'A')

        struct = load_rosetta(pose)
        pdb = read_PDB(get_fn('ala_ala_ala.pdb'))

        self.assertEqual(len(struct.atoms), len(pdb.atoms))
        self.assertEqual(len(struct.residues), len(pdb.residues))
Example #16
def test_fetch_pdbid_and_use_reduce():
    ''' e.g: pdb4amber 1tsu --pdbid --reduce'''
    pdb_fn = '1tsu'
    command = ['pdb4amber', pdb_fn, '--pdbid', '--reduce']

    with tempfolder():
        output = subprocess.check_output(command).decode()
        input_pdb = StringIO(output)
        input_pdb.seek(0)
        parm = pmd.read_PDB(input_pdb)
        assert len(parm.atoms) == 3174
Example #17
def test_fetch_pdbid():
    ''' e.g: pdb4amber 1l2y --pdbid '''
    pdb_fn = '1l2y'
    command = ['pdb4amber', pdb_fn, '--pdbid']

    with tempfolder():
        output = subprocess.check_output(command).decode()
        input_pdb = StringIO(output)
        input_pdb.seek(0)
        parm = pmd.read_PDB(input_pdb)
        assert len(parm.atoms) == 304
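
The pdb4amber tests above and below rely on tempfolder() and get_fn() helpers that are not part of this listing. A plausible minimal sketch of tempfolder() (an assumption about its behavior: run the with-block inside a throw-away working directory) is:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def tempfolder():
    # Execute the body of the with-block inside a temporary directory, then restore the cwd
    old_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as tmpdir:
        os.chdir(tmpdir)
        try:
            yield tmpdir
        finally:
            os.chdir(old_cwd)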
Example #18
def read_pdb(f):
    """Parse a PDB file (using the parmEd parser) and return a molecule

    Args:
        f (file): file-like object containing the PDB file

    Returns:
        moldesign.Molecule: parsed molecule
    """
    parmedmol = parmed.read_PDB(f)
    mol = parmed_to_mdt(parmedmol)
    return mol
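
A hedged usage sketch for the moldesign helper above; 'protein.pdb' is a placeholder path and moldesign (with its parmed_to_mdt converter) must be installed.

with open('protein.pdb') as f:     # any file-like object with PDB text works
    mol = read_pdb(f)
print(len(mol.atoms))              # the parsed moldesign.Molecule exposes its atom list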
Example #19
def test_simplest_command_pdb4amber_mypdb():
    # pdb4amber my.pdb
    pdb_fn = get_fn('2igd/2igd.pdb')
    command = ['pdb4amber', pdb_fn]

    with tempfolder():
        output = subprocess.check_output(
            ' '.join(command), shell=True).decode()
        input_pdb = StringIO(output)
        input_pdb.seek(0)
        parm = pmd.read_PDB(input_pdb)
        assert len(parm.atoms) == 574
Example #20
def read_pdb(f):
    """Parse a PDB file (using the parmEd parser) and return a molecule

    Args:
        f (file): file-like object containing the PDB file

    Returns:
        moldesign.Molecule: parsed molecule
    """
    parmedmol = parmed.read_PDB(f)
    mol = parmed_to_mdt(parmedmol)
    return mol
Example #21
    def data_context_menu(self, point):

        index = self.treeWidget.indexAt(point)
        if not index.isValid():
            return
        item = self.treeWidget.itemAt(point)
        name = item.text(0)  # The text of the node.
        self.cont_menu = QMenu(self.treeWidget)
        save_line_csv = None
        save_bar_csv = None
        save_heatmap_csv = None
        save_pdb = None
        if 1 in item.col_box:
            save_line_csv = self.cont_menu.addAction(f"Save {item.text(0)} CSV (Line)")
        if 2 in item.col_box:
            save_bar_csv = self.cont_menu.addAction(f"Save {item.text(0)} CSV (Bar)")
        if 3 in item.col_box:
            save_heatmap_csv = self.cont_menu.addAction(f"Save {item.text(0)} CSV (Heatmap)")
        if 4 in item.col_box:
            save_pdb = self.cont_menu.addAction("Save PDB")
        action = self.cont_menu.exec_(self.treeWidget.mapToGlobal(point))
        if save_line_csv and action == save_line_csv:
            item.gmxMMPBSA_current_data.line_plot_dat.to_csv(item.syspath.parent.joinpath(
                item.chart_subtitle.replace(' | ', '_') + '_line.csv'), index=False)
        elif save_bar_csv and action == save_bar_csv:
            item.gmxMMPBSA_current_data.bar_plot_dat.mean().to_csv(item.syspath.parent.joinpath(
                item.chart_subtitle.replace(' | ', '_') + '_bar.csv'), index=True)
        elif save_heatmap_csv and action == save_heatmap_csv:
            item.gmxMMPBSA_current_data.heatmap_plot_dat.to_csv(item.syspath.parent.joinpath(
                item.chart_subtitle.replace(' | ', '_') + '_heatmap.csv'), index=True)
        elif save_pdb and action == save_pdb:
            if hasattr(item.app.FILES, 'complex_fixed'):
                com_pdb = item.syspath.parent.joinpath(item.app.FILES.complex_fixed)
            else:
                self.statusbar.showMessage(f'{item.app.FILES.prefix + "FIXED_COM.pdb"} does not exist. The modified PDB file '
                                f'may be inconsistent. Please consider using the latest version of gmx_MMPBSA', 20000)
                com_pdb = item.syspath.parent.joinpath(item.app.FILES.prefix + 'COM.pdb')

            com_pdb_str = parmed.read_PDB(com_pdb.as_posix())
            res_dict = item.gmxMMPBSA_current_data.bar_plot_dat.mean().to_dict()
            for res in com_pdb_str.residues:
                res_notation = f'{res.chain}:{res.name}:{res.number}'
                if res_notation in res_dict:
                    res_energy = res_dict[res_notation]
                else:
                    res_energy = 0.00
                for at in res.atoms:
                    at.bfactor = res_energy
            output_path = com_pdb.parent.joinpath(f'{item.sysname}_energy2bfactor.pdb')
            com_pdb_str.save(output_path.as_posix(), 'pdb', True, renumber=False)
Example #22
def test_keep_altlocs():
    ''' e.g: pdb4amber 2igd.pdb --keep-altlocs --reduce'''
    pdb_fn = get_fn('2igd/2igd.pdb')
    command = ['pdb4amber', pdb_fn, '--keep-altlocs', '--reduce']

    with tempfolder():
        output = subprocess.check_output(command).decode()
        input_pdb = StringIO(output)
        input_pdb.seek(0)
        parm = pmd.read_PDB(input_pdb)
        res4 = parm.residues[4]
        for atom in res4.atoms:
            if atom.name.startswith('CB') or atom.name.startswith('CG'):
                assert atom.other_locations
Example #23
def test_stdin_stdout_with_reduce():
    ''' e.g: cat my.pdb | pdb4amber --reduce '''
    pdb_fn = get_fn('2igd/2igd.pdb')
    command = ['cat', pdb_fn, '|', 'pdb4amber', '--reduce']

    with tempfolder():
        # use shell=True since check_output return exit 1 with |
        # not sure why.
        output = subprocess.check_output(
            ' '.join(command), shell=True).decode()
        input_pdb = StringIO(output)
        input_pdb.seek(0)
        parm = pmd.read_PDB(input_pdb)
        assert len(parm.atoms) == 1033
Example #24
 def _check4lzt(self, cif):
     pdb = read_PDB(self.lztpdb)
     self.assertEqual(len(cif.atoms), len(pdb.atoms))
     nextra = 0
     for a1, a2 in zip(cif.atoms, pdb.atoms):
         self.assertEqual(a1.name, a2.name)
         self.assertEqual(a1.number + nextra, a2.number)
         self.assertEqual(len(a1.anisou), len(a2.anisou))
         for x, y in zip(a1.anisou, a2.anisou):
             self.assertEqual(x, y)
         self.assertEqual(a1.altloc, a2.altloc)
         self.assertEqual(len(a1.other_locations), len(a2.other_locations))
         self.assertEqual(a1.residue.name, a2.residue.name)
         self.assertEqual(a1.residue.number, a2.residue.number)
         # TER cards consume a serial number in the PDB file, but *not* in a
         # CIF file.
         if a2.residue.ter and a2 is a2.residue.atoms[-1]:
             nextra += 1
     # Check the unit cell info
     self.assertEqual(cif.box[0], 27.240)
     self.assertEqual(cif.box[1], 31.870)
     self.assertEqual(cif.box[2], 34.230)
     self.assertEqual(cif.box[3], 88.520)
     self.assertEqual(cif.box[4], 108.53)
     self.assertEqual(cif.box[5], 111.89)
     # Check the metadata now
     self.assertEqual(cif.experimental, 'X-RAY DIFFRACTION')
     self.assertEqual(cif.authors,
             'Walsh, M.A., Schneider, T., Sieker, L.C., Dauter, Z., '
             'Lamzin, V., Wilson, K.S.')
     self.assertEqual(cif.title,
             'Refinement of triclinic hen egg-white lysozyme at atomic '
             'resolution.; Refinement of Triclinic Lysozyme: I. Fourier '
             'and Least-Squares Methods; Refinement of Triclinic Lysozyme: '
             'II. The Method of Stereochemically Restrained Least Squares')
     self.assertEqual(cif.journal,
             'Acta Crystallogr.,Sect.D; Acta Crystallogr.,Sect.B; '
             'Acta Crystallogr.,Sect.B')
     self.assertEqual(cif.journal_authors,
             'Walsh, M.A., Schneider, T.R., Sieker, L.C., Dauter, Z., '
             'Lamzin, V.S., Wilson, K.S., Hodsdon, J.M., Brown, G.M., '
             'Jensen, L.H., Ramanadham, M.')
     self.assertEqual(cif.year, '1998, 1990, 1990')
     self.assertEqual(cif.page, '522, 54, 63')
     self.assertEqual(cif.keywords, ['HYDROLASE', 'O-GLYCOSYL',
                                     'GLYCOSIDASE'])
     self.assertEqual(cif.volume, '54, 46, 46')
     self.assertEqual(cif.doi, '10.1107/S0907444997013656')
     self.assertEqual(cif.pmid, '9761848')
     self.assertEqual(cif.resolution, 0.95)
Example #25
 def testAnisouRead(self):
     """ Tests that read_PDB properly reads ANISOU records """
     pdbfile = read_PDB(self.pdb)
     aniso1 = [2066, 1204, 1269, 44, 126, 191] # first atom's ANISOU record
     aniso2 = [2090, 1182, 921, 46, 64, 60]    # second atom's ANISOU record
     aniso3 = [3057, 3932, 5304, 126, -937, -661] # last atom's ANISOU
     self.assertEqual(len(aniso1), len(pdbfile.atoms[0].anisou))
     for x, y in zip(aniso1, pdbfile.atoms[0].anisou):
         self.assertEqual(x/10000, y)
     self.assertEqual(len(aniso2), len(pdbfile.atoms[1].anisou))
     for x, y in zip(aniso2, pdbfile.atoms[1].anisou):
         self.assertEqual(x/10000, y)
     self.assertEqual(len(aniso3), len(pdbfile.atoms[-1].anisou))
     for x, y in zip(aniso3, pdbfile.atoms[-1].anisou):
         self.assertEqual(x/10000, y)
Example #26
 def old(self, no_reduce_db):
     try:
         if no_reduce_db:
             touch('./dummydb')
         fileobj = StringIO()
         self.write_pdb(fileobj)
         fileobj.seek(0)
         reduce = os.path.join(os.getenv('LIBTBX_BUILD'), 'reduce', 'exe',
                               'reduce')
         if not os.path.exists(reduce):
             reduce = 'phenix.reduce'
         cmd = [reduce, '-BUILD', '-NUC', '-NOFLIP', '-DB ./dummydb', '-']
         if no_reduce_db:
             process = subprocess.Popen([
                 reduce, '-BUILD', '-NUC', '-NOFLIP', '-DB ./dummydb', '-'
             ],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
         else:
             process = subprocess.Popen(
                 [reduce, '-BUILD', '-NUC', '-NOFLIP', '-'],
                 stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE)
         out, err = process.communicate(str.encode(fileobj.read()))
         out = out.decode()
         err = err.decode()
         if process.wait():
             logger.error("REDUCE returned non-zero exit status: "
                          "See reduce_info.log for more details")
         # print out the reduce log even if it worked
         with open('reduce_info.log', 'w') as fh:
             fh.write(err)
         pdbh = StringIO(out)
         # not using load_file since it does not read StringIO
         print('-' * 80)
         print(pdbh)
         print('-' * 80)
         self.parm = parmed.read_PDB(pdbh)
     finally:
         fileobj.close()
         if no_reduce_db:
             os.unlink('./dummydb')
     return self
Example #27
    def add_hydrogen(self, no_reduce_db=False):
        ''' Use the reduce program to add hydrogens

        Parameters
        ----------
        no_reduce_db : bool, optional
            If True, run reduce with an empty dummy database ('-DB ./dummydb')

        Returns
        -------
        parm : parmed.Structure

        Requires
        --------
        reduce
        '''
        def touch(fname, times=None):
            with open(fname, 'a'):
                os.utime(fname, times)

        from mmtbx.utils import run_reduce_with_timeout

        parameters = '-BUILD -NUC -NOFLIP'
        if no_reduce_db:
            touch('./dummydb')
            parameters += ' -DB ./dummydb'
        parameters += ' -'

        fileobj = StringIO()
        self.write_pdb(fileobj)
        fileobj.seek(0)

        reduce_out = run_reduce_with_timeout(
            parameters=parameters,
            stdin_lines=fileobj.read(),
            stdout_splitlines=False,
        )
        assert reduce_out.return_code == 0

        pdbh = StringIO()
        pdbh.write(reduce_out.stdout_buffer)
        pdbh.seek(0)
        self.parm = parmed.read_PDB(pdbh)
        return self
Example #28
 def testPdbBigCoordinates(self):
     """ Test proper PDB coordinate parsing for large coordinates """
     pdbfile = read_PDB(get_fn('bigz.pdb'))
     self.assertAlmostEqual(pdbfile.coordinates[0,0], -100.024)
     self.assertAlmostEqual(pdbfile.coordinates[0,1], -100.103)
     self.assertAlmostEqual(pdbfile.coordinates[0,2], -100.101)
Example #29
def run(
    arg_pdbout,
    arg_pdbin,
    arg_nohyd=False,
    arg_dry=False,
    arg_prot=False,
    arg_strip_atom_mask=None,
    arg_mutate_string=None,
    arg_constph=False,
    arg_mostpop=False,
    arg_reduce=False,
    arg_no_reduce_db=False,
    arg_model=0,
    arg_add_missing_atoms=False,
    arg_elbow=False,
    arg_logfile='pdb4amber.log',
    arg_keep_altlocs=False,
    arg_leap_template=False,
    arg_conect=True,
    arg_noter=False,
):

    # always reset handlers to avoid duplication if run method is called more
    # than once
    logger.handlers = []
    if isinstance(arg_logfile, string_types):
        logfile_handler = logging.FileHandler(arg_logfile)
    elif hasattr(arg_logfile, 'write'):
        logfile_handler = logging.StreamHandler(arg_logfile)
    else:
        raise ValueError(
            "wrong arg_logfile: must be either string or file object")

    logger.addHandler(logfile_handler)
    name = arg_pdbin if not hasattr(arg_pdbin,
                                    '__name__') else arg_pdbin.__name__
    logger.info("\n==================================================")
    logger.info("Summary of pdb4amber for: %s" % name)
    logger.info("===================================================")

    if arg_pdbin == arg_pdbout:
        raise RuntimeError(
            "The input and output file names cannot be the same!\n")

    base_filename, extension = os.path.splitext(arg_pdbout)
    if arg_pdbin == 'stdin':
        if PY3:
            pdbin = StringIO(sys.stdin.read())
        else:
            pdbin = sys.stdin
    else:
        pdbin = arg_pdbin

    if isinstance(pdbin, parmed.Structure):
        parm = pdbin
    elif hasattr(pdbin, 'read'):
        # StringIO (e.g: read from pipe)
        # need to use read_PDB
        parm = parmed.read_PDB(pdbin)
    else:
        try:
            parm = parmed.load_file(pdbin)
        except parmed.exceptions.FormatNotFound:
            sys.stderr.write('Warning: input file may not be a PDB file!\n')
            sys.stderr.write('         trying to process it as one anyway.\n')
            # go back to read_PDB
            parm = parmed.read_PDB(pdbin)

    pdbfixer = AmberPDBFixer(parm)

    pdbfixer._write_renum(base_filename)

    if arg_reduce:
        pdbfixer.add_hydrogen(no_reduce_db=arg_no_reduce_db)

    sumdict = pdbfixer._summary()

    # remove hydrogens if option -y is used:==============================
    if arg_nohyd:
        pdbfixer.parm.strip('@H=')

    # find non-standard Amber residues:===================================
    #   TODO: why does the following call discard the return array of
    #         non-standard residue names?
    ns_names = pdbfixer.find_non_starndard_resnames()

    ns_mask = ':' + ','.join(ns_names)
    ns_mask_filename = base_filename + '_nonprot.pdb'
    if ns_mask != ':':
        pdbfixer.parm[ns_mask].save(ns_mask_filename, overwrite=True)
    else:
        with open(ns_mask_filename, 'w') as fh:
            fh.write("")

    # if arg_elbow:
    #     ns_names = find_non_starndard_resnames_elbow(parm)

    # keep only protein:==================================================
    if arg_prot:
        pdbfixer.parm.strip('!:' + ','.join(RESPROT))

    # strip atoms with given mask    =====================================
    if arg_strip_atom_mask is not None:
        pdbfixer.parm.strip(arg_strip_atom_mask)

    # remove water if -d option used:=====================================
    if arg_dry:
        water_mask = ':' + ','.join(parmed.residue.WATER_NAMES)
        water_parm = pdbfixer.parm[water_mask]
        pdbfixer.remove_water()
        water_parm.save('{}_water.pdb'.format(base_filename), overwrite=True)
    # find histidines that might have to be changed:=====================
    if arg_constph:
        pdbfixer.constph()
    else:
        pdbfixer.assign_histidine()

    # find possible S-S in the final protein:=============================
    sslist, cys_cys_atomidx_set = pdbfixer.find_disulfide()
    pdbfixer.rename_cys_to_cyx(sslist)
    with open(base_filename + '_sslink', 'w') as fh:
        for (idx0, idx1) in sslist:
            fh.write('{} {}\n'.format(idx0 + 1, idx1 + 1))

    # find possible gaps:==================================================
    gaplist = pdbfixer.find_gaps()

    mask_str_list = []
    if arg_mutate_string is not None:
        # e.g: arg_mutate_str = "3-ALA,4-GLU"
        for mask_str in arg_mutate_string.replace(';', ',').split(','):
            index, resname = mask_str.split('-')
            mask_str_list.append([int(index.strip()) - 1, resname.strip()])
        pdbfixer.mutate(mask_str_list)

        # mutation will remove all hydrogens
        # add back if using reduce
        if arg_reduce:
            pdbfixer.add_hydrogen(no_reduce_db=arg_no_reduce_db)

    # count heavy atoms:==================================================
    missing_atom_residues = pdbfixer.find_missing_heavy_atoms()
    logger.info("\n---------- Mising heavy atom(s)\n")
    if missing_atom_residues:
        for (residue, n_missing) in missing_atom_residues:
            logger.warn('{}_{} misses {} heavy atom(s)'.format(
                residue.name, residue.idx + 1, n_missing))
    else:
        logger.info('None')

    if arg_add_missing_atoms:
        pdbfixer.add_missing_atoms()

    # =====================================================================
    # make final output to new PDB file
    # =====================================================================
    if arg_model >= 0:
        final_coordinates = pdbfixer.parm.get_coordinates()[arg_model]
        write_kwargs = dict(coordinates=final_coordinates)
    else:
        # keep all models
        write_kwargs = dict()
    write_kwargs[
        'increase_tercount'] = False  # so CONECT record can work properly
    if not arg_keep_altlocs:
        if sumdict['has_altlocs']:
            logger.info('The alternate coordinates have been discarded.')
            if arg_mostpop:
                logger.info(
                    'Only the highest occupancy for each atom was kept.')
                write_kwargs = dict(altlocs='occupancy')
            else:
                logger.info(
                    'Only the first occurrence for each atom was kept.')
                write_kwargs = dict(altlocs='first')
        # remove altlocs label
        for atom in pdbfixer.parm.atoms:
            atom.altloc = ''
            for oatom in atom.other_locations.values():
                oatom.altloc = ''
    if arg_pdbout in ['stdout', 'stderr'] or arg_pdbout.endswith('.pdb'):
        output = pdbfixer._write_pdb_to_stringio(
            cys_cys_atomidx_set=cys_cys_atomidx_set,
            disulfide_conect=arg_conect,
            noter=arg_noter,
            **write_kwargs)
        output.seek(0)
        if arg_pdbout in ['stdout', 'stderr']:
            pdb_out_filename = 'stdout.pdb'
            print(output.read())
        else:
            pdb_out_filename = arg_pdbout
            with open(arg_pdbout, 'w') as fh:
                fh.write(output.read())
    else:
        # mol2 does not accept altloc keyword
        pdb_out_filename = arg_pdbout
        pdbfixer.parm.save(pdb_out_filename, overwrite=True)

    if arg_leap_template:
        with open('leap.template.in', 'w') as fh:
            if arg_prot:
                final_ns_names = []
            else:
                final_ns_names = ns_names
            content = _make_leap_template(parm,
                                          final_ns_names,
                                          gaplist,
                                          sslist,
                                          input_pdb=pdb_out_filename,
                                          prmtop='prmtop',
                                          rst7='rst7')
            fh.write(content)
    return ns_names, gaplist, sslist
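
A hedged sketch of calling the run() function above programmatically instead of via the pdb4amber command line; the file names are placeholders and only a subset of the keyword arguments is shown.

ns_names, gaplist, sslist = run(
    arg_pdbout='output.pdb',
    arg_pdbin='input.pdb',
    arg_reduce=True,                 # protonate with reduce, as in Example #13
    arg_logfile='pdb4amber.log',
)
print('non-standard residues:', ns_names)
print('gaps:', gaplist)
print('disulfides:', sslist)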
Example #30
 def testVmdOverflow(self):
     """ Test PDB file where atom and residue numbers overflow """
     pdbfile = read_PDB(self.overflow)
     self.assertEqual(len(pdbfile.atoms), 110237)
     self.assertEqual(len(pdbfile.residues), 35697)
     np.testing.assert_allclose(pdbfile.box, [0, 0, 0, 90, 90, 90])
Example #31
    def checkPDB(self):
        """
        Generate parmed structure objects for the complex, receptor and ligand (if it is protein-like)

        1 - Rename HIS
        2 - Rename CYS
        3 - Delete H
        4 - Rename oxygen in termini from GROMACS to AMBER name
          - Rename CD in ILE from GROMACS to AMBER name
        5 - Save
        :return:
        """
        logging.info('Generating AMBER Compatible PDB Files...')

        self.remove_MODEL(self.complex_pdb)
        self.remove_MODEL(self.receptor_pdb)
        self.remove_MODEL(self.ligand_pdb)
        # try:
        #     self.complex_str = parmed.read_PDB(self.complex_pdb)  # can always be initialized
        # except: # when structure file has no chains ids
        #     remove_MODEL(self.complex_pdb)
        self.complex_str = parmed.read_PDB(
            self.complex_pdb)  # can always be initialized
        self.receptor_str = parmed.read_PDB(self.receptor_pdb)
        self.ligand_str = parmed.read_PDB(self.ligand_pdb)
        if self.FILES.reference_structure:
            self.ref_str = parmed.read_PDB(self.FILES.reference_structure)

        self.fix_chains_IDs(self.complex_str, self.receptor_str,
                            self.ligand_str, self.ref_str)

        # fix receptor structure
        self.properHIS(self.receptor_str)
        self.properCYS(self.receptor_str)
        self.properAspGluLys(self.receptor_str)
        self.fix_H_ATOMS(self.receptor_str)
        # For some reason removing the hydrogens returns the hydrogen-bound atoms to their original names. This is
        # problematic with ILE switching from CD to CD1. parmed bug?
        self.receptor_str.strip('@/H')
        self.properATOMS(self.receptor_str)

        # check if rec contain ions (metals)
        self.rec_ions = self.receptor_str[:, ions, :]
        self.rec_ions_after = False
        if self.rec_ions.atoms:
            # fix atom number, avoid core dump in tleap
            i = 1
            for at in self.rec_ions.atoms:
                at.number = i
                i += 1
            # check ions location
            count = 0
            for res in self.receptor_str.residues:
                if res.number != self.complex_str.residues[count].number:
                    self.rec_ions_after = True
                    break
                count += 1
            if self.rec_ions_after:
                self.rec_ions.save(self.rec_ions_pdb,
                                   'pdb',
                                   True,
                                   renumber=False)
                # if any ions exist, strip them
                self.receptor_str.strip(f':{",".join(ions)}')
                self.rec_str_ions = True
        self.receptor_str.save(self.receptor_pdb_fixed,
                               'pdb',
                               True,
                               renumber=False)

        # fix ligand structure if is protein
        self.properHIS(self.ligand_str)
        self.properCYS(self.ligand_str)
        self.properAspGluLys(self.ligand_str)
        self.fix_H_ATOMS(self.ligand_str)
        self.ligand_str.strip('@/H')
        self.properATOMS(self.ligand_str)

        # check if lig contain ions (metals)
        self.lig_ions = self.ligand_str[:, ions, :]
        if self.lig_ions.atoms:
            # fix atom number, avoid core dump in tleap
            i = 1
            for at in self.lig_ions.atoms:
                at.number = i
                i += 1
            self.lig_ions.save(self.lig_ions_pdb, 'pdb', True, renumber=False)
            # if any ions exist, strip them
            self.ligand_str.strip(f':{",".join(ions)}')
            self.lig_str_ions = True
        self.ligand_str.save(self.ligand_pdb_fixed,
                             'pdb',
                             True,
                             renumber=False)

        if self.INPUT['alarun']:
            logging.info('Building Mutant receptor...')
            if self.INPUT['mutant'].lower() in ['rec', 'receptor']:
                self.mutant_receptor_str = parmed.read_PDB(
                    self.receptor_pdb_fixed)
                # fix mutant receptor structure
                self.mutatexala(self.mutant_receptor_str)
                self.mutant_receptor_str.save(self.mutant_receptor_pdb_fixed,
                                              'pdb',
                                              True,
                                              renumber=False)
            else:
                logging.info('Building Mutant ligand...')
                if self.FILES.ligand_mol2:
                    GMXMMPBSA_ERROR(
                        'Mutation is only possible if the ligand is protein-like'
                    )
                self.mutant_ligand_str = parmed.read_PDB(self.ligand_pdb_fixed)
                self.mutatexala(self.mutant_ligand_str)
                self.mutant_ligand_str.save(self.mutant_ligand_pdb_fixed,
                                            'pdb',
                                            True,
                                            renumber=False)

        # Get residue form receptor-ligand interface
        if self.print_residues:
            if self.use_temp:
                temp_str = parmed.read_PDB('rec_temp.pdb')
                rec_resnum = len(temp_str.residues)
            else:
                rec_resnum = len(self.receptor_str.residues)
            res_list = []
            res_ndx = 1
            for rres in self.complex_str.residues[:rec_resnum]:  # iterate over receptor residues
                lres_ndx = rec_resnum + 1
                for lres in self.complex_str.residues[rec_resnum:]:  # iterate over ligand residues
                    for rat in rres.atoms:
                        rat_coor = [rat.xx, rat.xy, rat.xz]
                        for lat in lres.atoms:
                            lat_coor = [lat.xx, lat.xy, lat.xz]
                            if dist(rat_coor, lat_coor) <= self.within:
                                if res_ndx not in res_list:
                                    res_list.append(res_ndx)
                                if lres_ndx not in res_list:
                                    res_list.append(lres_ndx)
                                break
                    lres_ndx += 1
                res_ndx += 1
            res_list.sort()
            self.INPUT['print_res'] = ','.join([str(x) for x in res_list])
Example #32
 def testPDBWriteFormat(self):
     """ Test PDB atom names are properly justified per PDB standard """
     pdbfile = read_PDB(self.format_test)
     f = get_fn('pdb_format_test.pdb', written=True)
     pdbfile.write_pdb(f, write_anisou=True)
     self.assertTrue(diff_files(get_saved_fn('SCM_A_formatted.pdb'), f))
Example #33
 def testBzip(self):
     """ Test Bzipped-PDB file parsing """
     self._check4lzt(read_PDB(self.pdbbz2))
Example #34
 def testDownloadSave(self):
     """ Tests downloading PDB files and saving a copy """
     fname = get_fn('downloaded.pdb', written=True)
     self._check4lzt(download_PDB('4lzt', saveto=fname))
     self._check4lzt(read_PDB(fname))
Example #35
 def get_residues(self):
     if self.rec_path is not None:
         structure = parmed.read_PDB(self.rec_path)
         return [r.number for r in structure.residues]
     else:
         return []
Example #36
def load_gmxmmpbsa_info(fname):
    """
    Loads up a gmx_MMPBSA info file and returns a mmpbsa_data instance with all
    of the data available in numpy arrays if numpy is available. The returned
    object is a mmpbsa_data instance.

    The structure is changed to provide an easier way to graph per-residue data

    mmpbsa_data attributes:
    -----------------------
       o  Derived from "dict"
       o  Each solvent model is a dictionary key for a numpy array (if numpy is
          available) or array.array (if numpy is unavailable) for each of the
          species (complex, receptor, ligand) present in the calculation.
       o  The alanine scanning mutant data is under another dict denoted by the
          'mutant' key.

    Data Layout:
    ------------
               Model     |  Dictionary Key    |  Data Keys Available
       -------------------------------------------------------------------
       Generalized Born  |  'gb'              |  EGB, ESURF, *
       Poisson-Boltzmann |  'pb'              |  EPB, EDISPER, ECAVITY, *
       3D-RISM (GF)      |  'rism gf'         |
       3D-RISM (Standard)|  'rism std'        |
       Normal Mode       |  'nmode'           |
       Quasi-harmonic    |  'qh'              |

    * == TOTAL, VDW, EEL, 1-4 EEL, 1-4 VDW, BOND, ANGLE, DIHED

    The keys above are entries for the main dict as well as the sub-dict whose
    key is 'mutant' in the main dict.  Each entry in the main (and mutant sub-)
    dict is, itself, a dict with 1 or 3 keys; 'complex', 'receptor', 'ligand';
    where 'receptor' and 'ligand' are missing for stability calculations.
    If numpy is available, all data will be numpy.ndarray instances.  Otherwise,
    all data will be array.array instances.

    All of the objects referenced by the listed 'Dictionary Key's are dicts in
    which the listed 'Data Keys Available' are keys to the data arrays themselves

    Examples:
    ---------
       # Load numpy for our analyses (optional)
       import numpy as np

       # Load the _MMPBSA_info file:
       mydata = load_gmxmmpbsa_info('_MMPBSA_info')

       # Access the complex GB data structure and calculate the autocorr. fcn.
       autocorr = np.correlate(mydata['gb']['complex']['TOTAL'],
                               mydata['gb']['complex']['TOTAL'])

       # Calculate the standard deviation of the alanine mutant receptor in PB
       print(mydata.mutant['pb']['receptor']['TOTAL'].std())
    """
    if not isinstance(fname, Path):
        fname = Path(fname)

    if not fname.exists():
        raise NoFileExists("cannot find %s!" % fname)
    os.chdir(fname.parent)
    app = main.MMPBSA_App(MPI)
    info = infofile.InfoFile(app)
    info.read_info(fname)
    app.normal_system = app.mutant_system = None
    app.parse_output_files()
    return_data = mmpbsa_data(app)
    # Since Decomp data is parsed in a memory-efficient manner (by not storing
    # all of the data in arrays, but rather by printing each data point as it's
    # parsed), we need to handle the decomp data separately here
    # Open Complex fixed structure to assign per-(residue/wise) residue name
    try:
        complex_str = parmed.read_PDB(app.FILES.complex_fixed)
    except:
        complex_str = parmed.read_PDB(app.FILES.prefix + 'COM.pdb')
    # Get receptor and ligand masks
    mut_index = None

    rec = {}
    mut_rec = {}
    rmstr = app.INPUT['receptor_mask'].strip(':')
    rml = rmstr.split(',')
    for x in rml:
        if len(x.split('-')) > 1:
            start, end = x.split('-')
            for i in range(int(start), int(end) + 1):
                residue = complex_str.residues[i - 1]
                icode = f'{residue.insertion_code}'
                if icode:
                    icode = ':' + icode
                rec[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                          icode)
                mut_rec[i] = (
                    f"{residue.chain}:{residue.name}:{residue.number}" + icode)
                if app.INPUT['mutant_res'] == (
                        f"{residue.chain}:{residue.number}" + icode):
                    mut_rec[i] = (
                        f"{residue.chain}:{app.INPUT['mutant']}:{residue.number}"
                        + icode)
        else:
            i = int(x)
            residue = complex_str.residues[i - 1]
            icode = f'{residue.insertion_code}'
            if icode:
                icode = ':' + icode
            rec[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                      icode)
            mut_rec[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                          icode)
            if app.INPUT['mutant_res'] == (
                    f"{residue.chain}:{residue.number}" + icode):
                mut_rec[i] = (
                    f"{residue.chain}:{app.INPUT['mutant']}:{residue.number}" +
                    icode)

    lig = {}
    mut_lig = {}
    lmstr = app.INPUT['ligand_mask'].strip(':')
    lml = lmstr.split(',')
    for x in lml:
        if len(x.split('-')) > 1:
            start, end = x.split('-')
            for i in range(int(start), int(end) + 1):
                residue = complex_str.residues[i - 1]
                icode = f'{residue.insertion_code}'
                if icode:
                    icode = ':' + icode
                lig[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                          icode)
                mut_lig[i] = (
                    f"{residue.chain}:{residue.name}:{residue.number}" + icode)
                if app.INPUT['mutant_res'] == (
                        f"{residue.chain}:{residue.number}" + icode):
                    mut_lig[i] = (
                        f"{residue.chain}:{app.INPUT['mutant']}:{residue.number}"
                        + icode)
        else:
            i = int(x)
            residue = complex_str.residues[i - 1]
            icode = f'{residue.insertion_code}'
            if icode:
                icode = ':' + icode
            lig[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                      icode)
            mut_lig[i] = (f"{residue.chain}:{residue.name}:{residue.number}" +
                          icode)
            if app.INPUT['mutant_res'] == (
                    f"{residue.chain}:{residue.number}" + icode):
                mut_lig[i] = (
                    f"{residue.chain}:{app.INPUT['mutant']}:{residue.number}" +
                    icode)

    com = rec.copy()
    com.update(lig)
    com_res_info = []
    for key, value in sorted(com.items()):
        com_res_info.append(value)

    rec_res_info = []
    for key, value in rec.items():
        rec_res_info.append(value)

    lig_res_info = []
    for key, value in lig.items():
        lig_res_info.append(value)

    mut_com_res_info = []
    mut_com = mut_rec.copy()
    mut_com.update(mut_lig)
    for key, value in sorted(mut_com.items()):
        mut_com_res_info.append(value)

    mut_rec_res_info = []
    for key, value in mut_rec.items():
        mut_rec_res_info.append(value)

    mut_lig_res_info = []
    for key, value in mut_lig.items():
        mut_lig_res_info.append(value)

    if not app.INPUT['alarun']:
        return_data.mutant = {}
    if app.INPUT['decomprun']:
        # Simplify the decomp class instance creation
        if app.INPUT['idecomp'] in (1, 2):
            DecompClass = lambda x, part: APIDecompOut(x, part, app)
        else:
            DecompClass = lambda x, part: APIPairDecompOut(x, part, app)

        if not app.INPUT['mutant_only']:
            # Do normal GB
            if app.INPUT['gbrun']:
                return_data['decomp'] = {'gb': {}}
                return_data['decomp']['gb']['complex'] = DecompClass(
                    app.FILES.prefix + 'complex_gb.mdout',
                    com_res_info).array_data

                if not app.stability:
                    return_data['decomp']['gb']['receptor'] = DecompClass(
                        app.FILES.prefix + 'receptor_gb.mdout',
                        rec_res_info).array_data
                    return_data['decomp']['gb']['ligand'] = DecompClass(
                        app.FILES.prefix + 'ligand_gb.mdout',
                        lig_res_info).array_data
                    return_data['decomp']['gb']['delta'] = get_delta_decomp(
                        app, 'gb', return_data['decomp'])
            # Do normal PB
            if app.INPUT['pbrun']:
                return_data['decomp'] = {'pb': {}}
                return_data['decomp']['pb']['complex'] = DecompClass(
                    app.FILES.prefix + 'complex_pb.mdout',
                    com_res_info).array_data
                if not app.stability:
                    return_data['decomp']['pb']['receptor'] = DecompClass(
                        app.FILES.prefix + 'receptor_pb.mdout',
                        rec_res_info).array_data
                    return_data['decomp']['pb']['ligand'] = DecompClass(
                        app.FILES.prefix + 'ligand_pb.mdout',
                        lig_res_info).array_data
                    return_data['decomp']['pb']['delta'] = get_delta_decomp(
                        app, 'pb', return_data['decomp'])
        if app.INPUT['alarun']:
            # Do mutant GB
            if app.INPUT['gbrun']:
                return_data.mutant['decomp'] = {'gb': {}}
                return_data.mutant['decomp']['gb']['complex'] = DecompClass(
                    app.FILES.prefix + 'mutant_complex_gb.mdout',
                    mut_com_res_info).array_data
                if not app.stability:
                    return_data.mutant['decomp']['gb'][
                        'receptor'] = DecompClass(
                            app.FILES.prefix + 'mutant_receptor_gb.mdout',
                            mut_rec_res_info).array_data
                    return_data.mutant['decomp']['gb']['ligand'] = DecompClass(
                        app.FILES.prefix + 'mutant_ligand_gb.mdout',
                        mut_lig_res_info).array_data
                    return_data.mutant['decomp']['gb'][
                        'delta'] = get_delta_decomp(
                            app, 'gb', return_data.mutant['decomp'])
            # Do mutant PB
            if app.INPUT['pbrun']:
                return_data.mutant['decomp'] = {'pb': {}}
                return_data.mutant['decomp']['pb']['complex'] = DecompClass(
                    app.FILES.prefix + 'mutant_complex_pb.mdout',
                    mut_com_res_info).array_data
                if not app.stability:
                    return_data.mutant['decomp']['pb'][
                        'receptor'] = DecompClass(
                            app.FILES.prefix + 'mutant_receptor_pb.mdout',
                            mut_rec_res_info).array_data
                    return_data.mutant['decomp']['pb']['ligand'] = DecompClass(
                        app.FILES.prefix + 'mutant_ligand_pb.mdout',
                        mut_lig_res_info).array_data
                    return_data.mutant['decomp']['pb'][
                        'delta'] = get_delta_decomp(
                            app, 'pb', return_data.mutant['decomp'])
        else:
            return_data.mutant = None

    app_namespace = SimpleNamespace(FILES=app.FILES,
                                    INPUT=app.INPUT,
                                    numframes=app.numframes,
                                    numframes_nmode=app.numframes_nmode)

    return return_data, app_namespace
Example #37
    def showdata(self, item: CustomItem, col):
        self.treeWidget.clearSelection()
        # self.update_options(item)   # FIXME: only when we able the options
        if col == 1:
            s = item.lp_subw
            if item.checkState(col) == Qt.Checked:
                item.setSelected(True)
                if s:
                    s.show()
                else:
                    sub = Charts(item=item, col=col, options={'chart_type':[Charts.LINE, Charts.ROLLING],
                                                              'hide_toolbar': self.data_options['hide_toolbar']})
                    sub.make_chart()
                    self.mdi.addSubWindow(sub)
                    sub.show()
            else:
                if s:
                    self.mdi.activatePreviousSubWindow()
                    s.close()
        elif col == 2:
            s = item.bp_subw
            if item.checkState(col) == Qt.Checked:
                item.setSelected(True)
                if s:  # check if any subwindow has been stored
                    s.show()
                else:
                    sub = Charts(item=item, col=col, options={'chart_type':[Charts.BAR], 'hide_toolbar':
                        self.data_options['hide_toolbar']})
                    sub.make_chart()
                    self.mdi.addSubWindow(sub)
                    sub.show()
            else:
                if s:
                    self.mdi.activatePreviousSubWindow()
                    s.close()
        elif col == 3:
            s = item.hmp_subw
            if item.checkState(col) == Qt.Checked:
                item.setSelected(True)
                if s:  # check if any subwindow has been stored
                    s.show()
                else:
                    sub = Charts(item=item, col=col, options={'chart_type':[Charts.HEATMAP], 'hide_toolbar':
                        self.data_options['hide_toolbar']})
                    sub.make_chart()
                    self.mdi.addSubWindow(sub)
                    sub.show()
            else:
                if s:
                    self.mdi.activatePreviousSubWindow()
                    s.close()
        elif col == 4:
            pymol_p = item.pymol_process
            pymol_path = [os.path.join(path, 'pymol') for path in os.environ["PATH"].split(os.pathsep)
                          if os.path.exists(os.path.join(path, 'pymol')) and
                          os.access(os.path.join(path, 'pymol'), os.X_OK)]
            if not pymol_path:
                m = QMessageBox.critical(self, 'PyMOL not found!', 'PyMOL not found! Make sure PyMOL is in the '
                                                                   'PATH.', QMessageBox.Ok)
                item.setCheckState(4, Qt.Unchecked)
                return
            else:
                pymol = pymol_path[0]

            if hasattr(item.app.FILES, 'complex_fixed'):
                com_pdb = item.syspath.parent.joinpath(item.app.FILES.complex_fixed)
            else:
                self.statusbar.showMessage(f'{item.app.FILES.prefix + "FIXED_COM.pdb"} does not exist. The modified PDB file '
                                f'may be inconsistent. Please consider using the latest version of gmx_MMPBSA')
                com_pdb = item.syspath.parent.joinpath(item.app.FILES.prefix + 'COM.pdb')
            bfactor_pml = item.syspath.parent.joinpath('bfactor.pml')
            output_path = com_pdb.parent.joinpath(f'{item.sysname}_energy2bfactor.pdb')
            if item.checkState(col) == Qt.Checked:
                item.setSelected(True)
                available_instance = self.get_pymol_instance()
                if not available_instance:
                    m = QMessageBox.critical(self, 'Error trying to open multiple instances of PyMOL',
                                             'Only 5 instances of PyMOL are allowed and 5 are already running. '
                                             'If you want to view this, please close one of them.',
                                             QMessageBox.Ok)
                    item.setCheckState(4, Qt.Unchecked)
                    return

                if not pymol_p or pymol_p.state() == QProcess.Running:
                    pymol_p =  available_instance # Keep a reference to the QProcess (e.g. on self) while it's running.
                    item.pymol_process = pymol_p # store pymol instance until we finish the process
                qpd = QProgressDialog('Generate modified pdb and open it in PyMOL', 'Abort', 0, 2, self)
                qpd.setWindowModality(Qt.WindowModal)
                qpd.setMinimumDuration(1500)

                for i in range(2):
                    qpd.setValue(i)
                    if qpd.wasCanceled():
                        break
                    if i == 0:
                        com_pdb_str = parmed.read_PDB(com_pdb.as_posix())
                        res_dict = item.gmxMMPBSA_current_data.bar_plot_dat.mean().to_dict()
                        for res in com_pdb_str.residues:
                            res_notation = f'{res.chain}:{res.name}:{res.number}'
                            if res_notation in res_dict:
                                res_energy = res_dict[res_notation]
                            else:
                                res_energy = 0.00
                            for at in res.atoms:
                                at.bfactor = res_energy
                        com_pdb_str.save(output_path.as_posix(), 'pdb', True, renumber=False)
                        energy2pdb_pml(res_dict, bfactor_pml, output_path)
                qpd.setValue(2)
                pymol_p.start(pymol, [bfactor_pml.as_posix()])
                pymol_p.finished.connect(lambda : item.setCheckState(4, Qt.Unchecked))
            else:
                if pymol_p and pymol_p.state() == QProcess.Running:
                    pymol_p.terminate()
                    item.pymol_process = None