Example #1
def scoreBenchmark2():

    dy.confProDy(verbosity='none')  # silence ProDy; "dy" is presumably "import prody as dy" in the full module

    resultsFile = open('mu-solv-scores.txt', 'w+')
    csvWriter = csv.writer(resultsFile)
    csvWriter.writerow(['name', 'mu', 'TYR', 'SER', 'CYS' ])

    csvReader = csv.reader(open('train_dG_est.csv', newline=''))
    next(csvReader)            # skip the first row
    headers = next(csvReader)  # column headers

    for row in csvReader:

        PDB_ID = row[0]
        mu_pot_score = float(row[1])
        dASA_tyr = float(row[2])
        dASA_ser = float(row[3])
        dASA_cys = float(row[4])
        dG_est   = float(row[5])

        feature_vector = scoreOne(PDB_ID)
        results = [PDB_ID]
        results.extend(feature_vector)  # map(results.append, ...) is a no-op in Python 3
        print(results)
        csvWriter.writerow(results)

    resultsFile.close()
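Nearly every example below silences ProDy the same way. A minimal sketch of that pattern, using only verbosity levels that appear in the examples themselves ('none', 'info', 'error', 'critical', 'debug'):

import prody

prody.confProDy(verbosity='none')   # suppress ProDy log messages
# ... noisy ProDy calls here ...
prody.confProDy(verbosity='info')   # re-enable informational logging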
Example #2
    def set_default_based_on_argparse(self, selection, vmd, compare, ref, debug, pdb):
        '''
        Set arguments from the command line based on the argparse values.
        '''
        if selection in ["calpha", "backbone", "all"]:
            self.selection = selection
        else:
            print("Invalid value for selection:", selection)
            print("Possible values for selection are: calpha, backbone, all")
            exit()
        self.vmd = vmd
        self.compare = compare
        self.ref = ref
        self.debug = debug
        self.pdbarg = pdb

        # set ProDy's level of output
        if self.debug:
            prody.confProDy(verbosity="debug")
        else:
            prody.confProDy(verbosity="info")

        if self.debug:
            print("***DEBUG***")
            self.printSettings()
            print("***DEBUG-END***")
Example #3
    def gen_normal_modes(self):
        """Compute normal modes with ProDy's anisotropic network model (ANM)."""
        print('>>> Computing normal modes with ProDy')
        from prody import ANM as prody_ANM
        from prody import confProDy

        confProDy(verbosity='critical')  # report only critical problems

        anm = prody_ANM()
        # positions scaled by 1e10, presumably converting metres to angstroms
        anm.buildHessian(self.atom_pos * 10**10, cutoff=self.elastic_network_cutoff)
        anm.calcModes(n_modes=self.num_normal_modes)

        self.normal_mode_vectors = anm.getEigvecs().reshape(self.atom_pos.shape[0],
                                                            self.atom_pos.shape[1],
                                                            self.num_normal_modes)
        # mode variances are inversely proportional to the eigenvalues
        self.normal_mode_variances = 1./anm.getEigvals()
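For comparison, a minimal self-contained ANM run using the same ProDy calls as above; the file name and mode count are placeholders:

from prody import ANM, confProDy, parsePDB

confProDy(verbosity='critical')
ca = parsePDB('protein.pdb', subset='calpha')  # placeholder file name
anm = ANM('demo')
anm.buildHessian(ca, cutoff=15.0)              # 15 A is ProDy's default cutoff
anm.calcModes(n_modes=10)
print(anm.getEigvals()[:3])                    # slowest three eigenvalues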
Example #4
File: run_FlexE.py Project: grollins/FlexE
def main():
    #no log messages:
    confProDy(verbosity='none')

    # parse command line arguments
    parser = ArgumentParser(description='Calculate MDENM energies for a pdb: '
                                        'energy is calculated using modes from the pdb '
                                        'and then from the reference; the crystal '
                                        'structure should be the reference.')
    parser.add_argument('--pdb', help='Molecule we want to examine. It will also be used as topology')
    parser.add_argument('--reference',
                        help='Reference pdb to which we will RMSD everything')
    args = parser.parse_args() 

    flexy = FlexE(ref_pdb_file=args.reference)
    results = flexy.compare_with_ref(pdb_file=args.pdb)
    rmsd, energy_ref_to_pdb, energy_pdb_to_ref = results
    print "%s %.2f %.2f %.2f " % (args.pdb, rmsd, energy_ref_to_pdb, energy_pdb_to_ref)
Example #5
    def setUpClass(cls):
        # Generate and read the pdb
        cls.pdb_path = "tmp_pdb.pdb"
        open(cls.pdb_path, "w").write(amber_short_ca_contents)
        try:
            prody.confProDy(verbosity='none')
        except Exception:
            print("Unable to silence ProDy")
        cls.pdb = prody.parsePDB(cls.pdb_path, subset='calpha')

        # Save coordsets before superposition
        cls.not_iterposed_coordsets = numpy.array(cls.pdb.getCoordsets())

        # Do ProDy iterposition (iteratively superpose coordsets onto their mean)
        cls.ensemble = prody.Ensemble('pca_test_ensemble')
        cls.ensemble.setCoords(cls.pdb.getCoords())
        cls.ensemble.addCoordset(cls.pdb.getCoordsets())
        cls.ensemble.iterpose()
        cls.coordsets = cls.ensemble.getCoordsets()
Example #7
def read_pdb_dir(pdb_fs, pdb_dir):
    '''
    Only .ent files are read...
    Returns a list of (pdb_path, AtomGroup) tuples; the AtomGroup is None
    for files that do not exist.
    '''

    # import prody for pdb read/write
    import prody
    prody.confProDy(verbosity='none')

    struct_data = []

    for pdb_f in pdb_fs:

        pdb_f = os.path.join(pdb_dir, pdb_f)

        if os.path.exists(pdb_f):
            struct = prody.parsePDB(pdb_f)
        else:
            struct = None

        struct_data.append((pdb_f, struct))

    return struct_data
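A hypothetical driver for read_pdb_dir, filtering for the .ent files the docstring mentions (the directory name is made up):

import os

pdb_dir = 'pdb_files'  # placeholder directory
ent_files = [f for f in os.listdir(pdb_dir) if f.endswith('.ent')]
for path, struct in read_pdb_dir(ent_files, pdb_dir):
    if struct is not None:
        print(path, struct.numAtoms())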
Example #9
    def __init__(self, parameters, observer):
        """
        Class creator. It parses the needed files and extracts info and coordinates.
        """

        super(TrajectoryHandler, self).__init__(observer)

        print "Reading conformations..."
        prody.confProDy(verbosity="none")

        self.parameters = parameters
        matrix_parameters = parameters.get_value(
            "data.matrix.parameters", default_value=ProtocolParameters.empty())
        parameters["data"]["files"] = self.expand_file_lists(
            parameters["data"]["files"])
        self.files = parameters["data"]["files"]
        self.pdbs = []

        if len(self.files) == 0:
            common.print_and_flush("[ERROR] no pdbs. Exiting...\n")
            self.notify("SHUTDOWN", "No pdbs defined in script.")
            exit()

        self.notify("Loading", "Loading Trajectories")

        # Bookmarking structure
        self.bookmarking = {"pdb": None, "selections": {}}

        merged_structure = self.getMergedStructure()
        self.coordsets = merged_structure.getCoordsets()
        self.number_of_conformations = self.coordsets.shape[0]
        self.number_of_atoms = self.coordsets.shape[1]

        self.handle_selection_parameters(matrix_parameters)
        print "%d conformations of %d atoms were read." % (
            merged_structure.numCoordsets(), merged_structure.numAtoms())
Example #10
#!/usr/bin/env python
# encoding: utf-8


import numpy
import numpy.matlib
import scipy.stats
import prody
import os
import itertools
import multiprocessing

prody.confProDy(verbosity='none')

n_process = 4


def compute_dist_matrix(points):
    # Pairwise Euclidean distances via the repmat/repeat trick: repmat tiles the
    # whole point list, repeat duplicates each point in place, so the difference
    # enumerates every (i, j) pair.
    numPoints = len(points)
    diffs = numpy.matlib.repmat(points, numPoints, 1) - numpy.repeat(points, numPoints, axis=0)
    distMat = numpy.sqrt(numpy.sum(diffs**2, axis=1))
    return distMat.reshape((numPoints, numPoints))
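The same matrix can be computed with SciPy, which is already a dependency of this script; scipy.spatial is not imported above, so this is an alternative sketch rather than a drop-in line:

from scipy.spatial.distance import cdist

dist_mat = cdist(points, points)  # equivalent N x N Euclidean distance matrix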


def extract_matrix(inputs):
    pdb_file, secondary_file, ss_type, ss_length = inputs

    path = os.path.join('PDB', pdb_file)
    protein = prody.parsePDB(path, subset='CA')
    coords = protein.getCoords()
    n_residues = coords.shape[0]
Example #11
File: naccess.py Project: acplus/peptalk
import subprocess, os
import tempfile
import prody
import pandas as pd

prody.confProDy(verbosity='error')

NACCESS_BINARY = '/home/assaff/tools/naccess2.1.1/naccess'

rsa_colnames = [
            'RES', 'Resname', 'Chain', 'Resnum', 
            'All-atoms-abs', 'All-atoms-rel', 
            'Total-Side-abs', 'Total-Side-rel', 
            'Main-Chain-abs', 'Main-Chain-rel', 
            'Non-polar-abs', 'Non-polar-rel', 
            'All-polar-abs', 'All-polar-rel',
            ]

rsa_colwidths = [
            3,4,2,4,
            9,6,
            7,6,
            7,6,
            7,6,
            7,6,
            ]

def read_naccess_rsa(rsa_filename):
    rsa_table = pd.read_fwf(rsa_filename, 
                       sep=None, 
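The call above is truncated; a sketch of how it might be completed using the column definitions already in the file (the skiprows value is an assumption about the NACCESS .rsa header, not taken from the original):

rsa_table = pd.read_fwf(rsa_filename,
                        widths=rsa_colwidths,
                        names=rsa_colnames,
                        skiprows=4)  # assumed number of header lines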
Example #12
#!/usr/bin/env python

"""Calculates the new PDB structure of an input PDB structure using ANM"""

import sys
from prody import parsePDB, ANM, extendModel, confProDy
from lightdock.pdbutil.PDBIO import parse_complex_from_file, write_pdb_to_file
from lightdock.structure.complex import Complex


def usage():
    print "Usage: %s PDB_file n_modes factor" % sys.argv[0]


if __name__ == "__main__":
    confProDy(verbosity='info')

    try:
        pdb_structure = sys.argv[1]
        n_modes = int(sys.argv[2])
        factor = float(sys.argv[3])
    except:
        usage()
        raise SystemExit("Wrong command line")

    protein = parsePDB(pdb_structure)
    ca_atoms = protein.select('name CA')
    protein_anm = ANM('protein ca')
    protein_anm.buildHessian(ca_atoms)
    protein_anm.calcModes(n_modes=n_modes)
    print('Normal modes calculated')
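The excerpt imports extendModel but ends before using it. A hedged sketch of the likely continuation, extending the CA-based modes to all atoms (the names on the left are assumptions):

anm_ext, atoms_ext = extendModel(protein_anm, ca_atoms, protein, norm=True)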
Example #13
import argparse
import logging
import os
from pathlib import Path

# pandas and prody are used below but were missing from this excerpt's imports
import pandas as pd
import prody


def __append_error(path_txt, err_msg = None):
    if err_msg is not None:
        with open(path_txt, 'a') as f:
            f.write('---\n\n')
            f.write(str(err_msg))
            f.write('\n')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    prody.confProDy(verbosity='none', typo_warnings=False, selection_warning=False)
    prody.LOGGER._logger.setLevel(logging.ERROR)
    parser = argparse.ArgumentParser()
    parser.add_argument('--idx', type=str, required=True, default=None, help='path to proteins index file')
    args = parser.parse_args()
    logging.info(f'args = {args}')
    num_log_iter = 10
    path_csv = args.idx
    path_csv_out = os.path.splitext(path_csv)[0] + '-chains.txt'
    path_err_txt = os.path.splitext(path_csv)[0] + '-chains-errlog.txt'
    Path(path_err_txt).touch()
    wdir = os.path.dirname(path_csv)
    data_csv = pd.read_csv(path_csv, converters={'uid': str})
    paths_pdb = [os.path.join(wdir, x) for x in data_csv['path']]
    #
    data_csv_out = {x: [] for x in ['path2', 'chains_num', 'chains_length', 'chains_length_total']}
Example #14
import prody
import itertools
import pandas as pd
from os import path
import numpy as np
prody.confProDy(verbosity="none")


def uep(pdb_path, uep_contact_matrix, group1, group2):
    aa_code = {
        'A': 'ALA',
        'R': 'ARG',
        'N': 'ASN',
        'D': 'ASP',
        'C': 'CYS',
        'E': 'GLU',
        'Q': 'GLN',
        'G': 'GLY',
        'H': 'HIS',
        'I': 'ILE',
        'L': 'LEU',
        'K': 'LYS',
        'M': 'MET',
        'F': 'PHE',
        'P': 'PRO',
        'S': 'SER',
        'T': 'THR',
        'W': 'TRP',
        'Y': 'TYR',
        'V': 'VAL'
    }
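Should the reverse lookup be needed, the three-letter to one-letter map is just the inversion of aa_code (a sketch):

aa_code_inv = {three: one for one, three in aa_code.items()}  # e.g. 'ALA' -> 'A'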
Example #15
import time
import os
import prody
import numpy 
import bz2
from pyRMSD.utils.proteinReading import flattenCoords
from pyproct.clustering.metrics.pcaMetrics import PCAMetric
if __name__ == '__main__':
    """
    Compares Prody and pyProClust implementation.
    """
    
    ######################
    # BENCHMARKING
    ######################
    prody.confProDy(verbosity='none')
    print("Loading file...")
    t1 = time.time()
    print "\tUncompressing..."
    open("tmp_amber_long.pdb","w").write(bz2.BZ2File("data/amber_long.pdb.tar.bz2").read())
    print "\tLoading..."
    pdb = prody.parsePDB("tmp_amber_long.pdb", subset='calpha')
    not_iterposed_coordsets = numpy.array(pdb.getCoordsets())
    number_of_conformations = not_iterposed_coordsets.shape[0]
    atoms_per_conformation = not_iterposed_coordsets.shape[1]
    os.system("rm tmp_amber_long.pdb")
    print "\tDeleting temporary file"
    t2 = time.time()
    print('Loading took %0.3f s' % (t2 - t1))
    
    ######################
Example #16
import prody.sequence as sequence
import csv
import sys
import prody
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
#sys.path.insert(0, '/Users/peter/Work/3decision/3decision_python')
sys.path.insert(0, '/Users/peter/Documents/Work/3decision/3decision_python')

prody.confProDy(verbosity='none')
import db_interface as dbi3dec

input_file = csv.DictReader(open("kinome.csv"), delimiter=';')

#TODO : check why a few uniprot codes are missing here

domain_annotations = [
    'Protein kinase domain (Domain)', 'Protein tyrosine kinase (Domain)'
]
expected_pfam_families = [
    'PF07714'
]  #PF00069']#,'PF07714'] #,'PF12330','PF00454','PF01163','PF03109']

o3i = dbi3dec.Oracle3decInterface()
o3i.connect()
final_sequences = []  #final list containing all kinase domain sequences
for line in input_file:
    uniprot_code = (line['UNIPROT_CODE'])
    if (len(uniprot_code)):
Example #17
import MDAnalysis as md

import pandas as pd

from prody import confProDy, parsePDB, writePDB
silence_prody = confProDy(verbosity='none')

from plip.structure.preparation import PDBComplex

import progressbar

import os
import sys
import warnings
warnings.filterwarnings("ignore")


def plipmd(topol=None, traj=None):

    traj = list(traj.strip('[]').split(','))

    u = md.Universe(topol, traj)

    print('\nINFO: your system contains {} segments with labels {} \n'.format(
        len(u.segments), list(u.segments.segids)))

    if len(u.segments.segids) == 1:
        print('''
			WARNING: Only one segment was identified. Proceeding to manual segment definition\n\n

			You must define the peptide residues using MDAnalysis format. 
Example #18
File: gmx_prep.py Project: badmutex/mdprep
from . import gmx
from . import mdp_defaults

import pxul
from pxul.logging import logger

import mdtraj
import prody
prody.confProDy(verbosity='critical')
import textwrap
import os
import shutil
import copy


def count_occurences(string, lines):
    """
    Count the number of times 'string' occurs in 'lines'
    Where
      string :: str
      lines  :: [str]
    returns: int
    """
    count = 0
    for line in lines:
        if string in line:
            count += 1
    return count
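A quick usage example for count_occurences (the inputs are made up):

lines = ['a b', 'b c', 'c d']
assert count_occurences('b', lines) == 2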


class suffix(object):
Example #19
# H5PY for storage
import numpy as np
import numpy.ma as ma
import h5py
# from h5py import h5s
import oddt
import ConfigParser
import prody  # used below but missing from this excerpt's imports

# check if we have openbabel < 3.0.0
try:
    from pybel import ob
except ImportError:
    from openbabel import openbabel as ob

prody.confProDy(verbosity='error')


class Config(object):

    fname = None
    config = None
    numposes = None
    box = None

    def __init__(self, fname=None):
        if fname:
            self.parse_config(fname)
            self.is_valid()
            self.fname = fname
        else:
Example #20
File: prody_anm.py Project: dww100/zazzie
    def prody_anm(self, variables, txtOutput):
        '''
        PRODY DRIVER is the function that reads in variables from the GUI input
        and runs a ProDy normal-mode calculation using the anisotropic network
        model (ANM) on a structure provided in a pdb file.

        INPUT:  variable descriptions:

        pdbfile:          input pdb file (reference)

        OUTPUT:
                        model_anm_extended_bb.nmd
                        model_traverse.dcd
                        model_samp.dcd
                        model_samp.pdb
                        model_anm_sqflucts.txt
                        model_anm_kirchhoff.txt
                        model_anm_hessian.txt
                        model_anm_cross-correlations.hm
                        model_anm_cross-correlations.txt
                        model_anm_covariance.txt
                        model_anm_beta.txt
                        model_anm_evalues.txt
                        model_anm_evectors.txt
                        model_anm_extended_all.nmd
                        model_anm.nmd

        txtOutput:        TK handler for output to GUI textbox

        files stored in ~/runname/prody directory:
        outfile:          output filename

        '''
        log = self.log
        pgui = self.run_utils.print_gui

        # start gui output
        pgui("\n%s \n" % ('=' * 60))
        pgui("DATA FROM RUN: %s \n\n" % time.asctime( time.gmtime( time.time() ) ))

        mvars = self.mvars
        #path = os.path.join(os.getcwd(),mvars.runname, 'prody')
        path = os.path.join(mvars.runname, 'prody')
        direxist = os.path.exists(path)
        if not direxist:
            result = 1  # default to failure so an exception below is reported
            try:
                result = os.system('mkdir -p ' + path)
            except OSError:
                pass
            if result != 0:
                message = 'can not create project directory: ' + path
                message += '\nstopping here\n'
                print_failure(message, txtOutput)
        if mvars.advanced_usage == 1:
            run_cmd = prody_exe + mvars.advanced_usage_cmd
            os.system(run_cmd)
            run_cmd = 'mv *.nmd *.txt *.hm prody'
            os.system(run_cmd)
            exit()

        # display progress
        fraction_done = (0 + 1) * 1.0 / 10.0
        report_string = 'STATUS\t%f' % fraction_done
        pgui(report_string)

        prody.confProDy(verbosity='none')  # silence ProDy output
        model = mvars.pdbfile[0:len(mvars.pdbfile) - 4]  # strip the ".pdb" extension
        run_cmd = prody_exe + ' anm ' + \
            mvars.pdbfile + ' -t all -n ' + str(mvars.number_modes) + ' -a'
        log.info('starting prody_exe %s' % run_cmd)
        prody_run = subprocess.Popen(run_cmd, shell=True, executable='/bin/bash')
        prody_run.wait()
        file_anm = model + '_anm_extended_all.nmd'

        # display progress
        fraction_done = (1 + 1) * 1.0 / 10.0
        report_string = 'STATUS\t%f' % fraction_done
        pgui(report_string)

        # parse nmd file with results extended to all atoms
        log.info('starting prody.parseNMD %s' % file_anm)
        mod, ag = prody.parseNMD(file_anm, type=None)
        allatoms = ag.copy()
        # set up to randomly sample number_conformations_samp modes
        log.info('starting prody.sampleModes')
        ensemble = prody.sampleModes(mod[:mvars.number_modes],
                                     ag,
                                     n_confs=mvars.number_conformations_samp,
                                     rmsd=mvars.rmsd_conformations_samp)
        log.info('starting prody ensemble and writing pdb/dcd files')
        allatoms.addCoordset(ensemble)
        prody.writePDB('model_samp.pdb', allatoms)
        prody.writeDCD('model_samp.dcd', allatoms)
        trajectory_names = []
        
        # display progress
        fraction_done = (1 + 2) * 1.0 / 10.0
        report_string = 'STATUS\t%f' % fraction_done
        pgui(report_string)

        log.info('starting prody traverse')
        for i in range(0, mvars.number_modes):
            # set up to traverse this mode
            traverse = prody.traverseMode(
                mod[i],
                allatoms,
                n_steps=mvars.number_steps_traverse,
                rmsd=mvars.rmsd_traverse)
            prody.writeDCD('traverse.dcd', traverse)
            this_dcd = str(os.path.join(path, 'traverse_' + str(i) + '.dcd'))
            cmd = 'mv traverse.dcd ' + this_dcd
            os.system(cmd)
            trajectory_names.append(this_dcd)

        # display progress
        fraction_done = (1 + 7) * 1.0 / 10.0
        report_string = 'STATUS\t%f' % fraction_done
        pgui(report_string)

        m1 = sasmol.SasMol(0)
        m2 = sasmol.SasMol(0)
        m1.read_pdb(mvars.pdbfile)
        m2.read_pdb(mvars.pdbfile,fastread=True)

        mvars.dcdfile = mvars.runname + '.dcd'
        log.info('opening new dcd file to store trajectory: %s' %
                 os.path.join(self.runpath, mvars.dcdfile))

        outfile_name = str(os.path.join(path, mvars.dcdfile))
        dcdoutfile = m2.open_dcd_write(outfile_name)
        count = 0
        coor = numpy.zeros((1,m2.natoms(),3),numpy.float32)
        for this_trajectory_name in trajectory_names:

            dcdfile = m1.open_dcd_read(this_trajectory_name)
            number_of_frames = dcdfile[2]

            for j in range(number_of_frames):
                m1.read_dcd_step(dcdfile,j)
                coor[0,:,:] = m1.coor()[0]
                m2.setCoor(coor)
                m2.write_dcd_step(dcdoutfile,0, count + 1)
                count += 1

        m2.close_dcd_write(dcdoutfile)

        log.info('moving files to runname / prody')

        file_anm = model + '_anm.nmd'
        mod, ag = prody.parseNMD(file_anm, type=None)
        mod1 = prody.parsePDB(mvars.pdbfile)
        calphas = mod1.select('calpha')
        bb_anm, bb_atoms = prody.extendModel(mod, calphas, mod1.select(
            'backbone'))  # extend model to backbone atoms
        prody.writeNMD('model_anm_extended_bb.nmd', bb_anm, bb_atoms)

        cmd = 'mv model_samp.pdb ' + path + os.sep + os.path.basename(model) + '_samp.pdb'
        os.system(cmd)

        cmd = 'mv model_samp.dcd ' + path + os.sep + os.path.basename(model) + '_samp.dcd'
        os.system(cmd)

        cmd = 'mv model_anm_extended_bb.nmd ' + \
            model + '_anm_extended_bb.nmd'
        os.system(cmd)

        cmd = 'mv *.hm *.nmd *.txt ' + path + os.sep
        os.system(cmd)
        
        # display progress
        fraction_done = (1 + 9) * 1.0 / 10.0
        report_string = 'STATUS\t%f' % fraction_done
        pgui(report_string)

        return
Example #21
    R[2, 1] = sy * cz
    R[2, 2] = cy

    return R


if __name__ == "__main__":

    usage = "usage: %prog [options] docking_out_file"
    p = OptionParser(usage=usage)
    (options, args) = p.parse_args()

    if len(args) != 2:
        p.error("incorrect number of arguments")

    prody.confProDy(auto_show=False)  # do not auto-display ProDy plots

    outFileLines = open(args[0], "r").read().splitlines()
    grid_num = int(outFileLines[0].split("\t")[0])
    grid_size = float(outFileLines[0].split("\t")[1])
    recFile = outFileLines[2].split("\t")[0]
    rec_pdbid = recFile[0:4]
    ligFile = outFileLines[3].split("\t")[0]
    lig_pdbid = ligFile[0:4]
    rec_init_cent = np.array([float(x) for x in outFileLines[2].strip().split("\t")[1:] if not len(x) == 0])
    lig_init_cent = np.array([float(x) for x in outFileLines[3].strip().split("\t")[1:] if not len(x) == 0])

    partnerPdbFile = args[1]
    partner = prody.parsePDB(partnerPdbFile)
    partner = partner.select("protein")
    partner = preprocess_single(partner, "C")
Example #22
from prody import parsePDB, fetchPDB, confProDy
from collections import OrderedDict
from difflib import SequenceMatcher
from src.utils import PAD_SS, to_one_letter, BACKBONE
from src.utils.profile import *
from src.utils.data import *
from src.utils.stride import *

LOAD_CCM = False

if LOAD_CCM: from src.utils.ccmpred import *

warnings.simplefilter(action='ignore', category=FutureWarning)

np.seterr('raise')
confProDy(verbosity='none')
random.seed(101)

PERM = 'r'
IDX = h5py.File(osp.join(DATA_HOME, 'h5', 'idx.h5'), PERM)
BETAS = h5py.File(osp.join(DATA_HOME, 'h5', 'betas.h5'), PERM)
COORDS = h5py.File(osp.join(DATA_HOME, 'h5', 'coords.h5'), PERM)
RESNAMES = h5py.File(osp.join(DATA_HOME, 'h5', 'resnames.h5'), PERM)
ATOMNAMES = h5py.File(osp.join(DATA_HOME, 'h5', 'atomnames.h5'), PERM)

_, SEQs = FASTA(osp.join(DATA_HOME, 'etc', 'pdb_seqres.txt'))

MAX_ALLOWED_SHIFT = 10
MAX_BATCH_SIZE = 1
MAX_LENGTH = 512
MIN_LENGTH = 32
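A hedged sketch of reading one entry back out of these HDF5 maps; the key scheme is not shown in this excerpt, so '1abcA' is a made-up identifier:

pdb_id = '1abcA'             # hypothetical key
if pdb_id in COORDS:
    xyz = COORDS[pdb_id][:]  # load the full coordinate array into memory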
Example #24
from HPC_Drug import important_lists
from HPC_Drug import pipeline_functions
from HPC_Drug import file_manipulation
from HPC_Drug.structures import ligand
from HPC_Drug.structures import protein
from HPC_Drug.auxiliary_functions import get_iterable
from HPC_Drug.PDB import prody

# "true_prody_module" below presumably refers to the real ProDy package,
# imported under a distinct name because HPC_Drug.PDB.prody shadows "prody"
import prody as true_prody_module

# deactivate all BiopythonWarning warnings
import warnings
import Bio
warnings.simplefilter('ignore', Bio.BiopythonWarning)

# deactivate all ProDy output
true_prody_module.confProDy(verbosity='none')

class Orient(object):
    """This class contains the methods needed to 
    put the given stucture in a box with solvent
    and do many other calculations needed to create
    the input for the orac optimization with solvent"""

    def __init__(self, Protein = None, Ligand = None):
        self.Protein = Protein
        self.Ligand = Ligand
        self.atom_weights = important_lists.atom_weights


    def center_of_mass(self, entity = None, geometric=False):
        """
Example #25
import prody as pr
from tqdm import tqdm

from sidechainnet.utils.align import (assert_mask_gaps_are_correct,
                                      expand_data_with_mask, init_aligner,
                                      merge)
from sidechainnet.utils.download import download_sidechain_data
from sidechainnet.utils.errors import write_errors_to_files
from sidechainnet.utils.manual_adjustment import (manually_adjust_data,
                                                  manually_correct_mask,
                                                  needs_manual_adjustment)
from sidechainnet.utils.measure import NUM_COORDS_PER_RES
from sidechainnet.utils.organize import load_data, organize_data, save_data
from sidechainnet.utils.parse import parse_raw_proteinnet

pr.confProDy(verbosity="none")
pr.confProDy(auto_secondary=False)


def combine(pn_entry, sc_entry, aligner, pnid):
    """Supplements one entry in ProteinNet with sidechain information.

    Args:
        aligner: A sequence aligner with desired settings. See
            utils.alignment.init_aligner().
        pn_entry: A dictionary describing a single ProteinNet protein. Contains
            sequence, coordinates, PSSMs, secondary structure.
        sc_entry: A dictionary describing the sidechain information for the same
            protein. Contains sequence, coordinates, and angles.

    Returns:
Example #27
File: __init__.py Project: migonsu/pyProCT
import prody
from pyproct.driver.time.timerHandler import TimerHandler

# Set current version
__version__ = "1.7.2"

# Set ProDy verbosity to avoid an excess of command-line messages
prody.confProDy(verbosity="none")
Example #28
#!/usr/bin/env python3
"""Filter LightDock final swarm results depending on the percentage of restraints satisfied"""

import sys
import os
import argparse
import shutil
import re
from prody.measure.contacts import Contacts
from prody import parsePDB, confProDy
from lightdock.util.logger import LoggingManager
from lightdock.util.analysis import read_ranking_file

# Configure ProDy output ('info' keeps ProDy's default logging enabled)
confProDy(verbosity='info')
filtered_folder = 'filtered'

log = LoggingManager.get_logger('lgd_filter_restraints')


def get_structures(ranking, base_path='.'):
    structures = []
    for rank in ranking:
        swarm_id = rank.id_swarm
        glowworm_id = rank.id_glowworm
        score = rank.scoring
        structures.append([
            os.path.join(base_path, 'swarm_{}'.format(swarm_id),
                         'lightdock_{}.pdb'.format(glowworm_id)), score
        ])
    return structures
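A hypothetical use of get_structures with LightDock's ranking reader imported above (the ranking file name is an assumption):

ranking = read_ranking_file('rank_by_scoring.list')  # assumed file name
structures = get_structures(ranking)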
Example #29
import collections
import os
import time
import glob
import argparse
import pandas as pds
import prody as pd
import numpy as np
# rdkit is used below but was missing from this excerpt's imports
from rdkit import rdBase, RDConfig
from rdkit.Chem import ChemicalFeatures
import growai.model.docking.glide as gl
import growai.model.helpers.helpers as hp
import growai.analysis as an

# Deactivate TensorFlow warnings
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# Deactivate ProDy warnings (note: "pd" is ProDy here, not pandas)
pd.confProDy(verbosity="none")

# Disable RDKit error logging
rdBase.DisableLog('rdApp.error')

# Hardcode RDKit data
#RDKIT=os.path.join(cs.SCHRODINGER, "mmshare-v*/data/RDKit/Data/")
RDKIT=""

# Print an error if the RDKit data directory is not recognized
try:
    fdefName = os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef')
    factory = ChemicalFeatures.BuildFeatureFactory(fdefName)
except IOError:
    print("If error: not RDDataDir specified on installation. Manually change the RDKIT variable on this file {}".format(os.__file__))
    fdefName = os.path.join(RDKIT,'BaseFeatures.fdef')