def calcGNMfeatures(self, chain='all', env='chain', GNM_PRS=True):
    """Computes GNM-based features.

    :arg chain: chain identifier, or ``'all'`` to process every chain
    :type chain: str
    :arg env: environment model, i.e. ``'chain'``, ``'reduced'``
        or ``'sliced'``
    :type env: str
    :arg GNM_PRS: whether or not to compute features based on
        Perturbation Response Scanning analysis
    :type GNM_PRS: bool
    """
    assert env in ['chain', 'reduced', 'sliced']
    assert type(GNM_PRS) is bool

    key_msf = 'GNM_MSF-' + env
    key_eff = 'GNM_effectiveness-' + env
    key_sns = 'GNM_sensitivity-' + env

    # full list of features expected in each chain's feature dict
    features = [key_msf]
    if GNM_PRS:
        features += [key_eff, key_sns]

    # compute features (if not precomputed)
    targets = self.chids if chain == 'all' else [chain]
    for chID in targets:
        feat_dict = self.feats[chID]
        if all(f in feat_dict for f in features):
            # everything already computed for this chain
            continue
        try:
            gnm = self.calcGNM(chID, env=env)
        except Exception as err:
            # store the error message in place of each missing feature
            msg = 'MemoryError' if isinstance(err, MemoryError) else str(err)
            for f in features:
                feat_dict[f] = msg
            LOGGER.warn(msg)
            continue
        if key_msf not in feat_dict:
            try:
                feat_dict[key_msf] = calcSqFlucts(gnm)
            except Exception as err:
                msg = str(err)
                feat_dict[key_msf] = msg
                LOGGER.warn(msg)
        if key_eff in features and key_eff not in feat_dict:
            try:
                prs_mtrx, eff, sns = calcPerturbResponse(gnm)
                feat_dict[key_eff] = eff
                feat_dict[key_sns] = sns
            except Exception as err:
                msg = str(err)
                feat_dict[key_eff] = msg
                feat_dict[key_sns] = msg
                LOGGER.warn(msg)
    return
def _do_translation(self):
    """Center both coordinate sets on their weighted average positions.

    Weights are the inverse ANM square fluctuations, so less mobile
    atoms contribute more to the centering.  The computed means are
    kept on the instance for later use.
    """
    inv_flucts = 1.0 / prody.calcSqFlucts(self._anm)
    mean_x = calc_average_coords(self._x, inv_flucts)
    mean_y = calc_average_coords(self._y, inv_flucts)
    self._x = self._x - mean_x
    self._y = self._y - mean_y
    self._x_mean = mean_x
    self._y_mean = mean_y
def calcGNMfeatures(self, chain='all', env='chain', GNM_PRS=True):
    """Computes GNM-based features.

    :arg chain: chain identifier, or ``'all'`` to process every chain
    :type chain: str
    :arg env: environment model, i.e. ``'chain'``, ``'reduced'`` or ``'sliced'``
    :type env: str
    :arg GNM_PRS: whether or not to compute features based on
        Perturbation Response Scanning analysis
    :type GNM_PRS: bool
    """
    assert env in ['chain', 'reduced', 'sliced']
    assert type(GNM_PRS) is bool
    # list of features to be computed
    features = ['GNM_MSF-' + env]
    if GNM_PRS:
        features += ['GNM_effectiveness-' + env, 'GNM_sensitivity-' + env]
    # compute features (if not precomputed)
    if chain == 'all':
        chain_list = self.chids
    else:
        chain_list = [chain, ]
    for chID in chain_list:
        d = self.feats[chID]
        if all([f in d for f in features]):
            # all requested features already cached for this chain
            continue
        try:
            gnm = self.calcGNM(chID, env=env)
        except Exception as e:
            if (isinstance(e, MemoryError)):
                msg = 'MemoryError'
            else:
                msg = str(e)
            # on failure, the error message is stored in place of each feature
            for f in features:
                d[f] = msg
            LOGGER.warn(msg)
            continue
        key_msf = 'GNM_MSF-' + env
        if key_msf not in d:
            try:
                d[key_msf] = calcSqFlucts(gnm)
            except Exception as e:
                msg = str(e)
                d[key_msf] = msg
                LOGGER.warn(msg)
        key_eff = 'GNM_effectiveness-' + env
        if key_eff in features and key_eff not in d:
            key_sns = 'GNM_sensitivity-' + env
            try:
                prs_mtrx, eff, sns = calcPerturbResponse(gnm)
                d[key_eff] = eff
                d[key_sns] = sns
            except Exception as e:
                msg = str(e)
                # both PRS features fail together
                d[key_eff] = msg
                d[key_sns] = msg
                LOGGER.warn(msg)
    return
def get_enm_fluctuations(enm, n_modes=6):
    """
    Get squared fluctuations of each residue according to an elastic
    network model

    Parameters
    ----------
    enm
        pd.dynamics.anm.ANM or pd.dynamics.gnm.GNM object
    n_modes
        number of ENM modes to consider

    Returns
    -------
    array of squared fluctuations per residue
    """
    leading_modes = enm[:n_modes]
    return pd.calcSqFlucts(leading_modes)
def get_pca_fluctuations(ensemble, limit=3):
    """
    Get squared fluctuations of each residue according to a PCA on the
    ensemble

    Parameters
    ----------
    ensemble
        pd.PDBEnsemble object
    limit
        number of PCA modes to consider

    Returns
    -------
    array of squared fluctuations per aligned residue
    """
    analysis = pd.PCA()
    analysis.buildCovariance(ensemble)
    analysis.calcModes()
    return pd.calcSqFlucts(analysis[:limit])
def prody_anm(pdb, **kwargs):
    """Perform ANM calculations for *pdb*.

    Builds and diagonalizes the Hessian for the selected atoms, then
    writes the numerical and (optionally) graphical output requested by
    *kwargs*.  Missing options fall back to the module-level ``DEFAULTS``.
    """
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, join
    outdir = kwargs.get('outdir')
    if not isdir(outdir):
        raise IOError('{0} is not a valid path'.format(repr(outdir)))

    import numpy as np
    import prody
    LOGGER = prody.LOGGER

    # FIX: 'select' was read twice into selstr; read each option once
    selstr = kwargs.get('select')
    prefix = kwargs.get('prefix')
    cutoff = kwargs.get('cutoff')
    gamma = kwargs.get('gamma')
    nmodes = kwargs.get('nmodes')
    model = kwargs.get('model')

    pdb = prody.parsePDB(pdb, model=model)
    if prefix == '_anm':
        prefix = pdb.getTitle() + '_anm'
    select = pdb.select(selstr)
    if select is None:
        LOGGER.warn('Selection {0} did not match any atoms.'.format(
            repr(selstr)))
        return
    LOGGER.info('{0} atoms will be used for ANM calculations.'.format(
        len(select)))

    anm = prody.ANM(pdb.getTitle())
    anm.buildHessian(select, cutoff, gamma)
    anm.calcModes(nmodes)

    LOGGER.info('Writing numerical output.')
    if kwargs.get('outnpz'):
        prody.saveModel(anm, join(outdir, prefix))
    prody.writeNMD(join(outdir, prefix + '.nmd'), anm, select)

    # optionally extend the coarse-grained model to more atoms
    extend = kwargs.get('extend')
    if extend:
        if extend == 'all':
            extended = prody.extendModel(anm, select, pdb)
        else:
            extended = prody.extendModel(anm, select, select | pdb.bb)
        prody.writeNMD(join(outdir, prefix + '_extended_' + extend + '.nmd'),
                       *extended)

    outall = kwargs.get('outall')
    delim = kwargs.get('numdelim')
    ext = kwargs.get('numext')
    format = kwargs.get('numformat')

    if outall or kwargs.get('outeig'):
        prody.writeArray(join(outdir, prefix + '_evectors' + ext),
                         anm.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + '_evalues' + ext),
                         anm.getEigvals(), delimiter=delim, format=format)

    if outall or kwargs.get('outbeta'):
        from prody.utilities import openFile
        fout = openFile(prefix + '_beta.txt', 'w', folder=outdir)
        fout.write(
            '{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n'.format(
                ['C', 'RES', '####', 'Exp.', 'The.']))
        # one row per selected atom: experimental vs theoretical B-factors
        for data in zip(select.getChids(), select.getResnames(),
                        select.getResnums(), select.getBetas(),
                        prody.calcTempFactors(anm, select)):
            fout.write(
                '{0[0]:1s} {0[1]:4s} {0[2]:4d} {0[3]:5.2f} {0[4]:5.2f}\n'.
                format(data))
        fout.close()

    if outall or kwargs.get('outcov'):
        prody.writeArray(join(outdir, prefix + '_covariance' + ext),
                         anm.getCovariance(), delimiter=delim, format=format)

    if outall or kwargs.get('outcc') or kwargs.get('outhm'):
        # cross-correlations are computed once and reused for both outputs
        cc = prody.calcCrossCorr(anm)
        if outall or kwargs.get('outcc'):
            prody.writeArray(join(outdir,
                                  prefix + '_cross-correlations' + ext),
                             cc, delimiter=delim, format=format)
        if outall or kwargs.get('outhm'):
            prody.writeHeatmap(join(outdir, prefix + '_cross-correlations.hm'),
                               cc, resnum=select.getResnums(),
                               xlabel='Residue', ylabel='Residue',
                               title=anm.getTitle() + ' cross-correlations')

    if outall or kwargs.get('hessian'):
        prody.writeArray(join(outdir, prefix + '_hessian' + ext),
                         anm.getHessian(), delimiter=delim, format=format)

    if outall or kwargs.get('kirchhoff'):
        prody.writeArray(join(outdir, prefix + '_kirchhoff' + ext),
                         anm.getKirchhoff(), delimiter=delim, format=format)

    if outall or kwargs.get('outsf'):
        prody.writeArray(join(outdir, prefix + '_sqflucts' + ext),
                         prody.calcSqFlucts(anm), delimiter=delim,
                         format=format)

    figall = kwargs.get('figall')
    cc = kwargs.get('figcc')
    sf = kwargs.get('figsf')
    bf = kwargs.get('figbeta')
    cm = kwargs.get('figcmap')
    if figall or cc or sf or bf or cm:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False
            LOGGER.info('Saving graphical output.')
            format = kwargs.get('figformat')
            width = kwargs.get('figwidth')
            height = kwargs.get('figheight')
            dpi = kwargs.get('figdpi')
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(anm)
                plt.savefig(join(outdir, prefix + '_cc.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(anm)
                plt.savefig(join(outdir, prefix + '_cm.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(anm)
                plt.savefig(join(outdir, prefix + '_sf.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or bf:
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(anm, select)
                plt.plot(bexp, label='Experimental')
                plt.plot(bcal, label=('Theoretical (R={0:.2f})'.format(
                    np.corrcoef(bcal, bexp)[0, 1])))
                plt.legend(prop={'size': 10})
                plt.xlabel('Node index')
                plt.ylabel('Experimental B-factors')
                plt.title(pdb.getTitle() + ' B-factors')
                plt.savefig(join(outdir, prefix + '_bf.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
def prody_anm(opt):
    """Perform ANM calculations based on command line arguments."""
    outdir = opt.outdir
    if not os.path.isdir(outdir):
        opt.subparser.error('{0:s} is not a valid path'.format(outdir))

    import numpy as np
    import prody
    LOGGER = prody.LOGGER

    pdb = opt.pdb
    prefix = opt.prefix
    # FIX: these options were read through one garbled chained assignment
    # (``cutoff, gamma = opt.cutoff, opt.gamma, nmodes, ... = ...``) which
    # unpacks 3 values into 5 targets and raises ValueError at runtime.
    cutoff, gamma = opt.cutoff, opt.gamma
    nmodes, selstr, model = opt.nmodes, opt.select, opt.model

    pdb = prody.parsePDB(pdb, model=model)
    if prefix == '_anm':
        prefix = pdb.getTitle() + '_anm'
    select = pdb.select(selstr)
    if select is None:
        # FIX: ``opt.subparser(...)`` called the parser object itself
        # (TypeError); report the bad selection through the parser's
        # error handler, which also aborts execution.
        opt.subparser.error('Selection "{0:s}" do not match any atoms.'
                            .format(selstr))
    LOGGER.info('{0:d} atoms will be used for ANM calculations.'
                .format(len(select)))

    anm = prody.ANM(pdb.getTitle())
    anm.buildHessian(select, cutoff, gamma)
    anm.calcModes(nmodes)

    LOGGER.info('Writing numerical output.')
    if opt.npz:
        # FIX: save next to the other outputs instead of the working
        # directory (consistent with the kwargs-based implementation)
        prody.saveModel(anm, os.path.join(outdir, prefix))
    prody.writeNMD(os.path.join(outdir, prefix + '.nmd'), anm, select)

    outall = opt.all
    delim, ext, format = opt.delim, opt.ext, opt.numformat

    if outall or opt.eigen:
        prody.writeArray(os.path.join(outdir, prefix + '_evectors'+ext),
                         anm.getArray(), delimiter=delim, format=format)
        # NOTE(review): other versions of this command use getEigvals();
        # getEigenvalues() is the older API name -- confirm the installed
        # ProDy version provides it.
        prody.writeArray(os.path.join(outdir, prefix + '_evalues'+ext),
                         anm.getEigenvalues(), delimiter=delim, format=format)

    if outall or opt.beta:
        fout = prody.openFile(prefix + '_beta.txt', 'w', folder=outdir)
        fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n'
                   .format(['C', 'RES', '####', 'Exp.', 'The.']))
        # one row per selected atom: experimental vs theoretical B-factors
        for data in zip(select.getChids(), select.getResnames(),
                        select.getResnums(), select.getBetas(),
                        prody.calcTempFactors(anm, select)):
            fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4d} {0[3]:5.2f} {0[4]:5.2f}\n'
                       .format(data))
        fout.close()

    if outall or opt.covar:
        prody.writeArray(os.path.join(outdir, prefix + '_covariance'+ext),
                         anm.getCovariance(), delimiter=delim, format=format)

    if outall or opt.ccorr:
        prody.writeArray(os.path.join(outdir,
                                      prefix + '_cross-correlations' + ext),
                         prody.calcCrossCorr(anm), delimiter=delim,
                         format=format)

    if outall or opt.hessian:
        prody.writeArray(os.path.join(outdir, prefix + '_hessian'+ext),
                         anm.getHessian(), delimiter=delim, format=format)

    if outall or opt.kirchhoff:
        prody.writeArray(os.path.join(outdir, prefix + '_kirchhoff'+ext),
                         anm.getKirchhoff(), delimiter=delim, format=format)

    if outall or opt.sqflucts:
        prody.writeArray(os.path.join(outdir, prefix + '_sqflucts'+ext),
                         prody.calcSqFlucts(anm), delimiter=delim,
                         format=format)

    figall, cc, sf, bf, cm = opt.figures, opt.cc, opt.sf, opt.bf, opt.cm
    if figall or cc or sf or bf or cm:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            LOGGER.info('Saving graphical output.')
            format, width, height, dpi = \
                opt.figformat, opt.width, opt.height, opt.dpi
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(anm)
                plt.savefig(os.path.join(outdir, prefix + '_cc.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(anm)
                plt.savefig(os.path.join(outdir, prefix + '_cm.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(anm)
                plt.savefig(os.path.join(outdir, prefix + '_sf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or bf:
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(anm, select)
                plt.plot(bexp, label='Experimental')
                plt.plot(bcal, label=('Theoretical (R={0:.2f})'
                                      .format(np.corrcoef(bcal, bexp)[0,1])))
                plt.legend(prop={'size': 10})
                plt.xlabel('Node index')
                plt.ylabel('Experimental B-factors')
                plt.title(pdb.getTitle() + ' B-factors')
                plt.savefig(os.path.join(outdir, prefix + '_bf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
def corepagecalculation(pdbfilename, selatom, noma1, nummodes, gamcut, cut1, gam2, cut2, showresults, smodes, snmd, smodel, scollec, massnomass, sample1, modeens, confens, rmsdens, traverse1, modetra, steptra, rmsdtra, modelnumber, caanm, cagnm, nohanm, nohgnm, allanm, allgnm, bbanm, bbgnm, scanm, scgnm, nmdfolder, modesfolder, collectivityfolder, modelnewname, nmdnewname, modesnewname, modesendname, collectivitynewname, collectivityendname, samplenewname, traversenewname, crosscorr=0, corrfolder='', corrname='', corrend='', compmode01='7', compmode02='15', sqflucts=0, sqfluctsfolder='', sqfluctsname='', sqfluctsend='', separatevar1='0', temfac=0, temfacfolder='', temfacname='', temfacend='', fracovar=0, ovlapfold='', ovlapname='', ovlapend='', ovlaptab=0, ovlaptabname='', ovlaptabend='', comppdbfilename=''):  # modelnumber
    """Run a full GNM/ANM normal-mode analysis job for the NOMA GUI.

    Parses *pdbfilename* with ProDy, builds a GNM or ANM model (depending
    on *noma1*) on the atom subset chosen by *selatom*, and then writes
    whichever outputs the caller enabled via the many flag arguments
    (model file, NMD, mode vectors, collectivity, sampled/traversed
    conformers, cross-correlations, square fluctuations, B-factors,
    fraction of variance and overlap plots).  Progress is mirrored into a
    Tkinter "Info" window.  Python 2 code (print statements, Tkinter).

    Returns a 3-tuple ``(time message, model filename or 'nofile',
    most-collective mode number or 'nocoll')``.

    NOTE(review): this source was recovered from a whitespace-collapsed
    copy; line breaks inside multiline strings were reconstructed and
    should be confirmed against the original NOMA sources.
    """
    import prody
    import time
    import os
    import Tkinter

    # --- progress window -------------------------------------------------
    root=Tkinter.Tk()
    root.title('Info')
    onlypage=Tkinter.Frame(root)
    onlypage.pack(side='top')
    Tkinter.Label(onlypage,text='File: '+pdbfilename).grid(row=0,column=0,sticky='w')
    Tkinter.Label(onlypage,text='Atoms: '+selatom).grid(row=1,column=0,sticky='w')
    Tkinter.Label(onlypage,text='Analysis: '+noma1).grid(row=2,column=0,sticky='w')

    # --- load saved settings (~/.noma/savefile.txt) ----------------------
    path=os.path.join(os.path.expanduser('~'),'.noma/')
    fin = open(path+'savefile.txt','r')
    global savedfile
    savedfile=fin.readlines()
    fin.close()
    i=0
    a=len(savedfile)
    while i<a:
        savedfile[i]=savedfile[i][:-1]  # strip trailing newline
        i+=1
    if gamcut=='0':
        # distance-dependent gamma: exponent stored at savefile line 92
        Tkinter.Label(onlypage,text='Gamma: r^'+savedfile[91]).grid(row=3,column=0,sticky='w')
        Tkinter.Label(onlypage,text='Cutoff: '+cut1).grid(row=4,column=0,sticky='w')
    elif gamcut=='1':
        Tkinter.Label(onlypage,text='Gamma: '+gam2).grid(row=3,column=0,sticky='w')
        Tkinter.Label(onlypage,text='Cutoff: '+cut2).grid(row=4,column=0,sticky='w')

    # --- split pdbfilename into directory (bgn) and base name (name) -----
    find = 0
    while find < len(pdbfilename):
        if pdbfilename[-(find+1):-find] == '/':
            bgn = len(pdbfilename)-find
            break
        else:                                  # helps in the
            find +=1                           # saving of files
    try:
        float(bgn)
    except (NameError):
        bgn = 0
    find = 0
    while bgn+find<len(pdbfilename):
        if pdbfilename[bgn+find:bgn+find+1] == '.':
            end = len(pdbfilename)-(bgn+find)
            break
        else:
            find +=1
    try:
        name = pdbfilename[bgn:-end]
    except (NameError):
        name = pdbfilename[bgn:len(pdbfilename)]  # name of the file
    bgn = pdbfilename[:bgn]                       # path for file

    mytimeis = time.asctime(time.localtime(time.time()))
    start = time.time()
    try:
        p38 = prody.parsePDB(pdbfilename,model=int(modelnumber))
    except:
        import tkMessageBox
        tkMessageBox.askokcancel("File Error","""This is not the correct path or name.
Try entering /some/path/nameoffile.pdb
If you need help finding the path, open a new terminal and enter:
find -name 'filename.pdb'
use the output as the pdb input
If this doesn't work, make sure the file is in PDB format.""")
        p38 = prody.parsePDB(pdbfilename)
    print 'Submitted: '+pdbfilename+' at '+mytimeis
    Tkinter.Label(onlypage,text='Submitted at: '+mytimeis).grid(row=5,column=0,sticky='w')
    root.update()

    # --- atom selection and output folder per (selatom, noma1) combo -----
    if selatom == "C-alpha" and noma1 == "Gaussian Normal Mode":
        folder = cagnm+'/'
        pro = p38.select('protein and name CA')  # selects only carbon alpahs
    elif selatom == "C-alpha" and noma1 == "Anisotropic Normal Mode":
        folder = caanm+'/'
        pro = p38.select('protein and name CA')
    elif selatom == "Heavy" and noma1 == "Gaussian Normal Mode":
        folder = nohgnm+'/'
        pro = p38.select('protein and not name "[1-9]?H.*"')  # gets rid of all Hydrogens
    elif selatom == "Heavy" and noma1 == "Anisotropic Normal Mode":
        folder = nohanm+'/'
        pro = p38.select('protein and not name "[1-9]?H.*"')
    elif selatom == "All" and noma1 == "Gaussian Normal Mode":
        folder = allgnm+'/'
        pro = p38.select('protein')
    elif selatom == "All" and noma1 == "Anisotropic Normal Mode":
        folder = allanm+'/'
        pro = p38.select('protein')
    elif selatom == "Backbone" and noma1 == "Gaussian Normal Mode":
        folder = bbgnm+'/'
        pro = p38.select('protein and name CA C O N H')  # selects backbone
    elif selatom == "Backbone" and noma1 == "Anisotropic Normal Mode":
        folder = bbanm+'/'
        pro = p38.select('protein and name CA C O N H')  # selects backbone
    elif selatom == "Sidechain" and noma1 == "Gaussian Normal Mode":
        folder = scgnm+'/'
        pro = p38.select('protein and not name CA C O N H')  # selects sidechain
    elif selatom == "Sidechain" and noma1 == "Anisotropic Normal Mode":
        folder = scanm+'/'
        pro = p38.select('protein and not name CA C O N H')  # selects sidechain
    try:
        open(bgn+folder)               # creates the folders
    except (IOError):                  # where the files will
        try:                           # be saved only if they
            os.makedirs(bgn+folder)    # are not there
        except (OSError):
            mer = 0

    # --- build the elastic network model ---------------------------------
    if noma1 == "Gaussian Normal Mode":
        print 'Building the Kirchhoff matrix'
        Tkinter.Label(onlypage,text='Building Kirchhoff').grid(row=6,column=0,sticky='w')
        root.update()
        anm = prody.GNM(name)###
        if gamcut=='0':
            anm.buildKirchhoff(pro,cutoff=float(cut1),gamma=gammaDistanceDependent)###
            anm.setKirchhoff(anm.getKirchhoff())
        elif gamcut=='1':
            anm.buildKirchhoff(pro,cutoff=float(cut2),gamma=float(gam2))###
        brat = 2  # number of trivial (zero) modes + 1 for GNM
    elif noma1 == "Anisotropic Normal Mode":
        print 'Building the Hessian matrix'
        Tkinter.Label(onlypage,text='Building Hessian').grid(row=6,column=0,sticky='w')
        root.update()
        anm = prody.ANM(name)###
        if gamcut=='0':
            anm.buildHessian(pro,cutoff=float(cut1),gamma=gammaDistanceDependent)###
            anm.setHessian(anm.getHessian())###
        elif gamcut=='1':
            anm.buildHessian(pro,cutoff=float(cut2),gamma=float(gam2))###
        brat = 7  # number of trivial (zero) modes + 1 for ANM
    print 'Calculating modes'
    Tkinter.Label(onlypage,text='Calculating modes').grid(row=7,column=0,sticky='w')
    root.update()
    anm.calcModes(int(nummodes),zeros = True)###
    numatom=anm.numAtoms()###
    eigval=anm.getEigvals()###
    atomname=pro.getNames()###

    # --- save model file --------------------------------------------------
    if smodel==1:
        if brat==2:
            modelfilename=bgn+folder+name+modelnewname+'.gnm.npz'
        elif brat==7:
            modelfilename=bgn+folder+name+modelnewname+'.anm.npz'
        print 'Saving Model'
        Tkinter.Label(onlypage,text='Saving Model').grid(row=8,column=0,sticky='w')
        root.update()
        try:
            prody.saveModel(anm,bgn+folder+name+modelnewname,True)###
        except:
            # saving with matrices can fail for large systems; retry without
            print 'Matrix not saved due to size'
            Tkinter.Label(onlypage,text='Matrix not saved').grid(row=8,column=0,sticky='w')
            root.update()
            prody.saveModel(anm,bgn+folder+name+modelnewname)###

    # --- save NMD file ----------------------------------------------------
    if snmd==1:
        print 'Saving NMD'
        Tkinter.Label(onlypage,text='Saving NMD').grid(row=9,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+nmdfolder+'/')
        except (OSError):
            mer = 0
        prody.writeNMD(bgn+folder+nmdfolder+'/'+name+nmdnewname+'.nmd',anm[:len(eigval)],pro)###  # this can be viewed in VMD

    # --- save raw mode vectors -------------------------------------------
    if smodes==1:
        print 'Saving Modes'
        Tkinter.Label(onlypage,text='Saving Modes').grid(row=10,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+modesfolder+'/')
        except (OSError):
            mer = 0
        modefile = bgn+folder+modesfolder+'/'+name+modesnewname+'.'+modesendname
        fout = open(modefile,'w')
        mer = 0
        while mer< len(eigval):
            slowest_mode = anm[mer]###
            r = slowest_mode.getEigvec()###
            p = slowest_mode.getEigval()###
            tq = 0
            tt = 0
            ttt = 1
            tttt = 2
            fout.write('MODE {0:3d} {1:15e}'.format(mer+1,p))
            fout.write("""
-------------------------------------------------
""")
            if noma1 == "Gaussian Normal Mode":
                # one component per atom
                while tq < numatom:
                    fout.write("""{0:4s}{1:15e}
""".format(atomname[tq],r[tq]))
                    tq +=1
            elif noma1 == "Anisotropic Normal Mode":
                # three components (x, y, z) per atom
                while tt < numatom*3:
                    fout.write("""{0:4s}{1:15e}{2:15e}{3:15e}
""".format(atomname[tq],r[tt],r[ttt],r[tttt]))
                    tq+=1
                    tt +=3
                    ttt+=3
                    tttt+=3
            mer +=1
        fout.close()
        if showresults=='1':
            os.system('/usr/bin/gnome-open '+modefile)

    # --- collectivity (mass-weighted and plain) --------------------------
    if scollec==1:
        print 'Saving collectivity'
        Tkinter.Label(onlypage,text='Saving collectivity').grid(row=11,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+collectivityfolder+'/')
        except (OSError):
            mer = 0
        mer = 0
        xx = [0]*(numatom)  # sets the array to zero and other initial conditions
        i = 0
        aa = 0
        no = 0
        var3 = 0
        sss = [0]*(len(eigval))
        while mer< len(eigval):
            slowest_mode = anm[mer]###
            r = slowest_mode.getEigvec()###
            p = slowest_mode.getEigval()###
            a = 0
            tt = 0
            ttt = 1
            tttt = 2
            while a < numatom:
                # look up an approximate atomic mass from the first one or
                # two characters of the atom name
                atom = atomname[a]
                mass = 0
                while mass < 2:
                    if atom[mass] == "N":      # all nitrogen
                        m = 14.0067
                        break
                    elif atom[mass] == 'H':    # all hydrogen
                        m = 1.00794
                        break
                    elif atom[mass] == "C" :   # all carbon
                        m = 12.0107
                        break
                    elif atom[mass] == "O" :   # all oxygen
                        m = 15.9994
                        break
                    elif atom[mass] == 'S':    # all sulfur
                        m = 32.065
                        break
                    elif atom[mass] == 'P' :   # all phosphorus
                        m = 30.973762
                        break
                    else:
                        if mass == 0:
                            mass +=1
                            try:
                                atom[mass]
                            except (IndexError):
                                m = 1
                                if no == 0:
                                    print 'Enter atom '+atom+' in to the system. Its mass was set to 1 in this simulation.'
                                    no +=1
                                break
                        else:
                            m = 1
                            if no == 0:
                                print 'Enter atom '+atom+' in to the system. Its mass was set to 1 in this simulation'
                                no +=1
                            break
                # mass-weighted squared amplitude of this atom in the mode
                if len(r)/numatom == 3:
                    xx[i] = (r[tt]**2 + r[ttt]**2 + r[tttt]**2)/m
                    i +=1
                    tt +=3
                    ttt+=3
                    tttt+=3
                else:
                    xx[i] = (r[tt]**2)/m
                    i +=1
                    tt +=1
                a +=1
            # Shannon-entropy style collectivity of the amplitude profile
            var3 = 0
            j = 0
            loop = 1
            while loop == 1:
                if sum(xx) == 0:  # need this because you can't divide by 0
                    loop = 0
                elif j <(numatom):
                    var1 = xx[j]/sum(xx)
                    if var1 == 0:
                        var2 = 0
                    elif var1 != 0:
                        from math import log  # this means natural log
                        var2 = var1* log(var1)
                    var3 += var2
                    j +=1
                else:
                    from math import exp
                    k = exp(-var3)/numatom
                    sss[aa] = k, aa+1
                    aa +=1
                    mer +=1
                    loop = 0
            i = 0
            xx = [0]*(numatom)  # goes through all this until the big loop is done
        # non-mass-weighted collectivity straight from ProDy
        a = 0
        k=[0]*(len(eigval))
        while a < len(eigval):
            k[a]=prody.calcCollectivity(anm[a]),a+1
            a +=1
        collectivefile = bgn+folder+collectivityfolder+'/'+name+collectivitynewname+'.'+collectivityendname
        fout = open(collectivefile,'w')
        if massnomass=='0':
            fout.write('MODE COLLECTIVITY(mass)')
            fout.write("""
---------------------------
""")
            for h in sorted(sss,reverse=True):
                fout.write(str(h)[-3:-1]+' '+str(h)[1:19]+"""
""")
            fout.write("""
MODE COLLECTIVITY(without mass)""")
            fout.write("""
---------------------------
""")
            for hh in sorted(k,reverse=True):
                fout.write(str(hh)[-3:-1]+' '+str(hh)[1:19]+"""
""")
        elif massnomass=='1':
            fout.write('MODE COLLECTIVITY(without mass)')
            fout.write("""
---------------------------
""")
            for hh in sorted(k,reverse=True):
                fout.write(str(hh)[-3:-1]+' '+str(hh)[1:19]+"""
""")
            fout.write("""
MODE COLLECTIVITY(mass)""")
            fout.write("""
---------------------------
""")
            for h in sorted(sss,reverse=True):
                fout.write(str(h)[-3:-1]+' '+str(h)[1:19]+"""
""")
        fout.close()
        if showresults=='1':
            os.system('/usr/bin/gnome-open '+collectivefile)
        # re-read the file to pick the most collective non-trivial modes
        # (mode numbers below *brat* are the trivial zero modes)
        fin = open(collectivefile,'r')
        lst = fin.readlines()
        hi0 = 2
        looop = 1
        prut=0
        secoll=0
        thicoll=0
        while looop == 1:
            fine = lst[hi0]
            if int(fine[0:2]) >= brat:
                if prut==0:
                    prut=fine[0:2]
                elif secoll==0:
                    secoll=fine[0:2]
                elif thicoll==0:
                    thicoll=fine[0:2]
                else:
                    foucoll=fine[0:2]
                    looop = 0
            else:
                hi0 +=1
        mostcollective= "Mode "+prut+" is the most collective."
        Tkinter.Label(onlypage,text='Mode '+prut+' is the most collective').grid(row=12,column=0,sticky='w')
        root.update()
        print mostcollective
        fin.close()

    # --- sample conformers along selected modes --------------------------
    if sample1 == 1:
        print 'Saving sample file'
        Tkinter.Label(onlypage,text='Saving sample file').grid(row=13,column=0,sticky='w')
        root.update()
        # parse the user's mode list; 'Nc' tokens mean "the N most
        # collective modes" found above
        a = modeens+' '
        b = [0]*(len(a)+1)
        i = 0
        j = 0
        b1 = 0
        while i < len(a):
            if a[i:i+1] ==' ' or a[i:i+1]==',':
                try:
                    b[b1]=int(a[j:i])-1
                except:
                    if '1c' in a[j:i]:
                        b[b1]=int(prut)-1
                    elif '2c' in a[j:i]:
                        b[b1]=int(prut)-1
                        b1 +=1
                        b[b1]=int(secoll)-1
                    elif '3c' in a[j:i]:
                        b[b1]=int(prut)-1
                        b1 +=1
                        b[b1]=int(secoll)-1
                        b1 +=1
                        b[b1]=int(thicoll)-1
                    elif '4c' in a[j:i]:
                        b[b1]=int(prut)-1
                        b1 +=1
                        b[b1]=int(secoll)-1
                        b1 +=1
                        b[b1]=int(thicoll)-1
                        b1+=1
                        b[b1]=int(foucoll)-1
                j = i+1
                i +=1
                b1 +=1
            else:
                i +=1
        del b[b1:]
        ensemble = prody.sampleModes(anm[b],pro, n_confs=int(confens), rmsd =float(rmsdens))
        p38ens=pro.copy()
        p38ens.delCoordset(0)
        p38ens.addCoordset(ensemble.getCoordsets())
        prody.writePDB(bgn+folder+name+samplenewname+'.pdb',p38ens)

    # --- traverse a single mode ------------------------------------------
    if traverse1 ==1:
        print 'Saving traverse file'
        Tkinter.Label(onlypage,text='Saving traverse file').grid(row=14,column=0,sticky='w')
        root.update()
        if modetra=='c':
            modefortra=int(prut)-1  # 'c' means the most collective mode
        else:
            modefortra=int(modetra)-1
        trajectory=prody.traverseMode(anm[modefortra],pro,n_steps=int(steptra),rmsd=float(rmsdtra))
        prody.calcRMSD(trajectory).round(2)
        p38traj=pro.copy()
        p38traj.delCoordset(0)
        p38traj.addCoordset(trajectory.getCoordsets())
        prody.writePDB(bgn+folder+name+'_mode'+str(modefortra+1)+traversenewname+'.pdb',p38traj)

    # --- per-mode cross-correlations -------------------------------------
    if crosscorr==1:
        print 'Saving cross correlation'
        Tkinter.Label(onlypage,text='Saving cross-correlation').grid(row=15,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+corrfolder+'/')
        except (OSError):
            mer = 0
        i=int(compmode01)
        while i <= int(compmode02):
            x=i-1
            correlationdataname=bgn+folder+corrfolder+'/'+name+corrname+'_mode'+str(x+1)+'.'+corrend
            prody.writeArray(correlationdataname,prody.calcCrossCorr(anm[x]),'%.18e')
            print correlationdataname
            i+=1

    # --- per-mode square fluctuations ------------------------------------
    if sqflucts==1:
        print 'Saving square fluctuation'
        Tkinter.Label(onlypage,text='Saving square fluctuation').grid(row=16,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+sqfluctsfolder+'/')
        except (OSError):
            mer = 0
        i=int(compmode01)
        while i < int(compmode02):
            yelp = i-1
            sqfluctdataname = bgn+folder+sqfluctsfolder+'/'+name+sqfluctsname+'_mode'+str(yelp+1)+'.'+sqfluctsend
            fout = open(sqfluctdataname,'w')
            if separatevar1=='0':
                # plain index/value pairs, one per line
                a = 0
                while a < numatom:
                    fout.write(str(a))
                    fout.write("""
""")
                    fout.write(str(prody.calcSqFlucts(anm[yelp])[a]))
                    fout.write("""
""")
                    a +=1
            elif separatevar1=='1':
                # one block per chain, separated by '&' (xmgrace style)
                a=0
                while a <numatom:
                    firstresnum=int(p38.getResnums()[0:1][0])
                    origiresnum=int(p38.getResnums()[0:1][0])
                    while firstresnum<(int(numatom*1.0/p38.numChains())+origiresnum):
                        fout.write(str(firstresnum))
                        fout.write('\t')
                        fout.write(str(prody.calcSqFlucts(anm[yelp])[a]))
                        fout.write('\n')
                        a+=1
                        firstresnum+=1
                    fout.write('&\n')
            fout.close()
            print sqfluctdataname
            i+=1

    # --- theoretical temperature factors ---------------------------------
    if temfac==1:
        print 'Saving temperature factors'
        Tkinter.Label(onlypage,text='Saving temperature factors').grid(row=17,column=0,sticky='w')
        root.update()
        try:
            os.makedirs(bgn+folder+temfacfolder+'/')
        except (OSError):
            mer = 0
        # match the selected atom names back to serial numbers in the PDB
        fin=open(pdbfilename,'r')
        d = [None]*len(atomname)
        e = 0
        for line in fin:
            pair = line.split()
            if 'ATOM ' in line and e < len(atomname):
                if str(pair[2]) == str(atomname[e]):
                    d[e]=str(pair[1])
                    e+=1
                else:
                    e+=0
            else:
                continue
        fin.close()
        sqf = prody.calcSqFlucts(anm)
        x = sqf/((sqf**2).sum()**.5)  # normalized square fluctuations
        y = prody.calcTempFactors(anm,pro)  # scaled to experimental betas
        a = 0
        tempfactorsdataname =bgn+folder+temfacfolder+'/'+name+temfacname+'.'+temfacend
        fout=open(tempfactorsdataname,'w')
        fout.write("""Atom Residue TempFactor TempFactor with exp beta
""")
        while a < numatom:
            fout.write("""{0:4s} {1:4d} {2:15f} {3:15f}
""".format(d[a],a+1,x[a],y[a]))
            a +=1
        fout.close()
        print tempfactorsdataname

    # --- fraction of variance plot ---------------------------------------
    if fracovar==1:
        try:
            import matplotlib.pyplot as plt
            print 'Saving Fraction of Variance'
            Tkinter.Label(onlypage,text='Saving Fraction of Variance').grid(row=18,column=0,sticky='w')
            root.update()
            try:
                os.makedirs(bgn+folder+modesfolder+'/')
            except (OSError):
                mer = 0
            plt.figure(figsize = (5,4))
            prody.showFractVars(anm)
            prody.showCumulFractVars(anm)
            fracvardataname =bgn+folder+modesfolder+'/'+name+fraconame+'.'+fracoend
            plt.savefig(fracvardataname)
            print fracvardataname
            if showresults=='1':
                os.system('/usr/bin/gnome-open '+fracvardataname)
        except:
            print 'Error: Fraction of Variance'
            Tkinter.Label(onlypage,text='Error: Fraction of Variance').grid(row=18,column=0,sticky='w')
            root.update()
            mer=0

    # --- overlap with a second structure ---------------------------------
    if ovlap==1 or ovlaptab==1:
        try:
            import matplotlib.pyplot as plt
            print 'Saving Overlap'
            Tkinter.Label(onlypage,text='Saving Overlap').grid(row=19,column=0,sticky='w')
            root.update()
            Tkinter.Label(onlypage,text='Comparison: '+comppdbfilename).grid(row=20,column=0,sticky='w')
            # split comppdbfilename into directory (bgn1) and base (name1),
            # same scheme as for pdbfilename above
            find = 0
            while find < len(comppdbfilename):
                if comppdbfilename[-(find+1):-find] == '/':
                    bgn1 = len(comppdbfilename)-find
                    break
                else:
                    find +=1
            try:
                float(bgn1)
            except (NameError):
                bgn1 = 0
            find = 0
            while bgn1+find<len(comppdbfilename):
                if comppdbfilename[bgn1+find:bgn1+find+1] == '.':
                    end1 = len(comppdbfilename)-(bgn1+find)
                    break
                else:
                    find +=1
            try:
                name1 = comppdbfilename[bgn1:-end1]
            except (NameError):
                name1 = comppdbfilename[bgn1:len(comppdbfilename)]
            bgn1 = comppdbfilename[:bgn1]
            p381 = prody.parsePDB(comppdbfilename,model=int(modelnumber))
            # same selection logic as for the primary structure
            if selatom == "C-alpha" and noma1 == "Gaussian Normal Mode":
                pro1 = p381.select('protein and name CA')
            elif selatom == "C-alpha" and noma1 == "Anisotropic Normal Mode":
                pro1 = p381.select('protein and name CA')
            elif selatom == "Heavy" and noma1 == "Gaussian Normal Mode":
                pro1 = p381.select('protein and not name "[1-9]?H.*"')
            elif selatom == "Heavy" and noma1 == "Anisotropic Normal Mode":
                pro1 = p381.select('protein and not name "[1-9]?H.*"')
            elif selatom == "All" and noma1 == "Gaussian Normal Mode":
                pro1 = p381.select('protein')
            elif selatom == "All" and noma1 == "Anisotropic Normal Mode":
                pro1 = p381.select('protein')
            elif selatom == "Backbone" and noma1 == "Gaussian Normal Mode":
                pro1 = p381.select('protein and name CA C O N H')
            elif selatom == "Backbone" and noma1 == "Anisotropic Normal Mode":
                pro1 = p381.select('protein and name CA C O N H')
            elif selatom == "Sidechain" and noma1 == "Gaussian Normal Mode":
                pro1 = p381.select('protein and not name CA C O N H')
            elif selatom == "Sidechain" and noma1 == "Anisotropic Normal Mode":
                pro1 = p381.select('protein and not name CA C O N H')
            # build and solve the comparison model
            if noma1 == "Gaussian Normal Mode":
                print 'Building the Kirchhoff matrix'
                Tkinter.Label(onlypage,text='Building Kirchhoff').grid(row=21,column=0,sticky='w')
                root.update()
                anm1 = prody.GNM(name1)
                if gamcut=='0':
                    anm1.buildKirchhoff(pro1,cutoff=float(cut1),gamma=gammaDistanceDependent)
                    anm1.setKirchhoff(anm1.getKirchhoff())
                elif gamcut=='1':
                    anm1.buildKirchhoff(pro1,cutoff=float(cut2),gamma=float(gam2))
                brat = 2
            elif noma1 == "Anisotropic Normal Mode":
                print 'Building the Hessian matrix'
                Tkinter.Label(onlypage,text='Building Hessian').grid(row=21,column=0,sticky='w')
                root.update()
                anm1 = prody.ANM(name1)
                if gamcut=='0':
                    anm1.buildHessian(pro1,cutoff=float(cut1),gamma=gammaDistanceDependent)
                    anm1.setHessian(anm1.getHessian())
                elif gamcut=='1':
                    anm1.buildHessian(pro1,cutoff=float(cut2),gamma=float(gam2))
                brat = 7
            print 'Calculating modes'
            Tkinter.Label(onlypage,text='Calculating modes').grid(row=22,column=0,sticky='w')
            root.update()
            anm1.calcModes(int(nummodes),zeros = True)
            try:
                os.makedirs(bgn+folder+ovlapfold+'/')
            except (OSError):
                mer = 0
            if ovlap==1:
                # one overlap figure per mode in the requested range
                i=int(compmode01)
                while i < int(compmode02):
                    a = i-1
                    plt.figure(figsize=(5,4))
                    prody.showCumulOverlap(anm[a],anm1)
                    prody.showOverlap(anm[a],anm1)
                    plt.title('Overlap with Mode '+str(a+1)+' from '+name)
                    plt.xlabel(name1+' mode index')
                    overlapname = bgn+folder+ovlapfold+'/'+name+'_'+name1+ovlapname+'_mode'+str(a+1)+'.'+ovlapend
                    plt.savefig(overlapname)
                    print overlapname
                    i+=1
            if ovlaptab==1:
                plt.figure(figsize=(5,4))
                prody.showOverlapTable(anm1,anm)
                plt.xlim(int(compmode01)-1,int(compmode02))
                plt.ylim(int(compmode01)-1,int(compmode02))
                plt.title(name1+' vs '+name+' Overlap')
                plt.ylabel(name1)
                plt.xlabel(name)
                overlapname = bgn+folder+ovlapfold+'/'+name+'_'+name1+ovlaptabname+'.'+ovlaptabend
                plt.savefig(overlapname)
                print overlapname
        except:
            mer=0

    # --- wrap up ----------------------------------------------------------
    root.destroy()
    mynewtimeis = float(time.time()-start)
    if mynewtimeis <= 60.00:
        timeittook= "The calculations took %.2f s."%(mynewtimeis)
    elif mynewtimeis > 60.00 and mynewtimeis <= 3600.00:
        timeittook= "The calculations took %.2f min."%((mynewtimeis/60.00))
    else:
        timeittook= "The calculations took %.2f hrs."%((mynewtimeis/3600.00))
    print timeittook
    if smodel==1 and scollec==1:
        return (timeittook,modelfilename,str(int(prut)))
    elif scollec==1:
        return (timeittook,'nofile',str(int(prut)))
    elif smodel==1:
        return (timeittook,modelfilename,'nocoll')
    else:
        return (timeittook,'nofile','nocoll')
def prody_gnm(pdb, **kwargs):
    """Perform GNM calculations for *pdb*.

    Options missing from *kwargs* are filled in from the module-level
    ``DEFAULTS`` mapping.  Writes numerical output (NMD file, eigenvectors,
    beta factors, covariance, cross-correlations, Kirchhoff matrix, square
    fluctuations) and, optionally, matplotlib figures into ``outdir``.
    """

    # fall back to module defaults for any option the caller omitted
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, splitext, join

    outdir = kwargs.get("outdir")
    if not isdir(outdir):
        raise IOError("{0} is not a valid path".format(repr(outdir)))

    import numpy as np
    import prody

    LOGGER = prody.LOGGER

    selstr = kwargs.get("select")
    prefix = kwargs.get("prefix")
    cutoff = kwargs.get("cutoff")
    gamma = kwargs.get("gamma")
    nmodes = kwargs.get("nmodes")
    selstr = kwargs.get("select")  # NOTE(review): duplicate fetch of 'select'; harmless
    model = kwargs.get("model")

    pdb = prody.parsePDB(pdb, model=model)
    if prefix == "_gnm":
        # default placeholder prefix: derive output prefix from the PDB title
        prefix = pdb.getTitle() + "_gnm"

    select = pdb.select(selstr)
    if select is None:
        raise ValueError("selection {0} do not match any atoms".format(repr(selstr)))
    LOGGER.info("{0} atoms will be used for GNM calculations.".format(len(select)))

    # build the Kirchhoff matrix and diagonalize
    gnm = prody.GNM(pdb.getTitle())
    gnm.buildKirchhoff(select, cutoff, gamma)
    gnm.calcModes(nmodes)
    LOGGER.info("Writing numerical output.")
    if kwargs.get("outnpz"):
        prody.saveModel(gnm, join(outdir, prefix))
    prody.writeNMD(join(outdir, prefix + ".nmd"), gnm, select)

    extend = kwargs.get("extend")
    if extend:
        # map the coarse-grained modes back onto all atoms or the backbone
        if extend == "all":
            extended = prody.extendModel(gnm, select, pdb)
        else:
            extended = prody.extendModel(gnm, select, select | pdb.bb)
        prody.writeNMD(join(outdir, prefix + "_extended_" + extend + ".nmd"), *extended)

    outall = kwargs.get("outall")
    delim = kwargs.get("numdelim")
    ext = kwargs.get("numext")
    format = kwargs.get("numformat")  # NOTE: shadows the builtin `format`

    if outall or kwargs.get("outeig"):
        prody.writeArray(join(outdir, prefix + "_evectors" + ext), gnm.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + "_evalues" + ext), gnm.getEigvals(), delimiter=delim, format=format)

    if outall or kwargs.get("outbeta"):
        from prody.utilities import openFile

        # NOTE(review): hard-codes ".txt" instead of the configured numext
        # extension used by every other numerical output — confirm intended
        fout = openFile(prefix + "_beta.txt", "w", folder=outdir)
        fout.write("{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n".format(["C", "RES", "####", "Exp.", "The."]))
        for data in zip(
            select.getChids(),
            select.getResnames(),
            select.getResnums(),
            select.getBetas(),
            prody.calcTempFactors(gnm, select),
        ):
            fout.write("{0[0]:1s} {0[1]:4s} {0[2]:4d} {0[3]:5.2f} {0[4]:5.2f}\n".format(data))
        fout.close()

    if outall or kwargs.get("outcov"):
        prody.writeArray(join(outdir, prefix + "_covariance" + ext), gnm.getCovariance(), delimiter=delim, format=format)

    if outall or kwargs.get("outcc") or kwargs.get("outhm"):
        # the cross-correlation matrix feeds both the array and heatmap outputs
        cc = prody.calcCrossCorr(gnm)
        if outall or kwargs.get("outcc"):
            prody.writeArray(join(outdir, prefix + "_cross-correlations" + ext), cc, delimiter=delim, format=format)
        if outall or kwargs.get("outhm"):
            prody.writeHeatmap(
                join(outdir, prefix + "_cross-correlations.hm"),
                cc,
                resnum=select.getResnums(),
                xlabel="Residue",
                ylabel="Residue",
                title=gnm.getTitle() + " cross-correlations",
            )

    if outall or kwargs.get("kirchhoff"):
        prody.writeArray(join(outdir, prefix + "_kirchhoff" + ext), gnm.getKirchhoff(), delimiter=delim, format=format)

    if outall or kwargs.get("outsf"):
        prody.writeArray(join(outdir, prefix + "_sqfluct" + ext), prody.calcSqFlucts(gnm), delimiter=delim, format=format)

    # figure flags; note `cc` is reused here as a boolean flag
    figall = kwargs.get("figall")
    cc = kwargs.get("figcc")
    sf = kwargs.get("figsf")
    bf = kwargs.get("figbeta")
    cm = kwargs.get("figcmap")
    modes = kwargs.get("figmode")

    if figall or cc or sf or bf or cm or modes:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning("Matplotlib could not be imported. " "Figures are not saved.")
        else:
            prody.SETTINGS["auto_show"] = False  # keep figures off-screen
            LOGGER.info("Saving graphical output.")
            format = kwargs.get("figformat")  # `format` now holds the figure format
            width = kwargs.get("figwidth")
            height = kwargs.get("figheight")
            dpi = kwargs.get("figdpi")
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(gnm)
                plt.savefig(join(outdir, prefix + "_cc." + format), dpi=dpi, format=format)
                plt.close("all")
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(gnm)
                plt.savefig(join(outdir, prefix + "_cm." + format), dpi=dpi, format=format)
                plt.close("all")
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(gnm)
                plt.savefig(join(outdir, prefix + "_sf." + format), dpi=dpi, format=format)
                plt.close("all")
            if figall or bf:
                # experimental vs. theoretical B-factor comparison plot
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(gnm, select)
                plt.plot(bexp, label="Experimental")
                plt.plot(bcal, label=("Theoretical (corr coef = {0:.2f})".format(np.corrcoef(bcal, bexp)[0, 1])))
                plt.legend(prop={"size": 10})
                plt.xlabel("Node index")
                plt.ylabel("Experimental B-factors")
                plt.title(pdb.getTitle() + " B-factors")
                plt.savefig(join(outdir, prefix + "_bf." + format), dpi=dpi, format=format)
                plt.close("all")
            if modes:
                # parse the user-supplied 1-based mode list, e.g. "1 3-5,7",
                # into 0-based indices; malformed items are silently skipped
                indices = []
                items = modes.split()
                items = sum([item.split(",") for item in items], [])
                for item in items:
                    try:
                        item = item.split("-")
                        if len(item) == 1:
                            indices.append(int(item[0]) - 1)
                        elif len(item) == 2:
                            indices.extend(range(int(item[0]) - 1, int(item[1])))
                    except:
                        pass
                for index in indices:
                    try:
                        mode = gnm[index]
                    except:
                        # out-of-range mode indices are ignored
                        pass
                    else:
                        plt.figure(figsize=(width, height))
                        prody.showMode(mode)
                        plt.grid()
                        plt.savefig(
                            join(outdir, prefix + "_mode_" + str(mode.getIndex() + 1) + "." + format),
                            dpi=dpi,
                            format=format,
                        )
                        plt.close("all")
def get_gnm_fluctuations(protein: pd.AtomGroup, n_modes: int = 50):
    """Return per-atom square fluctuations from a Gaussian network model
    built with *n_modes* modes over all atoms of *protein*."""
    # calcGNM returns (model, selection); only the model is needed here
    gnm_model, _selection = pd.calcGNM(protein, n_modes=n_modes, selstr="all")
    return pd.calcSqFlucts(gnm_model)
def get_anm_fluctuations(protein: pd.AtomGroup, n_modes: int = 50):
    """Return per-atom square fluctuations from an anisotropic network model
    built with *n_modes* modes over all atoms of *protein*."""
    # calcANM returns (model, selection); only the model is needed here
    anm_model, _selection = pd.calcANM(protein, n_modes=n_modes, selstr="all")
    return pd.calcSqFlucts(anm_model)
def prody_gnm(pdb, **kwargs):
    """Perform GNM calculations for *pdb*.

    Options missing from *kwargs* are filled in from the module-level
    ``DEFAULTS`` mapping.  Writes numerical output (NMD file, eigenvectors,
    beta factors, covariance, cross-correlations, Kirchhoff matrix, square
    fluctuations, optional Scipion modes) and, optionally, matplotlib
    figures into ``outdir``.  When ``nproc`` is set, BLAS thread usage is
    limited via :mod:`threadpoolctl` during matrix building and
    diagonalization.
    """

    # fall back to module defaults for any option the caller omitted
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, splitext, join
    outdir = kwargs.get('outdir')
    if not isdir(outdir):
        raise IOError('{0} is not a valid path'.format(repr(outdir)))

    import numpy as np
    import prody
    LOGGER = prody.LOGGER

    selstr = kwargs.get('select')
    prefix = kwargs.get('prefix')
    cutoff = kwargs.get('cutoff')
    gamma = kwargs.get('gamma')
    nmodes = kwargs.get('nmodes')
    selstr = kwargs.get('select')  # NOTE(review): duplicate fetch of 'select'; harmless
    model = kwargs.get('model')
    altloc = kwargs.get('altloc')
    zeros = kwargs.get('zeros')

    pdb = prody.parsePDB(pdb, model=model, altloc=altloc)
    if prefix == '_gnm':
        # default placeholder prefix: derive output prefix from the PDB title
        prefix = pdb.getTitle() + '_gnm'

    select = pdb.select(selstr)
    if select is None:
        raise ValueError('selection {0} do not match any atoms'.format(
            repr(selstr)))
    LOGGER.info('{0} atoms will be used for GNM calculations.'.format(
        len(select)))

    gnm = prody.GNM(pdb.getTitle())

    nproc = kwargs.get('nproc')
    if nproc:
        # cap BLAS threads while building/diagonalizing the Kirchhoff matrix
        try:
            from threadpoolctl import threadpool_limits
        except ImportError:
            raise ImportError(
                'Please install threadpoolctl to control threads')

        with threadpool_limits(limits=nproc, user_api="blas"):
            gnm.buildKirchhoff(select, cutoff, gamma)
            gnm.calcModes(nmodes, zeros=zeros)
    else:
        gnm.buildKirchhoff(select, cutoff, gamma)
        gnm.calcModes(nmodes, zeros=zeros)

    LOGGER.info('Writing numerical output.')

    if kwargs.get('outnpz'):
        prody.saveModel(gnm, join(outdir, prefix))

    if kwargs.get('outscipion'):
        prody.writeScipionModes(outdir, gnm)

    prody.writeNMD(join(outdir, prefix + '.nmd'), gnm, select)

    extend = kwargs.get('extend')
    if extend:
        # map the coarse-grained modes back onto all atoms or the backbone
        if extend == 'all':
            extended = prody.extendModel(gnm, select, pdb)
        else:
            extended = prody.extendModel(gnm, select, select | pdb.bb)
        prody.writeNMD(join(outdir, prefix + '_extended_' +
                       extend + '.nmd'), *extended)

    outall = kwargs.get('outall')
    delim = kwargs.get('numdelim')
    ext = kwargs.get('numext')
    format = kwargs.get('numformat')  # NOTE: shadows the builtin `format`

    if outall or kwargs.get('outeig'):
        prody.writeArray(join(outdir, prefix + '_evectors' + ext),
                         gnm.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + '_evalues' + ext),
                         gnm.getEigvals(), delimiter=delim, format=format)

    if outall or kwargs.get('outbeta'):
        from prody.utilities import openFile
        fout = openFile(prefix + '_beta' + ext, 'w', folder=outdir)
        fout.write(
            '{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n'.format(
                ['C', 'RES', '####', 'Exp.', 'The.']))
        for data in zip(select.getChids(), select.getResnames(),
                        select.getResnums(), select.getBetas(),
                        prody.calcTempFactors(gnm, select)):
            fout.write(
                '{0[0]:1s} {0[1]:4s} {0[2]:4d} {0[3]:5.2f} {0[4]:5.2f}\n'.
                format(data))
        fout.close()

    if outall or kwargs.get('outcov'):
        prody.writeArray(join(outdir, prefix + '_covariance' + ext),
                         gnm.getCovariance(), delimiter=delim, format=format)

    if outall or kwargs.get('outcc') or kwargs.get('outhm'):
        # the cross-correlation matrix feeds both the array and heatmap outputs
        cc = prody.calcCrossCorr(gnm)
        if outall or kwargs.get('outcc'):
            prody.writeArray(join(outdir,
                                  prefix + '_cross-correlations' + ext),
                             cc, delimiter=delim, format=format)
        if outall or kwargs.get('outhm'):
            prody.writeHeatmap(join(outdir,
                                    prefix + '_cross-correlations.hm'),
                               cc, resnum=select.getResnums(),
                               xlabel='Residue', ylabel='Residue',
                               title=gnm.getTitle() + ' cross-correlations')

    if outall or kwargs.get('kirchhoff'):
        prody.writeArray(join(outdir, prefix + '_kirchhoff' + ext),
                         gnm.getKirchhoff(), delimiter=delim, format=format)

    if outall or kwargs.get('outsf'):
        prody.writeArray(join(outdir, prefix + '_sqfluct' + ext),
                         prody.calcSqFlucts(gnm),
                         delimiter=delim, format=format)

    # figure flags; note `cc` is reused here as a boolean flag
    figall = kwargs.get('figall')
    cc = kwargs.get('figcc')
    sf = kwargs.get('figsf')
    bf = kwargs.get('figbeta')
    cm = kwargs.get('figcmap')
    modes = kwargs.get('figmode')

    if figall or cc or sf or bf or cm or modes:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False  # keep figures off-screen
            LOGGER.info('Saving graphical output.')
            format = kwargs.get('figformat')  # `format` now holds the figure format
            width = kwargs.get('figwidth')
            height = kwargs.get('figheight')
            dpi = kwargs.get('figdpi')
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(gnm)
                plt.savefig(join(outdir, prefix + '_cc.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(gnm)
                plt.savefig(join(outdir, prefix + '_cm.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(gnm)
                plt.savefig(join(outdir, prefix + '_sf.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or bf:
                # experimental vs. theoretical B-factor comparison plot
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(gnm, select)
                plt.plot(bexp, label='Experimental')
                plt.plot(bcal, label=('Theoretical (corr coef = {0:.2f})'.format(
                    np.corrcoef(bcal, bexp)[0, 1])))
                plt.legend(prop={'size': 10})
                plt.xlabel('Node index')
                plt.ylabel('Experimental B-factors')
                plt.title(pdb.getTitle() + ' B-factors')
                plt.savefig(join(outdir, prefix + '_bf.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if modes:
                # parse the user-supplied 1-based mode list, e.g. "1 3-5,7",
                # into 0-based indices; malformed items are silently skipped
                indices = []
                items = modes.split()
                items = sum([item.split(',') for item in items], [])
                for item in items:
                    try:
                        item = item.split('-')
                        if len(item) == 1:
                            indices.append(int(item[0]) - 1)
                        elif len(item) == 2:
                            indices.extend(
                                list(range(int(item[0]) - 1, int(item[1]))))
                    except:
                        pass
                for index in indices:
                    try:
                        mode = gnm[index]
                    except:
                        # out-of-range mode indices are ignored
                        pass
                    else:
                        plt.figure(figsize=(width, height))
                        prody.showMode(mode)
                        plt.grid()
                        plt.savefig(join(
                            outdir,
                            prefix + '_mode_' + str(mode.getIndex() + 1) +
                            '.' + format), dpi=dpi, format=format)
                        plt.close('all')
def calcANMfeatures(self, chain='all', env='chain', ANM_PRS=True,
                    stiffness=True, MBS=False):
    """Computes ANM-based features.

    :arg chain: chain identifier
    :type chain: str
    :arg env: environment model, i.e. ``'chain'``, ``'reduced'`` or
        ``'sliced'``
    :type env: str
    :arg ANM_PRS: whether or not to compute features based on Perturbation
        Response Scanning analysis
    :type ANM_PRS: bool
    :arg stiffness: whether or not to compute stiffness with MechStiff
    :type stiffness: bool
    :arg MBS: whether or not to compute Mechanical Bridging Score
    :type MBS: bool
    """
    assert env in ['chain', 'reduced', 'sliced']
    for flag in (ANM_PRS, stiffness, MBS):
        assert type(flag) is bool

    # assemble the list of feature keys requested for this environment
    features = ['ANM_MSF-' + env]
    if ANM_PRS:
        features.extend(['ANM_effectiveness-' + env,
                         'ANM_sensitivity-' + env])
    if MBS:
        features.append('MBS-' + env)
    if stiffness:
        features.append('stiffness-' + env)

    chain_list = self.chids if chain == 'all' else [chain, ]

    for chID in chain_list:
        store = self.feats[chID]
        # skip chains whose requested features are all precomputed
        if all(f in store for f in features):
            continue

        try:
            anm = self.calcANM(chID, env=env)
        except Exception as e:
            # record the failure message under every requested feature so
            # the chain is not retried on subsequent calls
            err = 'MemoryError' if isinstance(e, MemoryError) else str(e)
            for f in features:
                store[f] = err
            LOGGER.warn(err)
            continue

        key_msf = 'ANM_MSF-' + env
        if key_msf not in store:
            try:
                store[key_msf] = calcSqFlucts(anm)
            except Exception as e:
                err = str(e)
                store[key_msf] = err
                LOGGER.warn(err)

        key_eff = 'ANM_effectiveness-' + env
        if key_eff in features and key_eff not in store:
            key_sns = 'ANM_sensitivity-' + env
            try:
                # effectiveness and sensitivity come from one PRS run
                prs_mtrx, eff, sns = calcPerturbResponse(anm)
                store[key_eff] = eff
                store[key_sns] = sns
            except Exception as e:
                err = str(e)
                store[key_eff] = err
                store[key_sns] = err
                LOGGER.warn(err)

        key_mbs = 'MBS-' + env
        if key_mbs in features and key_mbs not in store:
            try:
                ca = self.getPDB()[chID].ca
                store[key_mbs] = calcMBS(anm, ca, cutoff=15.)
            except Exception as e:
                err = str(e)
                store[key_mbs] = err
                LOGGER.warn(err)

        key_stf = 'stiffness-' + env
        if key_stf in features and key_stf not in store:
            try:
                ca = self.getPDB()[chID].ca
                stiff_mtrx = calcMechStiff(anm, ca)
                # per-residue stiffness: column average of the pairwise matrix
                store[key_stf] = np.mean(stiff_mtrx, axis=0)
            except Exception as e:
                err = str(e)
                store[key_stf] = err
                LOGGER.warn(err)
    return
def prody_pca(coords, **kwargs):
    """Perform PCA calculations for PDB or DCD format *coords* file.

    Options missing from *kwargs* are filled in from the module-level
    ``DEFAULTS`` mapping.  Writes numerical output (NMD file, eigenvectors,
    covariance, cross-correlations, square fluctuations, projections,
    optional Scipion modes) and, optionally, matplotlib figures into
    ``outdir``.  When ``nproc`` is set, BLAS thread usage is limited via
    :mod:`threadpoolctl`.

    :raises IOError: if ``outdir`` is not an existing directory
    :raises ValueError: if the trajectory has fewer than 2 frames/models,
        the selection matches no atoms, or atom counts are inconsistent
    """

    # fall back to module defaults for any option the caller omitted
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, splitext, join
    outdir = kwargs.get('outdir')
    if not isdir(outdir):
        raise IOError('{0} is not a valid path'.format(repr(outdir)))

    import prody
    LOGGER = prody.LOGGER

    prefix = kwargs.get('prefix')
    nmodes = kwargs.get('nmodes')
    selstr = kwargs.get('select')
    quiet = kwargs.pop('quiet', False)
    altloc = kwargs.get('altloc')

    # determine the input type from the (possibly gzipped) file extension
    ext = splitext(coords)[1].lower()
    if ext == '.gz':
        ext = splitext(coords[:-3])[1].lower()

    if ext == '.dcd':
        pdb = kwargs.get('psf') or kwargs.get('pdb')
        if pdb:
            if splitext(pdb)[1].lower() == '.psf':
                pdb = prody.parsePSF(pdb)
            else:
                # FIX: was `prody.parsePDB(pdb, altlocs=altlocs)`, which
                # raised NameError — the option is read into `altloc` above
                # and parsePDB's keyword is `altloc` (cf. prody_gnm)
                pdb = prody.parsePDB(pdb, altloc=altloc)
        dcd = prody.DCDFile(coords)
        if prefix == '_pca' or prefix == '_eda':
            # default placeholder prefix: derive from the trajectory title
            prefix = dcd.getTitle() + prefix

        if len(dcd) < 2:
            raise ValueError('DCD file must have multiple frames')
        if pdb:
            if pdb.numAtoms() == dcd.numAtoms():
                select = pdb.select(selstr)
                dcd.setAtoms(select)
                LOGGER.info('{0} atoms are selected for calculations.'.format(
                    len(select)))
            else:
                select = pdb.select(selstr)
                if select.numAtoms() != dcd.numAtoms():
                    raise ValueError('number of selected atoms ({0}) does '
                                     'not match number of atoms in the DCD '
                                     'file ({1})'.format(
                                        select.numAtoms(), dcd.numAtoms()))
                if pdb.numCoordsets():
                    dcd.setCoords(select.getCoords())
        else:
            # no reference structure: use the bare DCD coordinates
            select = prody.AtomGroup()
            select.setCoords(dcd.getCoords())
        pca = prody.PCA(dcd.getTitle())

        nproc = kwargs.get('nproc')
        if nproc:
            # cap BLAS threads during covariance building / SVD
            try:
                from threadpoolctl import threadpool_limits
            except ImportError:
                raise ImportError(
                    'Please install threadpoolctl to control threads')

            with threadpool_limits(limits=nproc, user_api="blas"):
                if len(dcd) > 1000:
                    # large trajectory: stream the covariance matrix
                    pca.buildCovariance(dcd, aligned=kwargs.get('aligned'),
                                        quiet=quiet)
                    pca.calcModes(nmodes)
                    ensemble = dcd
                else:
                    # small trajectory: load into memory and use SVD
                    ensemble = dcd[:]
                    if not kwargs.get('aligned'):
                        ensemble.iterpose(quiet=quiet)
                    pca.performSVD(ensemble)
                    nmodes = pca.numModes()
        else:
            if len(dcd) > 1000:
                pca.buildCovariance(dcd, aligned=kwargs.get('aligned'),
                                    quiet=quiet)
                pca.calcModes(nmodes)
                ensemble = dcd
            else:
                ensemble = dcd[:]
                if not kwargs.get('aligned'):
                    ensemble.iterpose(quiet=quiet)
                pca.performSVD(ensemble)
                nmodes = pca.numModes()
    else:
        pdb = prody.parsePDB(coords)
        if pdb.numCoordsets() < 2:
            raise ValueError('PDB file must contain multiple models')

        if prefix == '_pca' or prefix == '_eda':
            prefix = pdb.getTitle() + prefix

        select = pdb.select(selstr)
        # FIX: check for an empty selection *before* calling len(select);
        # previously len(None) raised TypeError ahead of this ValueError
        if select is None:
            raise ValueError('selection {0} do not match any atoms'.format(
                repr(selstr)))
        LOGGER.info('{0} atoms are selected for calculations.'.format(
            len(select)))
        LOGGER.info('{0} atoms will be used for PCA calculations.'.format(
            len(select)))
        ensemble = prody.Ensemble(select)
        pca = prody.PCA(pdb.getTitle())
        if not kwargs.get('aligned'):
            ensemble.iterpose()

        nproc = kwargs.get('nproc')
        if nproc:
            try:
                from threadpoolctl import threadpool_limits
            except ImportError:
                raise ImportError(
                    'Please install threadpoolctl to control threads')

            with threadpool_limits(limits=nproc, user_api="blas"):
                pca.performSVD(ensemble)
        else:
            pca.performSVD(ensemble)

    LOGGER.info('Writing numerical output.')

    if kwargs.get('outnpz'):
        prody.saveModel(pca, join(outdir, prefix))

    if kwargs.get('outscipion'):
        prody.writeScipionModes(outdir, pca)

    prody.writeNMD(join(outdir, prefix + '.nmd'), pca[:nmodes], select)

    extend = kwargs.get('extend')
    if extend:
        if pdb:
            # map the modes back onto all atoms or the backbone
            if extend == 'all':
                extended = prody.extendModel(pca[:nmodes], select, pdb)
            else:
                extended = prody.extendModel(pca[:nmodes], select,
                                             select | pdb.bb)
            prody.writeNMD(
                join(outdir, prefix + '_extended_' + extend + '.nmd'),
                *extended)
        else:
            prody.LOGGER.warn('Model could not be extended, provide a PDB or '
                              'PSF file.')

    outall = kwargs.get('outall')
    delim = kwargs.get('numdelim')
    ext = kwargs.get('numext')
    format = kwargs.get('numformat')  # NOTE: shadows the builtin `format`

    if outall or kwargs.get('outeig'):
        prody.writeArray(join(outdir, prefix + '_evectors' + ext),
                         pca.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + '_evalues' + ext),
                         pca.getEigvals(), delimiter=delim, format=format)
    if outall or kwargs.get('outcov'):
        prody.writeArray(join(outdir, prefix + '_covariance' + ext),
                         pca.getCovariance(), delimiter=delim, format=format)
    if outall or kwargs.get('outcc') or kwargs.get('outhm'):
        # the cross-correlation matrix feeds both array and heatmap outputs
        cc = prody.calcCrossCorr(pca)
        if outall or kwargs.get('outcc'):
            prody.writeArray(
                join(outdir, prefix + '_cross-correlations' + ext),
                cc, delimiter=delim, format=format)
        if outall or kwargs.get('outhm'):
            resnums = select.getResnums()
            # a bare AtomGroup (DCD with no PDB) has no residue numbers
            hmargs = {} if resnums is None else {'resnums': resnums}
            prody.writeHeatmap(
                join(outdir, prefix + '_cross-correlations.hm'), cc,
                xlabel='Residue', ylabel='Residue',
                title=pca.getTitle() + ' cross-correlations', **hmargs)
    if outall or kwargs.get('outsf'):
        prody.writeArray(join(outdir, prefix + '_sqfluct' + ext),
                         prody.calcSqFlucts(pca),
                         delimiter=delim, format=format)
    if outall or kwargs.get('outproj'):
        prody.writeArray(join(outdir, prefix + '_proj' + ext),
                         prody.calcProjection(ensemble, pca),
                         delimiter=delim, format=format)

    # figure flags; note `cc` is reused here as a boolean flag
    figall = kwargs.get('figall')
    cc = kwargs.get('figcc')
    sf = kwargs.get('figsf')
    sp = kwargs.get('figproj')

    if figall or cc or sf or sp:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False  # keep figures off-screen
            LOGGER.info('Saving graphical output.')
            format = kwargs.get('figformat')  # now holds the figure format
            width = kwargs.get('figwidth')
            height = kwargs.get('figheight')
            dpi = kwargs.get('figdpi')
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(pca)
                plt.savefig(join(outdir, prefix + '_cc.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(pca)
                plt.savefig(join(outdir, prefix + '_sf.' + format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sp:
                # parse the user-supplied 1-based component spec, e.g.
                # "1 2-3 4,5", into 0-based indices/index lists; malformed
                # items are silently skipped
                indices = []
                for item in sp.split():
                    try:
                        if '-' in item:
                            item = item.split('-')
                            if len(item) == 2:
                                indices.append(
                                    list(range(int(item[0]) - 1,
                                               int(item[1]))))
                        elif ',' in item:
                            indices.append(
                                [int(i) - 1 for i in item.split(',')])
                        else:
                            indices.append(int(item) - 1)
                    except:
                        pass
                for index in indices:
                    plt.figure(figsize=(width, height))
                    prody.showProjection(ensemble, pca[index])
                    if isinstance(index, Integral):
                        index = [index]
                    index = [str(i + 1) for i in index]
                    plt.savefig(join(
                        outdir,
                        prefix + '_proj_' + '_'.join(index) + '.' + format),
                        dpi=dpi, format=format)
                    plt.close('all')
def prody_anm(pdb, **kwargs):
    """Perform ANM calculations for *pdb*.

    Options missing from *kwargs* are filled in from the module-level
    ``DEFAULTS`` mapping.  Writes numerical output (NMD file, eigenvectors,
    beta factors, covariance, cross-correlations, Hessian, Kirchhoff,
    square fluctuations) and, optionally, matplotlib figures into
    ``outdir``.  Returns early (with a warning) if the selection matches
    no atoms.
    """

    # fall back to module defaults for any option the caller omitted
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, join
    outdir = kwargs.get('outdir')
    if not isdir(outdir):
        raise IOError('{0} is not a valid path'.format(repr(outdir)))

    import numpy as np
    import prody
    LOGGER = prody.LOGGER

    selstr = kwargs.get('select')
    prefix = kwargs.get('prefix')
    cutoff = kwargs.get('cutoff')
    gamma = kwargs.get('gamma')
    nmodes = kwargs.get('nmodes')
    selstr = kwargs.get('select')  # NOTE(review): duplicate fetch of 'select'; harmless
    model = kwargs.get('model')

    pdb = prody.parsePDB(pdb, model=model)
    if prefix == '_anm':
        # default placeholder prefix: derive output prefix from the PDB title
        prefix = pdb.getTitle() + '_anm'

    select = pdb.select(selstr)
    if select is None:
        # unlike prody_gnm, an empty selection warns and returns quietly
        LOGGER.warn('Selection {0} did not match any atoms.'
                    .format(repr(selstr)))
        return
    LOGGER.info('{0} atoms will be used for ANM calculations.'
                .format(len(select)))

    # build the Hessian matrix and diagonalize
    anm = prody.ANM(pdb.getTitle())
    anm.buildHessian(select, cutoff, gamma)
    anm.calcModes(nmodes)
    LOGGER.info('Writing numerical output.')
    if kwargs.get('outnpz'):
        prody.saveModel(anm, join(outdir, prefix))
    prody.writeNMD(join(outdir, prefix + '.nmd'), anm, select)

    extend = kwargs.get('extend')
    if extend:
        # map the coarse-grained modes back onto all atoms or the backbone
        if extend == 'all':
            extended = prody.extendModel(anm, select, pdb)
        else:
            extended = prody.extendModel(anm, select, select | pdb.bb)
        prody.writeNMD(join(outdir, prefix + '_extended_' +
                       extend + '.nmd'), *extended)

    outall = kwargs.get('outall')
    delim = kwargs.get('numdelim')
    ext = kwargs.get('numext')
    format = kwargs.get('numformat')  # NOTE: shadows the builtin `format`

    if outall or kwargs.get('outeig'):
        prody.writeArray(join(outdir, prefix + '_evectors'+ext),
                         anm.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + '_evalues'+ext),
                         anm.getEigvals(), delimiter=delim, format=format)

    if outall or kwargs.get('outbeta'):
        from prody.utilities import openFile
        # NOTE(review): hard-codes ".txt" instead of the configured numext
        # extension used by every other numerical output — confirm intended
        fout = openFile(prefix + '_beta.txt', 'w', folder=outdir)
        fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n'
                   .format(['C', 'RES', '####', 'Exp.', 'The.']))
        for data in zip(select.getChids(), select.getResnames(),
                        select.getResnums(), select.getBetas(),
                        prody.calcTempFactors(anm, select)):
            fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4d} '
                       '{0[3]:5.2f} {0[4]:5.2f}\n'.format(data))
        fout.close()

    if outall or kwargs.get('outcov'):
        prody.writeArray(join(outdir, prefix + '_covariance' + ext),
                         anm.getCovariance(), delimiter=delim, format=format)

    if outall or kwargs.get('outcc') or kwargs.get('outhm'):
        # the cross-correlation matrix feeds both the array and heatmap outputs
        cc = prody.calcCrossCorr(anm)
        if outall or kwargs.get('outcc'):
            prody.writeArray(join(outdir,
                                  prefix + '_cross-correlations' + ext),
                             cc, delimiter=delim, format=format)
        if outall or kwargs.get('outhm'):
            prody.writeHeatmap(join(outdir,
                                    prefix + '_cross-correlations.hm'),
                               cc, resnum=select.getResnums(),
                               xlabel='Residue', ylabel='Residue',
                               title=anm.getTitle() + ' cross-correlations')

    if outall or kwargs.get('hessian'):
        prody.writeArray(join(outdir, prefix + '_hessian'+ext),
                         anm.getHessian(), delimiter=delim, format=format)

    if outall or kwargs.get('kirchhoff'):
        prody.writeArray(join(outdir, prefix + '_kirchhoff'+ext),
                         anm.getKirchhoff(), delimiter=delim, format=format)

    if outall or kwargs.get('outsf'):
        prody.writeArray(join(outdir, prefix + '_sqflucts'+ext),
                         prody.calcSqFlucts(anm),
                         delimiter=delim, format=format)

    # figure flags; note `cc` is reused here as a boolean flag
    figall = kwargs.get('figall')
    cc = kwargs.get('figcc')
    sf = kwargs.get('figsf')
    bf = kwargs.get('figbeta')
    cm = kwargs.get('figcmap')

    if figall or cc or sf or bf or cm:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False  # keep figures off-screen
            LOGGER.info('Saving graphical output.')
            format = kwargs.get('figformat')  # now holds the figure format
            width = kwargs.get('figwidth')
            height = kwargs.get('figheight')
            dpi = kwargs.get('figdpi')
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(anm)
                plt.savefig(join(outdir, prefix + '_cc.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(anm)
                plt.savefig(join(outdir, prefix + '_cm.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(anm)
                plt.savefig(join(outdir, prefix + '_sf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or bf:
                # experimental vs. theoretical B-factor comparison plot
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(anm, select)
                plt.plot(bexp, label='Experimental')
                plt.plot(bcal, label=('Theoretical (R={0:.2f})'
                                      .format(np.corrcoef(bcal, bexp)[0,1])))
                plt.legend(prop={'size': 10})
                plt.xlabel('Node index')
                plt.ylabel('Experimental B-factors')
                plt.title(pdb.getTitle() + ' B-factors')
                plt.savefig(join(outdir, prefix + '_bf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
def calcANMfeatures(self, chain='all', env='chain', ANM_PRS=True,
                    stiffness=True, MBS=False):
    """Computes ANM-based features.

    :arg chain: chain identifier
    :type chain: str
    :arg env: environment model, i.e. ``'chain'``, ``'reduced'`` or
        ``'sliced'``
    :type env: str
    :arg ANM_PRS: whether or not to compute features based on
        Perturbation Response Scanning analysis
    :type ANM_PRS: bool
    :arg stiffness: whether or not to compute stiffness with MechStiff
    :type stiffness: bool
    :arg MBS: whether or not to compute Mechanical Bridging Score
    :type MBS: bool
    """
    assert env in ['chain', 'reduced', 'sliced']
    for k in ANM_PRS, stiffness, MBS:
        assert type(k) is bool
    # list of features to be computed
    features = ['ANM_MSF-' + env]
    if ANM_PRS:
        features += ['ANM_effectiveness-' + env, 'ANM_sensitivity-' + env]
    if MBS:
        features += ['MBS-' + env]
    if stiffness:
        features += ['stiffness-' + env]
    # compute features (if not precomputed)
    if chain == 'all':
        chain_list = self.chids
    else:
        chain_list = [ chain, ]
    for chID in chain_list:
        d = self.feats[chID]
        # skip chains whose requested features are all precomputed
        if all([f in d for f in features]):
            continue
        try:
            anm = self.calcANM(chID, env=env)
        except Exception as e:
            if (isinstance(e, MemoryError)):
                msg = 'MemoryError'
            else:
                msg = str(e)
            # store the failure message under every requested feature so
            # the chain is not retried on subsequent calls
            for f in features:
                d[f] = msg
            LOGGER.warn(msg)
            continue
        key_msf = 'ANM_MSF-' + env
        if key_msf not in d:
            try:
                d[key_msf] = calcSqFlucts(anm)
            except Exception as e:
                msg = str(e)
                d[key_msf] = msg
                LOGGER.warn(msg)
        key_eff = 'ANM_effectiveness-' + env
        if key_eff in features and key_eff not in d:
            key_sns = 'ANM_sensitivity-' + env
            try:
                # effectiveness and sensitivity come from one PRS run
                prs_mtrx, eff, sns = calcPerturbResponse(anm)
                d[key_eff] = eff
                d[key_sns] = sns
            except Exception as e:
                msg = str(e)
                d[key_eff] = msg
                d[key_sns] = msg
                LOGGER.warn(msg)
        key_mbs = 'MBS-' + env
        if key_mbs in features and key_mbs not in d:
            try:
                pdb = self.getPDB()
                ca = pdb[chID].ca
                d[key_mbs] = calcMBS(anm, ca, cutoff=15.)
            except Exception as e:
                msg = str(e)
                d[key_mbs] = msg
                LOGGER.warn(msg)
        key_stf = 'stiffness-' + env
        if key_stf in features and key_stf not in d:
            try:
                pdb = self.getPDB()
                ca = pdb[chID].ca
                stiff_mtrx = calcMechStiff(anm, ca)
                # per-residue stiffness: column average of the pairwise matrix
                d[key_stf] = np.mean(stiff_mtrx, axis=0)
            except Exception as e:
                msg = str(e)
                d[key_stf] = msg
                LOGGER.warn(msg)
    return
def prody_pca(coords, **kwargs):
    """Perform PCA calculations for PDB or DCD format *coords* file.

    Options missing from *kwargs* are filled in from the module-level
    ``DEFAULTS`` mapping.  Writes numerical output (NMD file, eigenvectors,
    covariance, cross-correlations, square fluctuations, projections) and,
    optionally, matplotlib figures into ``outdir``.

    :raises IOError: if ``outdir`` is not an existing directory
    :raises ValueError: if the trajectory has fewer than 2 frames/models,
        the selection matches no atoms, or atom counts are inconsistent
    """

    # fall back to module defaults for any option the caller omitted
    for key in DEFAULTS:
        if not key in kwargs:
            kwargs[key] = DEFAULTS[key]

    from os.path import isdir, splitext, join
    outdir = kwargs.get('outdir')
    if not isdir(outdir):
        raise IOError('{0} is not a valid path'.format(repr(outdir)))

    import prody
    LOGGER = prody.LOGGER

    prefix = kwargs.get('prefix')
    nmodes = kwargs.get('nmodes')
    selstr = kwargs.get('select')

    # determine the input type from the (possibly gzipped) file extension
    ext = splitext(coords)[1].lower()
    if ext == '.gz':
        ext = splitext(coords[:-3])[1].lower()

    if ext == '.dcd':
        pdb = kwargs.get('psf') or kwargs.get('pdb')
        if pdb:
            if splitext(pdb)[1].lower() == '.psf':
                pdb = prody.parsePSF(pdb)
            else:
                pdb = prody.parsePDB(pdb)
        dcd = prody.DCDFile(coords)
        if prefix == '_pca' or prefix == '_eda':
            # default placeholder prefix: derive from the trajectory title
            prefix = dcd.getTitle() + prefix

        if len(dcd) < 2:
            raise ValueError('DCD file must have multiple frames')
        if pdb:
            if pdb.numAtoms() == dcd.numAtoms():
                select = pdb.select(selstr)
                dcd.setAtoms(select)
                LOGGER.info('{0} atoms are selected for calculations.'
                            .format(len(select)))
            else:
                select = pdb.select(selstr)
                if select.numAtoms() != dcd.numAtoms():
                    raise ValueError('number of selected atoms ({0}) does '
                                     'not match number of atoms in the DCD '
                                     'file ({1})'.format(select.numAtoms(),
                                                         dcd.numAtoms()))
                if pdb.numCoordsets():
                    dcd.setCoords(select.getCoords())
        else:
            # no reference structure: use the bare DCD coordinates
            select = prody.AtomGroup()
            select.setCoords(dcd.getCoords())
        pca = prody.PCA(dcd.getTitle())
        if len(dcd) > 1000:
            # large trajectory: stream the covariance matrix
            pca.buildCovariance(dcd, aligned=kwargs.get('aligned'))
            pca.calcModes(nmodes)
            ensemble = dcd
        else:
            # small trajectory: load into memory and use SVD
            ensemble = dcd[:]
            if not kwargs.get('aligned'):
                ensemble.iterpose()
            pca.performSVD(ensemble)
    else:
        pdb = prody.parsePDB(coords)
        if pdb.numCoordsets() < 2:
            raise ValueError('PDB file must contain multiple models')

        if prefix == '_pca' or prefix == '_eda':
            prefix = pdb.getTitle() + prefix

        select = pdb.select(selstr)
        # FIX: check for an empty selection *before* calling len(select);
        # previously len(None) raised TypeError ahead of this ValueError
        if select is None:
            raise ValueError('selection {0} do not match any atoms'
                             .format(repr(selstr)))
        LOGGER.info('{0} atoms are selected for calculations.'
                    .format(len(select)))
        LOGGER.info('{0} atoms will be used for PCA calculations.'
                    .format(len(select)))
        ensemble = prody.Ensemble(select)
        pca = prody.PCA(pdb.getTitle())
        if not kwargs.get('aligned'):
            ensemble.iterpose()
        pca.performSVD(ensemble)

    LOGGER.info('Writing numerical output.')
    if kwargs.get('outnpz'):
        prody.saveModel(pca, join(outdir, prefix))
    prody.writeNMD(join(outdir, prefix + '.nmd'), pca[:nmodes], select)

    extend = kwargs.get('extend')
    if extend:
        if pdb:
            # map the modes back onto all atoms or the backbone
            if extend == 'all':
                extended = prody.extendModel(pca[:nmodes], select, pdb)
            else:
                extended = prody.extendModel(pca[:nmodes], select,
                                             select | pdb.bb)
            prody.writeNMD(join(outdir, prefix + '_extended_' +
                           extend + '.nmd'), *extended)
        else:
            prody.LOGGER.warn('Model could not be extended, provide a PDB or '
                              'PSF file.')

    outall = kwargs.get('outall')
    delim = kwargs.get('numdelim')
    ext = kwargs.get('numext')
    format = kwargs.get('numformat')  # NOTE: shadows the builtin `format`

    if outall or kwargs.get('outeig'):
        prody.writeArray(join(outdir, prefix + '_evectors'+ext),
                         pca.getArray(), delimiter=delim, format=format)
        prody.writeArray(join(outdir, prefix + '_evalues'+ext),
                         pca.getEigvals(), delimiter=delim, format=format)
    if outall or kwargs.get('outcov'):
        prody.writeArray(join(outdir, prefix + '_covariance'+ext),
                         pca.getCovariance(), delimiter=delim, format=format)
    if outall or kwargs.get('outcc') or kwargs.get('outhm'):
        # the cross-correlation matrix feeds both array and heatmap outputs
        cc = prody.calcCrossCorr(pca)
        if outall or kwargs.get('outcc'):
            prody.writeArray(join(outdir,
                                  prefix + '_cross-correlations' + ext),
                             cc, delimiter=delim, format=format)
        if outall or kwargs.get('outhm'):
            resnums = select.getResnums()
            # a bare AtomGroup (DCD with no PDB) has no residue numbers
            hmargs = {} if resnums is None else {'resnums': resnums}
            prody.writeHeatmap(join(outdir,
                                    prefix + '_cross-correlations.hm'),
                               cc, xlabel='Residue', ylabel='Residue',
                               title=pca.getTitle() + ' cross-correlations',
                               **hmargs)
    if outall or kwargs.get('outsf'):
        prody.writeArray(join(outdir, prefix + '_sqfluct'+ext),
                         prody.calcSqFlucts(pca), delimiter=delim,
                         format=format)
    if outall or kwargs.get('outproj'):
        prody.writeArray(join(outdir, prefix + '_proj'+ext),
                         prody.calcProjection(ensemble, pca),
                         delimiter=delim, format=format)

    # figure flags; note `cc` is reused here as a boolean flag
    figall = kwargs.get('figall')
    cc = kwargs.get('figcc')
    sf = kwargs.get('figsf')
    sp = kwargs.get('figproj')

    if figall or cc or sf or sp:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False  # keep figures off-screen
            LOGGER.info('Saving graphical output.')
            format = kwargs.get('figformat')  # now holds the figure format
            width = kwargs.get('figwidth')
            height = kwargs.get('figheight')
            dpi = kwargs.get('figdpi')
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(pca)
                plt.savefig(join(outdir, prefix + '_cc.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(pca)
                plt.savefig(join(outdir, prefix + '_sf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sp:
                # parse the user-supplied 1-based component spec, e.g.
                # "1 2-3 4,5", into 0-based indices/index lists; malformed
                # items are silently skipped
                indices = []
                for item in sp.split():
                    try:
                        if '-' in item:
                            item = item.split('-')
                            if len(item) == 2:
                                indices.append(list(range(int(item[0])-1,
                                                          int(item[1]))))
                        elif ',' in item:
                            indices.append([int(i)-1
                                            for i in item.split(',')])
                        else:
                            indices.append(int(item)-1)
                    except:
                        pass
                for index in indices:
                    plt.figure(figsize=(width, height))
                    prody.showProjection(ensemble, pca[index])
                    if isinstance(index, int):
                        index = [index]
                    index = [str(i+1) for i in index]
                    plt.savefig(join(outdir, prefix + '_proj_' +
                                '_'.join(index) + '.' + format),
                                dpi=dpi, format=format)
                    plt.close('all')
def iENM_stochastic( pdb_obj, prefix, selstr='calpha', dr=1, nsteps=1, const=1, cutoff=10, selmode=-1, path_length=0.5 ):
    """Generate an iterative-ENM trajectory by stochastic sampling of ANM modes.

    At each step a GNM supplies per-residue square fluctuations that weight
    the displacement magnitudes, while the ANM eigenvectors (gaussian-sampled
    via ``gauss_vec``) supply the directions; displaced coordinates are
    appended to an ensemble and written to ``<prefix>.dcd``.

    :arg pdb_obj: structure to deform (ProDy Atomic object)
    :arg prefix: output prefix; writes ``<prefix>.pdb`` and ``<prefix>.dcd``
    :arg selstr: atom selection string for the coarse-grained model
    :arg dr: unused here; kept for interface compatibility
    :arg nsteps: number of deformation steps
    :arg const: uniform spring constant (gamma) for both GNM and ANM
    :arg cutoff: interaction cutoff distance for both GNM and ANM
    :arg selmode: GNM mode index for the fluctuation weights; -1 uses all modes
    :arg path_length: scale factor applied to each displacement
    :returns: tuple ``(selected atoms, generated ensemble)``
    """
    import prody as pd
    import pylab as pb
    import numpy as np
    import random as random

    pdb = pdb_obj.select( selstr )
    natoms = pdb.numAtoms()

    # Flattened (natoms*3,) coordinate vector for the current step.
    # (The original pre-allocated an unused zeros array here; removed as dead code.)
    dcd = pdb.getCoords().reshape( natoms*3 )

    ensemble = pd.Ensemble()
    ensemble.setCoords(dcd.reshape(natoms,3))

    filename_pdb = '%s.pdb' % prefix
    filename_dcd = '%s.dcd' % prefix
    pd.writePDB( filename_pdb, pdb )

    for step in range( 1, nsteps+1 ):
        print('Calculating coordinates at step %d\n' % step)

        # Load coordinates from previous step
        pdb.setCoords( dcd.reshape(natoms,3) )

        # GNM analysis: square fluctuations give per-residue displacement
        # magnitudes, normalized to [0, 1].
        gnm = pd.GNM()
        gnm.buildKirchhoff( pdb, cutoff=cutoff, gamma=const )
        K = gnm.getKirchhoff()
        N = np.shape(K)[1]
        gnm.calcModes( n_modes=N, zeros=False, turbo=True )
        if selmode == -1:
            sqf = pd.calcSqFlucts( gnm )
        else:
            sqf = pd.calcSqFlucts( gnm[selmode] )
        fluct = sqf / max( sqf )

        # ANM analysis: eigenvectors give displacement directions.
        anm = pd.ANM()
        anm.buildHessian( pdb, cutoff=cutoff, gamma=const )
        H = anm.getHessian()
        # Floor division keeps N an int (Py2 semantics preserved, Py3 safe).
        N = np.shape(H)[1] // 3
        anm.calcModes( n_modes=N, zeros=False, turbo=True )
        eVec = anm.getEigvecs()
        eVal = anm.getEigvals()

        # Normalize vectors.  BUGFIX: the loop index was 'i', shadowing the
        # outer step counter; renamed to 'j'.
        for j in range( 0, np.size(eVec[1]) ):
            eVec[:,j] = norm_vec( eVec[:,j], natoms )

        # Randomly sample gaussian distribution of vectors
        gVec = gauss_vec(eVec)

        # Weight motion along gVec by GNM square fluctuations; each atom's
        # x/y/z components share the same magnitude.
        rmag = np.zeros((natoms,3))
        rmag[:,0] = fluct
        rmag[:,1] = fluct
        rmag[:,2] = fluct
        rmag = rmag.reshape(natoms*3)
        # Random per-component sign flip, scaled by path_length.
        # np.random.randint(0, 2, ...) replaces the deprecated (and removed)
        # np.random.random_integers(0, 1, ...) -- same {0, 1} distribution.
        rmag = ((-1)**np.random.randint(0, 2, np.shape(rmag))) * path_length * rmag
        rvec = gVec.reshape(natoms*3)
        rvec = np.multiply(rvec, rmag)

        # Predict new coords
        dcd = rvec + pdb.getCoords().reshape(natoms*3)
        ensemble.addCoordset(dcd.reshape(natoms,3))
        pd.writeDCD( filename_dcd, ensemble )

    return ( pdb, ensemble )
def prody_pca(opt):
    """Perform PCA calculations based on command line arguments.

    Reads coordinates from a DCD trajectory or a multi-model PDB file,
    performs PCA on the selected atoms, then writes numerical output
    (NMD, eigenvectors/values, covariance, cross-correlations, square
    fluctuations, projections) and optional matplotlib figures into
    ``opt.outdir``.
    """

    outdir = opt.outdir
    if not os.path.isdir(outdir):
        opt.subparser.error('{0:s} is not a valid path'.format(outdir))

    import prody
    LOGGER = prody.LOGGER

    coords = opt.coords
    prefix = opt.prefix
    nmodes, selstr = opt.nmodes, opt.select

    if os.path.splitext(coords)[1].lower() == '.dcd':
        # Trajectory input: atom data may come from an accompanying PSF/PDB.
        ag = opt.psf or opt.pdb
        if ag:
            if os.path.splitext(ag)[1].lower() == '.psf':
                ag = prody.parsePSF(ag)
            else:
                ag = prody.parsePDB(ag)
        dcd = prody.DCDFile(opt.coords)
        if len(dcd) < 2:
            # BUGFIX: opt.subparser was called as a function; use its
            # error() method, consistent with the check above.
            opt.subparser.error("DCD file must contain multiple frames.")
        if ag:
            dcd.setAtomGroup(ag)
            select = dcd.select(selstr)
            LOGGER.info('{0:d} atoms are selected for calculations.'
                        .format(len(select)))
        else:
            select = prody.AtomGroup()
            select.setCoords(dcd.getCoords())
        pca = prody.PCA(dcd.getTitle())
        if len(dcd) > 1000:
            # Large trajectories: build the covariance iteratively instead of
            # loading all frames for an SVD.
            pca.buildCovariance(dcd)
            pca.calcModes(dcd)
        else:
            pca.performSVD(dcd[:])
    else:
        pdb = prody.parsePDB(opt.coords)
        if pdb.numCoordsets() < 2:
            opt.subparser.error("PDB file must contain multiple models.")
        if prefix == '_pca':
            prefix = pdb.getTitle() + '_pca'
        select = pdb.select(selstr)
        # BUGFIX: the None check must precede len(select); calling
        # len(None) raised TypeError before the intended error message.
        if select is None:
            opt.subparser.error('Selection "{0:s}" do not match any atoms.'
                                .format(selstr))
        LOGGER.info('{0:d} atoms are selected for calculations.'
                    .format(len(select)))
        LOGGER.info('{0:d} atoms will be used for PCA calculations.'
                    .format(len(select)))
        ensemble = prody.Ensemble(select)
        pca = prody.PCA(pdb.getTitle())
        ensemble.iterpose()
        pca.performSVD(ensemble)

    LOGGER.info('Writing numerical output.')
    if opt.npz:
        # BUGFIX: save the model next to the other outputs (it was saved
        # with a default name in the CWD, ignoring outdir/prefix).
        prody.saveModel(pca, os.path.join(outdir, prefix))
    prody.writeNMD(os.path.join(outdir, prefix + '.nmd'), pca[:nmodes],
                   select)

    outall = opt.all
    delim, ext, format = opt.delim, opt.ext, opt.numformat
    if outall or opt.eigen:
        prody.writeArray(os.path.join(outdir, prefix + '_evectors'+ext),
                         pca.getArray(), delimiter=delim, format=format)
        # BUGFIX: ProDy NMA models expose getEigvals(), not getEigenvalues().
        prody.writeArray(os.path.join(outdir, prefix + '_evalues'+ext),
                         pca.getEigvals(), delimiter=delim, format=format)
    if outall or opt.covar:
        prody.writeArray(os.path.join(outdir, prefix + '_covariance'+ext),
                         pca.getCovariance(), delimiter=delim, format=format)
    if outall or opt.ccorr:
        prody.writeArray(os.path.join(outdir,
                                      prefix + '_cross-correlations' + ext),
                         prody.calcCrossCorr(pca), delimiter=delim,
                         format=format)
    if outall or opt.sqflucts:
        prody.writeArray(os.path.join(outdir, prefix + '_sqfluct'+ext),
                         prody.calcSqFlucts(pca), delimiter=delim,
                         format=format)
    if outall or opt.proj:
        # NOTE(review): 'ensemble' is only defined in the PDB branch above;
        # requesting projections for DCD input would raise NameError --
        # TODO confirm intended usage.
        prody.writeArray(os.path.join(outdir, prefix + '_proj'+ext),
                         prody.calcProjection(ensemble, pca), delimiter=delim,
                         format=format)

    figall, cc, sf, sp = opt.figures, opt.cc, opt.sf, opt.sp
    if figall or cc or sf or sp:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            LOGGER.info('Saving graphical output.')
            format, width, height, dpi = \
                opt.figformat, opt.width, opt.height, opt.dpi
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(pca)
                plt.savefig(os.path.join(outdir, prefix + '_cc.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(pca)
                plt.savefig(os.path.join(outdir, prefix + '_sf.'+format),
                            dpi=dpi, format=format)
                plt.close('all')
            if figall or sp:
                # Parse the projection spec: whitespace-separated items may be
                # a single mode number, a comma-separated list, or a dash
                # range (all 1-based; converted to 0-based indices).
                indices = []
                for item in sp.split():
                    try:
                        if '-' in item:
                            item = item.split('-')
                            if len(item) == 2:
                                indices.append(list(range(int(item[0])-1,
                                                          int(item[1]))))
                        elif ',' in item:
                            indices.append([int(i)-1
                                            for i in item.split(',')])
                        else:
                            indices.append(int(item)-1)
                    except ValueError:
                        # Malformed items are skipped (best effort);
                        # narrowed from a bare except.
                        pass
                for index in indices:
                    plt.figure(figsize=(width, height))
                    prody.showProjection(ensemble, pca[index])
                    if isinstance(index, int):
                        index = [index]
                    index = [str(i+1) for i in index]
                    plt.savefig(os.path.join(outdir,
                                prefix + '_proj_' + '_'.join(index) +
                                '.' + format), dpi=dpi, format=format)
                    plt.close('all')
def iENM_deformationE( pdb_obj, ref_pdb_obj, prefix, selstr='calpha', dr=1, nsteps=1, const=1, cutoff=10, path_length=1 ): import prody as pd import pylab as pb import numpy as np import random as random pdb = pdb_obj.select( selstr ) natoms = pdb.numAtoms() dcd = np.zeros( ( natoms*3, 1 ) ) dcd = pdb.getCoords().reshape( natoms*3 ) ensemble = pd.Ensemble() ensemble.setCoords(dcd.reshape(natoms,3)) filename_pdb = '%s.pdb' % prefix filename_dcd = '%s.dcd' % prefix pd.writePDB( filename_pdb, pdb ) ref = ref_pdb_obj.select( selstr ) for i in xrange( 1, nsteps+1 ): print 'Calculating coordinates at step %d\n' % i # Load coordinates from previous step pdb.setCoords( dcd.reshape((natoms,3)) ) # ANM analysis anm = pd.ANM() anm.buildHessian( pdb, cutoff=cutoff, gamma=const ) H = anm.getHessian() # N = (np.shape(H)[1] / 3) if np.size(selmode) > 1: N = max(selmode) + 1 if np.size(selmode) == 1: N = selmode + 1 anm.calcModes( n_modes=N,zeros=False,turbo=True ) eVec = anm.getEigvecs() eVal = anm.getEigvals() # Normalize eigenvectors and generate gaussian if needed if np.size(selmode) > 1: rvec = np.zeros((np.shape(eVec)[0],np.size(selmode))) for i in xrange ( 0, np.size(eVec[1]) ): rvec[:,i] = norm_vec( eVec[:,selmode[i]], natoms ) rvec = gauss_vec(rvec) if np.size(selmode) == 1: if selmode == 0: rvec = norm_vec( eVec, natoms ) if selmode > 0: rvec = norm_vec( eVec[:,selmode], natoms ) # if np.size(selmode) > 1: # for i in xrange (0, np.size(eVec[1]) ): # rvec = norm_vec( eVec[:,selmode[i]], natoms ) # rvec = gauss_vec(rvec[:,selmode]) # if np.size(selmode) == 1: # rvec = norm_vec( eVec[:,selmode], natoms ) rmag = np.zeros((natoms,3)) fluct = pd.calcSqFlucts(anm[selmode]) fluct = fluct / max(fluct) rmag[:,0] = fluct rmag[:,1] = fluct rmag[:,2] = fluct rmag = rmag.reshape(natoms*3) # rmag = ((-1)**random.randint(0,1)) * path_length * rmag # Commented out to look at positive eigenvector! 
rmag = path_length * rmag rmag = rmag.reshape(natoms*3) rvec = rvec.reshape(natoms*3) rvec = np.multiply(rvec,rmag) # Predict new coords dcd = rvec + pdb.getCoords().reshape( natoms*3 ) ensemble.addCoordset(dcd.reshape(natoms,3)) pd.writeDCD( filename_dcd, ensemble )