def __init__(self, data=None, chunk_size=5, hosts=None, niceness=None,
             slave_script='', verbose=1, show_output=0, add_hosts=1,
             redistribute=1):
    """
    Set up a PVM job master and optionally register the slave hosts.

    @param data: dict of items to be processed (default: empty dict)
    @type  data: {str_id:any}
    @param chunk_size: number of items that are processed per job
    @type  chunk_size: int
    @param hosts: list of host-names (default: empty list)
    @type  hosts: [str]
    @param niceness: host niceness dictionary {str_host-name: int_niceness}
                     (default: {'default': 20})
    @type  niceness: {str:int}
    @param slave_script: absolute path to slave-script
    @type  slave_script: str
    @param verbose: verbosity level (default: 1)
    @type  verbose: 1|0
    @param show_output: display one xterm per slave (default: 0)
    @type  show_output: 1|0
    @param add_hosts: add hosts to PVM before starting (default: 1)
    @type  add_hosts: 1|0
    @param redistribute: at the end, send same job out several times
                         (default: 1)
    @type  redistribute: 1|0
    """
    ## bugfix: mutable default arguments ({} and []) are created once and
    ## shared between all calls; use None sentinels instead
    if data is None:
        data = {}
    if hosts is None:
        hosts = []
    if niceness is None:
        niceness = {'default': 20}

    if add_hosts:
        if verbose:
            T.errWrite('adding %i hosts to pvm...' % len(hosts))
        pvm.addHosts(hosts=hosts)
        if verbose:
            T.errWriteln('done')

    JobMaster.__init__(self, data, chunk_size, hosts, niceness,
                       slave_script, show_output=show_output,
                       redistribute=redistribute, verbose=verbose)

    self.progress = {}        # per-job progress bookkeeping
    self.disabled_hosts = []  # hosts excluded from further jobs
    self.slow_hosts = {}      # hosts flagged as slow
    self.verbose = verbose

    ## end of calculation is signalled on lockMsg
    self.lock = RLock()
    self.lockMsg = Condition(self.lock)

    ## this method is called when everything is calculated
    self.call_done = None
def dump(self, o):
    """
    Try to pickle an object to the currently valid path.

    @param o: object to be pickled
    @type  o: any
    @return: the absolute path to which o was pickled
    @rtype: str
    """
    try:
        f = self.local()
        T.dump(f, o)
        return f
    except:
        ## bugfix: both values must go into the format tuple -- previously
        ## only self.formatted() fed the "%" operator (TypeError: not enough
        ## arguments) and self.local() was passed as a stray second argument
        ## to errWriteln(); local() is the dump target, formatted() the spec
        T.errWriteln("Couldn't dump to %s (constructed from %s)" %
                     (self.local(), self.formatted()))
        raise
def dump(self, o):
    """
    Try to pickle an object to the currently valid path.

    @param o: object to be pickled
    @type  o: any
    @return: the absolute path to which o was pickled
    @rtype: str
    """
    try:
        f = self.local()
        T.dump(f, o)
        return f
    except:
        ## bugfix: both values must go into the format tuple -- previously
        ## only self.formatted() fed the "%" operator (TypeError: not enough
        ## arguments) and self.local() was passed as a stray second argument
        ## to errWriteln(); local() is the dump target, formatted() the spec
        T.errWriteln("Couldn't dump to %s (constructed from %s)" %
                     (self.local(), self.formatted()))
        raise
def _assign_seg_ids(self):
    """
    Assign a new segment id to each chain.

    The segment id is built from the first 3 characters of the pdb name
    plus one capital letter (A..Z) per chain, starting at
    self.chainIdOffset.
    """
    for i, chain in enumerate(self.chains):
        ## assemble segid from pdb code + one letter out of A to Z
        chain.segment_id = self.pdbname()[:3] \
                           + string.uppercase[self.chainIdOffset + i]

        try:
            ## report the changed segment id to the log
            self.log.add("changed segment ID of chain " + chain.chain_id +
                         " to " + chain.segment_id)
        except:
            T.errWriteln("_assign_seg_ids(): logerror")
def __init__(self, data=None, chunk_size=5, hosts=None, niceness=None,
             slave_script='', verbose=1, show_output=0, add_hosts=1,
             redistribute=1):
    """
    Set up a PVM job master and optionally register the slave hosts.

    @param data: dict of items to be processed (default: empty dict)
    @type  data: {str_id:any}
    @param chunk_size: number of items that are processed per job
    @type  chunk_size: int
    @param hosts: list of host-names (default: empty list)
    @type  hosts: [str]
    @param niceness: host niceness dictionary {str_host-name: int_niceness}
                     (default: {'default': 20})
    @type  niceness: {str:int}
    @param slave_script: absolute path to slave-script
    @type  slave_script: str
    @param verbose: verbosity level (default: 1)
    @type  verbose: 1|0
    @param show_output: display one xterm per slave (default: 0)
    @type  show_output: 1|0
    @param add_hosts: add hosts to PVM before starting (default: 1)
    @type  add_hosts: 1|0
    @param redistribute: at the end, send same job out several times
                         (default: 1)
    @type  redistribute: 1|0
    """
    ## bugfix: mutable default arguments ({} and []) are created once and
    ## shared between all calls; use None sentinels instead
    if data is None:
        data = {}
    if hosts is None:
        hosts = []
    if niceness is None:
        niceness = {'default': 20}

    if add_hosts:
        if verbose:
            T.errWrite('adding %i hosts to pvm...' % len(hosts))
        pvm.addHosts(hosts=hosts)
        if verbose:
            T.errWriteln('done')

    JobMaster.__init__(self, data, chunk_size, hosts, niceness,
                       slave_script, show_output=show_output,
                       redistribute=redistribute, verbose=verbose)

    self.progress = {}        # per-job progress bookkeeping
    self.disabled_hosts = []  # hosts excluded from further jobs
    self.slow_hosts = {}      # hosts flagged as slow
    self.verbose = verbose

    ## end of calculation is signalled on lockMsg
    self.lock = RLock()
    self.lockMsg = Condition(self.lock)

    ## this method is called when everything is calculated
    self.call_done = None
def extractWaters(self):
    """
    Write waters into separate pdb file, called |pdbCode|_waters.pdb.

    HOH / DOD waters are written as TIP3 residues with a single OH2 atom;
    if none are found, existing TIP3 waters are copied with all three
    atoms instead. Errors are reported to stderr but not raised.
    """
    try:
        fTarget = self.outPath + '/' + \
                  self.pdbname()[:4] + '_waters.pdb'
        pdb = PDBFile(fTarget, mode='w')

        waters = []
        for key in ['HOH', 'DOD']:
            ## 'key in dict' replaces the deprecated dict.has_key()
            if key in self.pdb.molecules:
                waters += self.pdb.molecules[key]

        pdb.nextChain(chain_id='', segment_id='1XWW')

        for w in waters:
            pdb.nextResidue('TIP3')
            ## XPLOR wants "ATOM" not "HETATM":
            pdb.het_flag = 0
            pdb.writeAtom('OH2', w.atoms['O'].position)

        ## keep TIP3 waters as well
        if len(waters) == 0:
            ## dict.get() replaces the former try/except around the lookup
            TIP3_waters = self.pdb.molecules.get('TIP3', [])

            for w in TIP3_waters:
                pdb.nextResidue('TIP3')
                ## XPLOR wants "ATOM" not "HETATM":
                pdb.het_flag = 0
                pdb.writeAtom('OH2', w.atoms['OH2'].position)
                pdb.writeAtom('H1', w.atoms['H1'].position)
                pdb.writeAtom('H2', w.atoms['H2'].position)
        pdb.close()

    except:
        T.errWriteln("Error writing waters to %s: " % fTarget)
        T.errWriteln(T.lastError())
def extractWaters(self):
    """
    Write waters into separate pdb file, called |pdbCode|_waters.pdb.

    HOH / DOD waters are written as TIP3 residues with a single OH2 atom;
    if none are found, existing TIP3 waters are copied with all three
    atoms instead. Errors are reported to stderr but not raised.
    """
    try:
        fTarget = self.outPath + '/' + \
                  self.pdbname()[:4] + '_waters.pdb'
        pdb = PDBFile(fTarget, mode='w')

        waters = []
        for key in ['HOH', 'DOD']:
            ## 'key in dict' replaces the deprecated dict.has_key()
            if key in self.pdb.molecules:
                waters += self.pdb.molecules[key]

        pdb.nextChain(chain_id='', segment_id='1XWW')

        for w in waters:
            pdb.nextResidue('TIP3')
            ## XPLOR wants "ATOM" not "HETATM":
            pdb.het_flag = 0
            pdb.writeAtom('OH2', w.atoms['O'].position)

        ## keep TIP3 waters as well
        if len(waters) == 0:
            ## dict.get() replaces the former try/except around the lookup
            TIP3_waters = self.pdb.molecules.get('TIP3', [])

            for w in TIP3_waters:
                pdb.nextResidue('TIP3')
                ## XPLOR wants "ATOM" not "HETATM":
                pdb.het_flag = 0
                pdb.writeAtom('OH2', w.atoms['OH2'].position)
                pdb.writeAtom('H1', w.atoms['H1'].position)
                pdb.writeAtom('H2', w.atoms['H2'].position)
        pdb.close()

    except:
        T.errWriteln("Error writing waters to %s: " % fTarget)
        T.errWriteln(T.lastError())
def test_delphiCharges2(self):
    """
    PDB2DelphiCharges test
    """
    local = self.local

    if local:
        T.errWrite('loading PDB...')
    self.m1 = self.MODEL or PDBModel(T.testRoot('lig/1A19_dry.model'))
    Test.MODEL = self.m1
    if local:
        T.errWriteln('Done.')
        T.errWrite('Adding hydrogens to model (reduce)...')

    ## protonate and convert atom names before charging
    self.rmodel = Reduce(self.m1, verbose=local).run()
    self.rmodel.xplor2amber()
    if local:
        T.errWriteln('Done.')

    ac = AtomCharger()
    ac.charge(self.rmodel)
    self.rmodel.addChainFromSegid()

    self.dc = PDB2DelphiCharges(self.rmodel)
    self.dc.prepare()

    ## each residue should appear in two variants
    self.assertEqual(len(self.dc.resmap['LYS']), 2)  # normal and N'
    self.assertEqual(len(self.dc.resmap['SER']), 2)  # normal and C'

    if local:
        T.errWriteln('writing delphi charge file to %s' % self.fcrg)
    self.dc.tofile(self.fcrg)
    self.assert_(os.path.exists(self.fcrg))
def test_delphiCharges2(self):
    """
    PDB2DelphiCharges test
    """
    local = self.local

    if local:
        T.errWrite('loading PDB...')
    self.m1 = self.MODEL or PDBModel(T.testRoot('lig/1A19_dry.model'))
    Test.MODEL = self.m1
    if local:
        T.errWriteln('Done.')
        T.errWrite('Adding hydrogens to model (reduce)...')

    ## protonate and convert atom names before charging
    self.rmodel = Reduce(self.m1, verbose=local).run()
    self.rmodel.xplor2amber()
    if local:
        T.errWriteln('Done.')

    ac = AtomCharger()
    ac.charge(self.rmodel)
    self.rmodel.addChainFromSegid()

    self.dc = PDB2DelphiCharges(self.rmodel)
    self.dc.prepare()

    ## each residue should appear in two variants
    self.assertEqual(len(self.dc.resmap['LYS']), 2)  # normal and N'
    self.assertEqual(len(self.dc.resmap['SER']), 2)  # normal and C'

    if local:
        T.errWriteln('writing delphi charge file to %s' % self.fcrg)
    self.dc.tofile(self.fcrg)
    self.assert_(os.path.exists(self.fcrg))
def reportClustering(self, raw=None): """ Report the clustering result. Writes: - clustering results to L{F_CLUSTER_LOG} - blast records to L{F_BLAST_OUT} - blast records of centers to L{F_CLUSTER_BLAST_OUT} - raw clustering results to L{F_CLUSTER_RAW} if raw not None @param raw: write raw clustering result to disk (default: None) @type raw: 1|0 """ try: if self.verbose: f = open(self.outFolder + self.F_CLUSTER_LOG, 'w', 1) for cluster in self.clusters: f.write("%i\t" % (len(cluster))) for id in cluster: f.write("%s:%.2f " % (id, self.record_dic[id].resolution)) f.write("\n") f.close() ## write blast records of centers to disc centers = [c[0] for c in self.clusters] self.writeClusteredBlastResult( \ self.outFolder + self.F_BLAST_OUT, self.outFolder + self.F_CLUSTER_BLAST_OUT, centers ) self.copyClusterOut(raw=raw) except IOError, why: T.errWriteln("Can't write cluster report." + str(why))
def reportClustering( self, raw=None ): """ Report the clustering result. Writes: - clustering results to L{F_CLUSTER_LOG} - blast records to L{F_BLAST_OUT} - blast records of centers to L{F_CLUSTER_BLAST_OUT} - raw clustering results to L{F_CLUSTER_RAW} if raw not None @param raw: write raw clustering result to disk (default: None) @type raw: 1|0 """ try: if self.verbose: f = open( self.outFolder +self.F_CLUSTER_LOG, 'w', 1) for cluster in self.clusters: f.write( "%i\t" % ( len( cluster ))) for id in cluster: f.write("%s:%.2f "%(id,self.record_dic[id].resolution)) f.write( "\n") f.close() ## write blast records of centers to disc centers = [ c[0] for c in self.clusters ] self.writeClusteredBlastResult( \ self.outFolder + self.F_BLAST_OUT, self.outFolder + self.F_CLUSTER_BLAST_OUT, centers ) self.copyClusterOut( raw=raw ) except IOError, why: T.errWriteln( "Can't write cluster report." + str(why) )
os.unlink(f) t.dump(complex_lst, options['o']) else: subLst = checkListStatus(complex_lst, update, force, version) if subLst: ## initialize nodes, and start distributed calculation master = ContactMaster(complex_lst, int(options['c']), cpus_all[:host_number], refComplex=refComplex, updateOnly=update, force=force, niceness=nice_dic, outFile=options['o'], com_version=version, show_output=show_x, add_hosts=add_hosts) master.start() else: t.flushPrint("\n #### Nothing to update! #### ") except IOError, why: t.errWriteln("IOError while working on %s:" % t.absfile(options['i']) \ + str(why) ) t.errWriteln(t.lastErrorTrace())
for fname in srcfiles: fname = t.absfile( fname ) shutil.copy( fname, fname + '_' ) methods = re_lst( module, exclude ) fold = open( fname + '_' ) fnew = open( fname, 'w' ) i = 0 for l in fold: i += 1 l = replace_import_statement( l, module, importas ) l, occurrences = replace_line( l, methods, importas + '.' ) if occurrences > 0: t.errWriteln( '%s %5i %2i matches:\n\t%s' % (t.stripFilename(fname), i, occurrences, l) ) fnew.write( l ) fnew.close() fold.close() except: syntax()
for fname in srcfiles: fname = t.absfile(fname) shutil.copy(fname, fname + '_') methods = re_lst(module, exclude) fold = open(fname + '_') fnew = open(fname, 'w') i = 0 for l in fold: i += 1 l = replace_import_statement(l, module, importas) l, occurrences = replace_line(l, methods, importas + '.') if occurrences > 0: t.errWriteln('%s %5i %2i matches:\n\t%s' % (t.stripFilename(fname), i, occurrences, l)) fnew.write(l) fnew.close() fold.close() except: syntax()
complex_lst += sub os.unlink( f ) t.dump( complex_lst, options['o'] ) else: subLst = checkListStatus(complex_lst, update, force, version ) if subLst: ## initialize nodes, and start distributed calculation master = ContactMaster(complex_lst, int( options['c'] ), cpus_all[:host_number], refComplex = refComplex, updateOnly = update, force = force, niceness = nice_dic, outFile = options['o'], com_version = version, show_output = show_x, add_hosts = add_hosts) master.start() else: t.flushPrint( "\n #### Nothing to update! #### " ) except IOError, why: t.errWriteln("IOError while working on %s:" % t.absfile(options['i']) \ + str(why) ) t.errWriteln( t.lastErrorTrace() )