Example no. 1
0
    def internalRun(self):
        """Performs the analysis in parallel mode.

        One "analysisPerElement" task is queued per frame index; slave
        processes execute the tasks and the master combines the partial
        results as they come back.

        NOTE(review): this block is truncated in the present view --
        the trailing bare ``except:`` has no visible handler body.
        """

        from Scientific.DistributedComputing.MasterSlave import initializeMasterProcess, TaskRaisedException, GlobalStateValue

        # Start the Pyro server through which master and slaves communicate.
        pyroServer = setUpPyroServer()

        if self.taskName is None:
            # This should be enough to create a unique task name.
            self.taskName = '%s_%s_%s' % (self.db_shortname, getpass.getuser(), '_'.join(asctime().split()))

        # Pick the slave-startup strategy according to the target architecture.
        if self.architecture == 'multiprocessor':
            tasks = initializeMasterProcess(self.taskName)
            
        elif self.architecture == 'cluster':
            # Cluster slaves import their code from the named module.
            tasks = initializeMasterProcess(self.taskName, slave_module='nMOLDYN.Analysis.Slave')
            
        else:
            raise Error('Illegal parallel mode %s' % self.architecture)

        # The instance of the running analysis is made global for all the process.
        tasks.setGlobalState(analysis=self)
        
        # The master registers the tasks to be done by the slave.
        # GlobalStateValue(1, 'analysis') references the state registered above.
        for fIndex in self.frameIndexes:
            task_id = tasks.requestTask("analysisPerElement",
                                        GlobalStateValue(1, 'analysis'),
                                        fIndex,
                                        self.trajectoryFilename)

        # The slaves are started but the calculation is not started actually.
        # In case of a cluster run, wait for calls to the task_manager.
        startSlaves(self.taskName, self.architecture, self.numberOfProcs)

        # The analysis actual starting time.
        self.chrono = default_timer()

        # Retrieve one result per queued task, in completion order
        # (fIndex is only a counter here; the real frame index comes
        # back inside the result tuple).
        for fIndex in self.frameIndexes:
            try:
                task_id, tag, (frameIndex, x) = tasks.retrieveResult("analysisPerElement")
                self.combine(frameIndex,x)
                self.updateJobProgress(self.nFrames)
                
            except TaskRaisedException, e:
                # Log the slave-side traceback before propagating the error.
                LogMessage('error', e.traceback, ['console'])
                raise
            
            except:
Example no. 2
0
    def internalRun(self):
        """Performs the analysis in parallel mode.

        Per-atom variant: one "analysisPerElement" task is queued per
        atom index in the selected subset; slave processes execute the
        tasks and the master combines the partial results.

        NOTE(review): this block is truncated in the present view --
        the trailing bare ``except:`` has no visible handler body.
        """

        from Scientific.DistributedComputing.MasterSlave import initializeMasterProcess, TaskRaisedException, GlobalStateValue

        # The Pyro server is setup.
        pyroServer = setUpPyroServer()
        
        # If no task name was assigned to the job, build one.
        if self.taskName is None:
            self.taskName = '%s_%s_%s' % (self.db_shortname, getpass.getuser(), '_'.join(asctime().split()))

        # Pick the slave-startup strategy according to the target architecture.
        if self.architecture == 'multiprocessor':
            tasks = initializeMasterProcess(self.taskName)
            
        elif self.architecture == 'cluster':
            # Cluster slaves import their code from the named module.
            tasks = initializeMasterProcess(self.taskName, slave_module='nMOLDYN.Analysis.Slave')
            
        else:
            raise Error('Illegal parallel mode %s' % self.architecture)

        # Make the running analysis instance visible to every slave.
        tasks.setGlobalState(analysis=self)
        
        # Queue one task per atom of the selection; GlobalStateValue(1,
        # 'analysis') references the state registered above.
        for aIndex in self.subset:
            task_id = tasks.requestTask("analysisPerElement",
                                        GlobalStateValue(1, 'analysis'),
                                        aIndex,
                                        self.trajectoryFilename)

        # Start the slaves; actual computation begins when they call in.
        startSlaves(self.taskName, self.architecture, self.numberOfProcs)
        
        # The analysis actual starting time.
        self.chrono = default_timer()

        # Retrieve one result per queued task, in completion order
        # (aIndex is only a counter; the real atom index comes back
        # inside the result tuple).
        for aIndex in self.subset:
            try:                
                task_id, tag, (atomIndex, x) = tasks.retrieveResult("analysisPerElement")
                self.combine(atomIndex, x)
                self.updateJobProgress(self.nSelectedAtoms)
                
            except TaskRaisedException, e:
                # Log the slave-side traceback before propagating the error.
                LogMessage('error', e.traceback, ['console'])
                raise
                
            except:
#
# You can run as many slaves as you want (though for this trivial example,
# the first slave will do all the work before you have time to start a
# second one), and you can run them on any machine on the same local
# network as the one that runs the master process.
#
# See the Pyro manual for other setups, e.g. running slaves on remote
# machines connected to the Internet.
#
# Also see master_slave_demo.py to see how both master and slave can be
# combined within a single script, which is more convenient for short
# scripts.
#

from Scientific.DistributedComputing.MasterSlave import \
     initializeMasterProcess, TaskRaisedException

tasks = initializeMasterProcess("demo", slave_script="slave.py")

# Do the master's work
for i in range(5):
    # For i==0 this raises an exception
    task_id = tasks.requestTask("sqrt", float(i-1))
for i in range(5):
    try:
        task_id, tag, result = tasks.retrieveResult("sqrt")
        print result
    except TaskRaisedException, e:
        print "Task %s raised %s" % (e.task_id, str(e.exception))
        print e.traceback
Example no. 4
0
"""
Parallel versions of the most time-consuming routines in CDTK

.. moduleauthor:: Konrad Hinsen <*****@*****.**>
"""

#import Scientific.DistributedComputing.TaskManager
#Scientific.DistributedComputing.TaskManager.debug = True
#import Scientific.DistributedComputing.MasterSlave
#Scientific.DistributedComputing.MasterSlave.debug = True

from Scientific.DistributedComputing.MasterSlave \
     import initializeMasterProcess, TaskRaisedException, GlobalStateValue
import os

# One task manager per process: embedding the PID in the computation
# name keeps concurrent CDTK runs from colliding.
tasks = initializeMasterProcess("CDTK_%d" % os.getpid(),
                                slave_module="CDTK.SlaveProcesses")
# Optionally auto-launch local slave jobs.  The default of 0 means no
# slaves are launched here (presumably the user starts them manually
# -- confirm against the deployment docs).
nprocs = int(os.environ.get("CDTK_DISTRIBUTED_PROCESSES", 0))
if nprocs > 0:
    tasks.launchSlaveJobs(nprocs)

def _evaluateModel_distributed(self, sf, pd, adpd, deriv):
    if not self._distribution_initialized:
        self._distributed_state_id = \
               tasks.setGlobalState(sv = self.sv,
                                    p = self.p,
                                    f_atom = self.f_atom,
                                    e_indices = self.element_indices)
        self._distribution_initialized = True
    ntasks = max(int(os.environ.get("CDTK_DISTRIBUTED_REFINEMENT_TASKS", 0)), 1)
    n = self.natoms/ntasks
    if self.natoms % ntasks > 0: n += 1