def parallelRun(self, verbose=False):
    """
    parallelRun: Worker message loop for parallel reconstruction.

    Listens for ReconstructionMessage jobs from the master (rank 0), runs
    each job via self.run(), and replies with a 'Finished' StatusMessage.
    The loop terminates when a StatusMessage with status 'End' arrives.

    @param verbose: print every received message string. Default is False.
    """
    import pytom_mpi
    from pytom.parallel.messages import StatusMessage, MessageError
    from pytom.basic.exceptions import ParameterError
    from pytom.basic.structures import PyTomClassError

    end = False

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    mpi_id = pytom_mpi.rank()

    while not end:
        # listen for messages
        mpi_msgString = pytom_mpi.receive()

        if verbose:
            print(mpi_msgString)

        try:
            # wait for job and start processing
            msg = ReconstructionMessage()
            msg.fromStr(mpi_msgString)

            self.setJob(msg)
            self.run()

            resultMsg = StatusMessage(mpi_id, '0')
            resultMsg.setStatus('Finished')
            pytom_mpi.send(str(resultMsg), 0)

        except (MessageError, PyTomClassError, ParameterError):
            try:
                # not a job: interpret as StatusMessage and finish on 'End'
                msg = StatusMessage('', '')
                msg.fromStr(mpi_msgString)
                if msg.getStatus() == 'End':
                    end = True
            except (MessageError, PyTomClassError, ParameterError):
                # bug fix: was print + `assert False` — asserts are stripped
                # under -O; raise a real error (consistent with parallelWork)
                raise RuntimeError(
                    'Error parsing message. Message either unknown or invalid.')
        except Exception as e:
            # bug fix: was a bare `except: print('wild except')` which also
            # swallowed SystemExit/KeyboardInterrupt and hid the actual error.
            # Keep the worker loop alive (best-effort) but log the cause.
            print('wild except: ' + str(e))
def growingAverage(particleClassLists,score,angleObject,mask,destinationDirectory,preprocessing,verbose=False):
    """
    growingAverage: Entry point for growing-average processing.

    With more than one MPI process, rank 0 acts as manager and all other
    ranks act as workers; with a single process the job runs sequentially.
    MPI is finalised before returning.
    """
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    if pytom_mpi.size() <= 1:
        # only one process available: run everything locally
        print('Processing in sequential mode')
        localManager = GAManager(particleClassLists, score, angleObject, mask,
                                 destinationDirectory, preprocessing)
        localManager.sequentialGA(verbose)
    else:
        myRank = pytom_mpi.rank()
        if myRank == 0:
            # master: distribute the work, then tell workers to stop
            master = GAManager(particleClassLists, score, angleObject, mask,
                               destinationDirectory, preprocessing)
            master.parallelGA(verbose)
            master.parallelEnd()
        else:
            # worker: process jobs until the end message arrives
            GAWorker(myRank).run()

    pytom_mpi.finalise()
def run(self):
    """
    run: Worker message loop for growing-average jobs.

    Receives GrowingAverageJobMessage objects from rank 0, configures this
    worker from each job and executes it via self._run(), then replies with
    a GrowingAverageResultMessage. A StatusMessage with status 'End'
    terminates the loop.
    """
    import pytom_mpi
    from pytom.parallel.messages import StatusMessage,MessageError
    from pytom.basic.exceptions import ParameterError
    from pytom.basic.structures import PyTomClassError
    if not pytom_mpi.isInitialised():
        pytom_mpi.init()
    end = False
    while not end:
        try:
            # blocking wait for the next message from the master
            mpi_msgString = pytom_mpi.receive()
            msg = GrowingAverageJobMessage('','')
            msg.fromStr(mpi_msgString)
            # configure this worker from the received job and process it
            self.fromJob(msg.getJob())
            self._run()
            # report completion back to rank 0
            returnMessage = GrowingAverageResultMessage(self._mpiId,0)
            pytom_mpi.send(returnMessage.__str__(),0)
        except (MessageError,PyTomClassError,ParameterError,IndexError):
            #as StatusMessage and finish
            msg = StatusMessage('','')
            msg.fromStr(mpi_msgString)
            if msg.getStatus() == 'End':
                end = True
def __init__(self, suffix=''):
    """Initialise this node: bring up MPI if needed, derive id and name from the rank, and clear previous state."""
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    self.suffix = suffix
    rank = pytom_mpi.rank()
    self.mpi_id = rank
    self.name = 'node_' + str(rank)
    self.clean()
def __init__(self):
    """Set up this worker node: ensure MPI is running, derive id/name from the rank, and reset the run counter."""
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    rank = pytom_mpi.rank()
    self.mpi_id = rank
    self.name = 'node_' + str(rank)
    self.runtimes = 0
def __init__(self):
    """
    Initialise the master node: record rank, worker count and node name.

    @raise RuntimeError: if fewer than one worker node is available.
    """
    # consistency fix: this constructor used pytom_mpi without importing it,
    # unlike the sibling constructors which all import it locally.
    # (A module-level import may exist outside this view — harmless either way.)
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()
    self.mpi_id = pytom_mpi.rank()
    self.num_workers = pytom_mpi.size() - 1
    self.node_name = 'node_' + str(self.mpi_id)

    if self.num_workers < 1:
        raise RuntimeError("Not enough nodes to parallelize the job!")
def __init__(self):
    """Initialise the manager: bring up MPI, record this rank, count workers and start with an empty job list."""
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    totalNodes = pytom_mpi.size()
    self._mpi_id = pytom_mpi.rank()
    self._numberWorkers = totalNodes - 1
    self._jobList = []
def parallelInit(self):
    '''
    parallelInit: Initialization for the parallelization

    Ensures MPI is initialised and records the available worker count.

    @raise RuntimeError: if fewer than two cluster nodes are available.
    '''
    import pytom_mpi

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    totalNodes = pytom_mpi.size()
    if totalNodes < 2:
        raise RuntimeError(
            'Number of available cluster nodes is less than 2.')

    self.numWorkers = totalNodes - 1
def parallelEnd(self):
    """
    parallelEnd : Sends status message = end to all workers. All workers
    will terminate upon receiving this message.

    @author: Thomas Hrabe
    """
    import pytom_mpi
    from pytom.parallel.messages import StatusMessage

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    myRank = pytom_mpi.rank()
    # one 'End' message per worker rank (ranks 1..size-1)
    for workerId in range(1, pytom_mpi.size()):
        endMsg = StatusMessage(str(myRank), str(workerId))
        endMsg.setStatus("End")
        pytom_mpi.send(str(endMsg), workerId)
def distributeAverage(particleList,averageName,showProgressBar = False,verbose=False,createInfoVolumes = False,sendEndMessage = False):
    """
    distributeAverage : Distributes averaging to multiple nodes

    @param particleList: The particles
    @param averageName: Filename of new average
    @param showProgressBar: show a progress bar during averaging
    @param verbose: Prints particle information. Disabled by default.
    @param createInfoVolumes: Create info data (wedge sum, inverted density) too? False by default.
    @param sendEndMessage: send the end message to workers afterwards
    @return: A new Reference object
    @rtype: L{pytom.basic.structures.Reference}
    @author: Thomas Hrabe
    """
    import pytom_mpi

    mpiInitialized = pytom_mpi.isInitialised()
    mpiAvailable = False
    # NOTE(review): if MPI was already initialised elsewhere, mpiAvailable
    # stays False and we fall back to sequential mode — confirm intended.
    if not mpiInitialized:
        try:
            pytom_mpi.init()
            if pytom_mpi.size() > 1:
                mpiAvailable = True
        except Exception:
            # bug fix: was a bare `except:`; keep the deliberate best-effort
            # fallback but no longer swallow SystemExit/KeyboardInterrupt
            print('Could not initialize MPI properly! Running in sequential mode!')

    if mpiAvailable:
        if pytom_mpi.rank() == 0:
            # NOTE(review): helper name is misspelled ('disrtibute') — it is
            # defined elsewhere with the same spelling; rename both together.
            return _disrtibuteAverageMPI(particleList, averageName, showProgressBar,
                                         verbose, createInfoVolumes, sendEndMessage)
        else:
            # worker rank: serve averaging jobs until told to stop
            from pytom.alignment.ExMaxAlignment import ExMaxWorker
            worker = ExMaxWorker()
            worker.parallelRun(False)
    else:
        print('MPI not available')
        return average(particleList, averageName, showProgressBar, verbose,
                       createInfoVolumes)
def distributeExpectation(particleLists, iterationDirectory, averagePrefix, verbose=False, symmetry=None):
    """
    distributeExpectation: Distributes particle expectation (averaging) to multiple workers.
    Required by many algorithms such as MCOEXMX. Must run on rank 0.

    @param particleLists: list of particleLists (one per class)
    @param iterationDirectory: directory of this iteration; classN/ subdirectories are created inside
    @param averagePrefix: file-name prefix for the class average volumes
    @param verbose: print distributed job messages. Default False.
    @param symmetry: optional symmetry object applied to the particle lists
    @return: ReferenceList with one Reference per class average
    @raise RuntimeError: if not called on rank 0, or fewer than 2 MPI nodes
    @raise IOError: if iterationDirectory does not exist
    """
    import pytom_mpi
    from pytom.tools.files import checkDirExists
    from pytom.parallel.alignmentMessages import ExpectationJobMsg, ExpectationResultMsg
    from pytom.alignment.structures import ExpectationJob
    from pytom.basic.structures import Reference, ReferenceList
    from os import mkdir

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    mpi_myid = pytom_mpi.rank()
    if not mpi_myid == 0:
        raise RuntimeError(
            'This function (distributeExpectation) can only be processed by mpi_id = 0! ID == '
            + str(mpi_myid) + ' Aborting!')

    if not checkDirExists(iterationDirectory):
        raise IOError('The iteration directory does not exist. ' + iterationDirectory)

    mpi_numberNodes = pytom_mpi.size()
    if mpi_numberNodes <= 1:
        raise RuntimeError('You must run clustering with openMPI on multiple CPUs')

    listIterator = 0
    referenceList = ReferenceList()

    # distribute the first batch of jobs, one per worker node
    for i in range(1, mpi_numberNodes):
        if verbose:
            print('Starting first job distribute step')

        if listIterator < len(particleLists):
            if not checkDirExists(iterationDirectory + 'class' + str(listIterator) + '/'):
                mkdir(iterationDirectory + 'class' + str(listIterator) + '/')

            averageName = iterationDirectory + 'class' + str(
                listIterator) + '/' + averagePrefix + '-' + str(listIterator) + '.em'

            # bug fix: symmetry defaults to None but was dereferenced
            # unconditionally (crashed on .isOneFold()) — guard it
            if symmetry is not None and not symmetry.isOneFold():
                newPl = symmetry.apply(particleLists[listIterator])
                job = ExpectationJob(newPl, averageName)
            else:
                job = ExpectationJob(particleLists[listIterator], averageName)

            newReference = Reference(averageName, particleLists[listIterator])
            referenceList.append(newReference)

            jobMsg = ExpectationJobMsg(0, str(i))
            jobMsg.setJob(job)
            pytom_mpi.send(str(jobMsg), i)
            if verbose:
                print(jobMsg)
            listIterator = listIterator + 1

    finished = False
    receivedMsgCounter = 0

    # there are more jobs than nodes: keep distributing as results come back
    while not finished:
        # listen and collect
        mpi_msgString = pytom_mpi.receive()
        if verbose:
            print(mpi_msgString)

        jobResultMsg = ExpectationResultMsg('', '')
        jobResultMsg.fromStr(mpi_msgString)
        receivedMsgCounter = receivedMsgCounter + 1

        # send a new job to the node that just finished
        if listIterator < len(particleLists):
            if not checkDirExists(iterationDirectory + 'class' + str(listIterator) + '/'):
                mkdir(iterationDirectory + 'class' + str(listIterator) + '/')

            averageName = iterationDirectory + 'class' + str(
                listIterator) + '/' + averagePrefix + '-' + str(listIterator) + '.em'

            # NOTE(review): unlike the first loop, symmetry is not applied
            # here — confirm whether that asymmetry is intended
            job = ExpectationJob(particleLists[listIterator], averageName)
            newReference = Reference(averageName, particleLists[listIterator])
            referenceList.append(newReference)

            jobMsg = ExpectationJobMsg(0, str(jobResultMsg.getSender()))
            jobMsg.setJob(job)
            # bug fix: previously sent to the stale loop index `i` left over
            # from the first distribution loop instead of the free worker
            pytom_mpi.send(str(jobMsg), int(jobResultMsg.getSender()))
            if verbose:
                print(jobMsg)
            listIterator = listIterator + 1

        finished = listIterator >= len(particleLists) and receivedMsgCounter == len(particleLists)

    return referenceList
def multiRef_EXMXAlign(multiRefJob, doFinalize=True, verbose=False):
    """
    multiRef_EXMXAlign: Performs multi reference alignment on a particle list

    Rank 0 drives the iterations: per-class resolution determination,
    expectation (class averaging), per-class refinement, re-classification —
    until the iteration limit is hit or class assignments converge.
    All other ranks run an ExMaxWorker message loop.

    @param multiRefJob: The multi reference alignment job
    @param doFinalize: Send finalize msgs to workers or not. Default is true
    @param verbose: Default is false
    @return: [particleList, alignmentLists] on rank 0; None on workers
    """
    import pytom_mpi

    # NOTE(review): MPI is initialised only when doFinalize is set — the
    # caller presumably initialised MPI already when doFinalize=False; confirm.
    if doFinalize:
        pytom_mpi.init()

    if pytom_mpi.rank() == 0:
        from pytom.alignment.ExMaxAlignment import ExMaxManager
        from pytom.tools.files import checkDirExists
        from os import mkdir
        from pytom.basic.resolution import bandToAngstrom, angstromToBand, angleFromResolution

        particleList = multiRefJob.getParticleList()
        initialParticleList = particleList
        previousParticleList = initialParticleList

        destinationDirectory = multiRefJob.getDestinationDirectory()
        numberIterations = multiRefJob.getNumberIterations()
        numberClasses = multiRefJob.getNumberClasses()
        exMaxJob = multiRefJob.getExMaxJob()

        p = particleList[0]
        pVol = p.getVolume()
        cubeSize = pVol.sizeX()

        preprocessing = exMaxJob.getPreprocessing()
        sampleInfo = exMaxJob.getSampleInformation()

        if verbose:
            print(multiRefJob)

        if not checkDirExists(destinationDirectory):
            raise IOError('Destination directory ' + destinationDirectory + ' not found!')

        try:
            particleLists = particleList.splitByClass()
            if len(particleLists) <= 1:
                raise Exception()
        except Exception:
            from pytom.cluster.clusterFunctions import randomiseParticleListClasses

            if numberClasses:
                if verbose:
                    print('Randomizing particle list')
                # seed clustering with random class assignments
                particleList = randomiseParticleListClasses(particleList, numberClasses)
                particleList.toXMLFile(destinationDirectory + '/RandomisedParticleList.xml')
                particleLists = particleList.splitByClass()
            else:
                raise RuntimeError(
                    'The particle list provided is not pre-classified and you did not set numberClasses for a random seed!'
                )

        iteration = 0
        converged = False

        while iteration < numberIterations and (not converged):
            if verbose:
                print('Running iteration ' + str(iteration) + ' of ' + str(numberIterations))

            iterationDirectory = destinationDirectory + '/' + str(iteration) + '/'
            if not checkDirExists(iterationDirectory):
                mkdir(iterationDirectory)

            # determine resolution of all classes
            maxRes = 0
            minRes = 1000000

            if not checkDirExists(iterationDirectory + 'resolution/'):
                mkdir(iterationDirectory + 'resolution/')

            for classIterator in range(len(particleLists)):
                currentParticleList = particleLists[classIterator]
                if len(currentParticleList) > 1:
                    # bug fix: cubeSize / 2 is a float in Python 3; the band
                    # count must be integral -> floor division
                    [resNyquist, resolutionBand, numberBands] = currentParticleList.determineResolution(
                        criterion=exMaxJob.getFSCCriterion(),
                        numberBands=cubeSize // 2,
                        mask=exMaxJob.getMask(),
                        keepHalfsetAverages=False,
                        halfsetPrefix=iterationDirectory + 'resolution/' + 'class' + str(classIterator) + '_fsc-',
                        verbose=verbose)
                else:
                    # single-particle classes cannot yield an FSC resolution
                    continue

                resolutionAngstrom = bandToAngstrom(resolutionBand, sampleInfo.getPixelSize(), numberBands, 1)
                #resolutionAngstrom = bandToAngstrom(resolutionBand,sampleInfo.getPixelSize(),numberBands,exMaxJob.getBinning() )

                if resolutionBand > maxRes:
                    maxRes = resolutionBand
                if resolutionBand < minRes:
                    minRes = resolutionBand

                if verbose:
                    print('Class ', classIterator,
                          ' - current resolution :' + str(resolutionAngstrom) + ' Angstrom')

            # set highest frequency according to user specification
            band = maxRes
            if not multiRefJob.getUseMaxResolution():
                band = minRes

            if band == numberBands:
                # determineResolution returns numberBands for filter if fsc result
                # is invalid. in that case, use nyquist / 2 as filter setting
                print('Warning MultiRefAlignment.py: LL 114')
                print('Warning: Resolution determined for all classes was invalid. Will use Nyquist/2 for current iteration')
                # bug fix: floor division keeps the band integral in Python 3
                band = numberBands // 2

            preprocessing.setHighestFrequency(band)
            exMaxJob.setPreprocessing(preprocessing)

            alignmentLists = [None] * len(particleLists)

            # generate cluster centers
            referenceList = distributeExpectation(
                particleLists, iterationDirectory,
                'clusterCenter' + str(iteration), verbose, exMaxJob.getSymmetry())

            for classIterator in range(len(particleLists)):
                classDirectory = iterationDirectory + 'class' + str(classIterator) + '/'

                # determine distance for all particles
                refinementDirectory = classDirectory + 'refinement/'
                if verbose:
                    print(refinementDirectory)
                if not checkDirExists(refinementDirectory):
                    mkdir(refinementDirectory)

                exMaxJob.setParticleList(particleList)
                exMaxJob.setReference(referenceList[classIterator])
                exMaxJob.setDestination(refinementDirectory)

                # run refinement
                manager = ExMaxManager(exMaxJob)
                manager.distributeAlignment(verbose)
                alignmentLists[classIterator] = manager.getAlignmentList()
                alignmentLists[classIterator].toXMLFile(
                    iterationDirectory + 'AlignmentList' + str(classIterator) + '.xml')

            # perform classification here
            if verbose:
                print('Classifying after iteration ' + str(iteration))
            particleList = classifyParticleList(initialParticleList, alignmentLists, verbose)
            particleList.toXMLFile(iterationDirectory + 'classifiedParticles.xml')
            particleLists = particleList.splitByClass()

            difference = previousParticleList.classDifference(particleList)
            converged = multiRefJob.getEndThreshold() >= difference[3]

            # set up for next round!
            previousParticleList = particleList
            iteration = iteration + 1

        if doFinalize:
            # NOTE(review): `manager` is unbound if the loop body never ran
            manager.parallelEnd()
            pytom_mpi.finalise()

        return [particleList, alignmentLists]

    else:
        from pytom.alignment.ExMaxAlignment import ExMaxWorker
        worker = ExMaxWorker()
        worker.parallelRun()
        pytom_mpi.finalise()
def parallelReconstruction(particleList, projectionList, cubeSize, binning, applyWeighting,verbose=False):
    """
    parallelReconstruction: Distributes particle reconstruction to workers.

    Rank 0 splits the particle list into one chunk per worker, sends each
    worker a ReconstructionMessage, collects 'Finished' replies and finally
    sends 'End' to every worker. Other ranks run the ReconstructionWorker
    message loop. MPI is finalised before returning.

    @param particleList: particles to reconstruct
    @param projectionList: projections used for reconstruction
    @param cubeSize: edge length of the reconstructed volume
    @param binning: binning factor
    @param applyWeighting: apply projection weighting?
    @param verbose: verbose worker output. Default False.
    """
    import pytom_mpi
    from pytom.parallel.messages import StatusMessage

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    mpi_id = pytom_mpi.rank()

    if mpi_id == 0:
        numberWorkers = pytom_mpi.size() - 1

        # split particleList into exactly numberWorkers chunks.
        # bug fix: `len(particleList) / numberWorkers` is a float in Python 3
        # and range() rejects a float step; the old equal-size split also
        # silently dropped remainder particles. Spread the remainder over the
        # first chunks instead.
        chunkSize = len(particleList) // numberWorkers
        remainder = len(particleList) % numberWorkers
        pl = []
        start = 0
        for i in range(numberWorkers):
            stop = start + chunkSize + (1 if i < remainder else 0)
            pl.append(particleList[start:stop])
            start = stop

        for i in range(0, numberWorkers):
            msg = ReconstructionMessage(0, i + 1, pl[i], projectionList, cubeSize, binning, applyWeighting)
            pytom_mpi.send(str(msg), i + 1)

        finished = False
        msgCounter = 0
        while not finished:
            mpi_msgString = pytom_mpi.receive()
            msg = StatusMessage(1, '0')
            msg.fromStr(mpi_msgString)
            if not msg.getStatus() == 'Finished':
                print('Worker ' + str(msg.getSender()) + ' sent status: ' + str(msg.getStatus()))
            msgCounter += 1
            finished = msgCounter == numberWorkers

        # tell all workers to terminate
        for i in range(0, numberWorkers):
            msg = StatusMessage(mpi_id, '0')
            msg.setStatus('End')
            pytom_mpi.send(str(msg), i + 1)
    else:
        worker = ReconstructionWorker()
        worker.parallelRun(verbose)

    pytom_mpi.finalise()
def mcoAC(annealingJob, doFinalize=True, verbose=False):
    """
    mcoAC: Performs mcoAC clustering on particleList

    Rank 0 drives simulated-annealing iterations: each round runs a short
    local mcoEXMX burst, tracks the best-scoring classification seen so far,
    re-classifies against the annealing temperature and cools down until the
    temperature schedule is exhausted or the class assignment converges.
    All other ranks run an MCOEXMXWorker message loop.

    @param annealingJob: the annealing clustering job (temperature, criterion, ExMax sub-job, ...)
    @param doFinalize: Send finalize messages to workers or not. Default is true. Should be false when this process is integrated into another parallel process.
    @param verbose: Default is false
    @return: best particle list found (rank 0 only; workers return None)
    """
    import pytom_mpi

    # NOTE(review): MPI is initialised only when doFinalize is set — presumably
    # the embedding process initialised MPI when doFinalize=False; confirm.
    if doFinalize:
        pytom_mpi.init()

    if pytom_mpi.rank() == 0:
        # master branch: drive the annealing iterations
        from pytom.cluster.mcoEXMX import mcoEXMX
        from pytom.tools.files import checkDirExists
        from os import mkdir, system

        particleList = annealingJob.getParticleList()
        if len(particleList) == 0:
            raise RuntimeError('Particle list is empty! Abort!')

        initialParticleList = particleList
        previousParticleList = initialParticleList
        destinationDirectory = annealingJob.getDestinationDirectory()
        numberIterations = annealingJob.getNumberIterations()
        numberClasses = annealingJob.getNumberClasses()

        if verbose:
            print(annealingJob)

        if not checkDirExists(destinationDirectory):
            raise IOError('Destination directory ' + destinationDirectory + ' not found!')

        try:
            # a usable pre-classification must split into more than one class
            particleLists = particleList.splitByClass()
            if len(particleLists) <= 1 or (len(particleLists) == 1 and len(particleLists[0]) == len(particleList)):
                raise Exception()
        except Exception:
            from pytom.cluster.clusterFunctions import randomiseParticleListClasses

            if numberClasses:
                if verbose:
                    print('Randomizing particle list')

                # seed clustering with random class assignments
                particleList = randomiseParticleListClasses(particleList, numberClasses)
                particleList.toXMLFile(destinationDirectory + '/RandomisedParticleList.xml')
                particleLists = particleList.splitByClass()
            else:
                raise RuntimeError(
                    'The particle list provided is not pre-classified and you did not set numberClasses for a random seed!'
                )

        iteration = 0
        converged = False
        bestScoreSum = None
        bestParticleList = None

        # anneal until the temperature schedule is done or classes converge
        while (not annealingJob.cooledDown()) and (not converged):
            if verbose:
                print('Running iteration ' + str(iteration) + ' of ' + str(numberIterations))

            iterationDirectory = destinationDirectory + '/' + str(iteration) + '/'
            mcoEXMXDirectory = iterationDirectory + 'mcoEXMX/'

            if not checkDirExists(iterationDirectory):
                mkdir(iterationDirectory)

            annealingJob.setDestinationDirectory(mcoEXMXDirectory)
            annealingJob.setParticleList(previousParticleList)
            # run only a short local EXMX burst per annealing step
            annealingJob.setNumberIterations(annealingJob.getLocalIncrement())
            annealingJob.toXMLFile(iterationDirectory + 'annealingJob.xml')

            #run local refinement
            [pl, alignmentLists] = mcoEXMX(annealingJob, doFinalize=False, verbose=verbose)
            # restore the global iteration count mutated above
            annealingJob.setNumberIterations(numberIterations)

            #store currently best solution
            if iteration == 0 or (bestScoreSum < pl.sumOfScores() and len(pl.splitByClass()) > 1):
                bestScoreSum = pl.sumOfScores()
                bestParticleList = pl

            bestParticleList.toXMLFile(iterationDirectory + 'currentBestParticleList.xml')

            #perform classification here
            [particleList, swapList
             ] = classifyParticleList(initialParticleList, alignmentLists,
                                      annealingJob.getCriterion(),
                                      annealingJob.getTemperature().copy(), verbose)

            #save iteration results to disk
            particleList.toXMLFile(iterationDirectory + 'classifiedParticles.xml')
            swapList.toXMLFile(iterationDirectory + 'swapList.xml')

            #if not verbose mode, delete mcoEXMX files
            if not verbose:
                # NOTE(review): shell rm -rf on a constructed path — safe only
                # while destinationDirectory is trusted input
                system('rm -rf ' + mcoEXMXDirectory)

            #print number class swaps
            difference = previousParticleList.classDifference(particleList)
            converged = annealingJob.getEndThreshold() >= difference[3]
            previousParticleList = particleList

            #set up for new round
            annealingJob.decreaseTemperature()
            particleLists = particleList.splitByClass()
            iteration = iteration + 1

            print('Annealing iteration ' + str(iteration) + ' finished!')

        if doFinalize:
            from pytom.alignment.ExMaxAlignment import ExMaxManager
            manager = ExMaxManager(annealingJob.getExMaxJob())
            manager.parallelEnd()
            pytom_mpi.finalise()

        return bestParticleList

    else:
        # worker branch: execute MCOEXMX jobs until the 'End' message arrives
        from pytom.cluster.mcoEXMXStructures import MCOEXMXWorker
        worker = MCOEXMXWorker()
        worker.setDoAlignment(annealingJob.getDoAlignment())
        worker.setFRMBandwidth(annealingJob.getFRMBandwidth())
        worker.parallelRun(verbose=verbose)
        pytom_mpi.finalise()
def mcoEXMX(mcoEMJob, doFinalize=True, verbose=False):
    """
    mcoEXMX: Performs kmeans clustering on particleList

    Rank 0 iterates expectation (class averaging), per-class refinement and
    re-classification until the iteration limit or convergence; optionally
    adapts the lowpass filter to the measured class resolutions.
    All other ranks run an MCOEXMXWorker message loop.

    @param mcoEMJob: The clustering job
    @param doFinalize: Send finalize msgs to workers or not. Default is true
    @param verbose: Default is false
    @return: [particleList, alignmentLists] on rank 0; None on workers
    """
    import pytom_mpi

    # NOTE(review): MPI is only initialised when doFinalize is set — the
    # embedding caller (e.g. mcoAC) is expected to have initialised it otherwise
    if doFinalize:
        pytom_mpi.init()

    if pytom_mpi.rank() == 0:
        from pytom.alignment.ExMaxAlignment import ExMaxManager
        from pytom.tools.files import checkDirExists
        from os import mkdir
        from pytom.basic.plot import plotClassSizes
        from builtins import min as minList

        particleList = mcoEMJob.getParticleList()
        if len(particleList) == 0:
            raise RuntimeError('Particle list is empty! Abort!')

        destinationDirectory = mcoEMJob.getDestinationDirectory()
        numberIterations = mcoEMJob.getNumberIterations()
        numberClasses = mcoEMJob.getNumberClasses()
        exMaxJob = mcoEMJob.getExMaxJob()

        if verbose:
            print(mcoEMJob)

        if not checkDirExists(destinationDirectory):
            raise IOError('Destination directory ' + destinationDirectory + ' not found!')

        try:
            particleLists = particleList.splitByClass()
            if len(particleLists) < 1 or (len(particleLists) == 1 and len(particleLists[0]) == len(particleList)):
                raise Exception()
        except Exception:
            from pytom.cluster.clusterFunctions import randomiseParticleListClasses

            if numberClasses:
                if verbose:
                    print('Randomising particle list')
                # seed clustering with random class assignments
                particleList = randomiseParticleListClasses(particleList, numberClasses)
                particleList.toXMLFile(destinationDirectory + '/RandomisedParticleList.xml')
                particleLists = particleList.splitByClass()
            else:
                raise RuntimeError(
                    'The particle list provided is not pre-classified and you did not set numberClasses for a random seed!'
                )

        initialParticleList = particleList
        previousParticleList = initialParticleList

        iteration = 0
        converged = False

        doAdaptiveResolution = mcoEMJob.getAdaptiveResolution()
        if doAdaptiveResolution:
            preProcessing = exMaxJob.getPreprocessing()
            highestFrequency = preProcessing.getHighestFrequency()
            resolutionList = [highestFrequency] * len(particleLists)

        while iteration < numberIterations and (not converged):
            if verbose:
                print('Running iteration ' + str(iteration) + ' of ' + str(numberIterations))

            iterationDirectory = destinationDirectory + '/' + str(iteration) + '/'
            if not checkDirExists(iterationDirectory):
                mkdir(iterationDirectory)

            alignmentLists = [None] * len(particleLists)

            # generate cluster centers
            referenceList = distributeExpectation(
                particleLists, iterationDirectory,
                'clusterCenter' + str(iteration), verbose, exMaxJob.getSymmetry())

            for classIterator in range(len(particleLists)):
                classDirectory = iterationDirectory + 'class' + str(classIterator) + '/'

                # determine distance for all particles
                refinementDirectory = classDirectory + 'refinement/'
                if verbose:
                    print(refinementDirectory)
                if not checkDirExists(refinementDirectory):
                    mkdir(refinementDirectory)

                exMaxJob.setParticleList(particleList)
                exMaxJob.setReference(referenceList[classIterator])
                exMaxJob.setDestination(refinementDirectory)

                # use adaptive resolution -> update lowpass filter
                if doAdaptiveResolution and len(resolutionList) > 0 and resolutionList[classIterator] > 0:
                    preProcessing = exMaxJob.getPreprocessing()
                    # filter slightly beyond the weakest class resolution
                    resolution = minList(resolutionList) * 1.1
                    preProcessing.setHighestFrequency(resolution)
                    exMaxJob.setPreprocessing(preProcessing)

                # run refinement
                manager = ExMaxManager(exMaxJob)
                exMaxJob.toXMLFile(classDirectory + 'Job.xml')
                manager.distributeAlignment(verbose)
                alignmentLists[classIterator] = manager.getAlignmentList()
                alignmentLists[classIterator].toXMLFile(
                    iterationDirectory + 'AlignmentList' + str(classIterator) + '.xml')

            # perform classification here
            if verbose:
                print('Classifying after iteration ' + str(iteration))
            particleList = classifyParticleList(initialParticleList, alignmentLists, verbose)
            particleList.toXMLFile(iterationDirectory + 'classifiedParticles.xml')
            particleLists = particleList.splitByClass()

            difference = previousParticleList.classDifference(particleList)
            converged = mcoEMJob.getEndThreshold() >= difference[3]

            # determine resolution in each class
            if doAdaptiveResolution:
                resolutionList = [-1] * len(particleLists)

                for classIterator in range(len(particleLists)):
                    classList = particleLists[classIterator]
                    if len(classList) == 1:
                        # if there is only one particle in that class, override resolution
                        print('Class ', classIterator,
                              ' has only 1 particle! Will be assigned the lowest resolution determined.')
                        continue

                    className = classList[0].getClass()
                    v = classList[0].getVolume()
                    cubeSize = v.sizeX()

                    # bug fix: cubeSize / 2 is a float in Python 3; the band
                    # count must be integral -> floor division
                    resolution = classList.determineResolution(
                        criterion=0.5, numberBands=cubeSize // 2,
                        mask=exMaxJob.getMask(), verbose=False, plot='',
                        keepHalfsetAverages=False,
                        halfsetPrefix='class' + str(className), parallel=True)
                    # resolution = [Resolution in Nyquist, resolution in band, numberBands]
                    resolutionList[classIterator] = resolution[1]
                    print('Resolution for class ', classIterator, ' determined to ',
                          resolution[1], ' pixels. Class size is ', len(classList), ' particles')

                # get lowest resolution determined for classes with more than 1 particle.
                # bug fix: the accumulator was named `min`, shadowing the builtin
                lowestResolution = 999999999999999
                for classIterator in range(len(particleLists)):
                    if lowestResolution >= resolutionList[classIterator] and resolutionList[classIterator] >= 0:
                        lowestResolution = resolutionList[classIterator]

                # set resolution for all classes with only 1 particle to lowest resolution
                for classIterator in range(len(particleLists)):
                    if resolutionList[classIterator] < 0:
                        resolutionList[classIterator] = lowestResolution

            # set up for next round!
            previousParticleList = particleList
            iteration = iteration + 1

        if doFinalize:
            # NOTE(review): `manager` is unbound if the loop body never ran
            manager.parallelEnd()
            pytom_mpi.finalise()

        return [particleList, alignmentLists]

    else:
        from pytom.cluster.mcoEXMXStructures import MCOEXMXWorker
        worker = MCOEXMXWorker()
        worker.setDoAlignment(mcoEMJob.getDoAlignment())
        worker.setFRMBandwidth(mcoEMJob.getFRMBandwidth())
        worker.parallelRun()
        pytom_mpi.finalise()
def distributedCorrelationMatrix(job, verbose=False):
    """
    distributedCorrelationMatrix: Performs calculation of correlation matrix
    either on multiple processes or sequentially.

    Rank 0 distributes CorrelationVectorJob messages and assembles/saves the
    matrix; worker ranks compute vectors until the 'End' status arrives.
    With a single process the matrix is computed locally.

    @param job: the correlation matrix job
    @param verbose: print received messages on workers. Default False.
    """
    import pytom_mpi

    # consistency fix: guard initialisation like the rest of this module
    # instead of calling init() unconditionally
    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    if pytom_mpi.size() > 1:
        mpi_myid = pytom_mpi.rank()
        if mpi_myid == 0:
            # manager: distribute vector jobs, collect results, store matrix
            manager = CMManager(job)
            manager.distributeCalculation(mpi_myid, verbose)
            manager.parallelEnd()
            manager.saveMatrix()
        else:
            from pytom.parallel.clusterMessages import CorrelationVectorMessage, CorrelationVectorJobMessage
            from pytom.parallel.messages import StatusMessage, MessageError

            end = False
            while not end:
                mpi_msg = pytom_mpi.receive()
                if verbose:
                    print(mpi_msg)
                try:
                    msg = CorrelationVectorJobMessage()
                    msg.fromStr(mpi_msg)

                    worker = CMWorker(msg.getJob())
                    resultVector = worker.run()
                    resultMessage = CorrelationVectorMessage(mpi_myid, 0)
                    resultMessage.setVector(resultVector)
                    # (removed a dead `if verbose and False:` debug branch here)
                    pytom_mpi.send(resultMessage.__str__(), 0)
                except (MessageError, RuntimeError, IndexError):
                    # not a job message: interpret as StatusMessage
                    msg = StatusMessage('', '')
                    msg.fromStr(mpi_msg)
                    if msg.getStatus() == 'End':
                        end = True

            print('Node ' + mpi_myid.__str__() + ' finished')
    else:
        print('Sequential Processing! Running on one machine only!')
        manager = CMManager(job)
        manager.calculateMatrix()
        manager.saveMatrix()

    pytom_mpi.finalise()
def parallelWork(self, verbose=False, doFinalize=True):
    """
    parallelWork: Distribute joblist to workers. Leave as it is.

    Rank 0 (master) sends jobs from self._jobList to the workers, handing a
    new job to each worker as it reports back, and optionally sends 'End' to
    all workers once every job is done. Every other rank loops, executing
    received jobs until an 'End' StatusMessage arrives.

    @param verbose: print every received message string on workers
    @param doFinalize: send 'End' to all workers and finalise MPI when done
    """
    import pytom_mpi
    from pytom.parallel.messages import Message, StatusMessage, MessageError
    from pytom.basic.exceptions import ParameterError
    from pytom.basic.structures import PyTomClassError

    if not pytom_mpi.isInitialised():
        pytom_mpi.init()

    if self._mpi_id == 0:
        # if current node == 0, be the master node
        numberJobs = len(self._jobList)
        if self._numberWorkers <= numberJobs:
            numberJobsToSend = self._numberWorkers
        else:
            numberJobsToSend = numberJobs

        # send out the first batch, one job per worker
        for i in range(0, numberJobsToSend):
            pytom_mpi.send(str(self._jobList[i]), i + 1)

        numberFinishedJobs = 0
        numberSentJobs = numberJobsToSend

        finished = numberSentJobs == numberJobs and numberFinishedJobs == numberJobs
        while not finished:
            # wait for a worker to report back, then hand it the next job
            mpi_msgString = pytom_mpi.receive()
            msg = Message('1', '0')
            msg.fromStr(mpi_msgString)

            numberFinishedJobs += 1
            if numberSentJobs < numberJobs:
                pytom_mpi.send(str(self._jobList[numberSentJobs]), int(msg.getSender()))
                numberSentJobs += 1

            finished = numberSentJobs == numberJobs and numberFinishedJobs == numberJobs

        if doFinalize:
            for i in range(0, self._numberWorkers):
                # NOTE(review): recipient passed as int here but as str in
                # parallelEnd() — confirm StatusMessage accepts both
                msg = StatusMessage('0', i + 1)
                msg.setStatus('End')
                pytom_mpi.send(str(msg), i + 1)
                print('Sending end msg to:', i + 1)
    else:
        # if any other node id, be a worker node
        end = False
        while not end:
            # listen for messages
            mpi_msgString = pytom_mpi.receive()
            if verbose:
                print(mpi_msgString)
            try:
                # wait for job and start processing
                msg = self.getMsgObject(mpi_msgString)
                self.setJob(msg)
                self.run()

                resultMsg = StatusMessage(self._mpi_id, '0')
                resultMsg.setStatus('Finished')
                pytom_mpi.send(str(resultMsg), 0)
            except (MessageError, PyTomClassError, ParameterError):
                try:
                    # message is a StatusMessage
                    # if message status is End, finish this worker
                    msg = StatusMessage('', '')
                    msg.fromStr(mpi_msgString)
                    if msg.getStatus() == 'End':
                        end = True
                except (MessageError, PyTomClassError, ParameterError) as err:
                    # bug fix: chain the original parse failure for debugging
                    raise RuntimeError(
                        'Error parsing message. Message either unknown or invalid.'
                    ) from err
            except Exception as err:
                # bug fix: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt and discarded the cause
                raise RuntimeError(
                    'Something went terribly wrong. Aborting.') from err

    if doFinalize:
        pytom_mpi.finalise()