Example 1
# Imports assumed from the cosmoHammer package layout (paths may differ by version):
import multiprocessing

import numpy

from cosmoHammer import CosmoHammerSampler
from cosmoHammer.pso.ParticleSwarmOptimizer import ParticleSwarmOptimizer
from cosmoHammer.util.MpiUtil import MpiPool, mpiBCast, mpiMean
from cosmoHammer.util.SampleFileUtil import SampleFileUtil


class MpiParticleSwarmOptimizer(ParticleSwarmOptimizer):
    """
    PSO with support for MPI to distribute the workload over multiple nodes
    """
    
    def __init__(self, func, low, high, particleCount=25, req=1e-5, threads=1, InPos=None):
        self.threads = threads
        self.pool = MpiPool(self._getMapFunction())
        super(MpiParticleSwarmOptimizer, self).__init__(func, low, high,
                                                        req=req,
                                                        particleCount=particleCount,
                                                        pool=self.pool,
                                                        InPos=InPos)

    def _getMapFunction(self):
        """
        Returns a multiprocessing map if more than one thread is requested,
        otherwise the built-in map function.
        """
        if self.threads > 1:
            pool = multiprocessing.Pool(self.threads)
            return pool.map
        else:
            return map

    def _converged(self, p, m, n):
        """
        Checks for convergence on the master node and broadcasts the result
        so that all MPI processes stop at the same iteration.
        """
        if self.isMaster():
            converged = super(MpiParticleSwarmOptimizer, self)._converged(p, m, n)
        else:
            converged = False

        converged = mpiBCast(converged)
        return converged
    
    def _get_fitness(self, swarm):
        """
        Broadcasts the swarm to all MPI processes and evaluates the particle
        fitness with the pool's map function.
        """
        mapFunction = self.pool.map

        mpiSwarm = mpiBCast(swarm)

        pos = numpy.array([part.position for part in mpiSwarm])
        results = mapFunction(self.func, pos)
        lnprob = numpy.array([l[0] for l in results])
        for i, particle in enumerate(swarm):
            particle.fitness = lnprob[i]
            particle.position = pos[i]
    
    def isMaster(self):
        """
        Returns true if the rank is 0
        """
        return self.pool.isMaster()


class MpiCosmoHammerSampler(CosmoHammerSampler):
    """
    A sampler implementation extending the regular sampler in order to allow for
    distributing the computation with MPI.

    :param kwargs: keyword arguments passed to the CosmoHammerSampler
    """
    def __init__(self, **kwargs):
        """
        CosmoHammer sampler implementation
        """
        self.pool = MpiPool(self._getMapFunction())
        self.rank = self.pool.rank

        super(MpiCosmoHammerSampler, self).__init__(pool=self.pool, **kwargs)

    def _getMapFunction(self):
        """
        Returns the built-in map function
        """
        return map
    
    def createSampleFileUtil(self):
        """
        Returns a new instance of a SampleFileUtil
        """
        return SampleFileUtil(self.filePrefix, self.isMaster(), reuseBurnin=self.reuseBurnin)

    def sampleBurnin(self, p0):
        """
        Starts the burn-in sampling process. The master node (MPI rank 0) persists the result to disk.
        """
        p0 = mpiBCast(p0)

        self.log("MPI Process rank " + str(self.rank) + " starts sampling")
        return super(MpiCosmoHammerSampler, self).sampleBurnin(p0)
   
    def sample(self, burninPos, burninProb, burninRstate, datas):
        """
        Starts the sampling process. The master node (MPI rank 0) persists the result to disk.
        """
        burninPos = mpiBCast(burninPos)
        burninProb = mpiBCast(burninProb)
        burninRstate = mpiBCast(burninRstate)

        self.log("MPI Process rank " + str(self.rank) + " starts sampling")
        super(MpiCosmoHammerSampler, self).sample(burninPos, burninProb, burninRstate, datas)

            
    def loadBurnin(self):
        """
        Loads the burn-in from the file system on the master node and
        broadcasts the result to all other MPI processes.
        """
        if self.isMaster():
            pos, prob, rstate = super(MpiCosmoHammerSampler, self).loadBurnin()
        else:
            pos, prob, rstate = None, None, None
            
        pos = mpiBCast(pos)
        prob = mpiBCast(prob)
        rstate = mpiBCast(rstate)
        
        self.log("loading done")
        return pos, prob, rstate
    
    def createInitPos(self):
        """
        Factory method to create initial positions
        """
        # Broadcast the positions to ensure that all MPI nodes start at the same position
        return mpiBCast(super(MpiCosmoHammerSampler, self).createInitPos())


    def isMaster(self):
        """
        Returns true if the rank is 0
        """
        return self.pool.isMaster()

    def gather(self, value):
        """
        Returns the mean of the value across all MPI processes.
        """
        return mpiMean(value)
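
A minimal usage sketch for the PSO class above, assuming the imports added at the top of this example; the toy likelihood, dim, and the parent class's sample generator and gbest attribute are taken from cosmoHammer's ParticleSwarmOptimizer and are assumptions here, and the script would be launched with an MPI runner, e.g. mpiexec -n 4 python pso_example.py.

import numpy


def ln_likelihood(position):
    # Toy Gaussian target; _get_fitness reads l[0] from each result,
    # so the function returns a (lnprob, extra_data) tuple.
    return -0.5 * numpy.sum(position ** 2), None


dim = 4
pso = MpiParticleSwarmOptimizer(ln_likelihood,
                                low=-5 * numpy.ones(dim),
                                high=5 * numpy.ones(dim),
                                particleCount=25)

# Every rank iterates the swarm; _get_fitness and _converged keep the
# processes in lock-step via mpiBCast.
for swarm in pso.sample(maxIter=500):
    pass

if pso.isMaster():
    print("best position:", pso.gbest.position)
    print("best fitness:", pso.gbest.fitness)
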
Example 7
# Imports assumed for this script (the MpiUtil path matches its use elsewhere in cosmoHammer):
import sys
import time
import pickle

import dill

from cosmoHammer.util.MpiUtil import MpiPool

start_time = time.time()

#path2load = '/mnt/lnec/sibirrer/input.txt'

path2load = str(sys.argv[1])
f = open(path2load, 'rb')

[lensDES, walkerRatio, n_burn, n_run, mean_start, sigma_start, lowerLimit, upperLimit, path2dump] = dill.load(f)
f.close()

end_time = time.time()
#print end_time - start_time, 'time used for initialisation'
# run the computation

from easylens.Fitting.mcmc import MCMC_sampler
sampler = MCMC_sampler(lensDES, fix_center=False)
samples = sampler.mcmc_CH(walkerRatio, n_run, n_burn, mean_start, sigma_start, lowerLimit, upperLimit, threadCount=1, init_pos=None, mpi_monch=True)
# save the output
pool = MpiPool(None)
if pool.isMaster():
    f = open(path2dump, 'wb')
    pickle.dump(samples, f)
    f.close()
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('Result saved in:', path2dump)
    print('============ CONGRATULATIONS, YOUR JOB WAS SUCCESSFUL ================ ')
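
The chain persisted above can be loaded back for analysis; a minimal sketch, assuming the output path passed as path2dump and that the pickled object converts to a 2D samples array (both illustrative):

import pickle

import numpy

# Path written by the master rank above (illustrative placeholder).
with open('/mnt/lnec/sibirrer/output.txt', 'rb') as f:
    samples = pickle.load(f)

samples = numpy.asarray(samples)
print('chain shape:', samples.shape)
print('posterior mean:', samples.mean(axis=0))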


Example 8
__author__ = 'sibirrer'

# this file is meant to be a shell script to be run with the Monch cluster

# set up the scene
from cosmoHammer.util.MpiUtil import MpiPool
import time
import sys
import pickle
import os

from lenstronomy.Workflow.fitting_sequence import FittingSequence

pool = MpiPool(None)

start_time = time.time()

job_name = str(sys.argv[1])
if pool.isMaster():
    print("job %s loaded" % job_name)
# hoffman2 specifics
dir_path_cluster = '/u/flashscratch/s/sibirrer/'
path2load = os.path.join(dir_path_cluster, job_name) + ".txt"
path2dump = os.path.join(dir_path_cluster, job_name) + "_out.txt"

f = open(path2load, 'rb')
input = pickle.load(f)
f.close()
[
    fitting_kwargs_list, multi_band_list, kwargs_model, kwargs_constraints,
    kwargs_likelihood, kwargs_params, init_samples
] = input
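
The example is truncated at this point. A hedged sketch of how such a script plausibly continues, mirroring the persistence pattern of Example 7; the FittingSequence constructor arguments and the fit_sequence call are inferred from the loaded variable names and the lenstronomy Workflow API of that era, not part of the original source:

fitting_seq = FittingSequence(multi_band_list, kwargs_model, kwargs_constraints,
                              kwargs_likelihood, kwargs_params)
chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)

# Only the master rank persists the result, as in Example 7.
if pool.isMaster():
    with open(path2dump, 'wb') as f:
        pickle.dump(chain_list, f)
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('Result saved in:', path2dump)
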
Example 9
# Imports assumed from the cosmoHammer package layout (paths may differ by version):
from cosmoHammer import CosmoHammerSampler
from cosmoHammer.util.MpiUtil import MpiPool, mpiBCast
from cosmoHammer.util.SampleFileUtil import SampleFileUtil


class MpiCosmoHammerSampler(CosmoHammerSampler):
    """
    A sampler implementation extending the regular sampler in order to allow for
    distributing the computation with MPI.

    :param kwargs: keyword arguments passed to the CosmoHammerSampler
    """
    def __init__(self, **kwargs):
        """
        CosmoHammer sampler implementation
        """
        self.pool = MpiPool(self._getMapFunction())
        self.rank = self.pool.rank

        super(MpiCosmoHammerSampler, self).__init__(pool=self.pool, **kwargs)

    def _getMapFunction(self):
        """
        Returns the built-in map function
        """
        return map

    def createSampleFileUtil(self):
        """
        Returns a new instance of a File Util
        """
        return SampleFileUtil(self.filePrefix,
                              self.isMaster(),
                              reuseBurnin=self.reuseBurnin)

    def sampleBurnin(self, p0):
        """
        Starts the burn-in sampling process. The master node (MPI rank 0) persists the result to disk.
        """
        p0 = mpiBCast(p0)

        self.log("MPI Process rank " + str(self.rank) + " starts sampling")
        return super(MpiCosmoHammerSampler, self).sampleBurnin(p0)

    def sample(self, burninPos, burninProb, burninRstate, datas):
        """
        Starts the sampling process. The master node (MPI rank 0) persists the result to disk.
        """
        burninPos = mpiBCast(burninPos)
        burninProb = mpiBCast(burninProb)
        burninRstate = mpiBCast(burninRstate)

        self.log("MPI Process rank " + str(self.rank) + " starts sampling")
        super(MpiCosmoHammerSampler, self).sample(burninPos, burninProb,
                                                  burninRstate, datas)

    def loadBurnin(self):
        """
        Loads the burn-in from the file system on the master node and
        broadcasts the result to all other MPI processes.
        """
        if self.isMaster():
            pos, prob, rstate = super(MpiCosmoHammerSampler, self).loadBurnin()
        else:
            pos, prob, rstate = None, None, None

        pos = mpiBCast(pos)
        prob = mpiBCast(prob)
        rstate = mpiBCast(rstate)

        self.log("loading done")
        return pos, prob, rstate

    def createInitPos(self):
        """
        Factory method to create initial positions
        """
        # Broadcast the positions to ensure that all MPI nodes start at the same position
        return mpiBCast(super(MpiCosmoHammerSampler, self).createInitPos())

    def isMaster(self):
        """
        Returns true if the rank is 0
        """
        return self.pool.isMaster()
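
A minimal usage sketch for the MPI sampler above, following cosmoHammer's documented quickstart pattern; the Gaussian likelihood module, parameter values, and file prefix are illustrative assumptions, and the script would be launched with an MPI runner such as mpiexec -n 4 python sample.py.

from cosmoHammer import LikelihoodComputationChain
from cosmoHammer.util import Params


class GaussLikelihoodModule(object):
    # Toy likelihood module: cosmoHammer calls setup() once and
    # computeLikelihood(ctx) for every walker position.
    def setup(self):
        pass

    def computeLikelihood(self, ctx):
        position = ctx.getParams()
        return -0.5 * sum(p ** 2 for p in position)


# One row per parameter: [start, min, max, width] (illustrative values).
params = Params(("p1", [0.0, -5.0, 5.0, 1.0]),
                ("p2", [0.0, -5.0, 5.0, 1.0]))

chain = LikelihoodComputationChain(min=params[:, 1], max=params[:, 2])
chain.addLikelihoodModule(GaussLikelihoodModule())
chain.setup()

sampler = MpiCosmoHammerSampler(
    params=params,
    likelihoodComputationChain=chain,
    filePrefix="gauss_example",
    walkersRatio=50,
    burninIterations=250,
    sampleIterations=250)

# Every rank runs the sampler; file output is restricted to the master rank.
sampler.startSampling()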