Example #1
import pytest
from nbodykit import CurrentMPIComm
from nbodykit.tutorials import DemoHaloCatalog


def test_download_failure(comm):
    CurrentMPIComm.set(comm)

    # initializing with an unsupported redshift should raise
    BAD_REDSHIFT = 100.0
    with pytest.raises(Exception):
        cat = DemoHaloCatalog('bolshoi', 'rockstar', BAD_REDSHIFT)
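
For context, a minimal sketch of driving this test function by hand; in nbodykit's own test suite the `comm` argument is injected by a test decorator, so the direct call below is an illustrative assumption, not how the suite actually runs:

from mpi4py import MPI

# hand-rolled driver (sketch only): pass the world communicator as `comm`
test_download_failure(MPI.COMM_WORLD)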
Example #2
import pytest
from nbodykit import CurrentMPIComm
from nbodykit.tutorials import DemoHaloCatalog


def test_download(comm):

    from halotools.sim_manager import UserSuppliedHaloCatalog
    CurrentMPIComm.set(comm)

    # download and load the cached catalog
    cat = DemoHaloCatalog('bolshoi', 'rockstar', 0.5)
    assert all(col in cat for col in ['Position', 'Velocity'])

    # convert to a halotools catalog
    halotools_cat = cat.to_halotools()
    assert isinstance(halotools_cat, UserSuppliedHaloCatalog)

    # a bad simulation name should raise
    with pytest.raises(Exception):
        cat = DemoHaloCatalog('BAD', 'rockstar', 0.5)
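
As a follow-up, a hedged sketch of inspecting the converted catalog; `halo_table` is the Astropy Table that halotools catalogs expose, and the exact columns produced by the conversion are an assumption here:

# illustrative sketch only (assumes the download above succeeded)
cat = DemoHaloCatalog('bolshoi', 'rockstar', 0.5)
halotools_cat = cat.to_halotools()
print(halotools_cat.halo_table.colnames)  # columns of the underlying Astropy Table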
Example #3
    def __enter__(self):
        """
        Split the base communicator such that each task gets allocated
        the specified number of cpus to perform the task with
        """
        chain_ranks = []
        color = 0
        total_ranks = 0
        nworkers = 0

        # split the ranks
        for i, ranks in split_ranks(self.size, self.cpus_per_task, include_all=self.use_all_cpus):
            chain_ranks.append(ranks[0])
            if self.rank in ranks:
                color = i + 1
            total_ranks += len(ranks)
            nworkers += 1
        self.workers = nworkers  # store the total number of workers

        # check for no workers!
        if self.workers == 0:
            raise ValueError("no pool workers available; try setting `use_all_cpus` = True")

        leftover = (self.size - 1) - total_ranks
        if leftover and self.rank == 0:
            args = (self.cpus_per_task, self.size-1, leftover)
            self.logger.warning("with `cpus_per_task` = %d and %d available rank(s), %d rank(s) will do no work" %args)
            self.logger.warning("set `use_all_cpus=True` to use all available cpus")

        # crash if there are not enough ranks: we need at least one more rank than workers
        if self.size <= self.workers:
            args = (self.size, self.workers+1, self.workers)
            raise ValueError("only have %d ranks; need at least %d to use the desired %d workers" % args)

        # ranks that will do work have a nonzero color now
        self._valid_worker = color > 0

        # split the comm between the workers
        self.comm = self.basecomm.Split(color, 0)
        self.original_comm = CurrentMPIComm.get()
        CurrentMPIComm.set(self.comm)

        return self
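
To make the rank-splitting above concrete, here is a minimal sketch of what a `split_ranks`-style generator does, assuming the non-root ranks are simply chunked into groups of `cpus_per_task`; the real nbodykit helper may differ in detail (e.g. in how `include_all` absorbs leftover ranks):

# illustrative sketch only, not nbodykit's actual implementation
def split_ranks_sketch(size, cpus_per_task):
    ranks = list(range(1, size))  # rank 0 is reserved as the master
    for i in range(len(ranks) // cpus_per_task):
        # yield the group index and the ranks belonging to that group
        yield i, ranks[i * cpus_per_task:(i + 1) * cpus_per_task]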
Example #4
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """
        Exit gracefully by closing and freeing the MPI-related variables
        """
        if exc_value is not None:
            # format the traceback (relies on module-level imports of os and traceback)
            trace = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback, limit=5))
            self.logger.error("an exception has occurred on rank %d:\n%s" % (self.rank, trace))

            # a bit of a hack that forces mpi4py to exit all ranks
            # see https://groups.google.com/forum/embed/#!topic/mpi4py/RovYzJ8qkbc
            os._exit(1)

        # wait for all ranks, then exit
        self.logger.debug("rank %d process finished" % self.rank)
        self.basecomm.Barrier()

        if self.is_root():
            self.logger.debug("master is finished; terminating")

        CurrentMPIComm.set(self.original_comm)

        if self.comm is not None:
            self.comm.Free()
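
Taken together, the `__enter__` and `__exit__` methods above make the class usable as a context manager. A minimal usage sketch follows, assuming this is nbodykit's TaskManager; the import path, constructor arguments, and `iterate` method follow nbodykit's documented batch API, but treat the specifics as an assumption:

from nbodykit.batch import TaskManager

# each task gets 4 ranks; use_all_cpus=True lets leftover ranks be absorbed
with TaskManager(cpus_per_task=4, use_all_cpus=True) as tm:
    for seed in tm.iterate([0, 1, 2]):
        pass  # run one independent task per seed on its own sub-communicator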