Example #1
import contextlib

@contextlib.contextmanager   # needed so the "with" usage in the docstring works
def parallel_profile(prefix):
    r"""A context manager for profiling parallel code execution using cProfile

    This is a simple context manager that automatically profiles the execution
    of a snippet of code.

    Parameters
    ----------
    prefix : string
        A string name to prefix outputs with.

    Examples
    --------

    >>> with parallel_profile('my_profile'):
    ...     yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')
    """
    import cProfile
    from yt.config import ytcfg
    fn = "%s_%04i_%04i.cprof" % (prefix,
                ytcfg.getint("yt", "__topcomm_parallel_size"),
                ytcfg.getint("yt", "__topcomm_parallel_rank"))
    p = cProfile.Profile()
    p.enable()
    yield fn
    p.disable()
    p.dump_stats(fn)
 def write_out(self, filename_prefix):
     if ytcfg.getboolean("yt","__parallel"):
         pfn = "%s_%03i_%03i" % (filename_prefix,
                  ytcfg.getint("yt", "__global_parallel_rank"),
                 ytcfg.getint("yt", "__global_parallel_size"))
     else:
         pfn = "%s" % (filename_prefix)
     for n, p in sorted(self.profilers.items()):
         fn = "%s_%s.cprof" % (pfn, n)
         mylog.info("Dumping %s into %s", n, fn)
         p.dump_stats(fn)
Example #3
def rpdb_excepthook(exc_type, exc, tb):
    traceback.print_exception(exc_type, exc, tb)
    task = ytcfg.getint("yt", "__global_parallel_rank")
    size = ytcfg.getint("yt", "__global_parallel_size")
    print("Starting RPDB server on task %s ; connect with 'yt rpdb -t %s'" \
            % (task,task))
    handler = pdb_handler(tb)
    server = PdbXMLRPCServer(("localhost", 8010+task))
    server.register_introspection_functions()
    server.register_instance(handler)
    server.register_function(server.shutdown)
    server.serve_forever()
    server.server_close()
    if size > 1:
        from mpi4py import MPI
        # This COMM_WORLD is okay.  We want to barrierize here, while waiting
        # for shutdown from the rest of the parallel group.  If you are running
        # with --rpdb it is assumed you know what you are doing and you won't
        # let this get out of hand.
        MPI.COMM_WORLD.Barrier()
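
This hook is only useful once it is installed as the process-wide exception handler; a minimal sketch of that wiring (the exact hook-up shown here is an assumption, though yt's --rpdb option does something along these lines):

import sys

# Install the hook so an uncaught exception on this rank starts the RPDB
# server defined above.
sys.excepthook = rpdb_excepthook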
Example #4
 def __init__(self, num_readers, num_writers):
     self.num_readers = num_readers
     psize = ytcfg.getint("yt", "__global_parallel_size")
     if num_writers is None:
         self.num_writers =  psize - num_readers - 1
     else:
         self.num_writers = min(num_writers, psize)
     if self.num_readers + self.num_writers + 1 != psize:
         mylog.error('%i reader + %i writers + 1 server != %i mpi',
                 self.num_readers, self.num_writers, psize)
         raise RuntimeError
 def _chunk_io(self, dobj, cache=True, local_only=False,
               preload_fields=None, chunk_sizing="auto"):
     # local_only is only useful for inline datasets and requires
     # implementation by subclasses.
     if preload_fields is None:
         preload_fields = []
     preload_fields, _ = self._split_fields(preload_fields)
     gfiles = defaultdict(list)
     gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
     fast_index = dobj._current_chunk._fast_index
     for g in gobjs:
         gfiles[g.filename].append(g)
     # We can apply a heuristic here to make sure we aren't loading too
     # many grids all at once.
     if chunk_sizing == "auto":
         chunk_ngrids = len(gobjs)
         if chunk_ngrids > 0:
             nproc = float(ytcfg.getint("yt", "__global_parallel_size"))
             chunking_factor = np.ceil(self._grid_chunksize*nproc/chunk_ngrids).astype("int")
             size = max(self._grid_chunksize//chunking_factor, 1)
         else:
             size = self._grid_chunksize
     elif chunk_sizing == "config_file":
         size = ytcfg.getint("yt", "chunk_size")
     elif chunk_sizing == "just_one":
         size = 1
     elif chunk_sizing == "old":
         size = self._grid_chunksize
     else:
         raise RuntimeError("%s is an invalid value for the 'chunk_sizing' argument." % chunk_sizing)
     for fn in sorted(gfiles):
         gs = gfiles[fn]
         for grids in (gs[pos:pos + size] for pos
                       in range(0, len(gs), size)):
             dc = YTDataChunk(dobj, "io", grids,
                     self._count_selection(dobj, grids),
                     cache = cache, fast_index = fast_index)
             # We allow four full chunks to be included.
             with self.io.preload(dc, preload_fields, 
                         4.0 * size):
                 yield dc
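
The "auto" branch above spreads a fixed grid budget over the MPI ranks; a small worked sketch with made-up numbers (the _grid_chunksize default of 1000 is an assumption):

import numpy as np

grid_chunksize = 1000   # assumed class default for _grid_chunksize
nproc = 4.0             # __global_parallel_size
chunk_ngrids = 512      # grids in the current chunk
chunking_factor = int(np.ceil(grid_chunksize * nproc / chunk_ngrids))  # -> 8
size = max(grid_chunksize // chunking_factor, 1)                       # -> 125 grids per I/O chunk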
Example #6
def is_root():
    """
    This function returns True if it is on the root processor of the
    topcomm and False otherwise.
    """
    from yt.config import ytcfg
    cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt", "__parallel"):
        return True
    if ytcfg.getint("yt", cfg_option) > 0:
        return False
    return True
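
A typical use is to guard serial work such as writing a summary file; an illustrative sketch (the file name is made up):

if is_root():
    with open("summary.txt", "w") as f:
        f.write("run finished\n")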
Example #7
def is_root():
    """
    This function returns True if it is on the root processor of the
    topcomm and False otherwise.
    """
    from yt.config import ytcfg
    cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt","__parallel"):
        return True
    if ytcfg.getint("yt", cfg_option) > 0:
        return False
    return True
 def _write_out(self):
     if self._read_only: return
     fn = self._get_db_name()
     f = open("%s.tmp" % fn, 'wb')
     w = csv.DictWriter(f, _field_names)
     maxn = ytcfg.getint("yt","maximumstoreddatasets") # number written
     for h,v in islice(sorted(self._records.items(),
                       key=lambda a: -a[1]['last_seen']), 0, maxn):
         v['hash'] = h
         w.writerow(v)
     f.close()
     os.rename("%s.tmp" % fn, fn)
Example #9
 def __init__(self, num_readers, num_writers):
     self.num_readers = num_readers
     psize = ytcfg.getint("yt", "__global_parallel_size")
     if num_writers is None:
         self.num_writers =  psize - num_readers - 1
     else:
         self.num_writers = min(num_writers, psize)
     if self.num_readers + self.num_writers + 1 != psize:
         raise RuntimeError(
             'The number of MPI processes (%i) does not equal the '
             'number of readers (%i) plus the number of writers '
             '(%i) plus 1 server' % (
                 psize, self.num_readers, self.num_writers))
Example #10
def rpdb_excepthook(exc_type, exc, tb):
    traceback.print_exception(exc_type, exc, tb)
    task = ytcfg.getint("yt", "__global_parallel_rank")
    size = ytcfg.getint("yt", "__global_parallel_size")
    print(
        "Starting RPDB server on task %s ; connect with 'yt rpdb -t %s'" % (task, task)
    )
    handler = pdb_handler(tb)
    server = PdbXMLRPCServer(("localhost", 8010 + task))
    server.register_introspection_functions()
    server.register_instance(handler)
    server.register_function(server.shutdown)
    server.serve_forever()
    server.server_close()
    if size > 1:
        from mpi4py import MPI

        # This COMM_WORLD is okay.  We want to barrierize here, while waiting
        # for shutdown from the rest of the parallel group.  If you are running
        # with --rpdb it is assumed you know what you are doing and you won't
        # let this get out of hand.
        MPI.COMM_WORLD.Barrier()
Example #11
 def _write_out(self):
     if self._read_only: return
     fn = self._get_db_name()
     f = open("%s.tmp" % fn, 'wb')
     w = csv.DictWriter(f, _field_names)
     maxn = ytcfg.getint("yt", "maximumstoreddatasets")  # number written
     for h, v in islice(
             sorted(self._records.items(),
                    key=lambda a: -a[1]['last_seen']), 0, maxn):
         v['hash'] = h
         w.writerow(v)
     f.close()
     os.rename("%s.tmp" % fn, fn)
Example #12
import contextlib

@contextlib.contextmanager   # needed so the "with" usage in the docstring works
def parallel_profile(prefix):
    r"""A context manager for profiling parallel code execution using cProfile

    This is a simple context manager that automatically profiles the execution
    of a snippet of code.

    Parameters
    ----------
    prefix : string
        A string name to prefix outputs with.

    Examples
    --------

    >>> from yt import PhasePlot
    >>> from yt.testing import fake_random_ds
    ...
    >>> fields = ('density', 'temperature', 'cell_mass')
    >>> units = ('g/cm**3', 'K', 'g')
    >>> ds = fake_random_ds(16, fields=fields, units=units)
    >>> with parallel_profile('my_profile'):
    ...     plot = PhasePlot(ds.all_data(), *fields)
    """
    import cProfile

    from yt.config import ytcfg

    fn = "%s_%04i_%04i.cprof" % (
        prefix,
        ytcfg.getint("yt", "__topcomm_parallel_size"),
        ytcfg.getint("yt", "__topcomm_parallel_rank"),
    )
    p = cProfile.Profile()
    p.enable()
    yield fn
    p.disable()
    p.dump_stats(fn)
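
Each rank dumps its own .cprof file, which can be inspected afterwards with the standard pstats module; the file name below is hypothetical and depends on the topcomm size and rank:

import pstats

stats = pstats.Stats("my_profile_0001_0000.cprof")
stats.sort_stats("cumulative").print_stats(10)  # ten most expensive call paths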
Example #13
def only_on_root(func, *args, **kwargs):
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if kwargs.pop("global_rootonly", False):
        cfg_option = "__global_parallel_rank"
    else:
        cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt","__parallel"):
        return func(*args,**kwargs)
    if ytcfg.getint("yt", cfg_option) > 0: return
    return func(*args, **kwargs)
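
An illustrative call, logging a message from the root processor only (the message and count are made up):

only_on_root(mylog.info, "Starting analysis of %d snapshots", 16)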
Example #14
def only_on_root(func, *args, **kwargs):
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if kwargs.pop("global_rootonly", False):
        cfg_option = "__global_parallel_rank"
    else:
        cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt", "__parallel"):
        return func(*args, **kwargs)
    if ytcfg.getint("yt", cfg_option) > 0: return
    return func(*args, **kwargs)
def enable_parallelism():
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)
    parallel_capable = (MPI.COMM_WORLD.size > 1)
    if not parallel_capable: return False
    mylog.info("Global parallel computation enabled: %s / %s",
               MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
    communication_system.push(MPI.COMM_WORLD)
    ytcfg["yt", "__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
    ytcfg["yt", "__global_parallel_size"] = str(MPI.COMM_WORLD.size)
    ytcfg["yt", "__parallel"] = "True"
    if exe_name == "embed_enzo" or \
        ("_parallel" in dir(sys) and sys._parallel == True):
        ytcfg["yt", "inline"] = "True"
    if MPI.COMM_WORLD.rank > 0:
        if ytcfg.getboolean("yt", "LogFile"):
            ytcfg["yt", "LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" %
                          (MPI.COMM_WORLD.rank, yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.rootLogger.handlers) > 0:
        yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
    if ytcfg.getint("yt", "LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
            "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(
        dict(
            float32=MPI.FLOAT,
            float64=MPI.DOUBLE,
            int32=MPI.INT,
            int64=MPI.LONG,
            c=MPI.CHAR,
        ))
    op_names.update(dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX))
    return True
def standard_small_simulation(ds_fn, fields):
    if not can_run_ds(ds_fn): return
    dso = [None]
    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
    for field in fields:
        if bitwise:
            yield GridValuesTest(ds_fn, field)
        if 'particle' in field: continue
        for dobj_name in dso:
            for axis in [0, 1, 2]:
                for weight_field in [None, "Density"]:
                    yield ProjectionValuesTest(
                        ds_fn, axis, field, weight_field,
                        dobj_name, decimals=tolerance)
            yield FieldValuesTest(
                    ds_fn, field, dobj_name, decimals=tolerance)
Example #17
def standard_small_simulation(ds_fn, fields):
    if not can_run_ds(ds_fn): return
    dso = [None]
    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
    for field in fields:
        if bitwise:
            yield GridValuesTest(ds_fn, field)
        if 'particle' in field: continue
        for dobj_name in dso:
            for axis in [0, 1, 2]:
                for weight_field in [None, "Density"]:
                    yield ProjectionValuesTest(
                        ds_fn, axis, field, weight_field,
                        dobj_name, decimals=tolerance)
            yield FieldValuesTest(
                    ds_fn, field, dobj_name, decimals=tolerance)
Example #18
def light_cone_halo_map(lightCone, map_file='halo_map.out', **kwargs):
    "Make a text list of location of halos in a light cone image with virial quantities."

    haloMap = []

    # Loop through files in light cone solution and get virial quantities.
    for slice in lightCone.light_cone_solution:
        halo_list = _get_halo_list(slice['filename'], **kwargs)
        haloMap.extend(_make_slice_halo_map(slice, halo_list))

    # Write out file.
    if ytcfg.getint("yt", "__parallel_rank") == 0:
        mylog.info("Saving halo map to %s." % map_file)
        f = open(map_file, 'w')
        f.write("#z       x         y        M [Msun]  R [Mpc]   R [image]\n")
        for halo in haloMap:
            f.write("%7.4f %9.6f %9.6f %9.3e %9.3e %9.3e\n" % \
                        (halo['redshift'], halo['x'], halo['y'],
                         halo['mass'], halo['radiusMpc'], halo['radiusImage']))
        f.close()
 def __init__(self, pobj, just_list = False, attr='_grids'):
     self.pobj = pobj
     if hasattr(pobj, attr) and getattr(pobj, attr) is not None:
         gs = getattr(pobj, attr)
     else:
         gs = getattr(pobj._data_source, attr)
     if len(gs) == 0:
         raise YTNoDataInObjectError(pobj)
     if hasattr(gs[0], 'proc_num'):
         # This one sort of knows about MPI, but not quite
         self._objs = [g for g in gs if g.proc_num ==
                       ytcfg.getint('yt','__topcomm_parallel_rank')]
         self._use_all = True
     else:
         self._objs = gs
         if hasattr(self._objs[0], 'filename'):
             self._objs = sorted(self._objs, key = lambda g: g.filename)
         self._use_all = False
     self.ng = len(self._objs)
     self.just_list = just_list
Example #20
 def __init__(self, pobj, just_list = False, attr='_grids'):
     self.pobj = pobj
     if hasattr(pobj, attr) and getattr(pobj, attr) is not None:
         gs = getattr(pobj, attr)
     else:
         gs = getattr(pobj._data_source, attr)
     if len(gs) == 0:
         raise YTNoDataInObjectError(pobj)
     if hasattr(gs[0], 'proc_num'):
         # This one sort of knows about MPI, but not quite
         self._objs = [g for g in gs if g.proc_num ==
                       ytcfg.getint('yt','__topcomm_parallel_rank')]
         self._use_all = True
     else:
         self._objs = gs
         if hasattr(self._objs[0], 'filename'):
             self._objs = sorted(self._objs, key = lambda g: g.filename)
         self._use_all = False
     self.ng = len(self._objs)
     self.just_list = just_list
Example #21
def rootonly(func):
    """
    This is a decorator that, when used, will only call the function on the
    root processor; on all other processors the decorated function becomes a
    no-op that returns None.

    This can be used like so:

    .. code-block:: python

       @rootonly
       def some_root_only_function(...):

    """
    @wraps(func)
    def donothing(*args, **kwargs):
        return

    from yt.config import ytcfg
    if ytcfg.getint("yt", "__parallel_rank") > 0: return donothing
    return func
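
An illustrative use of the decorator (the function name and message are made up); on non-root ranks the decorated call is a no-op that returns None:

@rootonly
def announce(message):
    print(message)

announce("only the root rank prints this")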
    def find_max_cell_location(self, field, finest_levels=3):
        if finest_levels is not False:
            # This prevents bad values for the case that the number of grids to
            # search is smaller than the number of processors being applied to
            # the task, by
            nproc = ytcfg.getint("yt", "__topcomm_parallel_size")
            while 1:
                gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
                if gi.sum() >= nproc:
                    break
                elif finest_levels >= self.max_level:
                    raise YTTooParallel
                else:
                    finest_levels += 1

            source = self.grid_collection([0.0] * 3, self.grids[gi])
        else:
            source = self.all_data()
        mylog.debug("Searching %s grids for maximum value of %s", len(source._grids), field)
        max_val, maxi, mx, my, mz = source.quantities["MaxLocation"](field)
        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", max_val, mx, my, mz)
        self.parameters["Max%sValue" % (field)] = max_val
        self.parameters["Max%sPos" % (field)] = "%s" % ((mx, my, mz),)
        return max_val, np.array((mx, my, mz), dtype="float64")
    rotx = transx * np.cos(beta) - transy * np.sin(beta)
    roty = transx * np.sin(beta) + transy * np.cos(beta)
    
    # cut based on width and length of box
    antiselection2 = (np.abs(rotx) > 0.5*length) | (np.abs(roty) > 0.5 * width)
    newfield[antiselection2] = 1.e-99
    
    return newfield

for snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):
    infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'
    sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'
    
    fileprefix = 'reduced_'+str(snap).zfill(5)+'/'

    if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:
        if not os.path.exists(fileprefix):
            os.makedirs(fileprefix)       
            # copy the infofile and sinkfile to the reduced directory 
            shutil.copy(infoname, fileprefix)
            if os.path.exists(sinkname):
                shutil.copy(sinkname, fileprefix)
    
    (lmin, lmax) = get_level_min_max(infoname)
    (boxlen, unit_l) = get_boxsize(infoname)

    ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])
    
    add_field('CO', function=_CO)
    
    # center on original center of cloud
Example #24
def rootloginfo(*args):
    from yt.config import ytcfg
    if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return
    mylog.info(*args)
Example #25
def enable_parallelism(suppress_logging=False, communicator=None):
    """
    This method is used inside a script to turn on MPI parallelism, via
    mpi4py.  More information about running yt in parallel can be found
    here: https://yt-project.org/docs/3.0/analyzing/parallel_computation.html

    Parameters
    ----------
    suppress_logging : bool
       If set to True, only rank 0 will log information after the initial
       setup of MPI.

    communicator : mpi4py.MPI.Comm
        The MPI communicator to use. This controls which processes yt can see.
        If not specified, will be set to COMM_WORLD.
    """
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)

    # if no communicator specified, set to COMM_WORLD
    if communicator is None:
        communicator = MPI.COMM_WORLD

    parallel_capable = communicator.size > 1
    if not parallel_capable:
        return False
    mylog.info(
        "Global parallel computation enabled: %s / %s",
        communicator.rank,
        communicator.size,
    )
    communication_system.push(communicator)
    ytcfg["yt", "__global_parallel_rank"] = str(communicator.rank)
    ytcfg["yt", "__global_parallel_size"] = str(communicator.size)
    ytcfg["yt", "__parallel"] = "True"
    if exe_name == "embed_enzo" or ("_parallel" in dir(sys) and sys._parallel):
        ytcfg["yt", "inline"] = "True"
    if communicator.rank > 0:
        if ytcfg.getboolean("yt", "LogFile"):
            ytcfg["yt", "LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" %
                          (communicator.rank, yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.ytLogger.handlers) > 0:
        yt.utilities.logger.ytLogger.handlers[0].setFormatter(f)

    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % communicator.rank)
    else:
        sys.excepthook = default_mpi_excepthook

    if ytcfg.getint("yt", "LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
            "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(
        dict(
            float32=MPI.FLOAT,
            float64=MPI.DOUBLE,
            int32=MPI.INT,
            int64=MPI.LONG,
            c=MPI.CHAR,
        ))
    op_names.update(dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX))
    # Turn off logging on all but the root rank, if specified.
    if suppress_logging:
        if communicator.rank > 0:
            mylog.addFilter(FilterAllMessages())
    return True
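
A typical driver script calls this once near the top and is then launched under MPI, e.g. with something like mpirun -np 4 python script.py; a minimal sketch (the dataset path is hypothetical):

import yt

yt.enable_parallelism()

ds = yt.load("my_dataset")   # hypothetical dataset path
ad = ds.all_data()
# Derived quantities are computed cooperatively across the ranks.
print(ad.quantities.extrema("density"))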
    mH = 1.6733e-24
    lolim = 1000.0 * mu * mH  # not interested in anything below 10^3 / cm^3
    hilim = 31622.0 * mu * mH  # not interested in anything above 10^4.5 / cm^3
    newfield = data["Density"]
    antiselection = (data["Density"] < lolim) | (data["Density"] >= hilim)
    newfield[antiselection] = 1.0e-99
    return newfield


for snap in range(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])):
    infoname = "output_" + str(snap).zfill(5) + "/info_" + str(snap).zfill(5) + ".txt"
    sinkname = "output_" + str(snap).zfill(5) + "/sink_" + str(snap).zfill(5) + ".out"

    fileprefix = "reduced_" + str(snap).zfill(5) + "/"

    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
        if not os.path.exists(fileprefix):
            os.makedirs(fileprefix)
            # copy the infofile and sinkfile to the reduced directory
            shutil.copy(infoname, fileprefix)
            if os.path.exists(sinkname):
                shutil.copy(sinkname, fileprefix)

    (lmin, lmax) = get_level_min_max(infoname)
    (boxlen, unit_l) = get_boxsize(infoname)

    ds = load(infoname, fields=["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure"])

    add_field("CO", function=_CO)

    # center on original center of cloud
Example #27
 def __init__(self):
     # If this is being run inline, num_readers == comm.size, always.
     psize = ytcfg.getint("yt", "__global_parallel_size")
     self.num_readers = psize
     # No choice for you, everyone's a writer too!
     self.num_writers =  psize
Example #28
        "Pressure",
        "Metallicity",
    ),
}

## Regular expressions used to parse file descriptors
VERSION_RE = re.compile(r"# version: *(\d+)")
# This will match comma-separated strings, discarding whitespaces
# on the left hand side
VAR_DESC_RE = re.compile(r"\s*([^\s]+),\s*([^\s]+),\s*([^\s]+)")
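
For illustration, VAR_DESC_RE pulls the three comma-separated tokens out of a descriptor line (the sample line below is made up, not taken from a real RAMSES file):

print(VAR_DESC_RE.match(" 1, density, d").groups())   # ('1', 'density', 'd')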

## Configure family mapping
particle_families = {
    "DM": 1,
    "star": 2,
    "cloud": 3,
    "dust": 4,
    "star_tracer": -2,
    "cloud_tracer": -3,
    "dust_tracer": -4,
    "gas_tracer": 0,
}

if ytcfg.has_section("ramses-families"):
    for key in particle_families.keys():
        val = ytcfg.getint("ramses-families", key, fallback=None)
        if val is not None:
            mylog.info("Changing family %s from %s to %s", key,
                       particle_families[key], val)
            particle_families[key] = val
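
The loop above means the integer family IDs can be remapped from the user's yt configuration; assuming the configparser-based ytcfg shown here, the programmatic equivalent of adding a [ramses-families] section would look roughly like this (the remapped value is made up):

from yt.config import ytcfg

if not ytcfg.has_section("ramses-families"):
    ytcfg.add_section("ramses-families")
ytcfg.set("ramses-families", "dust", "5")   # remap the "dust" family to ID 5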
Example #29
 def check_parallel_rank(*args, **kwargs):
     if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0:
         return
     return func(*args, **kwargs)
Example #30
 def check_parallel_rank(*args, **kwargs):
     if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
         return
     return func(*args, **kwargs)
Example #31
def _calculate_cooling_metallicity(field, data, fc):
    gfields = _get_needed_fields(fc.chemistry_data)
    if field.name[1].endswith('tdt'):
        tdfield = 'total_dynamical_time'
    else:
        tdfield = 'dynamical_time'
    td = data['gas', tdfield].to('code_time').d
    flatten = len(td.shape) > 1
    if flatten:
        td = td.flatten()
    fc_mini = FluidContainer(data.ds.grackle_data, 1)

    fc.calculate_cooling_time()

    def cdrat(Z, my_td):
        fc_mini['metal'][:] = Z * fc_mini['density']
        fc_mini.calculate_cooling_time()
        return my_td + fc_mini['cooling_time'][0]

    field_data = data.ds.arr(np.zeros(td.size), '')
    if isinstance(data, FieldDetector):
        return field_data

    if field_data.size > 200000:
        my_str = "Reticulating splines"
        if ytcfg.getboolean("yt", "__parallel"):
            my_str = "P%03d %s" % \
                (ytcfg.getint("yt", "__global_parallel_rank"),
                 my_str)
        pbar = get_pbar(my_str, field_data.size, parallel=True)
    else:
        pbar = DummyProgressBar()
    for i in range(field_data.size):
        pbar.update(i)
        if td[i] + fc['cooling_time'][i] > 0:
            continue
        for mfield in gfields:
            fc_mini[mfield][:] = fc[mfield][i]
        success = False
        if i > 0 and field_data[i - 1] > 0:
            try:
                field_data[i] = brentq(cdrat,
                                       0.1 * field_data[i - 1],
                                       10 * field_data[i - 1],
                                       args=(td[i]),
                                       xtol=1e-6)
                success = True
            except:
                pass
        if not success:
            bds = np.logspace(-2, 2, 5)
            for bd in bds:
                try:
                    field_data[i] = brentq(cdrat,
                                           1e-6,
                                           bd,
                                           args=(td[i]),
                                           xtol=1e-6)
                    success = True
                    break
                except:
                    continue
            if not success:
                field_data[i] = np.nan
                # field_data[i] = 0. # hack for imaging
    pbar.finish()

    if flatten:
        field_data = field_data.reshape(data.ActiveDimensions)
    return field_data
Example #32
def rootloginfo(*args):
    from yt.config import ytcfg
    if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return
    mylog.info(*args)
Example #33
def get_num_threads():
    from .config import ytcfg
    nt = ytcfg.getint("yt","numthreads")
    if nt < 0:
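        # os.environ values are strings, so a set OMP_NUM_THREADS comes back
        # as a str here, while the fallback is the int 0.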
        return os.environ.get("OMP_NUM_THREADS", 0)
    return nt
Example #34
                        'd'), ('nstep', 2, 'i'), ('stat', 3, 'd'),
                ('cosm', 7, 'd'), ('timing', 5, 'd'), ('mass_sph', 1, 'd'))
    yield next_set


field_aliases = {
    'standard_five':
    ('Density', 'x-velocity', 'y-velocity', 'z-velocity', 'Pressure'),
    'standard_six': ('Density', 'x-velocity', 'y-velocity', 'z-velocity',
                     'Pressure', 'Metallicity'),
}

particle_families = {
    'DM': 1,
    'star': 2,
    'cloud': 3,
    'dust': 4,
    'star_tracer': -2,
    'cloud_tracer': -3,
    'dust_tracer': -4,
    'gas_tracer': 0
}

if ytcfg.has_section('ramses-families'):
    for key in particle_families.keys():
        val = ytcfg.getint('ramses-families', key, fallback=None)
        if val is not None:
            mylog.info('Changing family %s from %s to %s' %
                       (key, particle_families[key], val))
            particle_families[key] = val
Example #35
def get_num_threads():
    from .config import ytcfg
    nt = ytcfg.getint("yt", "numthreads")
    if nt < 0:
        return os.environ.get("OMP_NUM_THREADS", 0)
    return nt
Example #36
        elif(levelno >= 40):
            color = '\x1b[31m'  # red
        elif(levelno >= 30):
            color = '\x1b[33m'  # yellow
        elif(levelno >= 20):
            color = '\x1b[32m'  # green
        elif(levelno >= 10):
            color = '\x1b[35m'  # pink
        else:
            color = '\x1b[0m'  # normal
        ln = color + args[0].levelname + '\x1b[0m'
        args[0].levelname = ln
        return fn(*args)
    return new

level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50)
ufstring = "%(name)-3s: [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s"

if ytcfg.getboolean("yt", "stdoutStreamLogging"):
    stream = sys.stdout
else:
    stream = sys.stderr

ytLogger = logging.getLogger("yt")

yt_sh = logging.StreamHandler(stream=stream)
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
yt_sh.setFormatter(formatter)
# add the handler to the logger
    rotx = transx * np.cos(beta) - transy * np.sin(beta)
    roty = transx * np.sin(beta) + transy * np.cos(beta)
    
    # cut based on width and length of box
    antiselection2 = (np.abs(rotx) > 0.5*length) | (np.abs(roty) > 0.5 * width)
    newfield[antiselection2] = 1.e-99
    
    return newfield

for snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):
    infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'
    sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'
    
    fileprefix = 'reduced_'+str(snap).zfill(5)+'/'

    if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:
        if not os.path.exists(fileprefix):
            os.makedirs(fileprefix)       
            # copy the infofile and sinkfile to the reduced directory 
            shutil.copy(infoname, fileprefix)
            if os.path.exists(sinkname):
                shutil.copy(sinkname, fileprefix)
    
    (lmin, lmax) = get_level_min_max(infoname)
    (boxlen, unit_l) = get_boxsize(infoname)

    ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])
    
    add_field('CO', function=_CO)
    
    # center on original center of cloud
def enable_parallelism(suppress_logging=False, communicator=None):
    """
    This method is used inside a script to turn on MPI parallelism, via
    mpi4py.  More information about running yt in parallel can be found
    here: http://yt-project.org/docs/3.0/analyzing/parallel_computation.html

    Parameters
    ----------
    suppress_logging : bool
       If set to True, only rank 0 will log information after the initial
       setup of MPI.

    communicator : mpi4py.MPI.Comm
        The MPI communicator to use. This controls which processes yt can see.
        If not specified, will be set to COMM_WORLD.
    """
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)

    # if no communicator specified, set to COMM_WORLD
    if communicator is None:
        communicator = MPI.COMM_WORLD

    parallel_capable = (communicator.size > 1)
    if not parallel_capable: return False
    mylog.info("Global parallel computation enabled: %s / %s",
               communicator.rank, communicator.size)
    communication_system.push(communicator)
    ytcfg["yt","__global_parallel_rank"] = str(communicator.rank)
    ytcfg["yt","__global_parallel_size"] = str(communicator.size)
    ytcfg["yt","__parallel"] = "True"
    if exe_name == "embed_enzo" or \
        ("_parallel" in dir(sys) and sys._parallel is True):
        ytcfg["yt","inline"] = "True"
    if communicator.rank > 0:
        if ytcfg.getboolean("yt","LogFile"):
            ytcfg["yt","LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" % (communicator.rank,
                                        yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.ytLogger.handlers) > 0:
        yt.utilities.logger.ytLogger.handlers[0].setFormatter(f)

    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % communicator.rank)
    else:
        sys.excepthook = default_mpi_excepthook

    if ytcfg.getint("yt","LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
          "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(dict(
            float32 = MPI.FLOAT,
            float64 = MPI.DOUBLE,
            int32   = MPI.INT,
            int64   = MPI.LONG,
            c       = MPI.CHAR,
    ))
    op_names.update(dict(
        sum = MPI.SUM,
        min = MPI.MIN,
        max = MPI.MAX
    ))
    # Turn off logging on all but the root rank, if specified.
    if suppress_logging:
        if communicator.rank > 0:
            mylog.addFilter(FilterAllMessages())
    return True
Example #39
 def setup_pool(self):
     pool = ProcessorPool()
     # Everyone is a reader, and when we're inline, that's all that matters.
     readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
     pool.add_workgroup(ranks=readers, name="readers")
     return pool, pool.workgroups[0]
Example #40
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import logging, os
import logging.handlers as handlers
from yt.config import ytcfg

level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50)
fstring = "%(name)-10s %(levelname)-10s %(asctime)s %(message)s"
logging.basicConfig(format=fstring, level=level)

f = logging.Formatter("%(levelname)-10s %(asctime)s %(message)s")

rootLogger = logging.getLogger()

ytLogger = logging.getLogger("yt")
ytLogger.debug("Set log level to %s", level)

fidoLogger = logging.getLogger("yt.fido")
ravenLogger = logging.getLogger("yt.raven")
lagosLogger = logging.getLogger("yt.lagos")
enkiLogger = logging.getLogger("yt.enki")
deliveratorLogger = logging.getLogger("yt.deliverator")
Example #41
    def project_light_cone(self,
                           field,
                           weight_field=None,
                           apply_halo_mask=False,
                           node=None,
                           save_stack=True,
                           save_slice_images=False,
                           flatten_stack=False,
                           photon_field=False,
                           **kwargs):
        """
        Create projections for light cone, then add them together.
        :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
               projections.  Default: None.
        :param apply_halo_mask (bool): if True, a boolean mask is apply to the light cone projection.  See below for a 
               description of halo masks.  Default: False.
        :param node (str): a prefix to be prepended to the node name under which the projection data is serialized.  
               Default: None.
        :param save_stack (bool): if True, the unflatted light cone data including each individual slice is written to 
               an hdf5 file.  Default: True.
        :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
        :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
               in order to save memory.  This is generally not necessary.  Default: False.
        :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2, where R
               is the luminosity distance between the observer and the slice redshift.  Default: False.
        """

        # Clear projection stack.
        self.projection_stack = []
        self.projection_weight_field_stack = []
        if (self.light_cone_solution[-1].has_key('object')):
            del self.light_cone_solution[-1]['object']

        if not (self.output_dir.endswith("/")):
            self.output_dir += "/"

        for q, output in enumerate(self.light_cone_solution):
            if node is None:
                name = "%s%s_%04d_%04d" % (self.output_dir, self.output_prefix,
                                           q, len(self.light_cone_solution))
            else:
                name = "%s%s_%s_%04d_%04d" % (self.output_dir,
                                              self.output_prefix, node, q,
                                              len(self.light_cone_solution))
            output['object'] = lagos.EnzoStaticOutput(output['filename'])
            frb = LightConeProjection(output,
                                      field,
                                      self.pixels,
                                      weight_field=weight_field,
                                      save_image=save_slice_images,
                                      name=name,
                                      node=node,
                                      **kwargs)
            if ytcfg.getint("yt", "__parallel_rank") == 0:
                if photon_field:
                    # Decrement the flux by the luminosity distance. Assume field in frb is in erg/s/cm^2/Hz
                    co = lagos.Cosmology(
                        HubbleConstantNow=(
                            100.0 *
                            self.enzoParameters['CosmologyHubbleConstantNow']),
                        OmegaMatterNow=self.
                        enzoParameters['CosmologyOmegaMatterNow'],
                        OmegaLambdaNow=self.
                        enzoParameters['CosmologyOmegaLambdaNow'])
                    dL = self.cosmology.LuminosityDistance(
                        self.observer_redshift, output['redshift'])  #in Mpc
                    boxSizeProper = self.enzoParameters[
                        'CosmologyComovingBoxSize'] / (
                            self.enzoParameters['CosmologyHubbleConstantNow'] *
                            (1.0 + output['redshift']))
                    pixelarea = (boxSizeProper /
                                 self.pixels)**2  #in proper cm^2
                    factor = pixelarea / (4.0 * na.pi * dL**2)
                    mylog.info("Distance to slice = %e" % dL)
                    frb[field] *= factor  #in erg/s/cm^2/Hz on observer's image plane.

            if ytcfg.getint("yt", "__parallel_rank") == 0:
                if weight_field is not None:
                    # Data come back normalized by the weight field.
                    # Undo that so it can be added up for the light cone.
                    self.projection_stack.append(frb[field] *
                                                 frb['weight_field'])
                    self.projection_weight_field_stack.append(
                        frb['weight_field'])
                else:
                    self.projection_stack.append(frb[field])

                # Delete the frb.  This saves a decent amount of ram.
                if (q < len(self.light_cone_solution) - 1):
                    del frb

                # Flatten stack to save memory.
                if flatten_stack and (len(self.projection_stack) > 1):
                    self.projection_stack = [sum(self.projection_stack)]
                    if weight_field is not None:
                        self.projection_weight_field_stack = [
                            sum(self.projection_weight_field_stack)
                        ]

            # Delete the plot collection now that the frb is deleted.
            del output['pc']

            # Unless this is the last slice, delete the dataset object.
            # The last one will be saved to make the plot collection.
            if (q < len(self.light_cone_solution) - 1):
                del output['object']

        if ytcfg.getint("yt", "__parallel_rank") == 0:
            # Add up slices to make light cone projection.
            if (weight_field is None):
                lightConeProjection = sum(self.projection_stack)
            else:
                lightConeProjection = sum(self.projection_stack) / sum(
                    self.projection_weight_field_stack)

            if node is None:
                filename = "%s%s" % (self.output_dir, self.output_prefix)
            else:
                filename = "%s%s_%s" % (self.output_dir, self.output_prefix,
                                        node)

            # Save the last fixed resolution buffer for the plot collection,
            # but replace the data with the full light cone projection data.
            frb.data[field] = lightConeProjection

            # Write stack to hdf5 file.
            if save_stack:
                self._save_light_cone_stack(field=field,
                                            weight_field=weight_field,
                                            filename=filename)

            # Apply halo mask.
            if apply_halo_mask:
                if len(self.halo_mask) > 0:
                    mylog.info("Applying halo mask.")
                    frb.data[field] *= self.halo_mask
                else:
                    mylog.error("No halo mask loaded, call get_halo_mask.")

            # Make a plot collection for the light cone projection.
            center = [
                0.5 *
                (self.light_cone_solution[-1]['object'].
                 parameters['DomainLeftEdge'][w] + self.light_cone_solution[-1]
                 ['object'].parameters['DomainRightEdge'][w])
                for w in range(self.light_cone_solution[-1]
                               ['object'].parameters['TopGridRank'])
            ]
            pc = raven.PlotCollection(self.light_cone_solution[-1]['object'],
                                      center=center)
            pc.add_fixed_resolution_plot(frb, field, **kwargs)
            pc.save(filename)

            # Return the plot collection so the user can remake the plot if they want.
            return pc
Example #42
 def __init__(self):
     # If this is being run inline, num_readers == comm.size, always.
     psize = ytcfg.getint("yt", "__global_parallel_size")
     self.num_readers = psize
     # No choice for you, everyone's a writer too!
     self.num_writers =  psize
Example #43
 def setup_pool(self):
     pool = ProcessorPool()
     # Everyone is a reader, and when we're inline, that's all that matters.
     readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
     pool.add_workgroup(ranks=readers, name="readers")
     return pool, pool.workgroups[0]
Example #44
    def __init__(self,
                 EnzoParameterFile,
                 initial_redshift=1.0,
                 final_redshift=0.0,
                 observer_redshift=0.0,
                 field_of_view_in_arcminutes=600.0,
                 image_resolution_in_arcseconds=60.0,
                 use_minimum_datasets=True,
                 deltaz_min=0.0,
                 minimum_coherent_box_fraction=0.0,
                 output_dir='LC',
                 output_prefix='LightCone'):
        """
        Initialize a LightCone object.
        :param initial_redshift (float): the initial (highest) redshift for the light cone.  Default: 1.0.
        :param final_redshift (float): the final (lowest) redshift for the light cone.  Default: 0.0.
        :param observer_redshift (float): the redshift of the observer.  Default: 0.0.
        :param field_of_view_in_arcminutes (float): the field of view of the image in units of arcminutes.  
               Default: 600.0.
        :param image_resolution_in_arcseconds (float): the size of each image pixel in units of arcseconds.  
               Default: 60.0.
        :param use_minimum_datasets (bool): if True, the minimum number of datasets is used to connect the 
               initial and final redshift.  If false, the light cone solution will contain as many entries 
               as possible within the redshift interval.  Default: True.
        :param deltaz_min (float): specifies the minimum :math:`\Delta z` between consecutive datasets in 
               the returned list.  Default: 0.0.
        :param minimum_coherent_box_fraction (float): used with use_minimum_datasets set to False, this 
               parameter specifies the fraction of the total box size to be traversed before rerandomizing 
               the projection axis and center.  This was invented to allow light cones with thin slices to 
               sample coherent large scale structure, but in practice does not work so well.  Try setting 
               this parameter to 1 and see what happens.  Default: 0.0.
        :param output_dir (str): the directory in which images and data files will be written.  Default: 'LC'.
        :param output_prefix (str): the prefix of all images and data files.  Default: 'LightCone'.
        """

        self.initial_redshift = initial_redshift
        self.final_redshift = final_redshift
        self.observer_redshift = observer_redshift
        self.field_of_view_in_arcminutes = field_of_view_in_arcminutes
        self.image_resolution_in_arcseconds = image_resolution_in_arcseconds
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        self.output_dir = output_dir
        self.output_prefix = output_prefix

        self.master_solution = []  # kept to compare with recycled solutions
        self.projection_stack = []
        self.projection_weight_field_stack = []
        self.halo_mask = []

        # Original random seed of the first solution.
        self.originalRandomSeed = 0

        # Parameters for recycling light cone solutions.
        self.recycleSolution = False
        self.recycleRandomSeed = 0

        # Initialize EnzoSimulation machinery for getting dataset list.
        EnzoSimulation.__init__(
            self,
            EnzoParameterFile,
            initial_redshift=self.initial_redshift,
            final_redshift=self.final_redshift,
            links=True,
            enzo_parameters={'CosmologyComovingBoxSize': float})

        # Calculate number of pixels.
        self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
                          self.image_resolution_in_arcseconds)

        if ytcfg.getint("yt", "__parallel_rank") == 0:
            # Create output directory.
            if (os.path.exists(self.output_dir)):
                if not (os.path.isdir(self.output_dir)):
                    mylog.error(
                        "Output directory exists, but is not a directory: %s."
                        % self.output_dir)
                    self.output_dir = './'
            else:
                os.mkdir(self.output_dir)

        # Get list of datasets for light cone solution.
        self.light_cone_solution = self.create_cosmology_splice(
            minimal=self.use_minimum_datasets, deltaz_min=self.deltaz_min)