Code Example #1
File: funcs.py Project: matthewturk/yt
def get_pbar(title, maxval, parallel=False):
    """
    This returns a progressbar of the most appropriate type, given a *title*
    and a *maxval*.
    """
    maxval = max(maxval, 1)
    from yt.config import ytcfg

    if (
        ytcfg.getboolean("yt", "suppressStreamLogging")
        or ytcfg.getboolean("yt", "__withintesting")
        or maxval == 1
    ):
        return DummyProgressBar()
    elif ytcfg.getboolean("yt", "__parallel"):
        # If parallel is True, update progress on root only.
        if parallel:
            if is_root():
                return TqdmProgressBar(title, maxval)
            else:
                return DummyProgressBar()
        else:
            return ParallelProgressBar(title, maxval)
    pbar = TqdmProgressBar(title, maxval)
    return pbar
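
Usage sketch: whichever class comes back, callers drive it the same way. This assumes the update()/finish() interface the bar classes above share; work_items and do_work are placeholders, not names from the excerpt.

pbar = get_pbar("Processing items", len(work_items))
for i, item in enumerate(work_items):
    do_work(item)       # hypothetical per-item workload
    pbar.update(i + 1)  # a DummyProgressBar silently ignores this
pbar.finish()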
Code Example #2
def get_pbar(title, maxval):
    """
    This returns a progressbar of the most appropriate type, given a *title*
    and a *maxval*.
    """
    from yt.config import ytcfg
    if ytcfg.getboolean("yt", "inGui"):
        if maxval > ytcfg.getint("reason", "minpbar"):  # Arbitrary number
            return GUIProgressBar(title, maxval)
        else:
            return DummyProgressBar()
    elif ytcfg.getboolean("yt", "suppressStreamLogging"):
        return DummyProgressBar()
    elif ytcfg.getboolean("yt", "__parallel"):
        return ParallelProgressBar(title, maxval)
    elif "SAGE_ROOT" in os.environ:
        try:
            from sage.server.support import EMBEDDED_MODE
            if EMBEDDED_MODE: return DummyProgressBar()
        except ImportError:
            pass
    elif "CODENODE" in os.environ:
        return DummyProgressBar()
    widgets = [
        title,
        pb.Percentage(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' ',
        pb.ETA(), ' '
    ]
    pbar = pb.ProgressBar(widgets=widgets, maxval=maxval).start()
    return pbar
Code Example #3
def time_execution(func):
    """
    Decorator for seeing how long a given function takes, depending on whether
    or not the global 'yt.timefunctions' config parameter is set.

    This can be used like so:

    .. code-block:: python

       @time_execution
       def some_longrunning_function(...):

    """
    @wraps(func)
    def wrapper(*arg, **kw):
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        mylog.debug('%s took %0.3f s', func.__name__, (t2 - t1))
        return res

    from yt.config import ytcfg
    if ytcfg.getboolean("yt", "timefunctions") == True:
        return wrapper
    else:
        return func
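
Usage sketch: the config flag is read at decoration time, so it must be set before the decorated function is defined. slow_sum is illustrative.

from yt.config import ytcfg
ytcfg["yt", "timefunctions"] = "True"  # must be set before @time_execution runs

@time_execution
def slow_sum(n):
    return sum(range(n))

slow_sum(10**6)  # logs "slow_sum took 0.0XX s" at debug level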
Code Example #4
File: performance_counters.py Project: caicairay/yt
    def __init__(self):
        self.counters = defaultdict(lambda: 0.0)
        self.counting = defaultdict(lambda: False)
        self.starttime = defaultdict(lambda: 0)
        self.endtime = defaultdict(lambda: 0)
        self._on = ytcfg.getboolean("yt", "timefunctions")
        self.exit()
Code Example #5
    def __init__(self):
        self.counters = defaultdict(lambda: 0.0)
        self.counting = defaultdict(lambda: False)
        self.starttime = defaultdict(lambda: 0)
        self.endtime = defaultdict(lambda: 0)
        self._on = ytcfg.getboolean("yt", "timefunctions")
        self.exit()
Code Example #6
    def _update_parallel_state(self, new_comm):
        from yt.config import ytcfg

        ytcfg["yt", "__topcomm_parallel_size"] = str(new_comm.size)
        ytcfg["yt", "__topcomm_parallel_rank"] = str(new_comm.rank)
        if new_comm.rank > 0 and ytcfg.getboolean("yt", "serialize"):
            ytcfg["yt", "onlydeserialize"] = "True"
Code Example #7
def enable_parallelism():
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)
    parallel_capable = (MPI.COMM_WORLD.size > 1)
    if not parallel_capable: return False
    mylog.info("Global parallel computation enabled: %s / %s",
               MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
    communication_system.push(MPI.COMM_WORLD)
    ytcfg["yt", "__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
    ytcfg["yt", "__global_parallel_size"] = str(MPI.COMM_WORLD.size)
    ytcfg["yt", "__parallel"] = "True"
    if exe_name == "embed_enzo" or \
        ("_parallel" in dir(sys) and sys._parallel == True):
        ytcfg["yt", "inline"] = "True"
    if MPI.COMM_WORLD.rank > 0:
        if ytcfg.getboolean("yt", "LogFile"):
            ytcfg["yt", "LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" %
                          (MPI.COMM_WORLD.rank, yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.rootLogger.handlers) > 0:
        yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
    if ytcfg.getint("yt", "LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
            "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(
        dict(
            float32=MPI.FLOAT,
            float64=MPI.DOUBLE,
            int32=MPI.INT,
            int64=MPI.LONG,
            c=MPI.CHAR,
        ))
    op_names.update(dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX))
    return True
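
Usage sketch: in practice this is called once at the top of a user script, which is then launched as an MPI task; the dataset path is hypothetical.

# script.py -- run as: mpirun -np 4 python script.py
import yt
yt.enable_parallelism()

ds = yt.load("MyDataset/data0001")  # hypothetical path
# Parallel-aware operations follow; each rank now sees "__parallel" == True.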
Code Example #8
File: __init__.py Project: tukss/yt
def _check_deprecated_parameters():
    from yt.config import ytcfg
    from yt.funcs import issue_deprecation_warning

    if ytcfg.getboolean("yt", "loadfieldplugins"):
        issue_deprecation_warning(
            "Found deprecated parameter 'loadfieldplugins' parameter in yt rcfile."
        )
Code Example #9
    def _initialize_data_storage(self):
        if not ytcfg.getboolean("yt", "serialize"):
            return
        fn = self.ds.storage_filename
        if fn is None:
            if os.path.isfile(
                    os.path.join(self.directory,
                                 f"{self.ds.unique_identifier}.yt")):
                fn = os.path.join(self.directory,
                                  f"{self.ds.unique_identifier}.yt")
            else:
                fn = os.path.join(self.directory,
                                  f"{self.dataset.basename}.yt")
        dir_to_check = os.path.dirname(fn)
        if dir_to_check == "":
            dir_to_check = "."
        # We have four options:
        #    Writeable, does not exist      : create, open as append
        #    Writeable, does exist          : open as append
        #    Not writeable, does not exist  : do not attempt to open
        #    Not writeable, does exist      : open as read-only
        exists = os.path.isfile(fn)
        if not exists:
            writeable = os.access(dir_to_check, os.W_OK)
        else:
            writeable = os.access(fn, os.W_OK)
        writeable = writeable and not ytcfg.getboolean("yt", "onlydeserialize")
        # We now have our conditional stuff
        self.comm.barrier()
        if not writeable and not exists:
            return
        if writeable:
            try:
                if not exists:
                    self.__create_data_file(fn)
                self._data_mode = "a"
            except IOError:
                self._data_mode = None
                return
        else:
            self._data_mode = "r"

        self.__data_filename = fn
        self._data_file = h5py.File(fn, self._data_mode)
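
The four-option comment above boils down to a small decision table. A standalone sketch of the same logic (the helper name is hypothetical, and the real code's IOError fallback to None is elided):

def _resolve_data_mode(exists, writeable):
    if writeable:
        return "a"   # create if needed, then open as append
    if exists:
        return "r"   # read-only fallback
    return None      # not writeable and absent: do not open at all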
Code Example #10
File: data_structures.py Project: victorgabr/yt
    def _populate_grid_objects(self):
        reconstruct = ytcfg.getboolean("yt", "reconstruct_index")
        for g, f in izip(self.grids, self.filenames):
            g._prepare_grid()
            g._setup_dx()
            g.set_filename(f[0])
            if reconstruct:
                if g.Parent is not None: g._guess_properties_from_parent()
        del self.filenames  # No longer needed.
        self.max_level = self.grid_levels.max()
Code Example #11
    def _populate_grid_objects(self):
        reconstruct = ytcfg.getboolean("yt", "reconstruct_index")
        for g, f in izip(self.grids, self.filenames):
            g._prepare_grid()
            g._setup_dx()
            g.set_filename(f[0])
            if reconstruct:
                if g.Parent is not None: g._guess_properties_from_parent()
        del self.filenames  # No longer needed.
        self.max_level = self.grid_levels.max()
Code Example #12
def only_on_root(func, *args, **kwargs):
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if not ytcfg.getboolean("yt", "__parallel"):
        return func(*args, **kwargs)
    if ytcfg.getint("yt", "__parallel_rank") > 0: return
    return func(*args, **kwargs)
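
Usage sketch: call sites hand over the function and its arguments rather than calling it, so non-root ranks skip the work entirely (filename is hypothetical):

only_on_root(mylog.info, "Finished writing %s", filename)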
Code Example #13
    def write_out(self, filename_prefix):
        if ytcfg.getboolean("yt", "__parallel"):
            pfn = "%s_%03i_%03i" % (filename_prefix,
                                    ytcfg.getint("yt", "__global_parallel_rank"),
                                    ytcfg.getint("yt", "__global_parallel_size"))
        else:
            pfn = "%s" % (filename_prefix)
        for n, p in sorted(self.profilers.items()):
            fn = "%s_%s.cprof" % (pfn, n)
            mylog.info("Dumping %s into %s", n, fn)
            p.dump_stats(fn)
Code Example #14
File: performance_counters.py Project: caicairay/yt
    def write_out(self, filename_prefix):
        if ytcfg.getboolean("yt", "__parallel"):
            pfn = "%s_%03i_%03i" % (
                filename_prefix, ytcfg.getint("yt", "__global_parallel_rank"),
                ytcfg.getint("yt", "__global_parallel_size"))
        else:
            pfn = "%s" % (filename_prefix)
        for n, p in sorted(self.profilers.items()):
            fn = "%s_%s.cprof" % (pfn, n)
            mylog.info("Dumping %s into %s", n, fn)
            p.dump_stats(fn)
Code Example #15
def is_root():
    """
    This function returns True if it is on the root processor of the
    topcomm and False otherwise.
    """
    from yt.config import ytcfg
    cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt","__parallel"):
        return True
    if ytcfg.getint("yt", cfg_option) > 0:
        return False
    return True
Code Example #16
def is_root():
    """
    This function returns True if it is on the root processor of the
    topcomm and False otherwise.
    """
    from yt.config import ytcfg
    cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt", "__parallel"):
        return True
    if ytcfg.getint("yt", cfg_option) > 0:
        return False
    return True
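
Usage sketch: a typical pattern guards side effects such as file output (the path and results object are hypothetical):

if is_root():
    # Only rank 0 of the top communicator writes the summary.
    with open("summary.txt", "w") as f:
        f.write(str(results))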
Code Example #17
    def _initialize_data_storage(self):
        if not ytcfg.getboolean('yt','serialize'): return
        fn = self.ds.storage_filename
        if fn is None:
            if os.path.isfile(os.path.join(self.directory,
                                "%s.yt" % self.ds.unique_identifier)):
                fn = os.path.join(self.directory,"%s.yt" % self.ds.unique_identifier)
            else:
                fn = os.path.join(self.directory,
                        "%s.yt" % self.dataset.basename)
        dir_to_check = os.path.dirname(fn)
        if dir_to_check == '':
            dir_to_check = '.'
        # We have four options:
        #    Writeable, does not exist      : create, open as append
        #    Writeable, does exist          : open as append
        #    Not writeable, does not exist  : do not attempt to open
        #    Not writeable, does exist      : open as read-only
        exists = os.path.isfile(fn)
        if not exists:
            writeable = os.access(dir_to_check, os.W_OK)
        else:
            writeable = os.access(fn, os.W_OK)
        writeable = writeable and not ytcfg.getboolean('yt','onlydeserialize')
        # We now have our conditional stuff
        self.comm.barrier()
        if not writeable and not exists: return
        if writeable:
            try:
                if not exists: self.__create_data_file(fn)
                self._data_mode = 'a'
            except IOError:
                self._data_mode = None
                return
        else:
            self._data_mode = 'r'

        self.__data_filename = fn
        self._data_file = h5py.File(fn, self._data_mode)
Code Example #18
    def __init__(self,
                 ts,
                 num_readers=1,
                 num_writers=None,
                 outbase="rockstar_halos",
                 particle_type="all",
                 force_res=None,
                 total_particles=None,
                 dm_only=False,
                 particle_mass=None,
                 min_halo_size=25):
        if is_root():
            mylog.info(
                "The citation for the Rockstar halo finder can be found at")
            mylog.info("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
        ParallelAnalysisInterface.__init__(self)
        # Decide how we're working.
        if ytcfg.getboolean("yt", "inline") == True:
            self.runner = InlineRunner()
        else:
            self.runner = StandardRunner(num_readers, num_writers)
        self.num_readers = self.runner.num_readers
        self.num_writers = self.runner.num_writers
        mylog.info("Rockstar is using %d readers and %d writers",
                   self.num_readers, self.num_writers)
        # Note that Rockstar does not support subvolumes.
        # We assume that all of the snapshots in the time series
        # use the same domain info as the first snapshots.
        if not isinstance(ts, DatasetSeries):
            ts = DatasetSeries([ts])
        self.ts = ts
        self.particle_type = particle_type
        self.outbase = outbase
        self.min_halo_size = min_halo_size
        if force_res is None:
            tds = ts[-1]  # Cache a reference
            self.force_res = tds.index.get_smallest_dx().in_units("Mpc/h")
            # We have to delete now to wipe the index
            del tds
        else:
            self.force_res = force_res
        self.total_particles = total_particles
        self.dm_only = dm_only
        self.particle_mass = particle_mass
        # Setup pool and workgroups.
        self.pool, self.workgroup = self.runner.setup_pool()
        p = self._setup_parameters(ts)
        params = self.comm.mpi_bcast(p, root=self.pool['readers'].ranks[0])
        self.__dict__.update(params)
        self.handler = rockstar_interface.RockstarInterface(self.ts)
Code Example #19
File: davetools.py Project: luzloujv/p19_newscripts
def ImRoot():
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if not ytcfg.getboolean("yt", "__parallel"):
        return True
    try:
        if ytcfg.getint("yt", "__parallel_rank") > 0: return False
    except Exception:
        if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return False
    return True
Code Example #20
def get_pbar(title, maxval):
    """
    This returns a progressbar of the most appropriate type, given a *title*
    and a *maxval*.
    """
    maxval = max(maxval, 1)
    from yt.config import ytcfg
    if ytcfg.getboolean("yt", "suppressStreamLogging") or \
       "__IPYTHON__" in dir(builtins) or \
       ytcfg.getboolean("yt", "__withintesting"):
        return DummyProgressBar()
    elif ytcfg.getboolean("yt", "__withinreason"):
        from yt.gui.reason.extdirect_repl import ExtProgressBar
        return ExtProgressBar(title, maxval)
    elif ytcfg.getboolean("yt", "__parallel"):
        return ParallelProgressBar(title, maxval)
    widgets = [
        title,
        pb.Percentage(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' ',
        pb.ETA(), ' '
    ]
    pbar = pb.ProgressBar(widgets=widgets, maxval=maxval).start()
    return pbar
Code Example #21
File: funcs.py Project: Xarthisius/yt-drone
def get_pbar(title, maxval):
    """
    This returns a progressbar of the most appropriate type, given a *title*
    and a *maxval*.
    """
    maxval = max(maxval, 1)
    from yt.config import ytcfg
    if ytcfg.getboolean("yt", "suppressStreamLogging") or \
       "__IPYTHON__" in dir(builtins) or \
       ytcfg.getboolean("yt", "__withintesting"):
        return DummyProgressBar()
    elif ytcfg.getboolean("yt", "__withinreason"):
        from yt.gui.reason.extdirect_repl import ExtProgressBar
        return ExtProgressBar(title, maxval)
    elif ytcfg.getboolean("yt", "__parallel"):
        return ParallelProgressBar(title, maxval)
    widgets = [
        title,
        pb.Percentage(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' ',
        pb.ETA(), ' '
    ]
    pbar = pb.ProgressBar(widgets=widgets, maxval=maxval).start()
    return pbar
Code Example #22
def only_on_root(func, *args, **kwargs):
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if kwargs.pop("global_rootonly", False):
        cfg_option = "__global_parallel_rank"
    else:
        cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt","__parallel"):
        return func(*args,**kwargs)
    if ytcfg.getint("yt", cfg_option) > 0: return
    return func(*args, **kwargs)
Code Example #23
    def __init__(self, in_memory=False):
        """
        This class is designed to be a semi-persistent storage for parameter
        files.  By identifying each parameter file with a unique hash, objects
        can be stored independently of parameter files -- when an object is
        loaded, the parameter file is as well, based on the hash.  For
        storage concerns, only a few hundred will be retained in cache.
        """
        if ytcfg.getboolean("yt", "StoreParameterFiles"):
            self._read_only = False
            self.init_db()
            self._records = self.read_db()
        else:
            self._read_only = True
            self._records = {}
Code Example #24
    def __init__(self, in_memory=False):
        """
        Create the dataset database if yt is configured to store them.
        Otherwise, use read-only settings.

        """
        if self._register == False: return
        if ytcfg.getboolean("yt", "StoreParameterFiles"):
            self._read_only = False
            self.init_db()
            self._records = self.read_db()
        else:
            self._read_only = True
            self._records = {}
        self._register = False
Code Example #25
    def __init__(self, in_memory=False):
        """
        Create the dataset database if yt is configured to store them.
        Otherwise, use read-only settings.

        """
        if self._register is False: return
        if ytcfg.getboolean("yt", "StoreParameterFiles"):
            self._read_only = False
            self.init_db()
            self._records = self.read_db()
        else:
            self._read_only = True
            self._records = {}
        self._register = False
Code Example #26
def only_on_root(func, *args, **kwargs):
    """
    This function accepts a *func*, a set of *args* and *kwargs* and then only
    on the root processor calls the function.  All other processors get "None"
    handed back.
    """
    from yt.config import ytcfg
    if kwargs.pop("global_rootonly", False):
        cfg_option = "__global_parallel_rank"
    else:
        cfg_option = "__topcomm_parallel_rank"
    if not ytcfg.getboolean("yt", "__parallel"):
        return func(*args, **kwargs)
    if ytcfg.getint("yt", cfg_option) > 0: return
    return func(*args, **kwargs)
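
Usage sketch: the extra keyword switches the rank check from the top communicator to the global one, so the call runs on exactly one process even inside a subgroup:

only_on_root(mylog.info, "All workgroups finished", global_rootonly=True)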
Code Example #27
File: testing.py Project: tukss/yt
    def ffalse(func):
        # returning a lambda : None causes an error when using pytest. Having
        # a function (skip) that returns None does work, but pytest marks the
        # test as having passed, which seems bad, since it wasn't actually run.
        # Using pytest.skip() means that a change to test_requires_backend was
        # needed since None is no longer returned, so we check for the skip
        # exception in the xfail case for that test
        def skip(*args, **kwargs):
            msg = f"`{backend}` backend not found, skipping: `{func.__name__}`"
            print(msg)
            pytest.skip(msg)

        if ytcfg.getboolean("yt", "__withinpytest"):
            return skip
        else:
            return lambda: None
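
Usage sketch: the surrounding decorator takes a backend name; with an unavailable backend the test is skipped under pytest and becomes a no-op otherwise. The test body here is illustrative.

@requires_backend("Agg")
def test_simple_plot():
    return True  # executes only when matplotlib's Agg backend is active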
Code Example #28
def standard_small_simulation(ds_fn, fields):
    if not can_run_ds(ds_fn): return
    dso = [None]
    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
    for field in fields:
        if bitwise:
            yield GridValuesTest(ds_fn, field)
        if 'particle' in field: continue
        for dobj_name in dso:
            for axis in [0, 1, 2]:
                for weight_field in [None, "Density"]:
                    yield ProjectionValuesTest(
                        ds_fn, axis, field, weight_field,
                        dobj_name, decimals=tolerance)
            yield FieldValuesTest(
                    ds_fn, field, dobj_name, decimals=tolerance)
Code Example #29
def standard_small_simulation(ds_fn, fields):
    if not can_run_ds(ds_fn): return
    dso = [None]
    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
    for field in fields:
        if bitwise:
            yield GridValuesTest(ds_fn, field)
        if 'particle' in field: continue
        for dobj_name in dso:
            for axis in [0, 1, 2]:
                for weight_field in [None, "Density"]:
                    yield ProjectionValuesTest(
                        ds_fn, axis, field, weight_field,
                        dobj_name, decimals=tolerance)
            yield FieldValuesTest(
                    ds_fn, field, dobj_name, decimals=tolerance)
Code Example #30
def time_execution(func):
    r"""
    Decorator for seeing how long a given function takes, depending on whether
    or not the global 'yt.timefunctions' config parameter is set.
    """
    @wraps(func)
    def wrapper(*arg, **kw):
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        mylog.debug('%s took %0.3f s', func.__name__, (t2-t1))
        return res
    from yt.config import ytcfg
    if ytcfg.getboolean("yt","timefunctions") == True:
        return wrapper
    else:
        return func
Code Example #31
def time_execution(func):
    r"""
    Decorator for seeing how long a given function takes, depending on whether
    or not the global 'yt.timefunctions' config parameter is set.
    """
    @wraps(func)
    def wrapper(*arg, **kw):
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        mylog.debug('%s took %0.3f s', func.__name__, (t2-t1))
        return res
    from yt.config import ytcfg
    if ytcfg.getboolean("yt","timefunctions") is True:
        return wrapper
    else:
        return func
Code Example #32
    def partition_index_3d(self, ds, padding=0.0, rank_ratio=1):
        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
        # We need to establish if we're looking at a subvolume, in which case
        # we *do* want to pad things.
        if (LE == self.ds.domain_left_edge).all() and \
                (RE == self.ds.domain_right_edge).all():
            subvol = False
        else:
            subvol = True
        if not self._distributed and not subvol:
            return False, LE, RE, ds
        if not self._distributed and subvol:
            return True, LE, RE, \
            self.ds.region(self.center, LE-padding, RE+padding)
        elif ytcfg.getboolean("yt", "inline"):
            # At this point, we want to identify the root grid tile to which
            # this processor is assigned.
            # The only way I really know how to do this is to get the level-0
            # grid that belongs to this processor.
            grids = self.ds.index.select_grids(0)
            root_grids = [g for g in grids
                          if g.proc_num == self.comm.rank]
            if len(root_grids) != 1: raise RuntimeError
            #raise KeyError
            LE = root_grids[0].LeftEdge
            RE = root_grids[0].RightEdge
            return True, LE, RE, self.ds.region(self.center, LE, RE)

        cc = MPI.Compute_dims(self.comm.size // rank_ratio, 3)
        mi = self.comm.rank % (self.comm.size // rank_ratio)
        cx, cy, cz = np.unravel_index(mi, cc)
        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]

        LE = np.array([x[0], y[0], z[0]], dtype='float64')
        RE = np.array([x[1], y[1], z[1]], dtype='float64')

        if padding > 0:
            return True, \
                LE, RE, self.ds.region(self.center,
                LE-padding, RE+padding)

        return False, LE, RE, self.ds.region(self.center, LE, RE)
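
The decomposition at the bottom can be exercised on its own: MPI.Compute_dims factors the rank count into a 3D processor grid, and the mgrid slices carve out one tile per rank. A self-contained sketch with an assumed unit domain and made-up rank layout:

import numpy as np
from mpi4py import MPI

size, rank = 8, 5                        # pretend communicator layout
LE, RE = np.zeros(3), np.ones(3)         # unit domain
cc = MPI.Compute_dims(size, 3)           # e.g. [2, 2, 2]
cx, cy, cz = np.unravel_index(rank, cc)
x = np.mgrid[LE[0]:RE[0]:(cc[0] + 1) * 1j][cx:cx + 2]
y = np.mgrid[LE[1]:RE[1]:(cc[1] + 1) * 1j][cy:cy + 2]
z = np.mgrid[LE[2]:RE[2]:(cc[2] + 1) * 1j][cz:cz + 2]
tile_LE = np.array([x[0], y[0], z[0]])   # this rank's left edge
tile_RE = np.array([x[1], y[1], z[1]])   # this rank's right edge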
Code Example #33
    def partition_index_3d(self, ds, padding=0.0, rank_ratio=1):
        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
        # We need to establish if we're looking at a subvolume, in which case
        # we *do* want to pad things.
        if (LE == self.ds.domain_left_edge).all() and \
                (RE == self.ds.domain_right_edge).all():
            subvol = False
        else:
            subvol = True
        if not self._distributed and not subvol:
            return False, LE, RE, ds
        if not self._distributed and subvol:
            return True, LE, RE, \
            self.ds.region(self.center, LE-padding, RE+padding)
        elif ytcfg.getboolean("yt", "inline"):
            # At this point, we want to identify the root grid tile to which
            # this processor is assigned.
            # The only way I really know how to do this is to get the level-0
            # grid that belongs to this processor.
            grids = self.ds.index.select_grids(0)
            root_grids = [g for g in grids
                          if g.proc_num == self.comm.rank]
            if len(root_grids) != 1: raise RuntimeError
            #raise KeyError
            LE = root_grids[0].LeftEdge
            RE = root_grids[0].RightEdge
            return True, LE, RE, self.ds.region(self.center, LE, RE)

        cc = MPI.Compute_dims(self.comm.size // rank_ratio, 3)
        mi = self.comm.rank % (self.comm.size // rank_ratio)
        cx, cy, cz = np.unravel_index(mi, cc)
        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]

        LE = np.array([x[0], y[0], z[0]], dtype='float64')
        RE = np.array([x[1], y[1], z[1]], dtype='float64')

        if padding > 0:
            return True, \
                LE, RE, self.ds.region(self.center,
                LE-padding, RE+padding)

        return False, LE, RE, self.ds.region(self.center, LE, RE)
Code Example #34
File: framework.py Project: pshriwise/yt
def can_run_ds(ds_fn, file_check=False):
    result_storage = AnswerTestingTest.result_storage
    if isinstance(ds_fn, Dataset):
        return result_storage is not None
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        return os.path.isfile(os.path.join(
            path, ds_fn)) and result_storage is not None
    try:
        load(ds_fn)
    except FileNotFoundError:
        if ytcfg.getboolean("yt", "requires_ds_strict"):
            if result_storage is not None:
                result_storage["tainted"] = True
            raise
        return False
    return result_storage is not None
Code Example #35
File: framework.py Project: pshriwise/yt
def can_run_sim(sim_fn, sim_type, file_check=False):
    result_storage = AnswerTestingTest.result_storage
    if isinstance(sim_fn, SimulationTimeSeries):
        return result_storage is not None
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        return os.path.isfile(os.path.join(
            path, sim_fn)) and result_storage is not None
    try:
        simulation(sim_fn, sim_type)
    except FileNotFoundError:
        if ytcfg.getboolean("yt", "requires_ds_strict"):
            if result_storage is not None:
                result_storage["tainted"] = True
            raise
        return False
    return result_storage is not None
Code Example #36
    def __init__(self, ts, num_readers=1, num_writers=None,
                 outbase="rockstar_halos", particle_type="all",
                 force_res=None, total_particles=None, dm_only=False,
                 particle_mass=None, min_halo_size=25):
        if is_root():
            mylog.info("The citation for the Rockstar halo finder can be found at")
            mylog.info("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
        ParallelAnalysisInterface.__init__(self)
        # Decide how we're working.
        if ytcfg.getboolean("yt", "inline") == True:
            self.runner = InlineRunner()
        else:
            self.runner = StandardRunner(num_readers, num_writers)
        self.num_readers = self.runner.num_readers
        self.num_writers = self.runner.num_writers
        mylog.info("Rockstar is using %d readers and %d writers",
                   self.num_readers, self.num_writers)
        # Note that Rockstar does not support subvolumes.
        # We assume that all of the snapshots in the time series
        # use the same domain info as the first snapshots.
        if not isinstance(ts, DatasetSeries):
            ts = DatasetSeries([ts])
        self.ts = ts
        self.particle_type = particle_type
        self.outbase = outbase
        self.min_halo_size = min_halo_size
        if force_res is None:
            tds = ts[-1]  # Cache a reference
            self.force_res = tds.index.get_smallest_dx().in_units("Mpc/h")
            # We have to delete now to wipe the index
            del tds
        else:
            self.force_res = force_res
        self.total_particles = total_particles
        self.dm_only = dm_only
        self.particle_mass = particle_mass
        # Setup pool and workgroups.
        self.pool, self.workgroup = self.runner.setup_pool()
        p = self._setup_parameters(ts)
        params = self.comm.mpi_bcast(p, root=self.pool['readers'].ranks[0])
        self.__dict__.update(params)
        self.handler = rockstar_interface.RockstarInterface(self.ts)
Code Example #37
def can_run_sim(sim_fn, sim_type, file_check=False):
    issue_deprecation_warning("This function is no longer used in the " +
                              "yt project testing framework and is " +
                              "targeted for deprecation.")
    result_storage = AnswerTestingTest.result_storage
    if isinstance(sim_fn, SimulationTimeSeries):
        return result_storage is not None
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        return os.path.isfile(os.path.join(
            path, sim_fn)) and result_storage is not None
    try:
        load_simulation(sim_fn, sim_type)
    except FileNotFoundError:
        if ytcfg.getboolean("yt", "__strict_requires"):
            if result_storage is not None:
                result_storage["tainted"] = True
            raise
        return False
    return result_storage is not None
Code Example #38
File: test_testing.py Project: tukss/yt
def test_requires_backend():
    backend = matplotlib.get_backend().lower()
    other_backends = {"gtkagg", "macosx", "wx", "tkagg"} - {backend}

    @requires_backend(other_backends.pop())
    def plot_a():
        return True

    @requires_backend(backend)
    def plot_b():
        return True

    assert_equal(plot_b(), True)
    if not ytcfg.getboolean("yt", "__withinpytest"):
        assert_equal(plot_a(), None)
    else:
        # NOTE: This doesn't actually work. pytest.skip() doesn't actually
        # raise the exception but rather returns control to the function's
        # (test_requires_backend) caller, breaking immediately. As such,
        # this assert_raises never actually happens. See the comment
        # in the definition of requires_backend for why pytest.skip is used
        np.testing.assert_raises(plot_a(), pytest.skip.Exception)
Code Example #39
    def __new__(cls, filename=None, *args, **kwargs):
        from yt.frontends.stream.data_structures import StreamHandler
        if not isinstance(filename, str):
            obj = object.__new__(cls)
            # The Stream frontend uses a StreamHandler object to pass metadata
            # to __init__.
            is_stream = (hasattr(filename, 'get_fields')
                         and hasattr(filename, 'get_particle_type'))
            if not is_stream:
                obj.__init__(filename, *args, **kwargs)
            return obj
        apath = os.path.abspath(filename)
        #if not os.path.exists(apath): raise IOError(filename)
        if ytcfg.getboolean("yt", "skip_dataset_cache"):
            obj = object.__new__(cls)
        elif apath not in _cached_datasets:
            obj = object.__new__(cls)
            if obj._skip_cache is False:
                _cached_datasets[apath] = obj
        else:
            obj = _cached_datasets[apath]
        return obj
Code Example #40
    def __new__(cls, filename=None, *args, **kwargs):
        from yt.frontends.stream.data_structures import StreamHandler
        if not isinstance(filename, str):
            obj = object.__new__(cls)
            # The Stream frontend uses a StreamHandler object to pass metadata
            # to __init__.
            is_stream = (hasattr(filename, 'get_fields') and
                         hasattr(filename, 'get_particle_type'))
            if not is_stream:
                obj.__init__(filename, *args, **kwargs)
            return obj
        apath = os.path.abspath(filename)
        #if not os.path.exists(apath): raise IOError(filename)
        if ytcfg.getboolean("yt", "skip_dataset_cache"):
            obj = object.__new__(cls)
        elif apath not in _cached_datasets:
            obj = object.__new__(cls)
            if obj._skip_cache is False:
                _cached_datasets[apath] = obj
        else:
            obj = _cached_datasets[apath]
        return obj
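
Usage sketch: caching is config-driven, so the same load call can be forced to build a fresh object (the dataset path is hypothetical):

import yt
from yt.config import ytcfg

ds1 = yt.load("MyDataset/data0001")  # cached under its absolute path
ds2 = yt.load("MyDataset/data0001")  # returns the cached instance

ytcfg["yt", "skip_dataset_cache"] = "True"
ds3 = yt.load("MyDataset/data0001")  # cache bypassed: a new object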
Code Example #41
import os
from yt import *

# This next item will handle most of the actual startup procedures, but it will
# also attempt to parse the command line and set up the global state of various
# operations.  The variable unparsed_args is not used internally but is
# provided as a convenience for users who wish to parse arguments in scripts.
# See http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2011-December/
#     001727.html
import yt.startup_tasks as __startup_tasks
unparsed_args = __startup_tasks.unparsed_args

from yt.config import ytcfg, ytcfg_defaults

from yt.utilities.logger import level as __level
if __level >= int(ytcfg_defaults["loglevel"]):
    # This won't get displayed.
    mylog.debug("Turning off NumPy error reporting")
    np.seterr(all = 'ignore')

# We load plugins.  Keep in mind, this can be fairly dangerous -
# the primary purpose is to allow people to have a set of functions
# that get used every time that they don't have to *define* every time.
# This way, other command-line tools can be used very simply.
# Unfortunately, for now, I think the easiest and simplest way of doing
# this is also the most dangerous way.
if ytcfg.getboolean("yt","loadfieldplugins"):
    enable_plugins()
Code Example #42
    def _update_parallel_state(self, new_comm):
        from yt.config import ytcfg
        ytcfg["yt", "__topcomm_parallel_size"] = str(new_comm.size)
        ytcfg["yt", "__topcomm_parallel_rank"] = str(new_comm.rank)
        if new_comm.rank > 0 and ytcfg.getboolean("yt", "serialize"):
            ytcfg["yt", "onlydeserialize"] = "True"
Code Example #43
def enable_parallelism(suppress_logging=False, communicator=None):
    """
    This method is used inside a script to turn on MPI parallelism, via
    mpi4py.  More information about running yt in parallel can be found
    here: http://yt-project.org/docs/3.0/analyzing/parallel_computation.html

    Parameters
    ----------
    suppress_logging : bool
       If set to True, only rank 0 will log information after the initial
       setup of MPI.

    communicator : mpi4py.MPI.Comm
        The MPI communicator to use. This controls which processes yt can see.
        If not specified, will be set to COMM_WORLD.
    """
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)

    # if no communicator specified, set to COMM_WORLD
    if communicator is None:
        communicator = MPI.COMM_WORLD

    parallel_capable = (communicator.size > 1)
    if not parallel_capable: return False
    mylog.info("Global parallel computation enabled: %s / %s",
               communicator.rank, communicator.size)
    communication_system.push(communicator)
    ytcfg["yt","__global_parallel_rank"] = str(communicator.rank)
    ytcfg["yt","__global_parallel_size"] = str(communicator.size)
    ytcfg["yt","__parallel"] = "True"
    if exe_name == "embed_enzo" or \
        ("_parallel" in dir(sys) and sys._parallel is True):
        ytcfg["yt","inline"] = "True"
    if communicator.rank > 0:
        if ytcfg.getboolean("yt","LogFile"):
            ytcfg["yt","LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" % (communicator.rank,
                                        yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.ytLogger.handlers) > 0:
        yt.utilities.logger.ytLogger.handlers[0].setFormatter(f)

    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % communicator.rank)
    else:
        sys.excepthook = default_mpi_excepthook

    if ytcfg.getint("yt","LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
          "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(dict(
            float32 = MPI.FLOAT,
            float64 = MPI.DOUBLE,
            int32   = MPI.INT,
            int64   = MPI.LONG,
            c       = MPI.CHAR,
    ))
    op_names.update(dict(
        sum = MPI.SUM,
        min = MPI.MIN,
        max = MPI.MAX
    ))
    # Turn off logging on all but the root rank, if specified.
    if suppress_logging:
        if communicator.rank > 0:
            mylog.addFilter(FilterAllMessages())
    return True
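
Usage sketch: unlike the older version above, this one accepts a custom communicator and can silence non-root loggers. The sub-communicator split is illustrative:

from mpi4py import MPI
import yt

# Hand yt only the even-ranked processes, and quiet all but rank 0:
sub_comm = MPI.COMM_WORLD.Split(color=MPI.COMM_WORLD.rank % 2)
yt.enable_parallelism(suppress_logging=True, communicator=sub_comm)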
Code Example #44
File: logger.py Project: sflarkin/yt-agora
ravenLogger = logging.getLogger("yt.raven")
lagosLogger = logging.getLogger("yt.lagos")
enkiLogger = logging.getLogger("yt.enki")
deliveratorLogger = logging.getLogger("yt.deliverator")
reasonLogger = logging.getLogger("yt.reason")

# Maybe some day we'll make this more configurable...  unfortunately, for now,
# we preserve thread-safety by opening in the current directory.

mb = 10 * 1024 * 1024
bc = 10

loggers = []
file_handlers = []

if ytcfg.getboolean("yt", "logfile") and os.access(".", os.W_OK):
    log_file_name = ytcfg.get("yt", "LogFileName")
    ytFileHandler = handlers.RotatingFileHandler(log_file_name,
                                                 maxBytes=mb,
                                                 backupCount=bc)
    k = logging.Formatter(fstring)
    ytFileHandler.setFormatter(k)
    ytLogger.addHandler(ytFileHandler)
    loggers.append(ytLogger)
    file_handlers.append(ytFileHandler)


def disable_stream_logging():
    # We just remove the root logger's handlers
    for handler in rootLogger.handlers:
        if isinstance(handler, logging.StreamHandler):
            rootLogger.removeHandler(handler)
Code Example #45
        elif(levelno >= 20):
            color = '\x1b[32m'  # green
        elif(levelno >= 10):
            color = '\x1b[35m'  # pink
        else:
            color = '\x1b[0m'  # normal
        ln = color + args[0].levelname + '\x1b[0m'
        args[0].levelname = ln
        return fn(*args)
    return new

level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50)
ufstring = "%(name)-3s: [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s"

if ytcfg.getboolean("yt", "stdoutStreamLogging"):
    stream = sys.stdout
else:
    stream = sys.stderr

ytLogger = logging.getLogger("yt")

yt_sh = logging.StreamHandler(stream=stream)
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
yt_sh.setFormatter(formatter)
# add the handler to the logger
ytLogger.addHandler(yt_sh)
ytLogger.setLevel(level)
ytLogger.propagate = False
Code Example #46
parser.add_argument("--paste-detailed", action=SetExceptionHandling,
    help = "Paste a detailed traceback with local variables to " +
           "paste.yt-project.org", nargs = 0)
parser.add_argument("--detailed", action=SetExceptionHandling,
    help = "Display detailed traceback.", nargs = 0)
parser.add_argument("--rpdb", action=SetExceptionHandling,
    help = "Enable remote pdb interaction (for parallel debugging).", nargs = 0)
parser.add_argument("--parallel", action="store_true", default=False,
    dest = "parallel",
    help = "Run in MPI-parallel mode (must be launched as an MPI task)")
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []

unparsed_args = []

parallel_capable = False
if not ytcfg.getboolean("yt","__command_line"):
    opts, unparsed_args = parser.parse_known_args()
    # THIS IS NOT SUCH A GOOD IDEA:
    #sys.argv = [a for a in unparsed_args]
    if opts.parallel:
        parallel_capable = turn_on_parallelism()
    subparsers = parser.add_subparsers(title="subcommands",
                        dest='subcommands',
                        description="Valid subcommands",)
else:
    subparsers = parser.add_subparsers(title="subcommands",
                        dest='subcommands',
                        description="Valid subcommands",)
    def print_help(*args, **kwargs):
        parser.print_help()
    help_parser = subparsers.add_parser("help", help="Print help message")
Code Example #47
def enable_parallelism(suppress_logging=False, communicator=None):
    """
    This method is used inside a script to turn on MPI parallelism, via
    mpi4py.  More information about running yt in parallel can be found
    here: https://yt-project.org/docs/3.0/analyzing/parallel_computation.html

    Parameters
    ----------
    suppress_logging : bool
       If set to True, only rank 0 will log information after the initial
       setup of MPI.

    communicator : mpi4py.MPI.Comm
        The MPI communicator to use. This controls which processes yt can see.
        If not specified, will be set to COMM_WORLD.
    """
    global parallel_capable, MPI
    try:
        from mpi4py import MPI as _MPI
    except ImportError:
        mylog.info("mpi4py was not found. Disabling parallel computation")
        parallel_capable = False
        return
    MPI = _MPI
    exe_name = os.path.basename(sys.executable)

    # if no communicator specified, set to COMM_WORLD
    if communicator is None:
        communicator = MPI.COMM_WORLD

    parallel_capable = communicator.size > 1
    if not parallel_capable:
        return False
    mylog.info(
        "Global parallel computation enabled: %s / %s",
        communicator.rank,
        communicator.size,
    )
    communication_system.push(communicator)
    ytcfg["yt", "__global_parallel_rank"] = str(communicator.rank)
    ytcfg["yt", "__global_parallel_size"] = str(communicator.size)
    ytcfg["yt", "__parallel"] = "True"
    if exe_name == "embed_enzo" or ("_parallel" in dir(sys) and sys._parallel):
        ytcfg["yt", "inline"] = "True"
    if communicator.rank > 0:
        if ytcfg.getboolean("yt", "LogFile"):
            ytcfg["yt", "LogFile"] = "False"
            yt.utilities.logger.disable_file_logging()
    yt.utilities.logger.uncolorize_logging()
    # Even though the uncolorize function already resets the format string,
    # we reset it again so that it includes the processor.
    f = logging.Formatter("P%03i %s" %
                          (communicator.rank, yt.utilities.logger.ufstring))
    if len(yt.utilities.logger.ytLogger.handlers) > 0:
        yt.utilities.logger.ytLogger.handlers[0].setFormatter(f)

    if ytcfg.getboolean("yt", "parallel_traceback"):
        sys.excepthook = traceback_writer_hook("_%03i" % communicator.rank)
    else:
        sys.excepthook = default_mpi_excepthook

    if ytcfg.getint("yt", "LogLevel") < 20:
        yt.utilities.logger.ytLogger.warning(
            "Log Level is set low -- this could affect parallel performance!")
    dtype_names.update(
        dict(
            float32=MPI.FLOAT,
            float64=MPI.DOUBLE,
            int32=MPI.INT,
            int64=MPI.LONG,
            c=MPI.CHAR,
        ))
    op_names.update(dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX))
    # Turn off logging on all but the root rank, if specified.
    if suppress_logging:
        if communicator.rank > 0:
            mylog.addFilter(FilterAllMessages())
    return True
Code Example #48
File: startup_tasks.py Project: cgyurgyik/yt
    nargs=0,
)
parser.add_argument(
    "--parallel",
    action="store_true",
    default=False,
    dest="parallel",
    help="Run in MPI-parallel mode (must be launched as an MPI task)",
)
if not hasattr(sys, "argv") or sys.argv is None:
    sys.argv = []

unparsed_args = []

parallel_capable = False
if not ytcfg.getboolean("yt", "__command_line"):
    opts, unparsed_args = parser.parse_known_args()
    # THIS IS NOT SUCH A GOOD IDEA:
    # sys.argv = [a for a in unparsed_args]
    if opts.parallel:
        parallel_capable = turn_on_parallelism()
    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommands",
        description="Valid subcommands",
    )
else:
    subparsers = parser.add_subparsers(
        title="subcommands",
        dest="subcommands",
        description="Valid subcommands",