Example #1
import re
from datetime import datetime

try:
    from itertools import izip  # Python 2
except ImportError:
    izip = zip  # Python 3: itertools.izip is gone


def format_datetime(dt, fmt):
    r"""Wrapper around datetime.datetime.strftime

    This function allows one to specify the precision of fractional
    seconds (microseconds) using something like '%2f' to round to
    the nearest hundredth of a second.

    Args:
        dt (datetime.datetime): time to format
        fmt (str): format string that strftime understands with the
            addition of '%\.?[0-9]f' to the syntax.

    Returns:
        str
    """
    if len(fmt) == 0:
        msec_fmt = ['1']
        fmt = "%Y-%m-%d %H:%M:%S.%f"
    else:
        msec_fmt = re.findall(r"%\.?([0-9]*)f", fmt)
        fmt = re.sub(r"%\.?([0-9]*)f", "%f", fmt)

    tstr = datetime.strftime(dt, fmt)

    # now go back and, for each %f (which strftime rendered as 6
    # digits), reformat to the requested precision
    it = list(izip(msec_fmt, re.finditer("[0-9]{6}", tstr)))
    for ffmt, m in reversed(it):
        a, b = m.span()
        val = float("0." + tstr[a:b])
        ifmt = int(ffmt) if len(ffmt) > 0 else 6
        f = "{0:0.{1}f}".format(val, ifmt)[2:]
        tstr = tstr[:a] + f + tstr[b:]
    return tstr
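
A minimal usage sketch (illustrative values of my own):

t = datetime(2017, 3, 14, 15, 9, 26, 535897)
print(format_datetime(t, "%H:%M:%S.%2f"))   # -> 15:09:26.54
print(format_datetime(t, ""))   # default format, 1 digit of fractional seconds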
Example #2
def resolve_path(dset, loc, first=False):
    """Search for globbed paths in a nested dict-like hierarchy

    Args:
        dset (dict): Root of some nested dict-like hierarchy
        loc (str): path as a glob pattern
        first (bool): Stop at first match and return a single value

    Raises:
        KeyError: If there are no glob matches

    Returns:
        If first == True, (value, path)
        else, ([value0, value1, ...], [path0, path1, ...])
    """
    try:
        if first:
            return dset[loc], loc
        else:
            return [dset[loc]], [loc]
    except KeyError:
        searches = [loc.strip('/').split('/')]
        dsets = [dset]
        paths = [[]]

        while any(searches):
            next_dsets = []
            next_searches = []
            next_paths = []
            for dset, search, path in izip(dsets, searches, paths):
                try:
                    next_dsets.append(dset[search[0]])
                    next_searches.append(search[1:])
                    next_paths.append(path + [search[0]])
                except (KeyError, TypeError, IndexError):
                    s = [{}.items()]
                    if hasattr(dset, 'items'):
                        s.append(dset.items())
                    if hasattr(dset, 'attrs'):
                        s.append(dset.attrs.items())
                    for key, val in chain(*s):
                        if fnmatch.fnmatchcase(key, search[0]):
                            next_dsets.append(val)
                            next_searches.append(search[1:])
                            next_paths.append(path + [key])
                            if first:
                                break
            dsets = next_dsets
            searches = next_searches
            paths = next_paths

    if dsets:
        paths = ['/'.join(p) for p in paths]
        if first:
            return dsets[0], paths[0]
        else:
            return dsets, paths
    else:
        raise KeyError("Path {0} has no matches".format(loc))
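
A short usage sketch on a plain nested dict (toy data of my own; assumes the fnmatch, itertools.chain, and izip imports the snippet relies on):

data = {'run1': {'fields': {'bx': 1, 'by': 2}},
        'run2': {'fields': {'bx': 3}}}
vals, paths = resolve_path(data, 'run*/fields/bx')
print(vals)    # -> [1, 3]
print(paths)   # -> ['run1/fields/bx', 'run2/fields/bx']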
Example #3
def follow_fluid_generic(grid_iter, dt, initial_seeds, plot_function,
                         stream_opts, add_seed_cadence=0.0, add_seed_pts=None,
                         speed_scale=1.0):
    """Trace fluid elements

    Args:
        grid_iter (iterable): Some iterable that yields grids
        dt (float): Either one float for uniform dt, or a list
            where dt[i] = grid_iter[i + 1].time - grid_iter[i].time.
            The last element is repeated until grid_iter is exhausted
        initial_seeds: any SeedGen object
        plot_function: function that is called each time step,
            arguments should be exactly: (i [int], grid, v [Vector
            Field], v_lines [result of streamline trace],
            root_seeds [SeedGen])
        stream_opts: must have ds0 and max_length, maxit will be
            automatically calculated
        add_seed_cadence: how often to add the add_seeds points
        add_seed_pts: an n x 3 ndarray of n points to add every
            add_seed_cadence (xyz)
        speed_scale: speed_scale * v should be in units of ds0 / dt

    Returns:
        seed.Point: the root seeds as of the final time step
    """
    if not hasattr(dt, "__iter__"):
        dt = itertools.repeat(dt)
    else:
        dt = itertools.chain(dt, itertools.repeat(dt[-1]))

    root_seeds = initial_seeds

    # setup maximum number of iterations for a streamline
    if "ds0" in stream_opts and "max_length" in stream_opts:
        max_length = stream_opts["max_length"]
        ds0 = stream_opts["ds0"]
        stream_opts["maxit"] = (max_length // ds0) + 1
    else:
        raise KeyError("ds0 and max_length must be keys of stream_opts, "
                       "otherwise I don't know how to follow the fluid")

    # iterate through the time steps from time_slice
    for i, grid, dti in izip(itertools.count(), grid_iter, dt):
        if i == 0:
            last_add_time = grid.time

        root_pts = _follow_fluid_step(i, dti, grid, root_seeds,
                                      plot_function, stream_opts, speed_scale)

        # maybe add some new seed points to account for those that have left
        if (add_seed_pts is not None and
            abs(grid.time - last_add_time) >= add_seed_cadence):
            #
            root_pts = np.concatenate([root_pts, add_seed_pts.T], axis=1)
            last_add_time = grid.time
        root_seeds = seed.Point(root_pts)

    return root_seeds
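
The dt handling at the top is a useful idiom by itself: a scalar becomes an infinite uniform sequence, while a list is extended by repeating its last element. A standalone sketch (normalize_dt is my own name):

import itertools

def normalize_dt(dt, n=5):
    if not hasattr(dt, "__iter__"):
        dt = itertools.repeat(dt)   # scalar -> the same dt forever
    else:
        dt = itertools.chain(dt, itertools.repeat(dt[-1]))   # repeat last
    return list(itertools.islice(dt, n))

print(normalize_dt(0.1))          # -> [0.1, 0.1, 0.1, 0.1, 0.1]
print(normalize_dt([1.0, 2.0]))   # -> [1.0, 2.0, 2.0, 2.0, 2.0]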
Example #4
def make_multiplot(vfile,
                   plot_func=None,
                   nr_procs=1,
                   time_slice=":",
                   **kwargs):
    """Make lots of plots

    Calls plot_func (or `_do_multiplot` if plot_func is None) with 2
    positional arguments (int, Grid), and all the kwargs given to
    multiplot.

    Grid is determined by vfile.iter_times(time_slice).

    plot_func gets additional keyword arguments first_run (bool) and
    first_run_result (whatever is returned from plot_func by the first
    call).

    This is the function used by the ``p2d`` script. It may be useful
    to you.

    Args:
        vfile (VFile, Grid): Something that has iter_times
        plot_func (callable): Function that makes a single plot. It
            must take an int (index of time slice), a Grid, and any
            number of keyword arguments. If None, _do_multiplot is used
        nr_procs (int): number of parallel processes to farm out
            plot_func to
        time_slice (str): passed to vfile.iter_times()
        **kwargs: passed as keyword arguments to plot_func
    """
    # make sure time slice yields >= 1 actual time slice
    try:
        next(vfile.iter_times(time_slice))
    except StopIteration:
        raise ValueError("Time slice '{0}' yields no data".format(time_slice))

    if plot_func is None:
        plot_func = _do_multiplot

    grid_iter = izip(itertools.count(), vfile.iter_times(time_slice))

    args_kw = kwargs.copy()
    args_kw["first_run"] = True
    args_kw["first_run_result"] = None

    if "subplot_params" not in args_kw.get("kwopts", {}):
        r = parallel.map(1,
                         plot_func, [next(grid_iter)],
                         args_kw=args_kw,
                         force_subprocess=(nr_procs > 1))

    # now get back to your regularly scheduled programming
    args_kw["first_run"] = False
    args_kw["first_run_result"] = r[0]
    parallel.map(nr_procs, plot_func, grid_iter, args_kw=args_kw)
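
The first_run / first_run_result handshake can be mimicked with a dummy plot_func; a hedged sketch of the calling convention (names of my choosing, not viscid's):

def my_plot_func(i, grid, first_run=False, first_run_result=None, **kwargs):
    if first_run:
        style = {'cmap': 'viridis'}   # expensive one-time setup
        return style                  # handed back as first_run_result later
    print(i, grid, first_run_result)  # reuse what the first call returned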
Example #5
def map_async(nr_procs, func, args_iter, args_kw=None, daemonic=True,
              pool=None):
    """Wrap python's ``map_async``

    This has some utility stuff like star passthrough

    Run func on nr_procs with arguments given by args_iter. args_iter
    should be an iterable of the list of arguments that can be unpacked
    for each invocation. kwargs are passed to func as keyword arguments

    Returns:
        (tuple) (pool, multiprocessing.pool.AsyncResult)

    Note: daemonic can be set to False if one needs to spawn child
        processes in func, BUT this could be vulnerable to creating
        an undead army of worker processes, only use this if you
        really really need it, and know what you're doing

    Example:
        >>> def func(i, letter):
        ...     print(i, letter)
        >>> p, r = map_async(2, func, itertools.izip(itertools.count(), 'abc'))
        >>> r.get(1e8)
        >>> p.join()
        >>> # the following is printed from 2 processes
        0 a
        1 b
        2 c
    """
    if sys.platform == 'darwin' and ("mayavi.mlab" in sys.modules or
                                     "mayavi" in sys.modules):
        import mayavi
        if mayavi.ETSConfig.toolkit == 'qt4':
            viscid.logger.critical("Using multiprocessing with Mayavi + Qt4 "
                                   "will cause segfaults on join.\n"
                                   "A workaround is to use the wx backend "
                                   "(`os.environ['ETS_TOOLKIT'] = 'wx'`).")

    if args_kw is None:
        args_kw = {}
    args_iter = izip(repeat(func), args_iter, repeat(args_kw))

    # if given a pool, don't close it when we're done delegating tasks
    if pool is not None:
        return pool, pool.map_async(_star_passthrough, args_iter)
    else:
        if daemonic:
            pool = mp.Pool(nr_procs)
        else:
            pool = NoDaemonPool(nr_procs)

        with closing(pool) as p:
            return p, p.map_async(_star_passthrough, args_iter)
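
_star_passthrough itself is not shown in these snippets; a minimal sketch of what such a star-unpacking trampoline typically looks like (an assumption, not necessarily viscid's exact code):

def _star_passthrough(bundle):
    # each work item is (func, args, kwargs), bundled into one object
    # because Pool.map_async hands each worker a single argument
    func, args, kwargs = bundle
    return func(*args, **kwargs)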
Example #6
def integrate_along_lines(lines,
                          fld,
                          reduction="dot",
                          mask_func=None,
                          interp_kind='trilin'):
    """Integrate the value of fld along a list of lines

    Args:
        lines (list): list of 3xN ndarrays, N need not be the same for
            all lines
        fld (Field): Field to interpolate / integrate
        reduction (str): If fld is a vector field, what quantity to
            integrate. Can be "dot" to dot the vectors with ds along
            the line, or "mag" to integrate the magnitude.
        mask_func (callable): if given, applied to the interpolated
            values; where it returns True, values are masked out of
            the sum
        interp_kind (str): which interpolation to use, const or trilin

    Returns:
        ndarray with shape (len(lines), )
    """
    arr = np.zeros((len(lines), ), dtype=fld.dtype)

    cum_n = np.cumsum([0] + [line.shape[1] for line in lines])
    all_verts = np.concatenate(lines, axis=1)
    fld_on_verts = viscid.interp(fld, all_verts, kind=interp_kind).data

    for i, start, stop in izip(count(), cum_n[:-1], cum_n[1:]):
        ds = np.linalg.norm(lines[i][:, 1:] - lines[i][:, :-1], axis=0)

        if len(fld_on_verts.shape) > 1:
            reduction = reduction.strip().lower()
            if reduction == "dot":
                dsvec = lines[i][:, 1:] - lines[i][:, :-1]
                dsvec = dsvec / np.linalg.norm(dsvec, axis=0)
                values = 0.5 * (fld_on_verts[start:stop - 1, :] +
                                fld_on_verts[start + 1:stop, :])
                values = values * dsvec.T
                if mask_func is not None:
                    values = np.ma.masked_where(mask_func(values), values)
                values = np.sum(values, axis=1)
            elif reduction in ["mag", "magnitude", "norm"]:
                mag = np.linalg.norm(fld_on_verts[start:stop], axis=1)
                # mag is already local to this line, so index it from 0
                values = 0.5 * (mag[:-1] + mag[1:])
            else:
                raise ValueError("Unknown reduction: {0}".format(reduction))
        else:
            values = 0.5 * (fld_on_verts[start:stop - 1] +
                            fld_on_verts[start + 1:stop])

        arr[i] = np.sum(values * ds)

    return arr
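
A hedged usage sketch (assumes `lines` came from a streamline trace such as viscid.calc_streamlines and that `b` is a vector Field; both names are illustrative):

flux = integrate_along_lines(lines, b, reduction="mag")
print(flux.shape)   # -> (len(lines),)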
Example #7
def multiplot(vfile, plot_func=None, nr_procs=1, time_slice=":", **kwargs):
    """Make lots of plots

    Calls plot_func (or vlab._do_multiplot if plot_func is None) with 2
    positional arguments (int, Grid), and all the kwargs given to
    multiplot.

    Grid is determined by vfile.iter_times(time_slice).

    plot_func gets additional keyword arguments first_run (bool) and
    first_run_result (whatever is returned from plot_func by the first
    call).

    This is the function used by the ``p2d`` script. It may be useful
    to you.

    Args:
        vfile (VFile, Grid): Something that has iter_times
        plot_func (callable): Function that makes a single plot. It
            must take an int (index of time slice), a Grid, and any
            number of keyword arguments. If None, _do_multiplot is used
        nr_procs (int): number of parallel processes to farm out
            plot_func to
        time_slice (str): passed to vfile.iter_times()
        **kwargs: passed as keyword arguments to plot_func
    """
    # make sure time slice yields >= 1 actual time slice
    try:
        next(vfile.iter_times(time_slice))
    except StopIteration:
        raise ValueError("Time slice '{0}' yields no data".format(time_slice))

    if plot_func is None:
        plot_func = _do_multiplot

    grid_iter = izip(itertools.count(), vfile.iter_times(time_slice))

    args_kw = kwargs.copy()
    args_kw["first_run"] = True
    args_kw["first_run_result"] = None

    if "subplot_params" not in args_kw.get("kwopts", {}):
        r = parallel.map(1, plot_func, [next(grid_iter)], args_kw=args_kw,
                         force_subprocess=(nr_procs > 1))

    # now get back to your regularly scheduled programming
    args_kw["first_run"] = False
    args_kw["first_run_result"] = r[0]
    parallel.map(nr_procs, plot_func, grid_iter, args_kw=args_kw)
Example #8
    def iter_points(self, **kwargs):  # pylint: disable=unused-argument
        """Make an iterator that yields `(x, y, z)` points

        This can be overridden in a subclass if it's more efficient to
        iterate than a call to :meth:`_make_points`. Calling iter_points
        should make an effort to be the fastest way to iterate through
        the points, regardless of caching.

        Note:
            In Cython, it seems to take twice as long to iterate
            over a generator as to use a for loop over the indices
            of an array, but only if the array already exists.
        """
        pts = self.get_points()
        return izip(pts[0, :], pts[1, :], pts[2, :])
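
The row-wise zip is easy to see with a bare array; a standalone sketch (zip stands in for izip on Python 3):

import numpy as np

pts = np.arange(12.).reshape(3, 4)
for xyz in zip(pts[0, :], pts[1, :], pts[2, :]):
    print(xyz)   # (0.0, 4.0, 8.0), then (1.0, 5.0, 9.0), ...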
Example #9
def integrate_along_lines(lines, fld, reduction="dot", mask_func=None,
                          interp_kind='trilin'):
    """Integrate the value of fld along a list of lines

    Args:
        lines (list): list of 3xN ndarrays, N need not be the same for
            all lines
        fld (Field): Field to interpolate / integrate
        reduction (str): If fld is a vector field, what quantity to
            integrate. Can be "dot" to dot the vectors with ds along
            the line, or "mag" to integrate the magnitude.
        mask_func (callable): if given, applied to the interpolated
            values; where it returns True, values are masked out of
            the sum
        interp_kind (str): which interpolation to use, const or trilin

    Returns:
        ndarray with shape (len(lines), )
    """
    arr = np.zeros((len(lines),), dtype=fld.dtype)

    cum_n = np.cumsum([0] + [line.shape[1] for line in lines])
    all_verts = np.concatenate(lines, axis=1)
    fld_on_verts = viscid.interp(fld, all_verts, kind=interp_kind).data

    for i, start, stop in izip(count(), cum_n[:-1], cum_n[1:]):
        ds = np.linalg.norm(lines[i][:, 1:] - lines[i][:, :-1], axis=0)

        if len(fld_on_verts.shape) > 1:
            reduction = reduction.strip().lower()
            if reduction == "dot":
                dsvec = lines[i][:, 1:] - lines[i][:, :-1]
                dsvec = dsvec / np.linalg.norm(dsvec, axis=0)
                values = 0.5 * (fld_on_verts[start:stop - 1, :] +
                                fld_on_verts[start + 1:stop, :])
                values = values * dsvec.T
                if mask_func is not None:
                    values = np.ma.masked_where(mask_func(values), values)
                values = np.sum(values, axis=1)
            elif reduction in ["mag", "magnitude", "norm"]:
                mag = np.linalg.norm(fld_on_verts[start:stop], axis=1)
                # mag is already local to this line, so index it from 0
                values = 0.5 * (mag[:-1] + mag[1:])
            else:
                raise ValueError("Unknown reduction: {0}".format(reduction))
        else:
            values = 0.5 * (fld_on_verts[start:stop - 1] +
                            fld_on_verts[start + 1:stop])

        arr[i] = np.sum(values * ds)

    return arr
Example #10
def map(nr_procs,
        func,
        args_iter,
        args_kw=None,
        timeout=1e8,
        daemonic=True,
        threads=False,
        pool=None,
        force_subprocess=False):
    """Just like ``subprocessing.map``?

    same as :meth:`map_async`, except it waits for the result to
    be ready and returns it

    Note:
        When using threads, this is WAY faster than map_async since
        map_async uses the builtin python ThreadPool. I have no idea
        why that's slower than making threads by hand.
    """
    nr_procs = sanitize_nr_procs(nr_procs)
    if args_kw is None:
        args_kw = {}

    # don't waste time spinning up a new process
    if threads:
        args = [(func, ai, args_kw) for ai in args_iter]
        with futures.ThreadPoolExecutor(max_workers=nr_procs) as executor:
            ret = [val for val in executor.map(_star_passthrough, args)]
    elif pool is None and nr_procs == 1 and not force_subprocess:
        args_iter = izip(repeat(func), args_iter, repeat(args_kw))
        ret = [_star_passthrough(args) for args in args_iter]
    else:
        p, r = map_async(nr_procs,
                         func,
                         args_iter,
                         args_kw=args_kw,
                         daemonic=daemonic,
                         threads=threads,
                         pool=pool)
        ret = r.get(int(timeout))
        # in principle this join should return almost immediately since
        # we already called r.get
        p.join()

    return ret
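
A hedged usage sketch (assumes this function is viscid.parallel.map, as the other snippets suggest; the worker is made up):

def square(x):
    return x * x

# each item of args_iter is the argument list for one call
results = map(2, square, [(i,) for i in range(4)], threads=True)
print(results)   # -> [0, 1, 4, 9]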
Example #11
def to_slices(arrs, slices, endpoint=True, tol=100):
    """Wraps :py:func:`to_slice` for multiple arrays / slices

    Args:
        arrs (list, None): list of arrays for float lookups, must be
            the same length as the list of slices. If all slices are
            by index, then `arrs` can be `None`.
        slices (list, str): list of things that
            :py:func:`viscid.vutil.str2slice` understands, or a comma
            separated string of slices
        endpoint (bool): passed to :py:func:`extract_index` if needed
        tol (int): passed to :py:func:`extract_index` if needed

    Returns:
        tuple of slice objects

    See Also:
        * :py:func:`to_slice`
        * :py:func:`extract_index`
    """
    try:
        slices = "".join(slices.split())
        slices = slices.split(",")
    except AttributeError:
        pass

    if not isinstance(slices, (list, tuple)):
        raise TypeError("To wrap a single slice use vutil.to_slice(...)")

    if arrs is None:
        arrs = [None] * len(slices)

    arrs, slices = _expand_newaxis(arrs, slices)

    if len(arrs) != len(slices):
        raise ValueError("len(arrs) must == len(slices):: {0} {1}"
                         "".format(len(arrs), len(slices)))

    ret = []
    for arr, slcstr in izip(arrs, slices):
        ret.append(to_slice(arr, slcstr, endpoint=endpoint, tol=tol))
    return tuple(ret)
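
The whitespace stripping and comma splitting at the top is plain string handling; a standalone sketch of just that normalization:

slices = " 0:3 , ::2 , 5 "
slices = "".join(slices.split())   # drop ALL whitespace -> '0:3,::2,5'
slices = slices.split(",")         # -> ['0:3', '::2', '5']
print(slices)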
Example #12
def to_slices(arrs, slices, endpoint=True, tol=100):
    """Wraps :py:func:`to_slice` for multiple arrays / slices

    Args:
        arrs (list, None): list of arrays for float lookups, must be
            the same length as the list of slices. If all slices are
            by index, then `arrs` can be `None`.
        slices (list, str): list of things that
            :py:func:`to_slice` understands, or a comma
            separated string of slices
        endpoint (bool): passed to :py:func:`extract_index` if needed
        tol (int): passed to :py:func:`extract_index` if needed

    Returns:
        tuple of slice objects

    See Also:
        * :py:func:`to_slice`
        * :py:func:`extract_index`
    """
    try:
        slices = "".join(slices.split())
        slices = slices.split(",")
    except AttributeError:
        pass

    if not isinstance(slices, (list, tuple)):
        raise TypeError("To wrap a single slice use vutil.to_slice(...)")

    if arrs is None:
        arrs = [None] * len(slices)

    arrs, slices = _expand_newaxis(arrs, slices)

    if len(arrs) != len(slices):
        raise ValueError("len(arrs) must == len(slices):: {0} {1}"
                         "".format(len(arrs), len(slices)))

    ret = []
    for arr, slcstr in izip(arrs, slices):
        ret.append(to_slice(arr, slcstr, endpoint=endpoint, tol=tol))
    return tuple(ret)
Example #13
def map(
    nr_procs,
    func,
    args_iter,
    args_kw=None,
    timeout=1e8,
    daemonic=True,
    threads=False,
    pool=None,
    force_subprocess=False,
):
    """Just like ``subprocessing.map``?

    same as :meth:`map_async`, except it waits for the result to
    be ready and returns it

    Note:
        When using threads, this is WAY faster than map_async since
        map_async uses the builtin python ThreadPool. I have no idea
        why that's slower than making threads by hand.
    """
    nr_procs = sanitize_nr_procs(nr_procs)
    if args_kw is None:
        args_kw = {}

    # don't waste time spinning up a new process
    if threads:
        args = [(func, ai, args_kw) for ai in args_iter]
        with futures.ThreadPoolExecutor(max_workers=nr_procs) as executor:
            ret = [val for val in executor.map(_star_passthrough, args)]
    elif pool is None and nr_procs == 1 and not force_subprocess:
        args_iter = izip(repeat(func), args_iter, repeat(args_kw))
        ret = [_star_passthrough(args) for args in args_iter]
    else:
        p, r = map_async(nr_procs, func, args_iter, args_kw=args_kw,
                         daemonic=daemonic, threads=threads, pool=pool)
        ret = r.get(int(timeout))
        # in principle this join should return almost immediately since
        # we already called r.get
        p.join()

    return ret
Example #14
def map(nr_procs, func, args_iter, args_kw=None, timeout=1e8,
        daemonic=True, pool=None, force_subprocess=False):
    """Just like ``subprocessing.map``?

    same as :meth:`map_async`, except it waits for the result to
    be ready and returns it
    """
    if args_kw is None:
        args_kw = {}

    # don't waste time spinning up a new process
    if nr_procs == 1 and not force_subprocess:
        args_iter = izip(repeat(func), args_iter, repeat(args_kw))
        return [_star_passthrough(args) for args in args_iter]
    else:
        p, r = map_async(nr_procs, func, args_iter, args_kw=args_kw,
                         daemonic=daemonic, pool=pool)
        ret = r.get(int(timeout))
        # in principle this join should return almost immediately since
        # we already called r.get
        p.join()
        return ret
Example #15
def integrate_along_lines(lines, fld):
    """Integrate the value of fld along a list of lines

    Args:
        lines (list): list of 3xN ndarrays, N need not be the same for
            all lines
        fld (Field): Field to interpolate / integrate

    Returns:
        ndarray with shape (len(lines), )
    """
    arr = np.zeros((len(lines),), dtype=fld.dtype)

    cum_n = np.cumsum([0] + [line.shape[1] for line in lines])
    all_verts = np.concatenate(lines, axis=1)
    fld_on_verts = viscid.interp_trilin(fld, all_verts)

    for i, start, stop in izip(count(), cum_n[:-1], cum_n[1:]):
        ds = np.linalg.norm(lines[i][:, 1:] - lines[i][:, :-1], axis=0)
        values = 0.5 * (fld_on_verts[start:stop - 1] +
                        fld_on_verts[start + 1:stop])
        arr[i] = np.sum(values * ds)

    return arr
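
The per-line loop is a midpoint-rule line integral; a self-contained numpy check of that idiom on toy data of my own (exact answer is 0.5):

import numpy as np

# integrate f(x) = x along a straight segment from x=0 to x=1
line = np.zeros((3, 101))                 # 3xN vertices
line[0] = np.linspace(0.0, 1.0, 101)
f_on_verts = line[0]                      # f = x at each vertex
ds = np.linalg.norm(line[:, 1:] - line[:, :-1], axis=0)
values = 0.5 * (f_on_verts[:-1] + f_on_verts[1:])
print(np.sum(values * ds))   # -> 0.5 up to rounding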
Example #16
def make_fwd_slice(shape, slices, reverse=None, cull_second=True):
    """Make sure slices go forward

    This function returns two slices equivalent to `slices` such
    that the first slice always goes forward. This is necessary because
    h5py can't deal with reverse slices such as [::-1].

    The optional `reverse` can be used to interpret a dimension as
    flipped. This is used if the indices in a slice are based on a
    coordinate array that has already been flipped. For instance, the
    result is equivalent to `arr[::-1][slices]`, but in a way that can
    be handled by h5py. This lets us efficiently load small subsets
    of large arrays on disk, which is most useful when the large array
    is coming through sshfs.

    Note:
        The only restriction on slices is that neither start nor stop
        can be outside the range [-L, L].

    Args:
        shape: shape of the array that is to be sliced
        slices: a tuple of slices to work with
        reverse (optional): list of bools that indicate if the
            corresponding value in slices should be interpreted as
            flipped
        cull_second (bool, optional): iff True, remove elements of
            the second slice for dimensions that don't exist after
            the first slice has completed. This is only here for
            a super-hacky case when slicing fields.
    Returns:
        (first_slice, second_slice)

        * first_slice: a forward-only slice that retrieves the
          desired elements of an array
        * second_slice: a slice that does [::1] or [::-1] as needed
          to make the result equivalent to slices. If `cull_second`
          is False, then this may contain None indicating that this
          dimension no longer exists after the first slice.

    Examples:
        >> a = np.arange(8)
        >> first, second = make_fwd_slice(len(a),slice(None, None, -1))
        >> (a[::-1] == a[first][second]).all()
        True

        >> a = np.arange(4*5*6).reshape((4, 5, 6))
        >> first, second = make_fwd_slice(a.shape,
        >>                                [slice(None, -1, 1),
        >>                                 slice(-1, None, 1),
        >>                                 slice(-4, -1, 2)],
        >>                                [True, True, True])
        >> a1 = a[::-1, ::-1, ::-1][:-1, -1:, -4:-1:2]
        >> a2 = a[first][second]
        >> (a1 == a2).all()
        True
    """
    if reverse is None:
        reverse = []
    if not isinstance(shape, (list, tuple, np.ndarray)):
        shape = [shape]
    if not isinstance(slices, (list, tuple)):
        slices = [slices]
    if not isinstance(reverse, (list, tuple)):
        reverse = [reverse]

    newax_inds = [i for i, x in enumerate(slices) if x == np.newaxis]
    shape = list(shape)
    for i in newax_inds:
        shape.insert(i, 1)

    # ya know, let's just go through all the dimensions in shape
    # just to be safe and default to an empty slice / no reverse
    slices = slices + [slice(None)] * (len(shape) - len(slices))
    reverse = reverse + [False] * (len(slices) - len(reverse))

    first_slc = [slice(None)] * len(slices)
    second_slc = [slice(None, None, 1)] * len(first_slc)

    for i, slc, L, rev in izip(count(), slices, shape, reverse):
        if isinstance(slc, slice):
            step = slc.step if slc.step is not None else 1
            start = slc.start if slc.start is not None else 0
            stop = slc.stop if slc.stop is not None else L
            if start < 0:
                start += L
            if stop < 0:
                stop += L

            # sanity check the start/stop since we're gunna be playing
            # fast and loose with them
            if start < 0 or stop < 0:
                raise IndexError("((start = {0}) or (stop = {1})) < 0"
                                 "".format(start, stop))
            if start > L or stop > L:
                raise IndexError("((start={0}) or (stop={1})) > (L={2})"
                                 "".format(start, stop, L))

            # now do the math of flipping the slice if needed, these branches
            # change start, stop, and step so they can be used to create a new
            # slice below
            if rev:
                if step < 0:
                    step = -step
                    if slc.start is None:
                        start = L - 1
                    if slc.stop is None:
                        start = L - 1 - start
                        stop = None
                    else:
                        start, stop = L - 1 - start, L - 1 - stop
                else:
                    start, stop = L - stop, L - start
                    start += ((stop - 1 - start) % step)
                    second_slc[i] = slice(None, None, -1)
            elif step < 0:
                step = -step
                if slc.start is None:
                    start = L - 1

                if slc.stop is None:
                    start, stop = 0, start + 1
                    start = ((stop - 1 - start) % step)
                else:
                    start, stop = stop + 1, start + 1
                    start += ((stop - 1 - start) % step)

                second_slc[i] = slice(None, None, -1)

            # check that our slice is valid
            assert start is None or (0 <= start <= L), \
                "start (={0}) is outside range".format(start)
            assert stop is None or (0 <= stop <= L), \
                "stop (={0}) is outside range".format(stop)
            assert start is None or stop is None or start == stop == 0 or \
                start < stop, "bad slice ordering: {0} !< {1}".format(start, stop)
            assert step > 0
            slc = slice(start, stop, step)

        elif isinstance(slc, (int, np.integer)):
            second_slc[i] = None
            if rev:
                slc = (L - 1) - slc

        elif slc == np.newaxis:
            second_slc[i] = "NEWAXIS"

        first_slc[i] = slc

    first_slc = [s for s in first_slc if s is not np.newaxis]
    if cull_second:
        second_slc = [s for s in second_slc if s is not None]
    second_slc = [np.newaxis if s == "NEWAXIS" else s for s in second_slc]
    return first_slc, second_slc
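
A self-contained check of the forward-slice trick, following the docstring's first example (assumes the itertools izip/count and numpy imports the snippet uses; tuple() is needed for modern numpy indexing):

import numpy as np

a = np.arange(8)
first, second = make_fwd_slice(len(a), slice(None, None, -1))
print(a[tuple(first)][tuple(second)])                       # same as a[::-1]
print((a[tuple(first)][tuple(second)] == a[::-1]).all())    # -> True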
Example #17
def follow_fluid_generic(grid_iter,
                         dt,
                         initial_seeds,
                         plot_function,
                         stream_opts,
                         add_seed_cadence=0.0,
                         add_seed_pts=None,
                         speed_scale=1.0):
    """Trace fluid elements

    Args:
        grid_iter (iterable): Some iterable that yields grids
        dt (float): Either one float for uniform dt, or a list
            where dt[i] = grid_iter[i + 1].time - grid_iter[i].time.
            The last element is repeated until grid_iter is exhausted
        initial_seeds: any SeedGen object
        plot_function: function that is called each time step,
            arguments should be exactly: (i [int], grid, v [Vector
            Field], v_lines [result of streamline trace],
            root_seeds [SeedGen])
        stream_opts: must have ds0 and max_length, maxit will be
            automatically calculated
        add_seed_cadence: how often to add the add_seeds points
        add_seed_pts: an n x 3 ndarray of n points to add every
            add_seed_cadence (xyz)
        speed_scale: speed_scale * v should be in units of ds0 / dt

    Returns:
        seed.Point: the root seeds as of the final time step
    """
    if not hasattr(dt, "__iter__"):
        dt = itertools.repeat(dt)
    else:
        dt = itertools.chain(dt, itertools.repeat(dt[-1]))

    root_seeds = initial_seeds

    # setup maximum number of iterations for a streamline
    if "ds0" in stream_opts and "max_length" in stream_opts:
        max_length = stream_opts["max_length"]
        ds0 = stream_opts["ds0"]
        stream_opts["maxit"] = (max_length // ds0) + 1
    else:
        raise KeyError("ds0 and max_length must be keys of stream_opts, "
                       "otherwise I don't know how to follow the fluid")

    # iterate through the time steps from time_slice
    for i, grid, dti in izip(itertools.count(), grid_iter, dt):
        if i == 0:
            last_add_time = grid.time

        root_pts = _follow_fluid_step(i, dti, grid, root_seeds, plot_function,
                                      stream_opts, speed_scale)

        # maybe add some new seed points to account for those that have left
        if (add_seed_pts is not None
                and abs(grid.time - last_add_time) >= add_seed_cadence):
            #
            root_pts = np.concatenate([root_pts, add_seed_pts.T], axis=1)
            last_add_time = grid.time
        root_seeds = seed.Point(root_pts)

    return root_seeds
Example #18
def map_async(nr_procs,
              func,
              args_iter,
              args_kw=None,
              daemonic=True,
              threads=False,
              pool=None):
    """Wrap python's ``map_async``

    This has some utility stuff like star passthrough

    Run func on nr_procs with arguments given by args_iter. args_iter
    should be an iterable of the list of arguments that can be unpacked
    for each invocation. kwargs are passed to func as keyword arguments

    Returns:
        (tuple) (pool, multiprocessing.pool.AsyncResult)

    Note:
        When using threads, this is WAY slower than map since
        map_async uses the builtin python ThreadPool. I have no idea
        why that's slower than making threads by hand.

    Note: daemonic can be set to False if one needs to spawn child
        processes in func, BUT this could be vulnerable to creating
        an undead army of worker processes, only use this if you
        really really need it, and know what you're doing

    Example:
        >>> def func(i, letter):
        ...     print(i, letter)
        >>> p, r = map_async(2, func, itertools.izip(itertools.count(), 'abc'))
        >>> r.get(1e8)
        >>> p.join()
        >>> # the following is printed from 2 processes
        0 a
        1 b
        2 c
    """
    nr_procs = sanitize_nr_procs(nr_procs)
    if args_kw is None:
        args_kw = {}

    if not threads and sys.platform == 'darwin' and (
            "mayavi.mlab" in sys.modules or "mayavi" in sys.modules):
        import mayavi
        if mayavi.ETSConfig.toolkit == 'qt4':
            viscid.logger.critical("Using multiprocessing with Mayavi + Qt4 "
                                   "will cause segfaults on join.\n"
                                   "A workaround is to use the wx backend "
                                   "(`os.environ['ETS_TOOLKIT'] = 'wx'`).")

    args_iter = izip(repeat(func), args_iter, repeat(args_kw))

    # if given a pool, don't close it when we're done delegating tasks
    if pool is not None:
        return pool, pool.map_async(_star_passthrough, args_iter)
    else:
        if threads:
            pool = mp.pool.ThreadPool(nr_procs)
        elif daemonic:
            pool = mp.Pool(nr_procs)
        else:
            pool = NoDaemonPool(nr_procs)

        with closing(pool) as p:
            return p, p.map_async(_star_passthrough, args_iter)
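
A hedged usage sketch of the thread-pool variant (the worker and its arguments are made up):

def work(i, letter):
    return "{0}{1}".format(i, letter)

p, r = map_async(2, work, zip(range(3), "abc"), threads=True)
print(r.get(1e8))   # -> ['0a', '1b', '2c']
p.join()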