Example #1
0
File: utils.py  Project: mpcsdspa/pysph
def iter_output(files, *arrays):
    """Given an iterable of the solution files, this loads the files, and
    yields the solver data and the requested arrays.

    If arrays is not supplied, it yields a dictionary of the arrays.

    Parameters
    ----------

    files : iterable
        Iterates over the list of desired files

    *arrays : strings
        Optional series of array names of arrays to return.

    Yields
    ------
    tuple
        ``(solver_data, arrays_dict)`` when no array names are given,
        otherwise ``(solver_data, array_1, array_2, ...)`` with one entry
        per requested name, in the order requested.

    Examples
    --------

    >>> files = get_files('elliptical_drop_output')
    >>> for solver_data, arrays in iter_output(files):
    ...     print(solver_data['t'], arrays.keys())

    >>> files = get_files('elliptical_drop_output')
    >>> for solver_data, fluid in iter_output(files, 'fluid'):
    ...     print(solver_data['t'], fluid.name)

    """
    for fname in files:
        data = load(fname)
        solver_data = data['solver_data']
        if not arrays:
            yield solver_data, data['arrays']
        else:
            # Yield a tuple here as well (the original yielded a list),
            # so both branches produce the same type; the docstring
            # examples unpack the result, which works for either.
            yield (solver_data,) + tuple(data['arrays'][x] for x in arrays)
Example #2
0
File: utils.py  Project: mpcsdspa/pysph
def load_and_concatenate(prefix, nprocs=1, directory=".", count=None):
    """Load the results from multiple files.

    Given a filename prefix and the number of processors, return a
    concatenated version of the dictionary returned via load.

    Parameters
    ----------

    prefix : str
        A filename prefix for the output file.

    nprocs : int
        The number of processors (files) to read.  Must be >= 1.

    directory : str
        The directory for the files

    count : int
        The file iteration count to read. If None, the last available
        one is read

    Returns
    -------
    dict
        The dict returned by ``load`` for the last rank read, with its
        ``"arrays"`` entry replaced by the arrays concatenated across
        all ranks.

    Raises
    ------
    ValueError
        If ``nprocs`` is less than 1.
    FileNotFoundError
        If ``count`` is None and no matching ``.npz`` output files are
        found in ``directory``.

    """
    # Guard early: with nprocs < 1 the loop body never runs and the
    # original code failed later with an obscure NameError on `data`.
    if nprocs < 1:
        raise ValueError("nprocs must be >= 1, got %r" % (nprocs,))

    if count is None:
        # Filenames are assumed to look like "<prefix>_<rank>_<count>.npz";
        # strip the ".npz" suffix and pick the largest iteration count.
        counts = [
            int(i.rsplit('_', 1)[1][:-4]) for i in os.listdir(directory)
            if i.startswith(prefix) and i.endswith('.npz')
        ]
        if not counts:
            # The original raised a bare IndexError here; be explicit.
            raise FileNotFoundError(
                "No output files matching prefix %r found in %r"
                % (prefix, directory)
            )
        count = max(counts)

    arrays_by_rank = {}

    for rank in range(nprocs):
        fname = os.path.join(
            directory, '{}_{}_{}.npz'.format(prefix, rank, count))

        data = load(fname)
        arrays_by_rank[rank] = data["arrays"]

    arrays = _concatenate_arrays(arrays_by_rank, nprocs)

    # Reuse the last rank's dict (keeps solver metadata) but swap in the
    # concatenated arrays.
    data["arrays"] = arrays

    return data
Example #3
0
def run(options):
    """Convert each input file to VTK, expanding directories into the
    output files they contain.

    ``options.inputfile`` is treated as a work queue: directories
    encountered while iterating are expanded in place by extending the
    list, so their files are processed on later iterations.
    """
    for fname in options.inputfile:
        if os.path.isdir(fname):
            files = [os.path.join(fname, f) for f in os.listdir(fname)
                     if f.endswith(output_formats)]
            files = remove_irrelevant_files(files)
            # Extending the list we are iterating is deliberate: the new
            # entries are visited on subsequent loop iterations.
            options.inputfile.extend(files)
            continue
        data = load(fname)
        particles = []
        for ptype, pdata in data['arrays'].items():
            particles.append(pdata)
        filename = os.path.splitext(fname)[0]
        outdir = options.outdir
        if outdir is not None:
            # The original concatenated `outdir + basename` with no path
            # separator, mangling the target path unless outdir ended in
            # a slash.  Join properly and make sure the directory exists.
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            filename = os.path.join(outdir, os.path.basename(filename))
        dump_vtk(filename, particles, scalars=options.scalars,
                 velocity=['u', 'v', 'w'])
Example #4
0
def run(options):
    """Convert each input file to VTK output.

    Directories found in ``options.inputfile`` are expanded in place
    (the list doubles as a work queue), so the files they contain get
    processed on later iterations.  When ``options.outdir`` is set, it
    is created if needed and the output is written there.
    """
    for path in options.inputfile:
        # Directories: queue up their output files and move on.
        if os.path.isdir(path):
            options.inputfile.extend(get_files(path))
            continue

        data = load(path)
        particles = [pdata for pdata in data['arrays'].values()]

        target = os.path.splitext(path)[0]
        out_dir = options.outdir
        if out_dir is not None:
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            target = os.path.join(out_dir, os.path.basename(target))

        dump_vtk(target,
                 particles,
                 scalars=options.scalars,
                 velocity=['u', 'v', 'w'])