Code example #1
    def _set_empty_dicts(self):
        if self._ordered:
            self._ref_count = OrderedDict()
            self._hash_lookup = OrderedDict()
            self._handles = OrderedDict()
            self._items = OrderedDict()
        else:
            self._ref_count = {}
            self._hash_lookup = {}
            self._handles = {}
            self._items = {}
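A side note on the `_ordered` flag: since Python 3.7 the built-in dict preserves insertion order too, so on modern interpreters the two branches mostly differ in that `OrderedDict` equality is order-sensitive and it offers reordering methods like `move_to_end()`. A minimal sketch of those differences:

    from collections import OrderedDict

    # plain dicts compare equal regardless of insertion order...
    assert {'a': 1, 'b': 2} == {'b': 2, 'a': 1}
    # ...but OrderedDict equality also checks order
    assert OrderedDict([('a', 1), ('b', 2)]) != OrderedDict([('b', 2), ('a', 1)])

    # OrderedDict can also be explicitly reordered
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    d.move_to_end('a')
    assert list(d) == ['b', 'c', 'a']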
Code example #2
File: ggcm_jrrle.py  Project: zcl-maker/Viscid
    def __init__(self, filename):
        self._read_func = [
            _jrrle.read_jrrle1d, _jrrle.read_jrrle2d, _jrrle.read_jrrle3d
        ]

        self.fields_seen = OrderedDict()
        self.seen_all_fields = False
        super(JrrleFileWrapper, self).__init__(filename)
Code example #3
    def inquire_all_fields(self, reinquire=False):
        if reinquire:
            self.seen_all_fields = False
            self.fields_seen = OrderedDict()

        if self.seen_all_fields:
            return

        self._file.seek(0)
        while not self.seen_all_fields:
            self.inquire_next()
            self._file.seek(self.file_meta['nbytes'], 1)
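The `1` passed as the second argument to `seek` is the whence flag (`os.SEEK_CUR`): it moves relative to the current position, which is how the loop skips each field's data block after `inquire_next()` has read its header. A small illustration with an in-memory file:

    import io

    f = io.BytesIO(b"header1payload1header2payload2")
    f.seek(0)                     # whence 0 (default): absolute offset
    f.read(7)                     # consume b"header1"
    f.seek(8, 1)                  # whence 1: skip 8 bytes from the current position
    assert f.read(7) == b"header2"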
Code example #4
File: ggcm_jrrle.py  Project: zcl-maker/Viscid
    def inquire_all_fields(self, reinquire=False):
        if reinquire:
            self.seen_all_fields = False
            self.fields_seen = OrderedDict()

        if self.seen_all_fields:
            return

        self.rewind()
        while not self.seen_all_fields:
            self.inquire_next()
            # last_seen, meta = self.inquire_next()
            # if meta is not None:
            #     print(last_seen, "lives at", meta["file_position"])
            self.advance_one_line()
Code example #5
    def save_fields(cls, fname, flds, complevel=0, compression='gzip',
                    compression_opts=None, **kwargs):
        """ save some fields using the format given by the class """
        # FIXME: this is only good for writing cartesian rectilinear flds
        # FIXME: axes are renamed if flds[0] is 1D or 2D
        assert len(flds) > 0
        fname = os.path.expanduser(os.path.expandvars(fname))

        if complevel and compression == 'gzip' and compression_opts is None:
            compression_opts = complevel
        # TODO: what if compression != 'gzip'
        do_compression = compression_opts is not None

        if isinstance(flds, list):
            if isinstance(flds[0], (list, tuple)):
                flds = OrderedDict(flds)
            else:
                flds = OrderedDict([(fld.name, fld) for fld in flds])

        # FIXME: all coordinates are saved as non-uniform; the proper
        #        way to do this is to let each coordinate format its own
        #        hdf5 / xdmf / numpy binary output
        fld0 = next(iter(flds.values()))
        clist = fld0.crds.get_clist(full_arrays=True)
        crd_arrs = [np.array([0.0])] * 3
        crd_names = ["x", "y", "z"]
        for i, c in enumerate(clist):
            crd_arrs[i] = c[1]
        crd_shape = [len(arr) for arr in crd_arrs]
        time = fld0.time

        # write arrays to the hdf5 file
        with h5py.File(fname, 'w') as f:
            for axis_name, arr in zip(crd_names, crd_arrs):
                loc = cls._CRDS_GROUP + '/' + axis_name
                if do_compression:
                    f.create_dataset(loc, data=arr, compression=compression,
                                     compression_opts=compression_opts)
                else:
                    f[loc] = arr

            for name, fld in flds.items():
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                # xdmf files use kji ordering
                if do_compression:
                    f.create_dataset(loc, data=fld.data.T, compression=compression,
                                     compression_opts=compression_opts)
                else:
                    f[loc] = fld.data.T

            # big bad openggcm time_str hack to put basetime into hdf5 file
            for fld in flds.values():
                try:
                    tfmt = "%Y:%m:%d:%H:%M:%S.%f"
                    sec_td = viscid.as_timedelta64(fld.time, 's')
                    dtime = viscid.as_datetime(fld.basetime + sec_td).strftime(tfmt)
                    epoch = viscid.readers.openggcm.GGCM_EPOCH
                    ts = viscid.as_timedelta(fld.basetime - epoch).total_seconds()
                    ts += fld.time
                    timestr = "time= {0} {1:.16e} {2} 300c".format(fld.time, ts, dtime)
                    f.create_group('openggcm')
                    f['openggcm'].attrs['time_str'] = np.string_(timestr)
                    break
                except viscid.NoBasetimeError:
                    pass

        # now write an xdmf file
        xdmf_fname = os.path.splitext(fname)[0] + ".xdmf"
        relh5fname = "./" + os.path.basename(fname)
        with open(xdmf_fname, 'w') as f:
            xloc = cls._CRDS_GROUP + '/' + crd_names[0]
            yloc = cls._CRDS_GROUP + '/' + crd_names[1]
            zloc = cls._CRDS_GROUP + '/' + crd_names[2]
            dim_str = " ".join([str(l) for l in crd_shape][::-1])
            f.write(cls._XDMF_TEMPLATE_BEGIN.format(time=time))
            s = cls._XDMF_TEMPLATE_RECTILINEAR_GRID_BEGIN.format(
                grid_name="vgrid", crd_dims=dim_str, h5fname=relh5fname,
                xdim=crd_shape[0], ydim=crd_shape[1], zdim=crd_shape[2],
                xloc=xloc, yloc=yloc, zloc=zloc)
            f.write(s)

            for fld in flds.values():
                _crd_system = viscid.as_crd_system(fld, None)
                if _crd_system:
                    f.write(cls._XDMF_INFO_TEMPLATE.format(name="crd_system",
                                                           value=_crd_system))
                    break

            for name, fld in flds.items():
                fld = fld.as_flat().T
                dt = fld.dtype.name.rstrip("0123456789").title()
                precision = fld.dtype.itemsize
                fld_dim_str = " ".join([str(l) for l in fld.shape])
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                s = cls._XDMF_TEMPLATE_ATTRIBUTE.format(
                    fld_name=name,
                    fld_type=fld.fldtype, center=fld.center.title(),
                    dtype=dt, precision=precision, fld_dims=fld_dim_str,
                    h5fname=relh5fname, fld_loc=loc)
                f.write(s)

            f.write(cls._XDMF_TEMPLATE_GRID_END)
            f.write(cls._XDMF_TEMPLATE_END)
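The `create_dataset` calls above use h5py's standard compression interface: `compression='gzip'` together with `compression_opts` set to a gzip level from 0 to 9. A minimal standalone sketch of both branches (the file name and array here are made up for illustration):

    import h5py
    import numpy as np

    arr = np.linspace(0.0, 1.0, 1000)
    with h5py.File('example.h5', 'w') as f:
        # compressed dataset, gzip level 4
        f.create_dataset('crds/x', data=arr, compression='gzip',
                         compression_opts=4)
        # uncompressed dataset via plain item assignment, as in the else branch
        f['flds/density'] = arr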
Code example #6
    def __init__(self, name, short_name, implementations=(), doc=""):
        self.opname = name
        self.short_name = short_name
        self._imps = OrderedDict()
        self.add_implementations(implementations)
        setattr(self, "__doc__", doc)
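`_imps` here is an insertion-ordered registry of implementations, so they can later be tried in the order they were registered. How Viscid fills and consults the registry is not shown in this snippet; the following is only a hypothetical sketch of the general pattern (the fallback-on-NotImplementedError behavior is an assumption, not Viscid's documented semantics):

    from collections import OrderedDict

    class Operation:
        def __init__(self, name, implementations=()):
            self.opname = name
            self._imps = OrderedDict()
            self.add_implementations(implementations)

        def add_implementations(self, implementations):
            # assumed shape: an iterable of (key, callable) pairs
            for key, func in implementations:
                self._imps[key] = func

        def __call__(self, *args, **kwargs):
            # try implementations in registration order; first success wins
            for func in self._imps.values():
                try:
                    return func(*args, **kwargs)
                except NotImplementedError:
                    continue
            raise RuntimeError("no implementation succeeded for " + self.opname)

    add_op = Operation('add', [('py', lambda a, b: a + b)])
    assert add_op(1, 2) == 3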
Code example #7
def follow_fluid(dset,
                 initial_seeds,
                 time_slice=slice(None),
                 curator=None,
                 callback=default_fluid_callback,
                 speed_scale=1.0,
                 dt=None,
                 tstart=None,
                 tstop=None,
                 duration=None,
                 dt_interp=None,
                 v_key='v',
                 anc_keys=(),
                 fld_slc=Ellipsis,
                 stream_opts=None,
                 callback_kwargs=None):
    """Trace fluid elements

    Note:
        use speed_scale if, say, V is in km/s and x/y/z is in Re

    Parameters:
        dset: a dataset object that we can call iter_times on
        time_slice: string, slice notation, like 1200:2400:1
        initial_seeds: any SeedGen object
        callback: function that is called each time step; it receives
            (i [int], t [float]) plus the keyword arguments seeds,
            v_field, anc_fields, grid0, grid1, and streamlines
        stream_opts: must have ds0 and max_length; maxit will be
            calculated automatically

    Returns:
        root points after following the fluid
    """
    curator = SeedCurator() if curator is None else curator
    # avoid shared mutable default arguments: copy so the caller's
    # dicts are never modified in place
    stream_opts = dict(stream_opts) if stream_opts else {}
    callback_kwargs = dict(callback_kwargs) if callback_kwargs else {}

    grids = list(dset.iter_times(time_slice))
    times = [g.time for g in grids]

    slc_range = dset.tslc_range(time_slice)
    time_slice_dir = np.sign(times[-1] - times[0]).astype('f')
    slice_min_dt = 1.0 if len(times) <= 1 else np.min(np.abs(np.diff(times)))

    # figure out direction (forward / backward)
    if tstart is not None and tstop is not None:
        tdir = np.sign(tstop - tstart).astype('f')
    elif (dt is not None and dt < 0) or (duration is not None
                                         and duration < 0):
        tdir = -1.0
    else:
        tdir = 1.0 if time_slice_dir == 0.0 else time_slice_dir

    # enforce that grids and times arrays are reordered to match tdir
    if (tdir > 0 and time_slice_dir < 0) or (tdir < 0 and time_slice_dir > 0):
        grids = grids[::-1]
        times = times[::-1]
        slc_range = slc_range[::-1]
        time_slice_dir *= -1

    # set tstart and tstop if they're not already given
    if tstart is None:
        tstart = slc_range[0]

    if tstop is None:
        if duration is not None:
            tstop = tstart + tdir * np.abs(duration)
        else:
            tstop = slc_range[1]

    # set dt if they're not given
    dt = np.abs(dt) if dt is not None else slice_min_dt
    dt_interp = np.abs(dt_interp) if dt_interp is not None else dt

    # ------ main loop
    fld_keys = [v_key] + list(anc_keys)

    times = np.array(times)
    t = tstart
    if np.any(np.sign(np.diff(times)) != tdir):
        raise RuntimeError("times is not monotonic")

    i = 0
    seeds = initial_seeds.get_points()

    while tdir * (t - tstop) <= 0.0:
        idx0 = max(np.sum(tdir * (times - t) < 0.0) - 1, 0)
        idx1 = min(idx0 + 1, len(grids) - 1)
        time0, grid0 = times[idx0], grids[idx0]
        time1, grid1 = times[idx1], grids[idx1]

        frac_interp = 0.0 if time0 == time1 else (t - time0) / (time1 - time0)

        # get / calculate fields for each key at the current time
        if grid0 is grid1:
            flds = [grid0[key] for key in fld_keys]
        else:
            a = frac_interp
            b = 1.0 - frac_interp
            flds = [
                viscid.axpby(a, grid0[k][fld_slc], b, grid1[k][fld_slc])
                for k in fld_keys
            ]
        anc_fields = OrderedDict([(k, v) for k, v in zip(anc_keys, flds[1:])])

        t_next_interp = t + tdir * dt_interp

        while tdir * (t - t_next_interp) < 0 and tdir * (t - tstop) <= 0.0:
            stream_opts.setdefault('method', 'rk45')
            vpaths = viscid.calc_streamlines(tdir * speed_scale * flds[0],
                                             seeds,
                                             max_t=dt,
                                             output=viscid.OUTPUT_STREAMLINES,
                                             stream_dir=viscid.DIR_FORWARD,
                                             **stream_opts)[0]

            callback(i,
                     t,
                     seeds=seeds,
                     v_field=flds[0],
                     anc_fields=anc_fields,
                     grid0=grid0,
                     grid1=grid1,
                     streamlines=vpaths,
                     **callback_kwargs)
            i += 1

            # prepare seeds for next iteration
            for iseed in range(seeds.shape[1]):
                seeds[:, iseed] = vpaths[iseed][:, -1]
            seeds = curator.update(flds[0], seeds, time=t)
            t += tdir * dt
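Between stored time steps the loop blends the two bracketing snapshots with complementary weights that sum to one, delegating the arithmetic to `viscid.axpby`. A self-contained NumPy sketch of that kind of snapshot blending (written as textbook linear interpolation; it mirrors the idea, not Viscid's exact call):

    import numpy as np

    def interp_snapshots(t, time0, fld0, time1, fld1):
        """Linearly interpolate a field between two time snapshots."""
        if time0 == time1:
            return fld0
        frac = (t - time0) / (time1 - time0)   # 0.0 at time0, 1.0 at time1
        return (1.0 - frac) * fld0 + frac * fld1

    f0 = np.array([0.0, 10.0])
    f1 = np.array([4.0, 20.0])
    assert np.allclose(interp_snapshots(1.5, 1.0, f0, 2.0, f1), [2.0, 15.0])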
Code example #8
    def load_files(self,
                   fnames,
                   index_handle=True,
                   file_type=None,
                   prefer=None,
                   force_reload=False,
                   _add_ref=False,
                   **kwargs):
        """Load files, and add them to the bucket

        Each object is initialized before it's put into the list;
        whatever is returned is what gets stored. Returning None means
        object init failed, so it is not added to the _objs list.

        Parameters:
            fnames: a list of file names (can contain glob patterns)
            index_handle: ??
            file_type: a class that is a subclass of VFile, if given,
                use this file type, don't use the autodetect mechanism
            kwargs: passed to file constructor

        Returns:
            A list of VFile instances. The length may not be the same
            as the length of fnames, and the order may differ, in order
            to accommodate globs and file grouping.
        """
        orig_fnames = fnames

        if not isinstance(fnames, (list, tuple)):
            fnames = [fnames]
        file_lst = []

        # glob and convert to absolute paths
        globbed_fnames = []
        for fname in fnames:
            slglob = slice_globbed_filenames(fname)
            if isinstance(slglob, string_types):
                slglob = [slglob]
            globbed_fnames += slglob
            # print(">>", fname)
            # print("==", globbed_fnames)
            # expanded_fname = os.path.expanduser(os.path.expandvars(fname))
            # absfname = os.path.abspath(expanded_fname)
            # if '*' in absfname or '?' in absfname:
            #     globbed_fnames += glob(absfname)
            # else:
            #     globbed_fnames += [absfname]
            # Is it necessary to recall abspath here? We did it before
            # the glob to make sure it didn't start with a '.' since that
            # tells glob not to fill wildcards
        fnames = globbed_fnames

        # detect file types
        types_detected = OrderedDict()
        for i, fname in enumerate(fnames):
            _ftype = None
            if file_type is None:
                _ftype = VFile.detect_type(fname, prefer=prefer)
            elif isinstance(file_type, string_types):
                _ftype = VFile.resolve_type(file_type)
            else:
                _ftype = file_type
            if not _ftype:
                raise RuntimeError("Can't determine type "
                                   "for {0}".format(fname))
            value = (fname, i)
            try:
                types_detected[_ftype].append(value)
            except KeyError:
                types_detected[_ftype] = [value]

        # see if the file's already been loaded, or load it, and add it
        # to the bucket and all that good stuff
        file_lst = []
        for ftype, vals in types_detected.items():
            names = [v[0] for v in vals]
            # group all file names of a given type
            groups = ftype.group_fnames(names)

            # iterate all the groups and add them
            for group in groups:
                f = None

                handle_name = ftype.collective_name(group)

                try:
                    f = self[handle_name]
                    if force_reload:
                        f.reload()
                except KeyError:
                    try:
                        f = ftype(group, parent_bucket=self, **kwargs)
                        f.handle_name = handle_name
                    except IOError as e:
                        s = " IOError on file: {0}\n".format(handle_name)
                        s += "              File Type: {0}\n".format(
                            handle_name)
                        s += "              {0}".format(str(e))
                        logger.warning(s)
                    except ValueError as e:
                        # ... why am i explicitly catching ValueErrors?
                        # i'm probably breaking something by re-raising
                        # this exception, but i didn't document what :(
                        s = " ValueError on file load: {0}\n".format(
                            handle_name)
                        s += "              File Type: {0}\n".format(
                            handle_name)
                        s += "              {0}".format(str(e))
                        logger.warning(s)
                        # re-raise the last exception
                        raise

                self.set_item([handle_name],
                              f,
                              index_handle=index_handle,
                              _add_ref=_add_ref)
                file_lst.append(f)

        if len(file_lst) == 0:
            logger.warning("No files loaded for '{0}', is the path "
                           "correct?".format(orig_fnames))
        return file_lst
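The try/except KeyError block that fills `types_detected` is a common grouping idiom; `setdefault` expresses the same thing more compactly while keeping insertion order. A small equivalent sketch (the `detect` helper below is a stand-in for `VFile.detect_type`):

    from collections import OrderedDict

    def detect(name):
        # stand-in for VFile.detect_type
        return name.rsplit('.', 1)[-1]

    fnames = ['run.3d.xdmf', 'run.log', 'out.3d.xdmf']
    types_detected = OrderedDict()
    for i, fname in enumerate(fnames):
        types_detected.setdefault(detect(fname), []).append((fname, i))

    assert list(types_detected) == ['xdmf', 'log']
    assert types_detected['xdmf'] == [('run.3d.xdmf', 0), ('out.3d.xdmf', 2)]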
Code example #9
    def __init__(self, filename):
        self.filename = filename
        self.fields_seen = OrderedDict()
        self.seen_all_fields = False
Code example #10
    def field_dict(self, fld_names=None, **kwargs):
        """ fields as dict of {name: field} """
        return OrderedDict(list(self.iter_field_items(fld_names=fld_names)))
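One note on the snippet above: `OrderedDict` accepts any iterable of key/value pairs directly, so the intermediate `list(...)` is not strictly needed. For example:

    from collections import OrderedDict

    pairs = (('bx', 1), ('by', 2), ('bz', 3))   # e.g. (name, field) items
    d = OrderedDict(pairs)                      # no list() wrapper required
    assert list(d) == ['bx', 'by', 'bz']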