Example no. 1
class Operation(object):
    default_backends = ["numexpr", "cython", "numpy"]

    _imps = None  # implementations
    opname = None
    short_name = None

    def __init__(self, name, short_name, implementations=(), doc=""):
        self.opname = name
        self.short_name = short_name
        self._imps = OrderedDict()
        self.add_implementations(implementations)
        setattr(self, "__doc__", doc)

    @property
    def __name__(self):
        return self.opname

    def add_implementation(self, name, func):
        self._imps[name] = func

    def add_implementations(self, implementations):
        for name, func in implementations:
            self.add_implementation(name, func)

    def _get_imp(self, preferred, only=False):
        # normalize `preferred` into a list of backend names to try in order
        if not isinstance(preferred, (list, tuple)):
            if preferred is None:
                preferred = list(self._imps.keys())
            else:
                preferred = [preferred]

        for name in preferred:
            if name in self._imps:
                return self._imps[name]

        msg = "{0} :: {1}".format(self.opname, preferred)
        if only:
            raise verror.BackendNotFound(msg)
        logger.info("No preferred backends available: " + msg)

        # fall back to the default backends, then to any implementation at all
        for name in self.default_backends:
            if name in self._imps:
                return self._imps[name]

        if len(self._imps) == 0:
            raise verror.BackendNotFound("No backends available")
        return list(self._imps.values())[0]

    def __call__(self, *args, **kwargs):
        preferred = kwargs.pop("preferred", None)
        only = kwargs.pop("only", False)
        func = self._get_imp(preferred, only)
        return func(*args, **kwargs)
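
A minimal usage sketch (hedged: the backend functions and names below are
hypothetical stand-ins, not taken from the snippet; in practice the
implementations would be real numexpr / cython / numpy kernels):

import numpy as np

# two toy backends for an elementwise-add operation
def _add_numpy(a, b):
    return np.asarray(a) + np.asarray(b)

def _add_pure(a, b):
    return [x + y for x, y in zip(a, b)]

add_op = Operation("add", "add",
                   implementations=[("numpy", _add_numpy),
                                    ("pure", _add_pure)],
                   doc="elementwise addition")

add_op([1, 2], [3, 4])                    # "numpy" wins (a default backend)
add_op([1, 2], [3, 4], preferred="pure")  # explicit backend preference
# only=True raises verror.BackendNotFound when no preferred backend exists:
# add_op([1, 2], [3, 4], preferred="cython", only=True)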
Example no. 2
    @classmethod
    def save_fields(cls, fname, flds, **kwargs):
        """save some fields to a numpy .npz file, keyed by field name"""
        assert len(flds) > 0
        fname = os.path.expanduser(os.path.expandvars(fname))

        if isinstance(flds, list):
            if isinstance(flds[0], (list, tuple)):
                flds = OrderedDict(flds)
            else:
                flds = OrderedDict([(fld.name, fld) for fld in flds])

        fld_dict = {}

        # setup crds
        # FIXME: all coordinates are saved as non-uniform; the proper
        #        way to do this is to let each coordinate type format its
        #        own hdf5 / xdmf / numpy binary output
        fld0 = next(iter(flds.values()))
        clist = fld0.crds.get_clist(full_arrays=True)
        axis_names = []
        for axis_name, crdarr in clist:
            fld_dict[axis_name] = crdarr
            axis_names.append(axis_name)
        fld_dict[cls._KEY_CRDS] = np.array(axis_names)

        # setup fields
        # dict comprehension invalid in Python 2.6
        # fld_names = {key.lower(): [] for key in cls._KEY_FLDS.keys()}
        fld_names = {}
        for key in cls._KEY_FLDS.keys():
            fld_names[key.lower()] = []

        for name, fld in flds.items():
            fld_names[fld.center.lower()].append(name)
            fld_dict[name] = fld.data

        for center, names_lst in fld_names.items():
            fld_dict[cls._KEY_FLDS[center.lower()]] = np.array(names_lst)

        if fname.endswith(".npz"):
            fname = fname[:-4]
        np.savez(fname, **fld_dict)
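
The result is a plain .npz archive: one array per coordinate axis (keyed by
axis name), one array per field (keyed by field name), plus bookkeeping
entries under cls._KEY_CRDS and cls._KEY_FLDS[...] listing the axis names and
the field names per centering. A hedged round-trip sketch ("out.npz" and the
axis name "x" are illustrative):

import numpy as np

dat = np.load("out.npz")
print(sorted(dat.files))  # axis arrays, field arrays, and bookkeeping keys
x = dat["x"]              # one coordinate array, assuming an axis named "x"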
Example no. 3
    @classmethod
    def save_fields(cls, fname, flds, complevel=0, compression='gzip',
                    compression_opts=None, **kwargs):
        """ save some fields using the format given by the class """
        # FIXME: this is only good for writing cartesian rectilinear flds
        # FIXME: axes are renamed if flds[0] is 1D or 2D
        assert len(flds) > 0
        fname = os.path.expanduser(os.path.expandvars(fname))

        if complevel and compression == 'gzip' and compression_opts is None:
            compression_opts = complevel
        # TODO: what if compression != 'gzip'
        do_compression = compression_opts is not None

        if isinstance(flds, list):
            if isinstance(flds[0], (list, tuple)):
                flds = OrderedDict(flds)
            else:
                flds = OrderedDict([(fld.name, fld) for fld in flds])

        # FIXME: all coordinates are saved as non-uniform; the proper
        #        way to do this is to let each coordinate type format its
        #        own hdf5 / xdmf / numpy binary output
        fld0 = next(iter(flds.values()))
        clist = fld0.crds.get_clist(full_arrays=True)
        crd_arrs = [np.array([0.0])] * 3
        crd_names = ["x", "y", "z"]
        for i, c in enumerate(clist):
            crd_arrs[i] = c[1]
        crd_shape = [len(arr) for arr in crd_arrs]
        time = fld0.time

        # write arrays to the hdf5 file
        with h5py.File(fname, 'w') as f:
            for axis_name, arr in zip(crd_names, crd_arrs):
                loc = cls._CRDS_GROUP + '/' + axis_name
                if do_compression:
                    f.create_dataset(loc, data=arr, compression=compression,
                                     compression_opts=compression_opts)
                else:
                    f[loc] = arr

            for name, fld in flds.items():
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                # xdmf files use kji ordering
                if do_compression:
                    f.create_dataset(loc, data=fld.data.T, compression=compression,
                                     compression_opts=compression_opts)
                else:
                    f[loc] = fld.data.T

            # big bad openggcm time_str hack to put basetime into hdf5 file
            for fld in flds.values():
                try:
                    tfmt = "%Y:%m:%d:%H:%M:%S.%f"
                    sec_td = viscid.as_timedelta64(fld.time, 's')
                    dtime = viscid.as_datetime(fld.basetime + sec_td).strftime(tfmt)
                    epoch = viscid.readers.openggcm.GGCM_EPOCH
                    ts = viscid.as_timedelta(fld.basetime - epoch).total_seconds()
                    ts += fld.time
                    timestr = "time= {0} {1:.16e} {2} 300c".format(fld.time, ts, dtime)
                    f.create_group('openggcm')
                    f['openggcm'].attrs['time_str'] = np.string_(timestr)
                    break
                except viscid.NoBasetimeError:
                    pass

        # now write an xdmf file
        xdmf_fname = os.path.splitext(fname)[0] + ".xdmf"
        relh5fname = "./" + os.path.basename(fname)
        with open(xdmf_fname, 'w') as f:
            xloc = cls._CRDS_GROUP + '/' + crd_names[0]
            yloc = cls._CRDS_GROUP + '/' + crd_names[1]
            zloc = cls._CRDS_GROUP + '/' + crd_names[2]
            dim_str = " ".join([str(l) for l in crd_shape][::-1])
            f.write(cls._XDMF_TEMPLATE_BEGIN.format(time=time))
            s = cls._XDMF_TEMPLATE_RECTILINEAR_GRID_BEGIN.format(
                grid_name="vgrid", crd_dims=dim_str, h5fname=relh5fname,
                xdim=crd_shape[0], ydim=crd_shape[1], zdim=crd_shape[2],
                xloc=xloc, yloc=yloc, zloc=zloc)
            f.write(s)

            for fld in flds.values():
                _crd_system = viscid.as_crd_system(fld, None)
                if _crd_system:
                    f.write(cls._XDMF_INFO_TEMPLATE.format(name="crd_system",
                                                           value=_crd_system))
                    break

            for name, fld in flds.items():
                fld = fld.as_flat().T
                dt = fld.dtype.name.rstrip("0123456789").title()
                precision = fld.dtype.itemsize
                fld_dim_str = " ".join([str(l) for l in fld.shape])
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                s = cls._XDMF_TEMPLATE_ATTRIBUTE.format(
                    fld_name=name,
                    fld_type=fld.fldtype, center=fld.center.title(),
                    dtype=dt, precision=precision, fld_dims=fld_dim_str,
                    h5fname=relh5fname, fld_loc=loc)
                f.write(s)

            f.write(cls._XDMF_TEMPLATE_GRID_END)
            f.write(cls._XDMF_TEMPLATE_END)
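
In the compression handling above, complevel=N is shorthand for
compression='gzip', compression_opts=N; when neither is given, datasets are
written uncompressed by plain assignment. A hedged sketch of the call (the
writer class name FileHDF5 and the flds variable are illustrative):

FileHDF5.save_fields("out.h5", flds, complevel=6)   # gzip, level 6
FileHDF5.save_fields("out.h5", flds,
                     compression='gzip', compression_opts=6)  # equivalent
FileHDF5.save_fields("out.h5", flds)                # uncompressed datasets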
Example no. 4
class Bucket(object):
    """ This is basically a glorified dict

    It's a convenient dict-like object if you want lots of
    keys for a given value.

    NOTE:
        You can add non-hashable items, but this is poorly tested.
        When adding / removing non-hashable items (items, not handles)
        the comparison is done using the object's id. This is
        fundamentally different than using an object's __hash__, but
        it should be fairly transparent.
    """
    _ordered = False

    _ref_count = None  # keys are hashable items, values are # of times item was added
    _hash_lookup = None  # keys are hashable items, values are actual items
    _handles = None  # keys are hashable items, values are list of handles
    _items = None  # keys are handles, values are actual items

    # if index_handle is True, set_item adds this number as a handle and
    # increments it; this is useful for hiding loads that are not user
    # initiated, such as an xdmf file loading an h5 file under the covers
    _int_counter = None

    def __init__(self, ordered=False):
        self._ordered = ordered

        self._set_empty_dicts()
        self._int_counter = 0

    def _set_empty_dicts(self):
        if self._ordered:
            self._ref_count = OrderedDict()
            self._hash_lookup = OrderedDict()
            self._handles = OrderedDict()
            self._items = OrderedDict()
        else:
            self._ref_count = {}
            self._hash_lookup = {}
            self._handles = {}
            self._items = {}

    @staticmethod
    def _make_hashable(item):
        try:
            hash(item)
            return item
        except TypeError:
            return "<{0} @ {1}>".format(type(item), hex(id(item)))

    def items(self):
        for hashable_item, item in self._hash_lookup.items():
            yield self._handles[hashable_item], item

    def keys(self):
        return self._handles.values()

    def values(self):
        return self._hash_lookup.values()

    def set_item(self, handles, item, index_handle=True, _add_ref=False):
        """ if index_handle is true then the index of item will be included as
            a handle making the bucket indexable like a list """
        # found = False
        if handles is None:
            handles = []
        if not isinstance(handles, list):
            raise TypeError("handles must be of list type")

        # make sure we have a hashable "item" for doing reverse
        # lookups of handles using an item
        hashable_item = self._make_hashable(item)
        if hashable_item not in self._hash_lookup:
            if index_handle:
                handles += [self._int_counter]
                self._int_counter += 1

        handles_added = []
        for h in handles:
            # check if we're stealing a handle from another item
            try:
                hash(h)
            except TypeError:
                logger.error("A bucket says handle '{0}' is not hashable, "
                             "ignoring it".format(h))
                continue

            if (h in self._items) and (item is self._items[h]):
                continue
            elif h in self._items:
                logger.error("The handle '{0}' is being hijacked! Memory leak "
                             "could ensue.".format(h))
                # remove the handle from the old item; since this check is
                # here, there shouldn't be 2 items with the same handle in
                # the items dict
                old_item = self._items[h]
                old_hashable_item = self._make_hashable(old_item)
                self._handles[old_hashable_item].remove(h)
                if len(self._handles[old_hashable_item]) == 0:
                    self.remove_item(old_item)
            self._items[h] = item
            handles_added.append(h)

        try:
            self._handles[hashable_item] += handles_added
            if _add_ref:
                self._ref_count[hashable_item] += 1
        except KeyError:
            if len(handles_added) == 0:
                logger.error("No valid handles given, item '{0}' not added to "
                             "bucket".format(hashable_item))

            else:
                self._handles[hashable_item] = handles_added
                self._hash_lookup[hashable_item] = item
                self._ref_count[hashable_item] = 1

        return None

    def _remove_item(self, item):
        """remove item no matter what

        You may want to use remove_
        , raises ValueError if item is not found """
        hashable_item = self._make_hashable(item)
        handles = self._handles[hashable_item]
        for h in handles:
            del self._items[h]
        del self._hash_lookup[hashable_item]
        del self._handles[hashable_item]
        del self._ref_count[hashable_item]

    def _remove_item_by_handle(self, handle):
        self._remove_item(self._items[handle])

    def remove_item(self, item):
        self._remove_item(item)

    def remove_item_by_handle(self, handle):
        """ remove item by handle, raises KeyError if handle is not found """
        self.remove_item(self._items[handle])

    def remove_reference(self, item, _ref_count=1):
        hashable_item = self._make_hashable(item)
        try:
            self._ref_count[hashable_item] -= _ref_count
        except KeyError:
            item = self[item]
            hashable_item = self._make_hashable(item)
            if _ref_count:
                self._ref_count[hashable_item] -= _ref_count
            else:
                self._ref_count[hashable_item] = 0

        # FIXME: unload_all_files breaks this assert check... probably a bug
        # assert self._ref_count[hashable_item] >= 0, \
        #     "problem with bucket ref counting {0}".format(hashable_item)
        if self._ref_count[hashable_item] <= 0:
            self._remove_item(item)

    def remove_all_items(self):
        """ unload all items """
        self._set_empty_dicts()

    def items_as_list(self):
        return list(self._hash_lookup.values())

    def get_primary_handles(self):
        """Return a list of the first handles for all items"""
        return [handles[0] for handles in self._handles.values()]

    def handle_string(self, prefix=""):
        """ return string representation of handles and items """
        # this is inefficient, but probably doesn't matter
        s = ""
        for item, handles in self._handles.items():
            hands = [repr(h) for h in handles]
            s += "{0}handles: {1}\n".format(prefix, ", ".join(hands))
            s += "{0}  item: {1}\n".format(prefix, str(item))
        return s

    def print_tree(self, prefix=""):
        print(self.handle_string(prefix=prefix), end='')

    def __getitem__(self, handle):
        return self._items[handle]

    def __setitem__(self, key, value):
        if isinstance(key, (list, tuple)):
            key = list(key)
        elif key is not None:
            key = [key]
        self.set_item(key, value)

    def __delitem__(self, handle):
        try:
            self.remove_item_by_handle(handle)
        except (KeyError, TypeError):
            # maybe we are asking to remove an item explicitly
            self.remove_item(handle)

    def __iter__(self):
        return self.values().__iter__()

    def contains_item(self, item):
        hashable_item = self._make_hashable(item)
        return hashable_item in self._handles

    def contains_handle(self, handle):
        try:
            return handle in self._items
        except TypeError:
            return False

    def __contains__(self, handle):
        return self.contains_handle(handle) or self.contains_item(handle)

    def __len__(self):
        return len(self._hash_lookup)

    def __str__(self):
        return self.handle_string()
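
A minimal usage sketch of the many-handles-to-one-item mapping (the values
and handle names are made up for illustration):

bkt = Bucket(ordered=True)
bkt["pressure"] = 3.14                        # also gets auto int handle 0
bkt.set_item(["b", "bcc"], "magnetic field")  # two handles + auto handle 1

bkt["pressure"]   # -> 3.14
bkt[1]            # -> "magnetic field" (auto-assigned integer handle)
bkt["bcc"]        # -> "magnetic field" (alias handle)
"b" in bkt        # -> True  (known handle)
3.14 in bkt       # -> True  (known item)
len(bkt)          # -> 2     (counts items, not handles)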
Example no. 5
    @classmethod
    def save_fields(cls, fname, flds, **kwargs):
        """ save some fields using the format given by the class """
        # FIXME: this is only good for writing cartesian rectilinear flds
        # FIXME: axes are renamed if flds[0] is 1D or 2D
        assert len(flds) > 0
        fname = os.path.expanduser(os.path.expandvars(fname))

        if isinstance(flds, list):
            if isinstance(flds[0], (list, tuple)):
                flds = OrderedDict(flds)
            else:
                flds = OrderedDict([(fld.name, fld) for fld in flds])

        # FIXME: all coordinates are saved as non-uniform; the proper
        #        way to do this is to let each coordinate type format its
        #        own hdf5 / xdmf / numpy binary output
        fld0 = next(iter(flds.values()))
        clist = fld0.crds.get_clist(full_arrays=True)
        crd_arrs = [np.array([0.0])] * 3
        crd_names = ["x", "y", "z"]
        for i, c in enumerate(clist):
            crd_arrs[i] = c[1]
        crd_shape = [len(arr) for arr in crd_arrs]
        time = fld0.time

        # write arrays to the hdf5 file
        with h5py.File(fname, 'w') as f:
            for axis_name, arr in zip(crd_names, crd_arrs):
                loc = cls._CRDS_GROUP + '/' + axis_name
                f[loc] = arr

            for name, fld in flds.items():
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                # xdmf files use kji ordering
                f[loc] = fld.data.T

            # big bad openggcm time_str hack to put basetime into hdf5 file
            for fld in flds.values():
                try:
                    tfmt = "%Y:%m:%d:%H:%M:%S.%f"
                    sec_td = viscid.as_timedelta64(fld.time, 's')
                    dtime = viscid.as_datetime(fld.basetime + sec_td).strftime(tfmt)
                    epoch = viscid.readers.openggcm.GGCM_EPOCH
                    ts = viscid.as_timedelta(fld.basetime - epoch).total_seconds()
                    ts += fld.time
                    timestr = "time= {0} {1:.16e} {2} 300c".format(fld.time, ts, dtime)
                    f.create_group('openggcm')
                    f['openggcm'].attrs['time_str'] = np.string_(timestr)
                    break
                except viscid.NoBasetimeError:
                    pass

        # now write an xdmf file
        xdmf_fname = os.path.splitext(fname)[0] + ".xdmf"
        relh5fname = "./" + os.path.basename(fname)
        with open(xdmf_fname, 'w') as f:
            xloc = cls._CRDS_GROUP + '/' + crd_names[0]
            yloc = cls._CRDS_GROUP + '/' + crd_names[1]
            zloc = cls._CRDS_GROUP + '/' + crd_names[2]
            dim_str = " ".join([str(l) for l in crd_shape][::-1])
            f.write(cls._XDMF_TEMPLATE_BEGIN.format(time=time))
            s = cls._XDMF_TEMPLATE_RECTILINEAR_GRID_BEGIN.format(
                grid_name="vgrid", crd_dims=dim_str, h5fname=relh5fname,
                xdim=crd_shape[0], ydim=crd_shape[1], zdim=crd_shape[2],
                xloc=xloc, yloc=yloc, zloc=zloc)
            f.write(s)

            for fld in flds.values():
                _crd_system = viscid.as_crd_system(fld, None)
                if _crd_system:
                    f.write(cls._XDMF_INFO_TEMPLATE.format(name="crd_system",
                                                           value=_crd_system))
                    break

            for name, fld in flds.items():
                fld = fld.as_flat().T
                dt = fld.dtype.name.rstrip("0123456789").title()
                precision = fld.dtype.itemsize
                fld_dim_str = " ".join([str(l) for l in fld.shape])
                loc = cls._FLD_GROUPS[fld.center.lower()] + '/' + name
                s = cls._XDMF_TEMPLATE_ATTRIBUTE.format(
                    fld_name=name,
                    fld_type=fld.fldtype, center=fld.center.title(),
                    dtype=dt, precision=precision, fld_dims=fld_dim_str,
                    h5fname=relh5fname, fld_loc=loc)
                f.write(s)

            f.write(cls._XDMF_TEMPLATE_GRID_END)
            f.write(cls._XDMF_TEMPLATE_END)
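
To sanity-check the output pair, the .xdmf file can be loaded back; a hedged
sketch (viscid.load_file is viscid's generic reader entry point, but the file
and field names here are illustrative):

import viscid

f = viscid.load_file("out.xdmf")  # the xdmf references datasets in out.h5
fld = f["pp"]                     # look up a field by name (assumed name)
print(fld.shape)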