Exemplo n.º 1 (0)
Arquivo: io.py — Projeto: vadsem/yt
    def _generic_fluid_handler(self, chunks, selector, fields, size, ftype):
        """Read fluid fields of type *ftype* for every subset in *chunks*.

        Returns a dict mapping each ``(field type, field name)`` tuple in
        *fields* to a single concatenated array of the selected data.

        Raises ``YTFieldTypeNotFound`` when no field handler on a subset's
        domain matches *ftype*.
        """
        accum = defaultdict(list)

        for chunk in chunks:
            for subset in chunk.objs:
                # Locate the handler responsible for this field type.
                file_handler = next(
                    (fh for fh in subset.domain.field_handlers
                     if fh.ftype == ftype),
                    None)
                if file_handler is None:
                    raise YTFieldTypeNotFound(ftype)

                # Slurp the whole file; it contains the boundary
                # information, so subset.fill skims through and picks
                # off the right vectors.
                with open(file_handler.fname, "rb") as fobj:
                    content = IO(fobj.read())
                filled = subset.fill(content, fields, selector, file_handler)
                for field_type, field_name in fields:
                    data = filled.pop(field_name)
                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
                                field_name, data.size, data.min(),
                                data.max(), data.size)
                    accum[(field_type, field_name)].append(data)

        # Pop as we go so the intermediate lists are released promptly.
        return {field: np.concatenate(accum.pop(field)) for field in fields}
Exemplo n.º 2 (0)
Arquivo: io.py — Projeto: vadsem/yt
    def _read_particle_subset(self, subset, fields):
        """Read the requested particle fields from the particle files.

        One sequential read is issued per particle type appearing in
        *fields*; raises ``YTFieldTypeNotFound`` if a particle type has
        no matching handler on ``subset.domain``.
        """
        result = {}

        for ptype in {f[0] for f in fields}:
            # Restrict to the fields belonging to this particle type.
            ptype_fields = filter(lambda f: f[0] == ptype, fields)

            # Find the handler that knows about this particle type.
            handler = next(
                (ph for ph in subset.domain.particle_handlers
                 if ph.ptype == ptype),
                None)
            if handler is None:
                raise YTFieldTypeNotFound(ptype)

            result.update(
                _ramses_particle_file_handler(
                    handler.fname,
                    handler.field_offsets,
                    handler.field_types,
                    subset,
                    ptype_fields,
                    count=handler.local_particle_count))

        return result
Exemplo n.º 3 (0)
    def _read_particle_subset(self, subset, fields):
        '''Read the particle files.

        Parameters
        ----------
        subset : domain subset whose ``domain.particle_handlers`` describe
            the on-disk particle files.
        fields : list of ``(particle type, field name)`` tuples to read.

        Returns
        -------
        dict mapping ``(ptype, fname)`` tuples to the data read from disk.

        Raises
        ------
        YTFieldTypeNotFound
            If no particle handler matches a requested particle type.
        '''
        tr = {}

        # Sequential read depending on particle type
        for ptype in set(f[0] for f in fields):

            # Select the fields belonging to this particle type only.
            subs_fields = filter(lambda f: f[0] == ptype, fields)

            ok = False
            for ph in subset.domain.particle_handlers:
                if ph.ptype == ptype:
                    fname = ph.fname
                    foffsets = ph.field_offsets
                    data_types = ph.field_types
                    ok = True
                    count = ph.local_particle_count
                    break
            if not ok:
                raise YTFieldTypeNotFound(ptype)

            cosmo = self.ds.cosmological_simulation
            if (ptype, 'particle_birth_time') in foffsets and cosmo:
                # Work on shallow copies: writing straight into
                # ph.field_offsets / ph.field_types would mutate the
                # handler's shared tables as a side effect of every read.
                foffsets = dict(foffsets)
                data_types = dict(data_types)
                # In cosmological runs, alias the stored birth time as the
                # conformal birth time so it can be requested by that name.
                foffsets[ptype, 'conformal_birth_time'] = \
                    foffsets[ptype, 'particle_birth_time']
                data_types[ptype, 'conformal_birth_time'] = \
                    data_types[ptype, 'particle_birth_time']

            tr.update(_ramses_particle_file_handler(
                fname, foffsets, data_types, subset, subs_fields,
                count=count
            ))

        return tr
Exemplo n.º 4 (0)
Arquivo: io.py — Projeto: egentry/yt
    def _read_particle_subset(self, subset, fields):
        """Read particle data from the 'io' and 'sink' particle files.

        Raises ``YTFieldTypeNotFound`` for any other particle type.
        """
        out = {}

        domain = subset.domain

        # One sequential read per particle type (io or sink).
        for ptype in {f[0] for f in fields}:

            # Restrict to the fields belonging to this particle type.
            same_ptype = filter(lambda f: f[0] == ptype, fields)

            if ptype == 'io':
                fname, foffsets, data_types = (
                    domain.part_fn,
                    domain.particle_field_offsets,
                    domain.particle_field_types,
                )
            elif ptype == 'sink':
                fname, foffsets, data_types = (
                    domain.sink_fn,
                    domain.sink_field_offsets,
                    domain.sink_field_types,
                )
            else:
                # No file corresponds to this particle type.
                raise YTFieldTypeNotFound(ptype)

            out.update(
                _ramses_particle_file_handler(fname, foffsets, data_types,
                                              subset, same_ptype))

        return out
Exemplo n.º 5 (0)
    def _read_fluid_selection(self, chunks, selector, fields, size):
        """Read the fluid fields in *fields* for every chunk/subset.

        Fields are grouped by field type so each on-disk file is opened
        once per subset; returns a dict mapping each
        ``(field type, field name)`` tuple to one concatenated array.
        """
        accum = defaultdict(list)

        # Set of field types present in the request.
        requested_types = {f[0] for f in fields}
        for chunk in chunks:
            # Gather fields by type to minimize i/o operations.
            for req_type in requested_types:
                # All the fields sharing this type.
                same_type = [f for f in fields if f[0] == req_type]

                for subset in chunk.objs:
                    # Locate the handler responsible for this field type.
                    file_handler = next(
                        (fh for fh in subset.domain.field_handlers
                         if fh.ftype == req_type),
                        None)
                    if file_handler is None:
                        raise YTFieldTypeNotFound(req_type)

                    # The file contains boundary information; subset.fill
                    # skims through and picks off the right vectors.
                    with FortranFile(file_handler.fname) as fd:
                        filled = subset.fill(fd, same_type, selector,
                                             file_handler)
                    for field_type, field_name in same_type:
                        data = filled.pop(field_name)
                        mylog.debug(
                            "Filling %s with %s (%0.3e %0.3e) (%s zones)",
                            field_name,
                            data.size,
                            data.min(),
                            data.max(),
                            data.size,
                        )
                        accum[(field_type, field_name)].append(data)

        # Pop as we go so the intermediate lists are released promptly.
        return {field: np.concatenate(accum.pop(field)) for field in fields}