Example #1
# Imports needed by this snippet (module paths follow upstream yt;
# _create_new_gdf is defined in the same file, see Example #2 below):
import os
from contextlib import contextmanager

import h5py
from yt.utilities.parallel_tools.parallel_analysis_interface import (
    communication_system,
)


# This generator yields the open file and is consumed with a "with"
# statement, so it carries the contextmanager decorator.
@contextmanager
def _get_backup_file(ds):
    backup_filename = ds.backup_filename
    if os.path.exists(backup_filename):
        # backup file already exists, open it. We use parallel
        # h5py if it is available
        if (communication_system.communicators[-1].size > 1
                and h5py.get_config().mpi):
            mpi4py_communicator = communication_system.communicators[-1].comm
            f = h5py.File(
                backup_filename,
                mode="r+",
                driver="mpio",
                comm=mpi4py_communicator,
            )
        else:
            f = h5py.File(backup_filename, mode="r+")
        try:
            yield f
        finally:
            # close the file even if the caller's block raised
            f.close()
    else:
        # backup file does not exist, create it
        with _create_new_gdf(
                ds,
                backup_filename,
                data_author=None,
                data_comment=None,
                particle_type_name="dark_matter",
        ) as f:
            yield f
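
A minimal usage sketch (assumptions: ds is an already-loaded yt dataset
carrying the backup_filename attribute the snippet reads, and the printed
keys are only illustrative):

    # The context manager yields an open h5py.File and closes it on exit,
    # whether the backup file existed beforehand or was created fresh.
    with _get_backup_file(ds) as f:
        print(list(f.keys()))  # e.g. "data", "field_types", "particle_types"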
Example #2
File: writer.py  Project: victorgabr/yt
# Imports needed by this snippet (module paths follow upstream yt):
import os
from contextlib import contextmanager

import h5py
import numpy as np
from yt import __version__ as yt_version
from yt.utilities.exceptions import YTGDFAlreadyExists
from yt.utilities.parallel_tools.parallel_analysis_interface import (
    communication_system,
)


# Yields the open file for the caller to fill in, so it is used as a
# context manager (see the "with _create_new_gdf(...)" call in Example #1).
@contextmanager
def _create_new_gdf(ds,
                    gdf_path,
                    data_author=None,
                    data_comment=None,
                    dataset_units=None,
                    particle_type_name="dark_matter",
                    clobber=False):

    # Make sure we have the absolute path to the file first
    gdf_path = os.path.abspath(gdf_path)

    # Is the file already there? If so, are we allowing
    # clobbering?
    if os.path.exists(gdf_path) and not clobber:
        raise YTGDFAlreadyExists(gdf_path)

    ###
    # Create and open the file with h5py. We use parallel
    # h5py if it is available.
    ###
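    # (h5py.get_config().mpi is True only when h5py was built against a
    # parallel HDF5 library, so the "mpio" driver requested below is safe.)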
    if (communication_system.communicators[-1].size > 1
            and h5py.get_config().mpi):
        mpi4py_communicator = communication_system.communicators[-1].comm
        f = h5py.File(gdf_path, "w", driver="mpio", comm=mpi4py_communicator)
    else:
        f = h5py.File(gdf_path, "w")

    ###
    # "gridded_data_format" group
    ###
    g = f.create_group("gridded_data_format")
    g.attrs["data_software"] = "yt"
    g.attrs["data_software_version"] = yt_version
    if data_author is not None:
        g.attrs["data_author"] = data_author
    if data_comment is not None:
        g.attrs["data_comment"] = data_comment

    ###
    # "simulation_parameters" group
    ###
    g = f.create_group("simulation_parameters")
    g.attrs["refine_by"] = ds.refine_by
    g.attrs["dimensionality"] = ds.dimensionality
    g.attrs["domain_dimensions"] = ds.domain_dimensions
    g.attrs["current_time"] = ds.current_time
    g.attrs["domain_left_edge"] = ds.domain_left_edge
    g.attrs["domain_right_edge"] = ds.domain_right_edge
    g.attrs["unique_identifier"] = ds.unique_identifier
    g.attrs["cosmological_simulation"] = ds.cosmological_simulation
    # @todo: Where is this in the yt API?
    g.attrs["num_ghost_zones"] = 0
    # @todo: Where is this in the yt API?
    g.attrs["field_ordering"] = 0
    # @todo: not yet supported by yt.
    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')

    if ds.cosmological_simulation:
        g.attrs["current_redshift"] = ds.current_redshift
        g.attrs["omega_matter"] = ds.omega_matter
        g.attrs["omega_lambda"] = ds.omega_lambda
        g.attrs["hubble_constant"] = ds.hubble_constant

    if dataset_units is None:
        dataset_units = {}

    g = f.create_group("dataset_units")
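    # dataset_units, when supplied, maps "<name>_unit" keys to
    # (value, unit-string) pairs, e.g. {"length_unit": (3.0857e24, "cm")};
    # any key not supplied falls back to the dataset's own unit attribute.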
    for u in ["length", "time", "mass", "velocity", "magnetic"]:
        unit_name = u + "_unit"
        if unit_name in dataset_units:
            value, units = dataset_units[unit_name]
        else:
            attr = getattr(ds, unit_name)
            value = float(attr)
            units = str(attr.units)
        d = g.create_dataset(unit_name, data=value)
        d.attrs["unit"] = units

    ###
    # "field_types" group
    ###
    g = f.create_group("field_types")

    ###
    # "particle_types" group
    ###
    g = f.create_group("particle_types")

    # @todo: Particle type iterator
    sg = g.create_group(particle_type_name)
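    # np.string_ (a pre-NumPy-2.0 alias of np.bytes_) stores the type name
    # as a fixed-length byte string, which is how HDF5 represents it.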
    sg["particle_type_name"] = np.string_(particle_type_name)

    ###
    # root datasets -- info about the grids
    ###
    f["grid_dimensions"] = ds.index.grid_dimensions
    f["grid_left_index"] = np.array([
        grid.get_global_startindex() for grid in ds.index.grids
    ]).reshape(ds.index.grid_dimensions.shape[0], 3)
    f["grid_level"] = ds.index.grid_levels.flat
    # @todo: Fill with proper values
    f["grid_parent_id"] = -np.ones(ds.index.grid_dimensions.shape[0])
    f["grid_particle_count"] = ds.index.grid_particle_count

    ###
    # "data" group -- where we should spend the most time
    ###

    g = f.create_group("data")
    for grid in ds.index.grids:
        # add group for this grid
        grid_group = g.create_group("grid_%010i" % (grid.id - grid._id_offset))
        # add group for the particles on this grid
        particles_group = grid_group.create_group("particles")
        particles_group.create_group(particle_type_name)
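        # (no field data is written here; only the group skeleton is laid
        # out, and filling f["data"] is left to the caller)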

    try:
        yield f
    finally:
        # close the file when done, even if the caller's block raised
        f.close()
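
A minimal call sketch (assumptions: the dataset path, author string, and
the unit override below are placeholders, not values taken from the
project):

    import yt

    ds = yt.load("my_dataset")  # placeholder path
    with _create_new_gdf(
            ds,
            "backup.gdf",
            data_author="A. Author",
            dataset_units={"length_unit": (3.0857e24, "cm")},  # optional
            clobber=True,
    ) as f:
        # the GDF skeleton (groups and grid metadata) now exists; per-grid
        # field data would be written under f["data"] at this point
        pass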