Code example #1
File: snapshot.py Project: pencil-code
def write_h5_snapshot(
    snapshot,
    file_name="VAR0",
    datadir="data/allprocs",
    precision="d",
    nghost=3,
    persist=None,
    settings=None,
    param=None,
    grid=None,
    lghosts=False,
    indx=None,
    proc=None,
    ipx=None,
    ipy=None,
    ipz=None,
    procdim=None,
    unit=None,
    t=None,
    x=None,
    y=None,
    z=None,
    state="a",
    quiet=True,
    lshear=False,
    driver=None,
    comm=None,
    overwrite=False,
    rank=0,
    size=1,
):
    """
    Write a snapshot given as a numpy array.
    By default we assume that a simulation run directory has already been
    constructed and that start completed successfully in h5 format, so that
    the dim, grid and param files are already present.
    If not, their contents must be supplied as dictionaries, along with
    persist if included.

    call signature:

    write_h5_snapshot(snapshot, file_name='VAR0', datadir='data/allprocs',
                      precision='d', nghost=3, persist=None, settings=None,
                      param=None, grid=None, lghosts=False, indx=None,
                      proc=None, ipx=None, ipy=None, ipz=None, procdim=None,
                      unit=None, t=None, x=None, y=None, z=None, state='a',
                      quiet=True, lshear=False, driver=None, comm=None,
                      overwrite=False, rank=0, size=1)

    Keyword arguments:

    *snapshot*:
      Numpy array containing the snapshot.
      Must be of shape [nvar, nz, ny, nx] without ghost zones, or
      of shape [nvar, mz, my, mx] with ghost zones for lghosts=True.

    *file_name*:
      Name of the snapshot file to be written, e.g. VAR0 or var.

    *datadir*:
      Directory where the data is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *persist*:
      Optional dictionary of persistent variables.

    *settings*:
      Optional dictionary of simulation settings (dimensions, processor
      layout, precision, etc.).

    *param*:
      Optional Param object.

    *grid*:
      Optional Pencil Grid object of grid parameters.

    *nghost*:
      Number of ghost zones.

    *lghosts*:
      If True the snapshot contains the ghost zones.

    *indx*:
      Index object giving the index of each variable in the f-array.

    *unit*:
      Optional dictionary of simulation units.

    *quiet*:
      Flag to suppress print output.

    *t*:
      Time of the snapshot.

    *x, y, z*:
      x, y and z arrays of the domain including ghost zones.
      These are normally obtained from the Grid object, but may be
      supplied here to redefine the grid.

    *lshear*:
      Flag for the shear.

    *driver*:
      File driver for hdf5 io, for use in serial or MPI parallel.

    *comm*:
      MPI communicator, supplied if driver is 'mpio'.

    *overwrite*:
      Flag to replace an existing h5 snapshot file.

    *rank*:
      Rank of the process, with root=0.
    """

    import numpy as np
    import sys
    from os.path import join

    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # test if simulation directory
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
    if indx is None:
        indx = read.index()
    #
    if settings is None:
        settings = {}
        skeys = [
            "l1",
            "l2",
            "m1",
            "m2",
            "n1",
            "n2",
            "nx",
            "ny",
            "nz",
            "mx",
            "my",
            "mz",
            "nprocx",
            "nprocy",
            "nprocz",
            "maux",
            "mglobal",
            "mvar",
            "precision",
        ]
        dim = read.dim()
        for key in skeys:
            settings[key] = dim.__getattribute__(key)
        settings["precision"] = precision.encode()
        settings["nghost"] = nghost
        settings["version"] = np.int32(0)
    nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    if grid is None:
        grid = read.grid(quiet=True)
    else:
        gd_err = False
        for key in gkeys:
            if key not in grid.__dict__:
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
                gd_err = True
        if gd_err:
            print("ERROR: grid incomplete")
            sys.stdout.flush()
            return -1
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    if param is None:
        param = read.param(quiet=True)
        param.__setattr__("unit_mass",
                          param.unit_density * param.unit_length**3)
        param.__setattr__("unit_energy",
                          param.unit_mass * param.unit_velocity**2)
        param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
        param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
        param.unit_system = param.unit_system.encode()

    # check whether the snapshot matches the simulation shape
    if lghosts:
        if (snapshot.shape[0] != settings["mvar"]
                or snapshot.shape[1] != settings["mz"]
                or snapshot.shape[2] != settings["my"]
                or snapshot.shape[3] != settings["mx"]):
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions with ghosts.")
            sys.stdout.flush()
            return -1
    else:
        if (snapshot.shape[0] != settings["mvar"]
                or snapshot.shape[1] != settings["nz"]
                or snapshot.shape[2] != settings["ny"]
                or snapshot.shape[3] != settings["nx"]):
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions without ghosts.")
            sys.stdout.flush()
            return -1

    # Determine the precision used and ensure snapshot has correct data_type.
    if precision == "f":
        data_type = np.float32
        snapshot = np.float32(snapshot)
    elif precision == "d":
        data_type = np.float64
        snapshot = np.float64(snapshot)
    else:
        print("ERROR: Precision {0} not understood.".format(precision) +
              " Must be either 'f' or 'd'")
        sys.stdout.flush()
        return -1

    # Check that the shape does not conflict with the proc numbers.
    if ((settings["nx"] % settings["nprocx"] > 0)
            or (settings["ny"] % settings["nprocy"] > 0)
            or (settings["nz"] % settings["nprocz"] > 0)):
        print("ERROR: Shape of the input array is not compatible with the " +
              "cpu layout. Make sure that nproci devides ni.")
        sys.stdout.flush()
        return -1

    # Check the shape of the xyz arrays if specified and overwrite grid values.
    if x is not None:
        if len(x) != settings["mx"]:
            print("ERROR: x array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.x = data_type(x)
    if y is not None:
        if len(y) != settings["my"]:
            print("ERROR: y array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.y = data_type(y)
    if z is not None:
        if len(z) != settings["mz"]:
            print("ERROR: z array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.z = data_type(z)

    # Define a time.
    if t is None:
        t = data_type(0.0)

    # making use of pc_hdf5 functionality:
    if proc is not None:
        state = "a"
    else:
        state = "w"
    filename = join(datadir, file_name)
    print("write_h5_snapshot: filename =", filename)
    with open_h5(
            filename,
            state,
            driver=driver,
            comm=comm,
            overwrite=overwrite,
            rank=rank,
            size=size,
    ) as ds:
        data_grp = group_h5(
            ds,
            "data",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        if not procdim:
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                #create ghost zones if required
                if not lghosts:
                    tmp_arr = np.zeros([
                        snapshot.shape[1] + 2 * nghost,
                        snapshot.shape[2] + 2 * nghost,
                        snapshot.shape[3] + 2 * nghost,
                    ])
                    # use settings so this also works when dim was not read
                    tmp_arr[settings["n1"]:settings["n2"] + 1,
                            settings["m1"]:settings["m2"] + 1,
                            settings["l1"]:settings["l2"] + 1] = np.array(
                                snapshot[indx.__getattribute__(key) - 1])
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=tmp_arr,
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
                else:
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=np.array(snapshot[indx.__getattribute__(key) -
                                               1]),
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
        else:
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                dataset_h5(
                    data_grp,
                    key,
                    status=state,
                    shape=(settings["mz"], settings["my"], settings["mx"]),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                )
            # adjust indices to include ghost zones at boundaries
            l1, m1, n1 = procdim.l1, procdim.m1, procdim.n1
            if procdim.ipx == 0:
                l1 = 0
            if procdim.ipy == 0:
                m1 = 0
            if procdim.ipz == 0:
                n1 = 0
            l2, m2, n2 = procdim.l2, procdim.m2, procdim.n2
            if procdim.ipx == settings["nprocx"] - 1:
                l2 = procdim.l2 + settings["nghost"]
            if procdim.ipy == settings["nprocy"] - 1:
                m2 = procdim.m2 + settings["nghost"]
            if procdim.ipz == settings["nprocz"] - 1:
                n2 = procdim.n2 + settings["nghost"]
            nx, ny, nz = procdim.nx, procdim.ny, procdim.nz
            ipx, ipy, ipz = procdim.ipx, procdim.ipy, procdim.ipz
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                tmp_arr = np.array(snapshot[indx.__getattribute__(key) - 1])
                data_grp[key][n1 + ipz * nz:n2 + ipz * nz + 1,
                              m1 + ipy * ny:m2 + ipy * ny + 1,
                              l1 + ipx * nx:l2 + ipx * nx +
                              1, ] = tmp_arr[n1:n2 + 1, m1:m2 + 1, l1:l2 + 1]
        dataset_h5(
            ds,
            "time",
            status=state,
            data=np.array(t),
            size=size,
            dtype=data_type,
            rank=rank,
            comm=comm,
            overwrite=overwrite,
        )
        # add settings
        sets_grp = group_h5(
            ds,
            "settings",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in settings.keys():
            if "precision" in key:
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=None,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add grid
        grid_grp = group_h5(
            ds,
            "grid",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in gkeys:
            dataset_h5(
                grid_grp,
                key,
                status=state,
                data=(grid.__getattribute__(key)),
                dtype=data_type,
                rank=rank,
                comm=comm,
                size=size,
                overwrite=overwrite,
            )
        dataset_h5(
            grid_grp,
            "Ox",
            status=state,
            data=(param.__getattribute__("xyz0")[0], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oy",
            status=state,
            data=(param.__getattribute__("xyz0")[1], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oz",
            status=state,
            data=(param.__getattribute__("xyz0")[2], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        # add physical units
        unit_grp = group_h5(
            ds,
            "unit",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in ukeys:
            if "system" in key:
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=(param.__getattribute__("unit_" + key), ),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=param.__getattribute__("unit_" + key),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add optional persistent data
        if persist is not None:
            pers_grp = group_h5(
                ds,
                "persist",
                status=state,
                size=size,
                delete=False,
                overwrite=overwrite,
                rank=rank,
            )
            for key in persist.keys():
                if not quiet:
                    print(key, type(persist[key][()]))
                    sys.stdout.flush()
                arr = np.empty(nprocs, dtype=type(persist[key][()]))
                arr[:] = persist[key][()]
                dataset_h5(
                    pers_grp,
                    key,
                    status=state,
                    data=(arr),
                    size=size,
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    overwrite=overwrite,
                )
Code example #2
    def read(self, var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trimall=False, magic=None, sim=None, precision='d',
             lpersist=False, dtype=np.float64):
        """
        Read VAR files from Pencil Code. If proc < 0, then load all data
        and assemble, otherwise load VAR file from specified processor.

        The file format written by output() (and used, e.g., in var.dat)
        consists of the following Fortran records:
        1. data(mx, my, mz, nvar)
        2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
        Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
        for one vector field, 8 for var.dat in the case of MHD with entropy.
        Note that deltay(1) is present only if lshear is on, so the
        parameters must be known.

        call signature:

        read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trimall=False, magic=None, sim=None, precision='d',
             lpersist=False, dtype=np.float64)

        Keyword arguments:
            var_file:   Name of the VAR file.
            datadir:    Directory where the data is stored.
            proc:       Processor to be read. If -1 read all and assemble to one array.
            ivar:       Index of the VAR file, if var_file is not specified.
            quiet:      Flag for switching off output.
            trimall:    Trim the data cube to exclude ghost zones.
            magic:      Values to be computed from the data, e.g. B = curl(A).
            sim:        Simulation sim object.
            precision:  Float (f), double (d) or half (half).
            lpersist:   Read the persistent variables if present.
            dtype:      Precision of the returned data arrays (default
                        np.float64).
        """

        import os
        from scipy.io import FortranFile
        from pencil.math.derivatives import curl, curl2
        from pencil import read
        from pencil.sim import __Simulation__

        def persist(self, infile=None, precision='d', quiet=quiet):
            """An open Fortran file potentially containing persistent variables appended
               to the f array and grid data are read from the first proc data

               Record types provide the labels and id record for the peristent
               variables in the depricated fortran binary format
            """
            record_types = {}
            for key in read.record_types.keys():
                if read.record_types[key][1] == 'd':
                    record_types[key]=(read.record_types[key][0],
                                      precision)
                else:
                    record_types[key] = read.record_types[key]

            try:
                tmp_id = infile.read_record('h')
            except Exception:
                return -1
            block_id = 0
            for _ in range(2000):
                tmp_id = infile.read_record('h')
                block_id = tmp_id[0]
                if block_id == 2000:
                    break
                for key in record_types.keys():
                    if record_types[key][0] == block_id:
                        tmp_val = infile.read_record(record_types[key][1])
                        self.__setattr__(key, tmp_val[0])
                        if not quiet:
                            print(key, record_types[key][0],
                                  record_types[key][1], tmp_val)
            return self

        dim = None
        param = None
        index = None

        if isinstance(sim, __Simulation__):
            datadir = os.path.expanduser(sim.datadir)
            dim = sim.dim
            param = read.param(datadir=sim.datadir, quiet=True,
                               conflicts_quiet=True)
            index = read.index(datadir=sim.datadir)
        else:
            datadir = os.path.expanduser(datadir)
            if dim is None:
                if var_file[0:2].lower() == 'og':
                    dim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == 'VARd':
                        dim = read.dim(datadir, proc, down=True)
                    else:
                        dim = read.dim(datadir, proc)
            if param is None:
                param = read.param(datadir=datadir, quiet=quiet,
                                   conflicts_quiet=True)
            if index is None:
                index = read.index(datadir=datadir)

        if param.lwrite_aux:
            total_vars = dim.mvar + dim.maux
        else:
            total_vars = dim.mvar

        if os.path.exists(os.path.join(datadir, 'grid.h5')):
#
#  Read HDF5 files.
#
            import h5py
            run2D = param.lwrite_2d

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            if not var_file:
                if ivar < 0:
                    var_file = 'var.h5'
                else:
                    var_file = 'VAR' + str(ivar) + '.h5'

            file_name = os.path.join(datadir, 'allprocs', var_file)
            with h5py.File(file_name, 'r') as tmp:
                for key in tmp['data'].keys():
                    self.f[index.__getattribute__(key)-1, :] = dtype(
                                                         tmp['data/'+key][:])
                t = (tmp['time'][()]).astype(precision)
                x = (tmp['grid/x'][()]).astype(precision)
                y = (tmp['grid/y'][()]).astype(precision)
                z = (tmp['grid/z'][()]).astype(precision)
                dx = (tmp['grid/dx'][()]).astype(precision)
                dy = (tmp['grid/dy'][()]).astype(precision)
                dz = (tmp['grid/dz'][()]).astype(precision)
                if param.lshear:
                    deltay = (tmp['persist/shear_delta_y'][(0)]).astype(precision)
                if lpersist:
                    for key in tmp['persist'].keys():
                        self.__setattr__(key, (tmp['persist'][key][0]).astype(precision))
        else:
#
#  Read scattered Fortran binary files.
#
            run2D = param.lwrite_2d

            if dim.precision == 'D':
                read_precision = 'd'
            else:
                read_precision = 'f'

            if not var_file:
                if ivar < 0:
                    var_file = 'var.dat'
                else:
                    var_file = 'VAR' + str(ivar)

            if proc < 0:
                proc_dirs = self.__natural_sort(
                    filter(lambda s: s.startswith('proc'),
                           os.listdir(datadir)))
                if "proc_bounds.dat" in proc_dirs:
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ['allprocs']
#                else:
#                    proc_dirs = proc_dirs[::dim.nprocx*dim.nprocy]
            else:
                proc_dirs = ['proc' + str(proc)]

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            x = np.zeros(dim.mx, dtype=precision)
            y = np.zeros(dim.my, dtype=precision)
            z = np.zeros(dim.mz, dtype=precision)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    if var_file[0:2].lower() == 'og':
                        procdim = read.ogdim(datadir, proc)
                    else:
                        if var_file[0:4] == 'VARd':
                            procdim = read.dim(datadir, proc, down=True)
                        else:
                            procdim = read.dim(datadir, proc)
                    if not quiet:
                        print("Reading data from processor"+
                              " {0} of {1} ...".format(proc, len(proc_dirs)))

                else:
                    # A collective IO strategy is being used
                    procdim = dim
#                else:
#                    procdim.mx = dim.mx
#                    procdim.my = dim.my
#                    procdim.nx = dim.nx
#                    procdim.ny = dim.ny
#                    procdim.ipx = dim.ipx
#                    procdim.ipy = dim.ipy

                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the data.
                file_name = os.path.join(datadir, directory, var_file)
                infile = FortranFile(file_name)
                if not run2D:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
                else:
                    if dim.ny == 1:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, mzloc, mxloc))
                    else:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, myloc, mxloc))
                raw_etc = infile.read_record(dtype=read_precision)
                if lpersist:
                    persist(self, infile=infile, precision=read_precision, quiet=quiet)
                infile.close()

                t = raw_etc[0]
                x_loc = raw_etc[1:mxloc+1]
                y_loc = raw_etc[mxloc+1:mxloc+myloc+1]
                z_loc = raw_etc[mxloc+myloc+1:mxloc+myloc+mzloc+1]
                if param.lshear:
                    shear_offset = 1
                    deltay = raw_etc[-1]
                else:
                    shear_offset = 0

                dx = raw_etc[-3-shear_offset]
                dy = raw_etc[-2-shear_offset]
                dz = raw_etc[-1-shear_offset]

                if len(proc_dirs) > 1:
                    # Calculate where the local processor will go in
                    # the global array.
                    #
                    # Don't overwrite ghost zones of processor to the
                    # left (and accordingly in y and z direction -- makes
                    # a difference on the diagonals)
                    #
                    # Recall that in NumPy, slicing is NON-INCLUSIVE on
                    # the right end, ie, x[0:4] will slice all of a
                    # 4-digit array, not produce an error like in idl.

                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0xloc = 0
                        i1xloc = procdim.mx
                    else:
                        i0x = procdim.ipx*procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0xloc = procdim.nghostx
                        i1xloc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0yloc = 0
                        i1yloc = procdim.my
                    else:
                        i0y = procdim.ipy*procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0yloc = procdim.nghosty
                        i1yloc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z+procdim.mz
                        i0zloc = 0
                        i1zloc = procdim.mz
                    else:
                        i0z = procdim.ipz*procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0zloc = procdim.nghostz
                        i1zloc = procdim.mz

                    x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                    y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                    z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                    if not run2D:
                        self.f[:, i0z:i1z, i0y:i1y, i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                                i0yloc:i1yloc, i0xloc:i1xloc]
                    else:
                        if dim.ny == 1:
                            self.f[:, i0z:i1z, i0x:i1x] = f_loc[:, i0zloc:i1zloc, i0xloc:i1xloc]
                        else:
                            self.f[:, i0y:i1y, i0x:i1x] = f_loc[:, i0yloc:i1yloc,
                                                                i0xloc:i1xloc]
                else:
                    self.f = f_loc
                    x = x_loc
                    y = y_loc
                    z = z_loc

        if magic is not None:
            if 'bb' in magic:
                # Compute the magnetic field before doing trimall.
                aa = self.f[index.ax-1:index.az, ...]
                self.bb = dtype(curl(aa, dx, dy, dz, x=x, y=y, run2D=run2D,
                               coordinate_system=param.coord_system))
                if trimall:
                    self.bb = self.bb[:, dim.n1:dim.n2+1,
                                      dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            if 'jj' in magic:
                # Compute the electric current field before doing trimall.
                aa = self.f[index.ax-1:index.az, ...]
                self.jj = dtype(curl2(aa, dx, dy, dz, x=x, y=y,
                                coordinate_system=param.coord_system))
                if trimall:
                    self.jj = self.jj[:, dim.n1:dim.n2+1,
                                      dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            if 'vort' in magic:
                # Compute the vorticity field before doing trimall.
                uu = self.f[index.ux-1:index.uz, ...]
                self.vort = dtype(curl(uu, dx, dy, dz, x=x, y=y, run2D=run2D,
                                 coordinate_system=param.coord_system))
                if trimall:
                    if run2D:
                        if dim.nz == 1:
                            self.vort = self.vort[:, dim.m1:dim.m2+1,
                                                  dim.l1:dim.l2+1]
                        else:
                            self.vort = self.vort[:, dim.n1:dim.n2+1,
                                                  dim.l1:dim.l2+1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2+1,
                                              dim.m1:dim.m2+1,
                                              dim.l1:dim.l2+1]

        # Trim the ghost zones of the global f-array if asked.
        if trimall:
            self.x = x[dim.l1:dim.l2+1]
            self.y = y[dim.m1:dim.m2+1]
            self.z = z[dim.n1:dim.n2+1]
            if not run2D:
                self.f = self.f[:, dim.n1:dim.n2+1,
                                dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            else:
                if dim.ny == 1:
                    self.f = self.f[:, dim.n1:dim.n2+1, dim.l1:dim.l2+1]
                else:
                    self.f = self.f[:, dim.m1:dim.m2+1, dim.l1:dim.l2+1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.l1 = dim.l1
            self.l2 = dim.l2 + 1
            self.m1 = dim.m1
            self.m2 = dim.m2 + 1
            self.n1 = dim.n1
            self.n2 = dim.n2 + 1

        # Assign an attribute to self for each variable defined in
        # 'data/index.pro' so that e.g. self.ux is the x-velocity
        aatest = []
        uutest = []
        for key in index.__dict__.keys():
            if 'aatest' in key:
                aatest.append(key)
            if 'uutest' in key:
                uutest.append(key)
            if (key != 'global_gg' and key != 'keys'
                    and 'aatest' not in key and 'uutest' not in key):
                value = index.__dict__[key]
                setattr(self, key, self.f[value-1, ...])
        # Special treatment for vector quantities.
        if hasattr(index, 'uu'):
            self.uu = self.f[index.ux-1:index.uz, ...]
        if hasattr(index, 'aa'):
            self.aa = self.f[index.ax-1:index.az, ...]
        if hasattr(index, 'uu_sph'):
            self.uu_sph = self.f[index.uu_sphx-1:index.uu_sphz, ...]
        if hasattr(index, 'bb_sph'):
            self.bb_sph = self.f[index.bb_sphx-1:index.bb_sphz, ...]
        # Special treatment for test method vector quantities.
        # Note: indices run 1, 2, 3, ..., 0; the last vector may be the
        # zero field/flow.
        if hasattr(index, 'aatest1'):
            naatest = int(len(aatest)/3)
            for j in range(0,naatest):
                key = 'aatest'+str(np.mod(j+1,naatest))
                value = index.__dict__['aatest1'] + 3*j
                setattr(self, key, self.f[value-1:value+2, ...])
        if hasattr(index, 'uutest1'):
            nuutest = int(len(uutest)/3)
            for j in range(0,nuutest):
                key = 'uutest'+str(np.mod(j+1,nuutest))
                value = index.__dict__['uutest1'] + 3*j
                setattr(self, key, self.f[value-1:value+2, ...])

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        if param.lshear:
            self.deltay = deltay

        # Do the rest of magic after the trimall (i.e. no additional curl.)
        self.magic = magic
        if self.magic is not None:
            self.magic_attributes(param, dtype=dtype)
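Usage sketch for code example #2 (hedged; it mirrors the examples given in the docstring of code example #5 and assumes the pencil package is importable as pc and the working directory contains a run with data/):

import pencil as pc

# Read VAR2, compute B = curl(A) via magic, and trim the ghost zones.
var = pc.read.var(var_file='VAR2', magic=['bb'], trimall=True)
print(var.t, var.f.shape, var.bb.shape)

# Read the latest var.dat including any persistent variables.
var = pc.read.var(lpersist=True)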
Code example #3
File: pc2vtk.py Project: asnodin/pencil-code
def var2vtk(var_file='var.dat',
            datadir='data',
            proc=-1,
            variables=None,
            b_ext=False,
            magic=None,
            destination='work',
            quiet=True,
            trimall=True,
            ti=-1,
            tf=-1):
    """
    Convert data from PencilCode format to vtk.

    call signature::

      var2vtk(var_file='var.dat', datadir='data', proc=-1,
              variables=None, b_ext=False, magic=None,
              destination='work', quiet=True, trimall=True, ti=-1, tf=-1)

    Read *var_file* and convert its content into vtk format. Write the result
    in *destination*.

    Keyword arguments:

      *var_file*:
        The original var_file.

      *datadir*:
        Directory where the data is stored.

      *proc*:
        Processor which should be read. Set to -1 for all processors.

      *variables*:
        List of variables which should be written. If None all.

      *b_ext*:
        Add the external magnetic field.

      *destination*:
        Destination file.

      *quiet*:
        Keep quiet when reading the var files.

      *trimall*:
        Trim the data cube to exclude ghost zones.

      *ti, tf*:
        Start and end index for animation. Leave negative for no animation.
        Overrides var_file.
    """

    import numpy as np
    import sys
    from pencil import read
    from pencil import math

    # Guard against mutating the shared default argument.
    if magic is None:
        magic = []

    # Determine if we want an animation.
    if ti < 0 or tf < 0:
        animation = False
    else:
        animation = True

    # If no variables specified collect all by default
    if not variables:
        variables = []
        indx = read.index()
        for key in indx.__dict__.keys():
            if 'keys' not in key:
                variables.append(key)
        if 'uu' in variables:
            magic.append('vort')
            variables.append('vort')
        if 'rho' in variables or 'lnrho' in variables:
            if 'ss' in variables:
                magic.append('tt')
                variables.append('tt')
                magic.append('pp')
                variables.append('pp')
        if 'aa' in variables:
            magic.append('bb')
            variables.append('bb')
            magic.append('jj')
            variables.append('jj')
            variables.append('ab')
            variables.append('b_mag')
            variables.append('j_mag')
    else:
        # Convert a single variable given as a string into a list.
        if isinstance(variables, str):
            variables = [variables]
        if 'tt' in variables:
            magic.append('tt')
        if 'pp' in variables:
            magic.append('pp')
        if 'bb' in variables:
            magic.append('bb')
        if 'jj' in variables:
            magic.append('jj')
        if 'vort' in variables:
            magic.append('vort')
        if 'b_mag' in variables and 'bb' not in magic:
            magic.append('bb')
        if 'j_mag' in variables and 'jj' not in magic:
            magic.append('jj')
        if 'ab' in variables and 'bb' not in magic:
            magic.append('bb')

    for t_idx in range(ti, tf + 1):
        if animation:
            var_file = 'VAR' + str(t_idx)

        # Read the PencilCode variables and set the dimensions.
        var = read.var(var_file=var_file,
                       datadir=datadir,
                       proc=proc,
                       magic=magic,
                       trimall=True,
                       quiet=quiet)

        grid = read.grid(datadir=datadir, proc=proc, trim=trimall, quiet=True)

        params = read.param(quiet=True)

        # Add external magnetic field.
        if b_ext:
            B_ext = np.array(params.b_ext)
            var.bb[0, ...] += B_ext[0]
            var.bb[1, ...] += B_ext[1]
            var.bb[2, ...] += B_ext[2]

        dimx = len(grid.x)
        dimy = len(grid.y)
        dimz = len(grid.z)
        dim = dimx * dimy * dimz
        dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1)
        dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1)
        dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1)

        # Write the vtk header.
        if animation:
            fd = open(destination + str(t_idx) + '.vtk', 'wb')
        else:
            fd = open(destination + '.vtk', 'wb')
        fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
        fd.write('VAR files\n'.encode('utf-8'))
        fd.write('BINARY\n'.encode('utf-8'))
        fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy,
                                                         dimz).encode('utf-8'))
        fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
            grid.x[0], grid.y[0], grid.z[0]).encode('utf-8'))
        fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
            dx, dy, dz).encode('utf-8'))
        fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8'))

        # Write the data.
        for v in variables:
            print('Writing {0}.'.format(v))
            # Prepare the data to the correct format.
            if v == 'ab':
                data = math.dot(var.aa, var.bb)
            elif v == 'b_mag':
                data = np.sqrt(math.dot2(var.bb))
            elif v == 'j_mag':
                data = np.sqrt(math.dot2(var.jj))
            else:
                data = getattr(var, v)
            if sys.byteorder == 'little':
                data = data.astype(np.float32).byteswap()
            else:
                data = data.astype(np.float32)
            # Check if we have vectors or scalars.
            if data.ndim == 4:
                data = np.moveaxis(data, 0, 3)
                fd.write('VECTORS {0} float\n'.format(v).encode('utf-8'))
            else:
                fd.write('SCALARS {0} float\n'.format(v).encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            fd.write(data.tobytes())

        del var

        fd.close()
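Usage sketch for code example #3 (hedged; the import path for var2vtk is an assumption based on the file name pc2vtk.py):

from pencil.export import var2vtk  # hypothetical import path

# Single snapshot: writes work.vtk from the latest var.dat.
var2vtk(var_file='var.dat', datadir='data', destination='work')

# Animation over VAR0..VAR10: writes frame0.vtk ... frame10.vtk.
var2vtk(destination='frame', ti=0, tf=10)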
Code example #4
File: fort2h5.py Project: pencil-code
def sim2h5(
    newdir=".",
    olddir=".",
    varfile_names=None,
    todatadir="data/allprocs",
    fromdatadir="data",
    precision="d",
    nghost=3,
    lpersist=True,
    x=None,
    y=None,
    z=None,
    lshear=False,
    snap_by_proc=False,
    aver_by_proc=False,
    lremove_old_snapshots=False,
    lremove_old_slices=False,
    lread_all_videoslices=False,
    vlarge=100000000,
    lremove_old_averages=False,
    execute=False,
    quiet=True,
    l2D=True,
    lvars=True,
    lvids=True,
    laver=True,
    laver2D=False,
    lremove_deprecated_vids=False,
    lsplit_slices=False,
):
    """
    Copy a simulation object written in Fortran binary to hdf5.
    The default is to copy all snapshots from/to the current simulation
    directory. Optionally the old files can be removed once the new h5
    data has been saved.

    call signature:

    sim2h5(newdir='.', olddir='.', varfile_names=None,
           todatadir='data/allprocs', fromdatadir='data',
           precision='d', nghost=3, lpersist=True,
           x=None, y=None, z=None, lshear=False,
           snap_by_proc=False, aver_by_proc=False,
           lremove_old_snapshots=False, lremove_old_slices=False,
           lread_all_videoslices=False, vlarge=100000000,
           lremove_old_averages=False, execute=False, quiet=True,
           l2D=True, lvars=True, lvids=True, laver=True, laver2D=False,
           lremove_deprecated_vids=False, lsplit_slices=False)

    Keyword arguments:

    *olddir*:
      String path to simulation source directory.
      Path may be relative or absolute.

    *newdir*:
      String path to simulation destination directory.
      Path may be relative or absolute.

    *varfile_names*:
      A list of names of the snapshot files to be written, e.g. VAR0
      If None all varfiles in olddir+'/data/proc0/' will be converted

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *precision*:
      Single 'f' or double 'd' precision for new data.

    *nghost*:
      Number of ghost zones.
      TODO: handle switching size of ghost zones.

    *lpersist*:
      Option to include persistent variables from snapshots.

    *x, y, z*:
      x, y and z arrays of the domain with ghost zones.
      These are normally obtained from the Grid object, but may be
      supplied here to redefine the grid.

    *lshear*:
      Flag for the shear.

    *execute*:
      Confirmation required if any lremove_old option is set.

    *lremove_old_snapshots*:
      If True the old snapshot data will be deleted once the new h5 data
      has been saved.

    *lremove_old_slices*:
      If True the old video slice data will be deleted once the new h5 data
      has been saved.

    *lremove_old_averages*:
      If True the old averages data will be deleted once the new h5 data
      has been saved.

    *aver_by_proc*:
      Option to read old binary files by processor and write in
      parallel.

    *laver2D*:
      If True apply to each plane_list 'y', 'z' and load each variable
      sequentially.

    The following are determined internally and are not passed as
    arguments:

    *l_mpi*:
      Whether MPI parallel processing applies.

    *driver*:
      HDF5 file io driver, either None or 'mpio'.

    *comm*:
      MPI communicator.

    *rank*:
      Integer ID of the processor.

    *size*:
      Number of MPI processes.
    """

    import glob
    import numpy as np
    import os
    from os.path import exists, join
    import subprocess as sub
    import sys

    from .. import read
    from .. import sim
    from . import write_h5_grid
    from pencil.util import is_sim_dir

    try:
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        driver = "mpio"
        l_mpi = size != 1
    except ImportError:
        comm = None
        driver = None
        rank = 0
        size = 1
        l_mpi = False
    if not l_mpi:
        comm = None
        driver = None
    print("rank {} and size {}".format(rank, size))
    sys.stdout.flush()
    if rank == size - 1:
        print("l_mpi", l_mpi)
        sys.stdout.flush()

    # test if simulation directories
    if newdir == ".":
        newdir = os.getcwd()
    if olddir == ".":
        olddir = os.getcwd()
    os.chdir(olddir)
    if not is_sim_dir():
        if rank == 0:
            print("ERROR: Directory (" + olddir + ") needs to be a simulation")
            sys.stdout.flush()
        return -1
    if newdir != olddir:
        if not exists(newdir):
            cmd = "pc_newrun -s " + newdir
            if rank == size - 1:
                process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                output, error = process.communicate()
                print(cmd, output, error)
                # os.system(cmd)
            if comm:
                comm.Barrier()
        os.chdir(newdir)
        if not is_sim_dir():
            if rank == 0:
                print("ERROR: Directory (" + newdir +
                      ") needs to be a simulation")
                sys.stdout.flush()
            return -1
    #
    lremove_old = lremove_old_snapshots or lremove_old_slices or lremove_old_averages
    if lremove_old:
        if not execute:
            os.chdir(olddir)
            if rank == 0:
                print("WARNING: Are you sure you wish to remove the Fortran" +
                      " binary files from \n" + os.getcwd() + ".\n" +
                      "Set execute=True to proceed.")
                sys.stdout.flush()
            return -1

    os.chdir(olddir)
    if lvars:
        if varfile_names is None:
            os.chdir(fromdatadir + "/proc0")
            lVARd = False
            varfiled_names = natural_sort(glob.glob("VARd*"))
            if len(varfiled_names) > 0:
                varfile_names = natural_sort(glob.glob("VAR*"))
                for iv in range(len(varfile_names) - 1, -1, -1):
                    if "VARd" in varfile_names[iv]:
                        varfile_names.remove(varfile_names[iv])
                lVARd = True
            else:
                varfile_names = natural_sort(glob.glob("VAR*"))
            os.chdir(olddir)
        else:
            lVARd = False
            if not isinstance(varfile_names, list):
                varfile_names = [varfile_names]
            varfiled_names = []
            tmp_names = []
            for varfile_name in varfile_names:
                if "VARd" in varfile_names:
                    varfiled_names.append(varfile_name)
                    lVARd = True
                else:
                    tmp_names.append(varfile_name)
            varfile_names = tmp_names
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    grid = None
    if rank == size - 1:
        grid = read.grid(quiet=True)
    if l_mpi:
        grid = comm.bcast(grid, root=size - 1)
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    for key in gkeys:
        if key not in grid.__dict__:
            if rank == 0:
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
            return -1
    # obtain the settings from the old simulation
    settings = {}
    skeys = [
        "l1",
        "l2",
        "m1",
        "m2",
        "n1",
        "n2",
        "nx",
        "ny",
        "nz",
        "mx",
        "my",
        "mz",
        "nprocx",
        "nprocy",
        "nprocz",
        "maux",
        "mglobal",
        "mvar",
        "precision",
    ]
    if rank == 0:
        olddim = read.dim()
        for key in skeys:
            settings[key] = np.array(olddim.__getattribute__(key))
        olddim = None
        settings["nghost"] = np.array(nghost)
        settings["precision"] = precision.encode()
    if l_mpi:
        settings = comm.bcast(settings, root=0)
    if snap_by_proc:
        nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
        if np.mod(nprocs, size) != 0:
            print("WARNING: efficiency requires cpus to divide ncpus")
            sys.stdout.flush()
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    # obtain physical units from old simulation
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    param = read.param(quiet=True)
    param.__setattr__("unit_mass", param.unit_density * param.unit_length**3)
    param.__setattr__("unit_energy", param.unit_mass * param.unit_velocity**2)
    param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
    param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
    param.unit_system = param.unit_system.encode()
    # index list for variables in f-array
    if not quiet:
        print(rank, param)
        sys.stdout.flush()
    indx = None
    if rank == 0:
        indx = read.index()
    if l_mpi:
        indx = comm.bcast(indx, root=0)

    # check consistency between Fortran binary and h5 data
    os.chdir(newdir)
    dim = None
    if is_sim_dir():
        if rank == size - 1:
            if exists(join(newdir, "data", "dim.dat")):
                try:
                    dim = read.dim()
                except ValueError:
                    pass
        if l_mpi:
            dim = comm.bcast(dim, root=size - 1)
        if dim:
            if not quiet:
                print(rank, dim)
                sys.stdout.flush()
            if (dim.mvar != settings["mvar"]
                    or dim.mx != settings["mx"]
                    or dim.my != settings["my"]
                    or dim.mz != settings["mz"]):
                if rank == size - 1:
                    print("ERROR: new simulation dimensions do not match.")
                    sys.stdout.flush()
                return -1
            dim = None
    os.chdir(olddir)
    if rank == size - 1:
        print("precision is ", precision)
        sys.stdout.flush()
    if laver2D:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir="data",
            l2D=False,
            precision=precision,
            quiet=quiet,
            laver2D=laver2D,
            lremove_old_averages=False,
            aver_by_proc=aver_by_proc,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
        l2D = False
    # copy snapshots
    if lvars and len(varfile_names) > 0:
        var2h5(
            newdir,
            olddir,
            varfile_names,
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy downsampled snapshots if present
    if lvars and lVARd:
        var2h5(
            newdir,
            olddir,
            varfiled_names,
            todatadir,
            fromdatadir,
            False,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            trimall=True,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    if lvars:
        var2h5(
            newdir,
            olddir,
            [
                "var.dat",
            ],
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy old video slices to new h5 sim
    if lvids:
        if lremove_deprecated_vids:
            for ext in [
                    "bb.", "uu.", "ux.", "uy.", "uz.", "bx.", "by.", "bz."
            ]:
                cmd = "rm -f " + join(olddir, fromdatadir, "proc*",
                                      "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
                cmd = "rm -f " + join(fromdatadir, "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
        if comm:
            comm.Barrier()
        cmd = "src/read_all_videofiles.x"
        if rank == size - 1 and lread_all_videoslices:
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        if comm:
            comm.Barrier()
        slices2h5(
            newdir,
            olddir,
            grid,
            todatadir="data/slices",
            fromdatadir=fromdatadir,
            precision=precision,
            quiet=quiet,
            vlarge=vlarge,
            lsplit_slices=lsplit_slices,
            lremove_old_slices=lremove_old_slices,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy old averages data to new h5 sim
    if laver:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir=fromdatadir,
            l2D=l2D,
            precision=precision,
            quiet=quiet,
            aver_by_proc=False,
            lremove_old_averages=lremove_old_averages,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # check that some critical sim files are present for the new sim
    # without start; construct grid.h5 sim information if required
    os.chdir(newdir)
    if l_mpi:
        comm.Barrier()
    if rank == 0:
        write_h5_grid(
            file_name="grid",
            datadir="data",
            precision=precision,
            nghost=nghost,
            settings=settings,
            param=param,
            grid=grid,
            unit=None,
            quiet=quiet,
        )
        source_file = join(olddir, fromdatadir, "proc0/varN.list")
        target_file = join(newdir, todatadir, "varN.list")
        if exists(source_file):
            cmd = "cp " + source_file + " " + target_file
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        items = [
            "def_var.pro",
            "index.pro",
            "jobid.dat",
            "param.nml",
            "particle_index.pro",
            "pc_constants.pro",
            "pointmass_index.pro",
            "pt_positions.dat",
            "sn_series.dat",
            "svnid.dat",
            "time_series.dat",
            "tsnap.dat",
            "tspec.dat",
            "tvid.dat",
            "t2davg.dat",
            "var.general",
            "variables.pro",
            "varname.dat",
        ]
        for item in items:
            source_file = join(olddir, fromdatadir, item)
            target_file = join(newdir, fromdatadir, item)
            if exists(source_file):
                if not exists(target_file):
                    cmd = "cp " + source_file + " " + target_file
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
    print("Simulation Fortran to h5 completed on rank {}.".format(rank))
    sys.stdout.flush()
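
A minimal usage sketch for the conversion routine above. The function name sim2h5 and its module path are assumptions based on this listing (check the export in your pencil version); the keyword names are the flags referenced in the body above.

import pencil as pc

# Hypothetical invocation: convert the Fortran-binary run in the current
# simulation directory to the h5 layout, including slices and averages.
pc.io.sim2h5(
    lremove_deprecated_vids=True,  # delete old per-proc slice files
    laver=True,                    # also convert averages data
    quiet=False,
)
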
Code example #5
    def read(
        self,
        var_file="",
        datadir="data",
        proc=-1,
        ivar=-1,
        quiet=True,
        trimall=False,
        magic=None,
        sim=None,
        precision="d",
        lpersist=False,
        dtype=np.float64,
    ):
        """
        read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trimall=False, magic=None, sim=None, precision='d',
             lpersist=False, dtype=np.float64)

        Read VAR files from Pencil Code. If proc < 0, then load all data
        and assemble, otherwise load VAR file from specified processor.

        The file format written by output() (and used, e.g. in var.dat)
        consists of the following Fortran records:
        1. data(mx, my, mz, nvar)
        2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
        Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
        for one vector field, or 8 for var.dat in the case of MHD with entropy.
        Note that deltay(1) is present only if lshear is on, so the run
        parameters must be known in order to parse the record.


        Parameters
        ----------
         var_file : string
             Name of the VAR file.
             If not specified, use var.dat (which is the latest snapshot of the fields).

         datadir : string
             Directory where the data is stored.

         proc : int
             Processor to be read. If -1 read all and assemble to one array.

         ivar : int
             Index of the VAR file, if var_file is not specified.

         quiet : bool
             Flag for switching off output.

         trimall : bool
             Trim the data cube to exclude ghost zones.

         magic : list of string
             Quantities to be computed from the data, e.g. 'bb' for
             B = curl(A) or 'vort' for the vorticity.

         sim : pencil code simulation object
             Contains information about the local simulation.

         precision : string
             Float 'f', double 'd' or half precision 'half'.

         lpersist : bool
             Read the persistent variables if they exist.

        Returns
        -------
        DataCube
            Instance of the pencil.read.var.DataCube class.
            All of the computed fields are imported as class members.

        Examples
        --------
        Read the latest var.dat file and print the shape of the uu array:
        >>> var = pc.read.var()
        >>> print(var.uu.shape)

        Read the VAR2 file, compute the magnetic field B = curl(A),
        the vorticity omega = curl(u) and remove the ghost zones:
        >>> var = pc.read.var(var_file='VAR2', magic=['bb', 'vort'], trimall=True)
        >>> print(var.bb.shape)
        """

        import os
        from scipy.io import FortranFile
        from pencil.math.derivatives import curl, curl2
        from pencil import read
        from pencil.sim import __Simulation__

        def persist(self, infile=None, precision="d", quiet=quiet):
            """An open Fortran file potentially containing persistent variables appended
            to the f array and grid data are read from the first proc data

            Record types provide the labels and id record for the peristent
            variables in the depricated fortran binary format
            """
            record_types = {}
            for key in read.record_types.keys():
                if read.record_types[key][1] == "d":
                    record_types[key] = (read.record_types[key][0], precision)
                else:
                    record_types[key] = read.record_types[key]

            try:
                tmp_id = infile.read_record("h")
            except Exception:
                # file contains no persistent-variable block
                return -1
            block_id = 0
            # Scan id/value record pairs until the end sentinel id 2000
            # is read; the range bound caps the scan on malformed files.
            for _ in range(2000):
                tmp_id = infile.read_record("h")
                block_id = tmp_id[0]
                if block_id == 2000:
                    break
                for key in record_types.keys():
                    if record_types[key][0] == block_id:
                        tmp_val = infile.read_record(record_types[key][1])
                        self.__setattr__(key, tmp_val[0])
                        if not quiet:
                            print(key, record_types[key][0],
                                  record_types[key][1], tmp_val)
            return self

        dim = None
        param = None
        index = None

        if isinstance(sim, __Simulation__):
            datadir = os.path.expanduser(sim.datadir)
            dim = sim.dim
            param = read.param(datadir=sim.datadir,
                               quiet=True,
                               conflicts_quiet=True)
            index = read.index(datadir=sim.datadir)
        else:
            datadir = os.path.expanduser(datadir)
            if dim is None:
                if var_file[0:2].lower() == "og":
                    dim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == "VARd":
                        dim = read.dim(datadir, proc, down=True)
                    else:
                        dim = read.dim(datadir, proc)
            if param is None:
                param = read.param(datadir=datadir,
                                   quiet=quiet,
                                   conflicts_quiet=True)
            if index is None:
                index = read.index(datadir=datadir)

        if param.lwrite_aux:
            total_vars = dim.mvar + dim.maux
        else:
            total_vars = dim.mvar

        if os.path.exists(os.path.join(datadir, "grid.h5")):
            #
            #  Read HDF5 files.
            #
            import h5py

            run2D = param.lwrite_2d

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            if not var_file:
                if ivar < 0:
                    var_file = "var.h5"
                else:
                    var_file = "VAR" + str(ivar) + ".h5"

            file_name = os.path.join(datadir, "allprocs", var_file)
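            # Layout of var.h5: one dataset per variable under data/<key>,
            # grid arrays and spacings under grid/, the scalar time, and
            # persistent values under persist/.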
            with h5py.File(file_name, "r") as tmp:
                for key in tmp["data"].keys():
                    self.f[index.__getattribute__(key) - 1, :] = dtype(
                        tmp["data/" + key][:])
                t = (tmp["time"][()]).astype(precision)
                x = (tmp["grid/x"][()]).astype(precision)
                y = (tmp["grid/y"][()]).astype(precision)
                z = (tmp["grid/z"][()]).astype(precision)
                dx = (tmp["grid/dx"][()]).astype(precision)
                dy = (tmp["grid/dy"][()]).astype(precision)
                dz = (tmp["grid/dz"][()]).astype(precision)
                if param.lshear:
                    deltay = tmp["persist/shear_delta_y"][0].astype(precision)
                if lpersist:
                    for key in tmp["persist"].keys():
                        self.__setattr__(
                            key, (tmp["persist"][key][0]).astype(precision))
        else:
            #
            #  Read scattered Fortran binary files.
            #
            run2D = param.lwrite_2d

            if dim.precision == "D":
                read_precision = "d"
            else:
                read_precision = "f"

            if not var_file:
                if ivar < 0:
                    var_file = "var.dat"
                else:
                    var_file = "VAR" + str(ivar)

            if proc < 0:
                proc_dirs = self.__natural_sort(
                    filter(lambda s: s.startswith("proc"),
                           os.listdir(datadir)))
                if "proc_bounds.dat" in proc_dirs:
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ["allprocs"]
            #                else:
            #                    proc_dirs = proc_dirs[::dim.nprocx*dim.nprocy]
            else:
                proc_dirs = ["proc" + str(proc)]

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            x = np.zeros(dim.mx, dtype=precision)
            y = np.zeros(dim.my, dtype=precision)
            z = np.zeros(dim.mz, dtype=precision)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    if var_file[0:2].lower() == "og":
                        procdim = read.ogdim(datadir, proc)
                    else:
                        if var_file[0:4] == "VARd":
                            procdim = read.dim(datadir, proc, down=True)
                        else:
                            procdim = read.dim(datadir, proc)
                    if not quiet:
                        print("Reading data from processor" +
                              " {0} of {1} ...".format(proc, len(proc_dirs)))

                else:
                    # A collective IO strategy is being used
                    procdim = dim
                #                else:
                #                    procdim.mx = dim.mx
                #                    procdim.my = dim.my
                #                    procdim.nx = dim.nx
                #                    procdim.ny = dim.ny
                #                    procdim.ipx = dim.ipx
                #                    procdim.ipy = dim.ipy

                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the data.
                file_name = os.path.join(datadir, directory, var_file)
                infile = FortranFile(file_name)
                # Read the raw f-array once, then reshape according to
                # the dimensionality of the run.
                f_loc = dtype(infile.read_record(dtype=read_precision))
                if not run2D:
                    f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
                elif dim.ny == 1:
                    f_loc = f_loc.reshape((-1, mzloc, mxloc))
                else:
                    f_loc = f_loc.reshape((-1, myloc, mxloc))
                raw_etc = infile.read_record(dtype=read_precision)
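                # Layout of the trailing record (cf. the docstring):
                #   t, x(mx), y(my), z(mz), dx, dy, dz[, deltay if lshear]
                # so dx, dy, dz are addressed from the end below, shifted
                # by one slot when deltay is appended.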
                if lpersist:
                    persist(self,
                            infile=infile,
                            precision=read_precision,
                            quiet=quiet)
                infile.close()

                t = raw_etc[0]
                x_loc = raw_etc[1:mxloc + 1]
                y_loc = raw_etc[mxloc + 1:mxloc + myloc + 1]
                z_loc = raw_etc[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
                if param.lshear:
                    shear_offset = 1
                    deltay = raw_etc[-1]
                else:
                    shear_offset = 0

                dx = raw_etc[-3 - shear_offset]
                dy = raw_etc[-2 - shear_offset]
                dz = raw_etc[-1 - shear_offset]

                if len(proc_dirs) > 1:
                    # Calculate where the local processor will go in
                    # the global array.
                    #
                    # Don't overwrite ghost zones of processor to the
                    # left (and accordingly in y and z direction -- makes
                    # a difference on the diagonals)
                    #
                    # Recall that in NumPy, slicing is NON-INCLUSIVE on
                    # the right end, i.e. x[0:4] slices all of a 4-element
                    # array instead of raising an error as in IDL.

                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0xloc = 0
                        i1xloc = procdim.mx
                    else:
                        i0x = procdim.ipx * procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0xloc = procdim.nghostx
                        i1xloc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0yloc = 0
                        i1yloc = procdim.my
                    else:
                        i0y = procdim.ipy * procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0yloc = procdim.nghosty
                        i1yloc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z + procdim.mz
                        i0zloc = 0
                        i1zloc = procdim.mz
                    else:
                        i0z = procdim.ipz * procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0zloc = procdim.nghostz
                        i1zloc = procdim.mz

                    x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                    y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                    z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                    if not run2D:
                        self.f[:, i0z:i1z, i0y:i1y,
                               i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                i0yloc:i1yloc, i0xloc:i1xloc]
                    else:
                        if dim.ny == 1:
                            self.f[:, i0z:i1z,
                                   i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                    i0xloc:i1xloc]
                        else:
                            # nz == 1 here, so f is (nvar, my, mx); index
                            # the variable axis with ':' rather than i0z.
                            self.f[:, i0y:i1y,
                                   i0x:i1x] = f_loc[:, i0yloc:i1yloc,
                                                    i0xloc:i1xloc]
                else:
                    self.f = f_loc
                    x = x_loc
                    y = y_loc
                    z = z_loc

        if magic is not None:
            if not np.all(param.lequidist):
                raise NotImplementedError(
                    "Magic functions are only implemented for equidistant grids."
                )
            if "bb" in magic:
                # Compute the magnetic field before doing trimall.
                aa = self.f[index.ax - 1:index.az, ...]
                self.bb = dtype(
                    curl(
                        aa,
                        dx,
                        dy,
                        dz,
                        x=x,
                        y=y,
                        run2D=run2D,
                        coordinate_system=param.coord_system,
                    ))
                if trimall:
                    self.bb = self.bb[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if "jj" in magic:
                # Compute the electric current field before doing trimall.
                aa = self.f[index.ax - 1:index.az, ...]
                self.jj = dtype(
                    curl2(aa,
                          dx,
                          dy,
                          dz,
                          x=x,
                          y=y,
                          coordinate_system=param.coord_system))
                if trimall:
                    self.jj = self.jj[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if "vort" in magic:
                # Compute the vorticity field before doing trimall.
                uu = self.f[index.ux - 1:index.uz, ...]
                self.vort = dtype(
                    curl(
                        uu,
                        dx,
                        dy,
                        dz,
                        x=x,
                        y=y,
                        run2D=run2D,
                        coordinate_system=param.coord_system,
                    ))
                if trimall:
                    if run2D:
                        if dim.nz == 1:
                            self.vort = self.vort[:, dim.m1:dim.m2 + 1,
                                                  dim.l1:dim.l2 + 1]
                        else:
                            self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                                  dim.l1:dim.l2 + 1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                              dim.m1:dim.m2 + 1,
                                              dim.l1:dim.l2 + 1]

        # Trim the ghost zones of the global f-array if asked.
        if trimall:
            self.x = x[dim.l1:dim.l2 + 1]
            self.y = y[dim.m1:dim.m2 + 1]
            self.z = z[dim.n1:dim.n2 + 1]
            if not run2D:
                self.f = self.f[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                dim.l1:dim.l2 + 1]
            else:
                if dim.ny == 1:
                    self.f = self.f[:, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1]
                else:
                    self.f = self.f[:, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.l1 = dim.l1
            self.l2 = dim.l2 + 1
            self.m1 = dim.m1
            self.m2 = dim.m2 + 1
            self.n1 = dim.n1
            self.n2 = dim.n2 + 1

        # Assign an attribute to self for each variable defined in
        # 'data/index.pro' so that e.g. self.ux is the x-velocity
        aatest = []
        uutest = []
        for key in index.__dict__.keys():
            if "aatest" in key:
                aatest.append(key)
            if "uutest" in key:
                uutest.append(key)
            if (key != "global_gg" and key != "keys" and "aatest" not in key
                    and "uutest" not in key):
                value = index.__dict__[key]
                setattr(self, key, self.f[value - 1, ...])
        # Special treatment for vector quantities.
        if hasattr(index, "uu"):
            self.uu = self.f[index.ux - 1:index.uz, ...]
        if hasattr(index, "aa"):
            self.aa = self.f[index.ax - 1:index.az, ...]
        if hasattr(index, "uu_sph"):
            self.uu_sph = self.f[index.uu_sphx - 1:index.uu_sphz, ...]
        if hasattr(index, "bb_sph"):
            self.bb_sph = self.f[index.bb_sphx - 1:index.bb_sphz, ...]
        # Special treatment for test method vector quantities.
        # Note: indices run 1, 2, 3, ..., 0; the last vector may be the
        # zero field/flow.
        if hasattr(index, "aatest1"):
            naatest = int(len(aatest) / 3)
            for j in range(0, naatest):
                key = "aatest" + str(np.mod(j + 1, naatest))
                value = index.__dict__["aatest1"] + 3 * j
                setattr(self, key, self.f[value - 1:value + 2, ...])
        if hasattr(index, "uutest1"):
            nuutest = int(len(uutest) / 3)
            for j in range(0, nuutest):
                key = "uutest" + str(np.mod(j + 1, nuutest))
                value = index.__dict__["uutest"] + 3 * j
                setattr(self, key, self.f[value - 1:value + 2, ...])

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        if param.lshear:
            self.deltay = deltay

        # Do the rest of magic after the trimall (i.e. no additional curl.)
        self.magic = magic
        if self.magic is not None:
            self.magic_attributes(param, dtype=dtype)
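
A short usage sketch for the reader above, following the docstring examples (pc.read.var is the documented entry point; the file names and magic values are illustrative):

import pencil as pc

# Read one processor's snapshot, including persistent variables.
var = pc.read.var(var_file="VAR2", proc=0, lpersist=True, quiet=False)
print(var.t, var.f.shape)  # snapshot time and raw f-array shape

# Read the latest var.dat fully assembled, trim the ghost zones, and
# compute the magnetic field and vorticity from the stored fields.
var = pc.read.var(magic=["bb", "vort"], trimall=True)
print(var.bb.shape, var.vort.shape)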