Example #1
    def __read_phiaver(self,
                       datadir,
                       variables,
                       aver_file_name,
                       n_vars,
                       var_index,
                       iter_list,
                       precision='f',
                       l_h5=False):
        """
        Read the PHIAVG file
        Return the time, cylindrical r and z and raw data.
        """

        import os
        import numpy as np
        from scipy.io import FortranFile
        from pencil import read

        # Read the data
        if l_h5:
            # Reading phi averages from HDF5 is not yet implemented.
            raise NotImplementedError(
                '__read_phiaver: HDF5 phi averages are not implemented.')
        else:
            dim = read.dim(datadir)
            if dim.precision == 'S':
                read_precision = np.float32
            if dim.precision == 'D':
                read_precision = np.float64

            # Read the records.
            file_id = FortranFile(os.path.join(datadir, aver_file_name))

            data1 = file_id.read_record(dtype='i4')
            nr_phiavg = data1[0]
            nz_phiavg = data1[1]
            nvars = data1[2]
            nprocz = data1[3]

            data2 = file_id.read_record(dtype=read_precision).astype(precision)
            t = data2[0]
            r_cyl = data2[1:nr_phiavg + 1]
            z_cyl = data2[nr_phiavg + 1:nr_phiavg + nz_phiavg + 1]

            data3 = file_id.read_record(dtype=read_precision).astype(precision)
            raw_data = data3.reshape(nvars, nz_phiavg, nr_phiavg)

            return t, r_cyl, z_cyl, raw_data
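
Since __read_phiaver is a private helper, the quickest sanity check on its return values is to plot the first variable over the (r, z) plane. The following is a minimal sketch, assuming t, r_cyl, z_cyl and raw_data hold the values returned above and that matplotlib is installed.

    # Minimal sketch: raw_data has shape [nvars, nz_phiavg, nr_phiavg].
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(r_cyl, z_cyl, raw_data[0], shading='auto')
    ax.set_xlabel('cylindrical r')
    ax.set_ylabel('z')
    ax.set_title('phi average of variable 0 at t = {0:.3g}'.format(float(t)))
    fig.colorbar(mesh)
    plt.show()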
Example #2
    def read(self, var_name, datadir="data", dim=None, nfield=1):
        """
        read(var_name, datadir='data', dim=None, nfield=1)

        Read vertical profiles written in data/proc*/zprof_varname.dat.
        Returns a ZProfile object with z and profiles(z).

        Parameters
        ----------
        var_name : string
          Name of the zprof var file.

        datadir : string
          Directory where the data is stored.

        dim : obj
          Dimension object.

        nfield : int
          Number of fields to be read.
        """

        import os
        import numpy as np
        from pencil import read

        if not dim:
            dim = read.dim()

        nz = int(dim.nzgrid / dim.nprocz)
        self.z = np.zeros(nz * dim.nprocz, dtype=np.float32)
        if nfield > 1:
            self.prof = np.zeros((nfield, dim.nzgrid), dtype=np.float32)
        else:
            self.prof = np.zeros(dim.nzgrid, dtype=np.float32)

        # Loop over all processors and records in file.
        izcount = 0
        for iprocz in range(0, dim.nprocz):
            proc_name = "proc{0}".format(iprocz)
            file_name = os.path.join(datadir, proc_name,
                                     "zprof_" + var_name + ".dat")
            fd = open(file_name, "r")

            #  When reading a zprof_once_X file, the first dim.nghostz gridpoints are
            #  not saved.
            if var_name.find("once") != -1:
                for i in range(dim.nghostz):
                    line = fd.readline()
            for i in range(nz):
                line = fd.readline()
                data = np.asarray(line.split()).astype(np.float32)
                self.z[izcount] = data[0]
                if nfield > 1:
                    for j in range(nfield):
                        self.prof[j, izcount] = data[j + 1]
                else:
                    self.prof[izcount] = data[1]
                izcount = izcount + 1
            fd.close()
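
In practice this reader is reached through a wrapper; the sketch below assumes it is exposed as pencil.read.zprof (wrapper name assumed) and uses a hypothetical profile name 'uz'.

    # Hedged usage sketch: the wrapper name and the profile name 'uz'
    # are assumptions, not confirmed by the code above.
    import matplotlib.pyplot as plt
    import pencil as pc

    zp = pc.read.zprof('uz', datadir='data', nfield=1)
    plt.plot(zp.z, zp.prof)
    plt.xlabel('z')
    plt.ylabel('zprof_uz')
    plt.show()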
Example #3
    def read(self, datadir='data', param=None, dim=None):
        """
        Read Pencil Code index data from index.pro.

        call signature:

        read(self, datadir='data', param=None, dim=None)

        Keyword arguments:

        *datadir*:
          Directory where the data is stored.

        *param*
          Parameter object.

        *dim*
          Dimension object.
        """

        import os
        import re
        import numpy as np
        from pencil import read

        if param is None:
            param = read.param(datadir=datadir, quiet=True)
        if dim is None:
            dim = read.dim(datadir=datadir)

        if param.lwrite_aux:
            totalvars = dim.mvar + dim.maux
        else:
            totalvars = dim.mvar

        index_file = open(os.path.join(datadir, 'index.pro'))
        ntestfield, ntestflow, ntestlnrho, ntestscalar = 0, 0, 0, 0
        for line in index_file.readlines():
            clean = line.strip()
            name = clean.split('=')[0].strip().replace('[',
                                                       '').replace(']', '')
            if clean.split('=')[1].strip().startswith('intarr(370)'):
                continue
            try:
                val = int(clean.split('=')[1].strip())
            except ValueError:
                # Lines like 'ivar=indgen(n)+m' expand to a range of indices.
                val = np.arange(int(re.search(r"\(([0-9]+)\)", clean).group(1))) + \
                      int(clean.split('=')[1].strip().split('+')[1])

            if np.all(val != 0) and np.all(val <= totalvars) \
                and not name.startswith('i_') and name.startswith('i'):
                name = name.lstrip('i')
                if name == 'lnTT' and param.ltemperature_nolog:
                    name = 'tt'
                if name == 'aatest':
                    iaatest = val
                if name == 'uutest':
                    iuutest = val
                if name == 'hhtest':
                    ihhtest = val
                if name == 'cctest':
                    icctest = val
                setattr(self, name, val)

            elif name == 'ntestfield':
                ntestfield = val
            elif name == 'ntestflow':
                ntestflow = val
            elif name == 'ntestlnrho':
                ntestlnrho = val
            elif name == 'ntestscalar':
                ntestscalar = val
        if ntestfield > 0:
            self.__delattr__('aatest')
            for i in range(1, ntestfield + 1):
                setattr(self, 'aatest' + str(i), iaatest - 1 + i)
        if ntestflow > 0:
            self.__delattr__('uutest')
            for i in range(1, ntestflow + 1):
                setattr(self, 'uutest' + str(i), iuutest - 1 + i)
        if ntestlnrho > 0:
            self.__delattr__('hhtest')
            for i in range(1, ntestlnrho + 1):
                setattr(self, 'hhtest' + str(i), ihhtest - 1 + i)
        if ntestscalar > 0:
            self.__delattr__('cctest')
            for i in range(1, ntestscalar + 1):
                setattr(self, 'cctest' + str(i), icctest - 1 + i)
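
To make the parsing rules concrete, here is a short sketch of the kind of index.pro content the loop above handles and how the resulting attributes are accessed; the variable names are illustrative.

    # index.pro typically contains IDL-style assignments such as:
    #
    #     iux=1
    #     iuy=2
    #     iuz=3
    #     ilnrho=4
    #
    # After read() strips the leading 'i', each index becomes an attribute:
    import pencil as pc

    index = pc.read.index(datadir='data')
    print(index.ux, index.uy, index.uz, index.lnrho)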
Example #4
def zav2h5(
    folder='.',
    dataset='',
    filename='emftensors.h5',
    timereducer='mean',
    hdf5dir='data/',
    l_correction=True,
    t_correction=8972.,
    rmfzeros=4,
    rmbzeros=2,
    dgroup='emftensor',
):
    """
    If large dataset MPI may be required.
    Loads Averages object and applies tensors calculation and reforms for
    efficient writing to hdf5 for mean field module simulations.
    MPI call needs to be improved to avoid MemoryError for large files
    with read.aver(plane_list=['z'])
    timereducers needs to be expanded to include various smoothing options
    """
    import numpy as np
    from pencil import read
    from pencil.read import aver
    from pencil.export import create_h5, fvars, create_aver_sph
    from pencil.calc import tensors_sph
    import h5py
    import copy

    timereducers = {
        'mean':
        lambda x, args: np.mean(x, axis=-3, keepdims=True),
        #np.std(x,axis=-3)),
        'mean_last':
        lambda x, args: np.mean(np.take(
            x, np.arange(-int(args[0]), 0, 1), axis=-3),
                                axis=-3,
                                keepdims=True),
        'none':
        lambda x, args: x
    }
    if timereducer not in timereducers:
        raise ValueError(
            'timereducer "{}" undefined in timereducers'.format(timereducer) +
            ' options: {}'.format(timereducers.keys()))
    if len(dataset) == 0:
        dataset = timereducer
    with open('zaver.in', 'r') as f:
        zavers = f.read().splitlines()
    """ Find out if the calculation is parallel and distribute the arrays
        according to y-index and ipz=0 processor layout
    """
    try:
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # rank of processor on which this script runs
        size = comm.Get_size()  # number of processors

        l_mpi = True
        l_mpi = l_mpi and (size != 1)
    except ImportError:
        l_mpi = False
        rank = 0
        size = 1
        comm = None
    dim = read.dim()
    nx, nny = dim.nx, dim.ny
    ayindex = np.arange(nny)
    if l_mpi:
        y_chunks = np.array_split(ayindex, size, axis=0)
        yindex = y_chunks[rank]
        ny = yindex.size
    else:
        yindex = ayindex  # vector 0 ... nygrid-1
        ny = nny

    ncpus = dim.nprocx * dim.nprocy
    aprocs = np.arange(ncpus)  # vector 0 ... nprocx*nprocy-1
    if np.mod(ncpus, size) > 0:
        raise ValueError(
            'the number of processes must divide {} cpus'.format(ncpus))
    if l_mpi:
        if size > aprocs.size:
            nsz = size // aprocs.size
            tmproc = aprocs
            for ii in range(1, nsz):
                tmproc = np.append(tmproc, aprocs)
            aprocs = np.sort(tmproc)
        proc_chunks = np.array_split(aprocs, size, axis=0)
        proc = proc_chunks[rank]
    else:
        proc = aprocs
    """Set up hdf5 file and create datasets in which to save the tensors
    """
    lskip_zeros = rmfzeros + rmbzeros > 0
    if rank == 0:  # if root processor
        grid = read.grid(trim=True, quiet=True)  # read grid
        zav = read.aver(proc=0,
                        plane_list=['z'])  # read z averages of PC run root proc
        tensor = tensors_sph(  # decompose into individual effect tensors
            zav,
            proc=proc[0],
            rank=0,
            lskip_zeros=lskip_zeros,
            iy=[
                int(ny / 2 / dim.nprocy),
            ],
            timereducer=timereducers[timereducer],
            #trargs=trargs,
            rmfzeros=rmfzeros,
            rmbzeros=rmbzeros,
            l_correction=l_correction,
            t_correction=t_correction,
            dim=dim,
            #tindex=tindex
        )
        if 'mean' in dataset:
            nt = 1
        else:
            nt = tensor.t.size
        create_aver_sph(hdf5dir + filename,
                        dataset,
                        fvars, (1, ny, nx, nt), (0, grid.y, grid.x, tensor.t),
                        hdf5dir=hdf5dir,
                        dgroup=dgroup)
    if l_mpi:
        imask = comm.bcast(tensor.imask if rank == 0 else None, root=0)
    else:
        imask = tensor.imask

    import os

    if os.path.exists(os.path.join(folder, 'averages/z.h5')):
        zav = aver(plane_list=['z'])  # read all averages
        tensor_buf = tensors_sph(  # calculate tensors
            aver=zav,
            rank=rank,
            lskip_zeros=lskip_zeros,
            timereducer=timereducers[timereducer],
            #trargs=trargs,
            rmfzeros=rmfzeros,
            rmbzeros=rmbzeros,
            l_correction=l_correction,
            t_correction=t_correction,
            dim=dim,
            #tindex=tindex,
            imask=imask)
    else:
        yndx_tmp = np.array_split(yindex, dim.nprocy)
        # list of vectors ipy*ny/nprocy ... (ipy+1)*ny/nprocy - 1, ipy=0,nprocy-1

        for ipy in range(dim.nprocy):  # over all y processors of the PC run
            for ipx in range(dim.nprocx):  # over all x processors of the PC run

                iproc = dim.nprocx * ipy + ipx  # proc rank of the PC run (0 ... nprocx*nprocy-1)
                yndx = yndx_tmp[ipy] - ipy * int(dim.nygrid / dim.nprocy)

                zav = aver(proc=iproc,
                           plane_list=['z'])  # read averages from proc iproc

                print('calculating tensors on proc {0} rank {1}'.format(
                    iproc, rank))
                """
                if iproc==1:             # as there is corrupted data on proc 1
                    with open('zaver.in', 'r') as f:
                        zavers = f.read().splitlines()
                    for zaver in  zavers:
                        zav.z.__setattr__(zaver,np.insert(
                                zav.z.__getattribute__(zaver),3766,
                                0.5*(zav.z.__getattribute__(zaver)[3766]+
                                zav.z.__getattribute__(zaver)[3767]),axis=0))
                        zav.t=np.insert(zav.t,3766,0.5*(zav.t[3766]+zav.t[3767]),axis=0)
                """
                tensor_buf = tensors_sph(  # calculate tensors
                    aver=zav,
                    proc=iproc,
                    rank=rank,
                    lskip_zeros=lskip_zeros,
                    iy=yndx,
                    timereducer=timereducers[timereducer],
                    #trargs=trargs,
                    rmfzeros=rmfzeros,
                    rmbzeros=rmbzeros,
                    l_correction=l_correction,
                    t_correction=t_correction,
                    dim=dim,
                    #tindex=tindex,
                    imask=imask)
                if ipx == 0:
                    tensor = copy.deepcopy(tensor_buf)
                else:
                    for field, comp in fvars:
                        setattr(
                            tensor, field,
                            np.concatenate(
                                (tensor.__getattribute__(field),
                                 tensor_buf.__getattribute__(field)),
                                axis=len(comp) + 2))

        if l_mpi:
            comm.barrier()
            ds = h5py.File(hdf5dir + filename, 'a', driver='mpio', comm=comm)
        else:
            ds = h5py.File(hdf5dir + filename, 'a')  # open HDF5 file

        for field, comp in fvars:

            print('writing {0} from rank {1} for proc {2}'.format(
                field, rank, iproc))

            dsname = '{0}/{1}/{2}'.format(dgroup, field, dataset)
            if len(comp) == 1:
                ds[dsname][:, :, yndx_tmp[ipy], :] = \
                    tensor.__getattribute__(field)
            elif len(comp) == 2:
                ds[dsname][:, :, :, yndx_tmp[ipy], :] = \
                    tensor.__getattribute__(field)
            else:
                ds[dsname][:, :, :, :, yndx_tmp[ipy], :] = \
                    tensor.__getattribute__(field)
        ds.close()
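
For large runs the function above is typically launched under MPI. The driver below is a sketch, assuming zav2h5 is importable from pencil.export (import path assumed) and that mpi4py plus a parallel-enabled h5py build are installed.

# driver.py -- run with, e.g.: mpiexec -n 4 python driver.py
from pencil.export import zav2h5  # import path assumed

zav2h5(folder='.',
       filename='emftensors.h5',
       timereducer='mean',
       rmfzeros=4,
       rmbzeros=2)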
Example #5
    def update(self, hard=False, quiet=True):
        """Update simulation object:
            if not read in:
                - read param.nml
                - read grid and ghost grid

            Set hard=True to force update.
        """
        from os.path import exists
        from os.path import join
        from pencil.read import param, grid, dim

        REEXPORT = False

        if hard:
            self.param = False
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if self.param is False:
            try:
                if exists(join(self.datadir, 'param.nml')):
                    print('~ Reading param.nml.. ')
                    param = param(quiet=quiet, datadir=self.datadir)
                    self.param = {}
                    # read params into Simulation object
                    for key in dir(param):
                        if key.startswith('_') or key == 'read':
                            continue
                        if type(getattr(param, key)) in [bool, list, float, int, str]:
                            self.param[key] = getattr(param, key)
                        else:
                            try:
                                # allow for nested param objects
                                self.param[key] = {}
                                for subkey in dir(getattr(param, key)):
                                    if subkey.startswith('_') or subkey == 'read':
                                        continue
                                    subval = getattr(getattr(param, key), subkey)
                                    if type(subval) in [bool, list, float, int, str]:
                                        self.param[key][subkey] = subval
                            except Exception:
                                # not a nested param object
                                continue
                    REEXPORT = True
                else:
                    if not quiet:
                        print('? WARNING: for ' + self.path +
                              '\n? Simulation has not run yet:' +
                              ' no param.nml found!')
                    REEXPORT = True
            except Exception:
                print('! ERROR: while reading param.nml for ' + self.path)
                self.param = False
                REEXPORT = True

        if self.param is not False and (self.grid is False
                                        or self.ghost_grid is False):
            # read grid only if param is not False
            try:
                print('~ Reading grid.. ')
                self.grid = grid(datadir=self.datadir, trim=True, quiet=True)
                print('~ Reading ghost_grid.. ')
                self.ghost_grid = grid(datadir=self.datadir,
                                       trim=False,
                                       quiet=True)
                print('~ Reading dim.. ')
                self.dim = dim(datadir=self.datadir)
                if not quiet:
                    print('# Updating grid and ghost_grid successful')
                REEXPORT = True
                # adding lx, dx etc to params
                self.param['Lx'] = self.grid.Lx
                self.param['Ly'] = self.grid.Ly
                self.param['Lz'] = self.grid.Lz
                self.param['lx'] = self.grid.Lx
                self.param['ly'] = self.grid.Ly
                self.param['lz'] = self.grid.Lz
                self.param['dx'] = self.grid.dx
                self.param['dy'] = self.grid.dy
                self.param['dz'] = self.grid.dz
            except Exception:
                if not quiet:
                    print(
                        '? WARNING: Updating grid and ghost_grid ' +
                        'was not successful, since the run has not yet started.')
                if self.started() or (not quiet):
                    print("? WARNING: Couldn't load grid for " + self.path)
                self.grid = False
                self.ghost_grid = False
                self.dim = False
                REEXPORT = True
        elif self.param is False:
            if not quiet:
                print('? WARNING: Updating grid and ghost_grid ' +
                      'was not successful, since the run has not yet started.')
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if REEXPORT:
            self.export()
        return self
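
A short usage sketch follows; the accessor pc.get_sim is an assumption about how the Simulation object is obtained.

    # Hedged usage sketch (pc.get_sim is assumed to return a Simulation):
    import pencil as pc

    sim = pc.get_sim('.')
    sim.update(hard=True)   # force re-reading param.nml, grid and dim
    print(sim.param['dx'], sim.grid.Lx)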
Example #6
    def __read_1d_aver(self,
                       plane,
                       datadir,
                       variables,
                       aver_file_name,
                       n_vars,
                       var_index,
                       iter_list,
                       proc,
                       l_h5=False,
                       precision='f'):
        """
        Read the yaverages.dat, zaverages.dat.
        Return the raw data and the time array.
        """

        import os
        import sys
        import numpy as np
        from scipy.io import FortranFile
        from pencil import read

        # Read the data
        if l_h5:
            import h5py
            file_id = os.path.join(datadir, aver_file_name)
            print(file_id)
            sys.stdout.flush()
            with h5py.File(file_id, 'r') as tmp:
                n_times = len(tmp.keys()) - 1
                # Determine the structure of the xy/xz/yz averages.
                for var in variables:
                    nu = tmp[str(0) + '/' + var.strip()].shape[0]
                    nv = tmp[str(0) + '/' + var.strip()].shape[1]
                    break
            raw_data = np.zeros([n_times, n_vars, nu, nv], dtype=precision)
            t = np.zeros(n_times, dtype=precision)
            with h5py.File(file_id, 'r') as tmp:
                for t_idx in range(0, n_times):
                    t[t_idx] = tmp[str(t_idx) + '/time'][()]
                    raw_idx = 0
                    for var in variables:
                        raw_data[t_idx, raw_idx] = \
                                    tmp[str(t_idx) + '/' +var.strip()][()]
                        raw_idx += 1
        else:
            glob_dim = read.dim(datadir)
            if plane == 'y':
                nu = glob_dim.nx
                nv = glob_dim.nz
            if plane == 'z':
                nu = glob_dim.nx
                nv = glob_dim.ny

            if proc < 0:
                offset = glob_dim.nprocx * glob_dim.nprocy
                if plane == 'z':
                    proc_list = range(offset)
                if plane == 'y':
                    proc_list = []
                    xr = range(glob_dim.nprocx)
                    for iz in range(glob_dim.nprocz):
                        proc_list.extend(xr)
                        xr = [x + offset for x in xr]
                all_procs = True
            else:
                proc_list = [proc]
                all_procs = False

            dim = read.dim(datadir, proc)
            if dim.precision == 'S':
                read_precision = np.float32
            if dim.precision == 'D':
                read_precision = np.float64

            # Prepare the raw data.
            # This will be reformatted at the end.
            raw_data = []
            for proc in proc_list:
                proc_dir = 'proc{0}'.format(proc)
                proc_dim = read.dim(datadir, proc)
                if plane == 'y':
                    pnu = proc_dim.nx
                    pnv = proc_dim.nz
                if plane == 'z':
                    pnu = proc_dim.nx
                    pnv = proc_dim.ny
                if var_index >= 0:
                    inx1 = var_index * pnu * pnv
                    inx2 = (var_index + 1) * pnu * pnv
                # Read the data.
                t = []
                proc_data = []
                try:
                    file_id = FortranFile(
                        os.path.join(datadir, proc_dir, aver_file_name))
                except:
                    # Not all proc dirs have a [yz]averages.dat.
                    print("Averages of processor {0} missing.".format(proc))
                    sys.stdout.flush()
                    break
                if iter_list:
                    if not isinstance(iter_list, list):
                        iter_list = [iter_list]
                    # split by iteration overrules split by variable
                    var_index = -1
                    iiter = 0
                    while True:
                        try:
                            if iiter in iter_list:
                                t.append(
                                    file_id.read_record(
                                        dtype=read_precision)[0])
                                proc_data.append(
                                    file_id.read_record(dtype=read_precision))
                                if iiter >= iter_list[-1]:
                                    # Finished reading.
                                    break
                                iiter += 1
                            else:
                                file_id.read_record(dtype=read_precision)[0]
                                file_id.read_record(dtype=read_precision)
                                iiter += 1
                        except:
                            # Finished reading.
                            break
                else:
                    while True:
                        try:
                            t.append(
                                file_id.read_record(dtype=read_precision)[0])
                            if var_index >= 0:
                                proc_data.append(
                                    file_id.read_record(dtype=read_precision)
                                    [inx1:inx2].astype(precision))
                            else:
                                proc_data.append(
                                    file_id.read_record(dtype=read_precision).
                                    astype(precision))
                        except:
                            # Finished reading.
                            break
                file_id.close()
                # Reshape the proc data into [len(t), pnu, pnv].
                proc_data = np.array(proc_data, dtype=precision)
                if var_index >= 0:
                    proc_data = proc_data.reshape([len(t), 1, pnv, pnu])
                else:
                    proc_data = proc_data.reshape([len(t), n_vars, pnv, pnu])

                if not all_procs:
                    return np.array(t,
                                    dtype=precision), proc_data.swapaxes(2, 3)

                # Add the proc_data (one proc) to the raw_data (all procs)
                if plane == 'y':
                    if all_procs:
                        idx_u = proc_dim.ipx * proc_dim.nx
                        idx_v = proc_dim.ipz * proc_dim.nz
                    else:
                        idx_v = 0
                        idx_u = 0
                if plane == 'z':
                    if all_procs:
                        idx_u = proc_dim.ipx * proc_dim.nx
                        idx_v = proc_dim.ipy * proc_dim.ny
                    else:
                        idx_v = 0
                        idx_u = 0

                if not isinstance(raw_data, np.ndarray):
                    #Initialize the raw_data array with correct dimensions.
                    if var_index >= 0:
                        raw_data = np.zeros([len(t), 1, nv, nu],
                                            dtype=precision)
                    else:
                        raw_data = np.zeros([len(t), n_vars, nv, nu],
                                            dtype=precision)
                raw_data[:, :, idx_v:idx_v+pnv, idx_u:idx_u+pnu] = \
                                                                proc_data.copy()

            t = np.array(t, dtype=precision)
            raw_data = np.swapaxes(raw_data, 2, 3)

        return t, raw_data
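
This private reader backs the public averages interface used elsewhere in these examples. A minimal consuming sketch, assuming pc.read.aver returns an object carrying the time array and the averaged fields for the requested plane:

    import pencil as pc

    av = pc.read.aver(plane_list=['z'], datadir='data')
    print(av.t.shape)   # one entry per record
    # Each averaged field, e.g. av.z.<name>, has shape [len(t), nu, nv]
    # after the final swapaxes above.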
Example #7
def write_h5_averages(
    aver,
    file_name="xy",
    datadir="data/averages",
    nt=None,
    precision="d",
    indx=None,
    trange=None,
    quiet=True,
    append=False,
    procdim=None,
    dim=None,
    aver_by_proc=False,
    proc=-1,
    driver=None,
    comm=None,
    rank=0,
    size=1,
    overwrite=False,
    nproc=1,
):
    """
    Write an hdf5 format averages dataset given as an Averages object.
    We assume by default that a run simulation directory has already been
    constructed and start completed successfully in h5 format so that
    files dim, grid and param files are already present.
    If not the contents of these will need to be supplied as dictionaries
    along with persist if included.

    call signature:

    write_h5_averages(aver, file_name='xy', datadir='data/averages',
                   precision='d', indx=None, trange=None, quiet=True)

    Keyword arguments:

    *aver*:
      Averages object.
      Must be of shape [n_vars, n1] for averages across 'xy', 'xz' or 'yz'.
      Must be of shape [n_vars, n1, n2] for averages across 'y', 'z'.

    *file_name*:
      Name of the snapshot file to be written, e.g. 'xy', 'xz', 'yz', 'y', 'z'.

    *datadir*:
      Directory where the data is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *indx*
      Restrict iterative range to be written.

    *trange*:
      Restrict time range to be written.

    *append*
      For large binary files the data may need to be appended iteratively.

    *dim*
      Dim object required if the large binary files are supplied in chunks.
    """

    import numpy as np
    import os
    import sys
    from os.path import join, exists

    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # test if simulation directory
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
        return -1
    if not exists(datadir):
        try:
            os.mkdir(datadir)
        except FileExistsError:
            pass
    # open file for writing data
    filename = join(datadir, file_name + ".h5")
    if append:
        state = "a"
    else:
        state = "w"
    if not quiet:
        print("rank", rank, "saving " + filename)
        sys.stdout.flush()
    if file_name not in ("y", "z"):
        aver_by_proc = False
    if aver_by_proc:
        n1, n2 = None, None
        if not dim:
            dim = read.dim()
        if not procdim:
            procdim = read.dim(proc=proc)
        if file_name == "y":
            nproc = dim.nprocz
            n1 = dim.nz
            nn = procdim.nz
        if file_name == "z":
            nproc = dim.nprocy
            n1 = dim.ny
            nn = procdim.ny
        n2 = dim.nx
    # Number of iterations to record.
    if not nt:
        nt = aver.t.shape[0]
    with open_h5(filename,
                 state,
                 driver=driver,
                 comm=comm,
                 overwrite=overwrite,
                 rank=rank) as ds:
        if indx:
            if not isinstance(indx, list):
                indx = [indx]
        else:
            indx = list(range(0, nt))
        if not quiet:
            print("rank", rank, "nt", nt, "indx", indx)
            sys.stdout.flush()
        dataset_h5(
            ds,
            "last",
            status=state,
            data=(nt - 1, ),
            dtype="i",
            overwrite=overwrite,
            rank=rank,
            comm=comm,
            size=size,
        )
        for it in range(0, nt):
            group_h5(
                ds,
                str(it),
                status=state,
                delete=False,
                overwrite=overwrite,
                rank=rank,
                size=size,
            )
        for it in range(0, nt):
            dataset_h5(
                ds[str(it)],
                "time",
                status=state,
                shape=(1, ),
                dtype=precision,
                overwrite=overwrite,
                rank=rank,
                comm=comm,
                size=size,
            )
        for key in aver.__getattribute__(file_name).__dict__.keys():
            data = aver.__getattribute__(file_name).__getattribute__(key)
            if file_name == "y" or file_name == "z":
                data = np.swapaxes(data, 1, 2)
            for it in range(0, nt):
                if aver_by_proc:
                    dataset_h5(
                        ds[str(it)],
                        key,
                        status=state,
                        shape=(n1, n2),
                        dtype=precision,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
                else:
                    dataset_h5(
                        ds[str(it)],
                        key,
                        status=state,
                        shape=data[0].shape,
                        dtype=precision,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
        for it in indx:
            ds[str(it)]["time"][:] = aver.t[it - indx[0]]
        for key in aver.__getattribute__(file_name).__dict__.keys():
            # key needs to be broadcast as order of keys may vary on each process
            # causing segmentation fault
            data = aver.__getattribute__(file_name).__getattribute__(key)
            if file_name == "y" or file_name == "z":
                data = np.swapaxes(data, 1, 2)
            if not quiet:
                print("writing", key, "on rank", rank)
                sys.stdout.flush()
            for it in indx:
                if aver_by_proc:
                    ds[str(it)][key][proc * nn:(proc + 1) * nn] = data[it -
                                                                       indx[0]]
                else:
                    ds[str(it)][key][:] = data[it - indx[0]]
    if not quiet:
        print(filename + " written on rank {}".format(rank))
        sys.stdout.flush()
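
A serial usage sketch: read existing xy averages and rewrite them as HDF5. The import path of write_h5_averages is an assumption.

import pencil as pc
from pencil.io import write_h5_averages  # import path assumed

aver = pc.read.aver(plane_list=['xy'])
write_h5_averages(aver, file_name='xy', datadir='data/averages',
                  precision='d', quiet=False)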
Example #8
def write_h5_snapshot(
    snapshot,
    file_name="VAR0",
    datadir="data/allprocs",
    precision="d",
    nghost=3,
    persist=None,
    settings=None,
    param=None,
    grid=None,
    lghosts=False,
    indx=None,
    proc=None,
    ipx=None,
    ipy=None,
    ipz=None,
    procdim=None,
    unit=None,
    t=None,
    x=None,
    y=None,
    z=None,
    state="a",
    quiet=True,
    lshear=False,
    driver=None,
    comm=None,
    overwrite=False,
    rank=0,
    size=1,
):
    """
    Write a snapshot given as numpy array.
    We assume by default that a run simulation directory has already been
    constructed and start completed successfully in h5 format so that
    files dim, grid and param files are already present.
    If not the contents of these will need to be supplied as dictionaries
    along with persist if included.

    call signature:

    write_h5_snapshot(snapshot, file_name='VAR0', datadir='data/allprocs',
                   precision='d', nghost=3, persist=None, settings=None,
                   param=None, grid=None, lghosts=False, indx=None,
                   unit=None, t=None, x=None, y=None, z=None, procdim=None,
                   quiet=True, lshear=False, driver=None, comm=None)

    Keyword arguments:

    *snapshot*:
      Numpy array containing the snapshot.
      Must be of shape [nvar, nz, ny, nx] without boundaries or.
      Must be of shape [nvar, mz, my, mx] with boundaries for lghosts=True.

    *file_name*:
      Name of the snapshot file to be written, e.g. VAR0 or var.

    *datadir*:
      Directory where the data is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *persist*:
      optional dictionary of persistent variable.

    *settings*:
      optional dictionary of persistent variable.

    *param*:
      optional Param object.

    *grid*:
      optional Pencil Grid object of grid parameters.

    *nghost*:
      Number of ghost zones.

    *lghosts*:
      If True the snapshot contains the ghost zones.

    *indx*
      Index object of index for each variable in f-array

    *unit*:
      Optional dictionary of simulation units.

    *quiet*:
      Option to print output.

    *t*:
      Time of the snapshot.

    *xyz*:
      xyz arrays of the domain with ghost zones.
      This will normally be obtained from Grid object, but facility to
      redefine an alternative grid value.

    *lshear*:
      Flag for the shear.

    *driver*
      File driver for hdf5 io for use in serial or MPI parallel.

    *comm*
      MPI objects supplied if driver is 'mpio'.

    *overwrite*
      flag to replace existing h5 snapshot file.

    *rank*
      rank of process with root=0.
    """

    import numpy as np
    import sys
    from os.path import join

    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # test if simulation directory
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
        return -1
    if indx is None:
        indx = read.index()
    #
    if settings is None:
        settings = {}
        skeys = [
            "l1",
            "l2",
            "m1",
            "m2",
            "n1",
            "n2",
            "nx",
            "ny",
            "nz",
            "mx",
            "my",
            "mz",
            "nprocx",
            "nprocy",
            "nprocz",
            "maux",
            "mglobal",
            "mvar",
            "precision",
        ]
        dim = read.dim()
        for key in skeys:
            settings[key] = dim.__getattribute__(key)
        settings["precision"] = precision.encode()
        settings["nghost"] = nghost
        settings["version"] = np.int32(0)
    nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    if grid is None:
        grid = read.grid(quiet=True)
    else:
        gd_err = False
        for key in gkeys:
            if key not in grid.__dict__.keys():
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
                gd_err = True
        if gd_err:
            print("ERROR: grid incomplete")
            sys.stdout.flush()
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    if param is None:
        param = read.param(quiet=True)
        param.__setattr__("unit_mass",
                          param.unit_density * param.unit_length**3)
        param.__setattr__("unit_energy",
                          param.unit_mass * param.unit_velocity**2)
        param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
        param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
        param.unit_system = param.unit_system.encode()

    # Check whether the snapshot matches the simulation shape.
    # The expected axis order is [nvar, (m|n)z, (m|n)y, (m|n)x].
    if lghosts:
        if (snapshot.shape[0] != settings["mvar"]
                or snapshot.shape[1] != settings["mz"]
                or snapshot.shape[2] != settings["my"]
                or snapshot.shape[3] != settings["mx"]):
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions with ghosts.")
            sys.stdout.flush()
    else:
        if (snapshot.shape[0] != settings["mvar"]
                or snapshot.shape[1] != settings["nz"]
                or snapshot.shape[2] != settings["ny"]
                or snapshot.shape[3] != settings["nx"]):
            print("ERROR: snapshot shape {} ".format(snapshot.shape) +
                  "does not match simulation dimensions without ghosts.")
            sys.stdout.flush()

    # Determine the precision used and ensure snapshot has correct data_type.
    if precision == "f":
        data_type = np.float32
        snapshot = np.float32(snapshot)
    elif precision == "d":
        data_type = np.float64
        snapshot = np.float64(snapshot)
    else:
        print("ERROR: Precision {0} not understood.".format(precision) +
              " Must be either 'f' or 'd'")
        sys.stdout.flush()
        return -1

    # Check that the shape does not conflict with the proc numbers.
    if ((settings["nx"] % settings["nprocx"] > 0)
            or (settings["ny"] % settings["nprocy"] > 0)
            or (settings["nz"] % settings["nprocz"] > 0)):
        print("ERROR: Shape of the input array is not compatible with the " +
              "cpu layout. Make sure that nproci devides ni.")
        sys.stdout.flush()
        return -1

    # Check the shape of the xyz arrays, if specified, and overwrite the grid values.
    if x is not None:
        if len(x) != settings["mx"]:
            print("ERROR: x array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.x = data_type(x)
    if y is not None:
        if len(y) != settings["my"]:
            print("ERROR: y array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.y = data_type(y)
    if z is not None:
        if len(z) != settings["mz"]:
            print("ERROR: z array is incompatible with the shape of snapshot.")
            sys.stdout.flush()
            return -1
        grid.z = data_type(z)

    # Define a time.
    if t is None:
        t = data_type(0.0)

    # making use of pc_hdf5 functionality:
    if proc is not None:
        state = "a"
    else:
        state = "w"
    filename = join(datadir, file_name)
    print("write_h5_snapshot: filename =", filename)
    with open_h5(
            filename,
            state,
            driver=driver,
            comm=comm,
            overwrite=overwrite,
            rank=rank,
            size=size,
    ) as ds:
        data_grp = group_h5(
            ds,
            "data",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        if not procdim:
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                #create ghost zones if required
                if not lghosts:
                    tmp_arr = np.zeros([
                        snapshot.shape[1] + 2 * nghost,
                        snapshot.shape[2] + 2 * nghost,
                        snapshot.shape[3] + 2 * nghost,
                    ])
                    tmp_arr[settings["n1"]:settings["n2"] + 1,
                            settings["m1"]:settings["m2"] + 1,
                            settings["l1"]:settings["l2"] + 1] = np.array(
                                snapshot[indx.__getattribute__(key) - 1])
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=tmp_arr,
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
                else:
                    dataset_h5(
                        data_grp,
                        key,
                        status=state,
                        data=np.array(snapshot[indx.__getattribute__(key) -
                                               1]),
                        dtype=data_type,
                        overwrite=overwrite,
                        rank=rank,
                        comm=comm,
                        size=size,
                    )
        else:
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                dataset_h5(
                    data_grp,
                    key,
                    status=state,
                    shape=(settings["mz"], settings["my"], settings["mx"]),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                )
            # adjust indices to include ghost zones at boundaries
            l1, m1, n1 = procdim.l1, procdim.m1, procdim.n1
            if procdim.ipx == 0:
                l1 = 0
            if procdim.ipy == 0:
                m1 = 0
            if procdim.ipz == 0:
                n1 = 0
            l2, m2, n2 = procdim.l2, procdim.m2, procdim.n2
            if procdim.ipx == settings["nprocx"] - 1:
                l2 = procdim.l2 + settings["nghost"]
            if procdim.ipy == settings["nprocy"] - 1:
                m2 = procdim.m2 + settings["nghost"]
            if procdim.ipz == settings["nprocz"] - 1:
                n2 = procdim.n2 + settings["nghost"]
            nx, ny, nz = procdim.nx, procdim.ny, procdim.nz
            ipx, ipy, ipz = procdim.ipx, procdim.ipy, procdim.ipz
            for key in indx.__dict__.keys():
                if key in ["uu", "keys", "aa", "KR_Frad", "uun", "gg", "bb"]:
                    continue
                tmp_arr = np.array(snapshot[indx.__getattribute__(key) - 1])
                data_grp[key][n1 + ipz * nz:n2 + ipz * nz + 1,
                              m1 + ipy * ny:m2 + ipy * ny + 1,
                              l1 + ipx * nx:l2 + ipx * nx +
                              1, ] = tmp_arr[n1:n2 + 1, m1:m2 + 1, l1:l2 + 1]
        dataset_h5(
            ds,
            "time",
            status=state,
            data=np.array(t),
            size=size,
            dtype=data_type,
            rank=rank,
            comm=comm,
            overwrite=overwrite,
        )
        # add settings
        sets_grp = group_h5(
            ds,
            "settings",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in settings.keys():
            if "precision" in key:
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=None,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    sets_grp,
                    key,
                    status=state,
                    data=(settings[key], ),
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add grid
        grid_grp = group_h5(
            ds,
            "grid",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in gkeys:
            dataset_h5(
                grid_grp,
                key,
                status=state,
                data=(grid.__getattribute__(key)),
                dtype=data_type,
                rank=rank,
                comm=comm,
                size=size,
                overwrite=overwrite,
            )
        dataset_h5(
            grid_grp,
            "Ox",
            status=state,
            data=(param.__getattribute__("xyz0")[0], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oy",
            status=state,
            data=(param.__getattribute__("xyz0")[1], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        dataset_h5(
            grid_grp,
            "Oz",
            status=state,
            data=(param.__getattribute__("xyz0")[2], ),
            dtype=data_type,
            rank=rank,
            comm=comm,
            size=size,
            overwrite=overwrite,
        )
        # add physical units
        unit_grp = group_h5(
            ds,
            "unit",
            status=state,
            delete=False,
            overwrite=overwrite,
            rank=rank,
            size=size,
        )
        for key in ukeys:
            if "system" in key:
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=(param.__getattribute__("unit_" + key), ),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
            else:
                dataset_h5(
                    unit_grp,
                    key,
                    status=state,
                    data=param.__getattribute__("unit_" + key),
                    rank=rank,
                    comm=comm,
                    size=size,
                    overwrite=overwrite,
                )
        # add optional persistent data
        if persist is not None:
            pers_grp = group_h5(
                ds,
                "persist",
                status=state,
                size=size,
                delete=False,
                overwrite=overwrite,
                rank=rank,
            )
            for key in persist.keys():
                if not quiet:
                    print(key, type(persist[key][()]))
                    sys.stdout.flush()
                arr = np.empty(nprocs, dtype=type(persist[key][()]))
                arr[:] = persist[key][()]
                dataset_h5(
                    pers_grp,
                    key,
                    status=state,
                    data=(arr),
                    size=size,
                    dtype=data_type,
                    rank=rank,
                    comm=comm,
                    overwrite=overwrite,
                )
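
A serial usage sketch: read a var file with ghost zones and rewrite it as an HDF5 snapshot. The import path of write_h5_snapshot is an assumption.

import pencil as pc
from pencil.io import write_h5_snapshot  # import path assumed

var = pc.read.var(trimall=False)   # keep the ghost zones
write_h5_snapshot(var.f, file_name='VAR0', datadir='data/allprocs',
                  precision='d', lghosts=True, t=var.t)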
Example #9
    def read(self, datadir="data", proc=-1, quiet=False, precision="f", trim=False):
        """
        read(datadir='data', proc=-1, quiet=False, trim=False)

        Read the grid data from the pencil code simulation.
        If proc < 0, then load all data and assemble.
        Otherwise, load grid from specified processor.

        Parameters
        ----------
        datadir : string
          Directory where the data is stored.

        proc : int
          Processor to be read. If proc is -1, then read the 'global'
          grid. If proc is >=0, then read the grid.dat in the
          corresponding processor directory.

        quiet : bool
          Flag for switching off output.

        precision : string
          Single 'f', double 'd' or half precision 'half'.

        trim : bool
          Cuts off the ghost points.

        Returns
        -------
        Class containing the grid information.
        """

        import numpy as np
        import os
        from scipy.io import FortranFile
        from pencil import read

        if precision == "f":
            dtype = np.float32
        elif precision == "d":
            dtype = np.float64
        elif precision == "half":
            dtype = np.float16
        else:
            print('read grid: precision {} not understood, using "f"'.format(precision))
            dtype = np.float32

        if os.path.exists(os.path.join(datadir, "grid.h5")):
            dim = read.dim(datadir, proc)
            import h5py

            with h5py.File(os.path.join(datadir, "grid.h5"), "r") as tmp:
                x = dtype(tmp["grid"]["x"][()])
                y = dtype(tmp["grid"]["y"][()])
                z = dtype(tmp["grid"]["z"][()])
                dx_1 = dtype(tmp["grid"]["dx_1"][()])
                dy_1 = dtype(tmp["grid"]["dy_1"][()])
                dz_1 = dtype(tmp["grid"]["dz_1"][()])
                dx_tilde = dtype(tmp["grid"]["dx_tilde"][()])
                dy_tilde = dtype(tmp["grid"]["dy_tilde"][()])
                dz_tilde = dtype(tmp["grid"]["dz_tilde"][()])
                dx = dtype(tmp["grid"]["dx"][()])
                dy = dtype(tmp["grid"]["dy"][()])
                dz = dtype(tmp["grid"]["dz"][()])
                Lx = dtype(tmp["grid"]["Lx"][()])
                Ly = dtype(tmp["grid"]["Ly"][()])
                Lz = dtype(tmp["grid"]["Lz"][()])
                t = dtype(0.0)
        else:
            datadir = os.path.expanduser(datadir)
            dim = read.dim(datadir, proc)
            param = read.param(datadir=datadir, quiet=True, conflicts_quiet=True)
            if dim.precision == "D":
                read_precision = "d"
            else:
                read_precision = "f"

            if proc < 0:
                proc_dirs = list(
                    filter(
                        lambda string: string.startswith("proc"), os.listdir(datadir)
                    )
                )
                if proc_dirs.count("proc_bounds.dat") > 0:
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ["allprocs"]
            else:
                proc_dirs = ["proc" + str(proc)]

            # Define the global arrays using the requested precision.
            x = np.zeros(dim.mx, dtype=dtype)
            y = np.zeros(dim.my, dtype=dtype)
            z = np.zeros(dim.mz, dtype=dtype)
            dx_1 = np.zeros(dim.mx, dtype=dtype)
            dy_1 = np.zeros(dim.my, dtype=dtype)
            dz_1 = np.zeros(dim.mz, dtype=dtype)
            dx_tilde = np.zeros(dim.mx, dtype=dtype)
            dy_tilde = np.zeros(dim.my, dtype=dtype)
            dz_tilde = np.zeros(dim.mz, dtype=dtype)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    procdim = read.dim(datadir, proc)
                    if not quiet:
                        print(
                            "reading grid data from processor"
                            + " {0} of {1} ...".format(proc, len(proc_dirs))
                        )
                else:
                    procdim = dim
                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the grid data.
                file_name = os.path.join(datadir, directory, "grid.dat")
                infile = FortranFile(file_name, "r")
                grid_raw = infile.read_record(dtype=read_precision)
                dx, dy, dz = tuple(infile.read_record(dtype=read_precision))
                Lx, Ly, Lz = tuple(infile.read_record(dtype=read_precision))
                dx_1_raw = infile.read_record(dtype=read_precision)
                dx_tilde_raw = infile.read_record(dtype=read_precision)
                infile.close()

                # Reshape the arrays.
                t = dtype(grid_raw[0])
                x_loc = grid_raw[1 : mxloc + 1]
                y_loc = grid_raw[mxloc + 1 : mxloc + myloc + 1]
                z_loc = grid_raw[mxloc + myloc + 1 : mxloc + myloc + mzloc + 1]
                dx_1_loc = dx_1_raw[0:mxloc]
                dy_1_loc = dx_1_raw[mxloc : mxloc + myloc]
                dz_1_loc = dx_1_raw[mxloc + myloc : mxloc + myloc + mzloc]
                dx_tilde_loc = dx_tilde_raw[0:mxloc]
                dy_tilde_loc = dx_tilde_raw[mxloc : mxloc + myloc]
                dz_tilde_loc = dx_tilde_raw[mxloc + myloc : mxloc + myloc + mzloc]

                if len(proc_dirs) > 1:
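                    # Compute global (i0*) and local (i0*_loc) index ranges so
                    # that ghost zones are kept only at the domain boundaries.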
                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0x_loc = 0
                        i1x_loc = procdim.mx
                    else:
                        i0x = procdim.ipx * procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0x_loc = procdim.nghostx
                        i1x_loc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0y_loc = 0
                        i1y_loc = procdim.my
                    else:
                        i0y = procdim.ipy * procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0y_loc = procdim.nghosty
                        i1y_loc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z + procdim.mz
                        i0z_loc = 0
                        i1z_loc = procdim.mz
                    else:
                        i0z = procdim.ipz * procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0z_loc = procdim.nghostz
                        i1z_loc = procdim.mz

                    x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                    y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                    z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                    dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                    dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                    dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                    dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                    dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                    dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]

                else:
                    x = dtype(x_loc)
                    y = dtype(y_loc)
                    z = dtype(z_loc)
                    dx_1 = dtype(dx_1_loc)
                    dy_1 = dtype(dy_1_loc)
                    dz_1 = dtype(dz_1_loc)
                    dx_tilde = dtype(dx_tilde_loc)
                    dy_tilde = dtype(dy_tilde_loc)
                    dz_tilde = dtype(dz_tilde_loc)

        if trim:
            self.x = x[dim.l1 : dim.l2 + 1]
            self.y = y[dim.m1 : dim.m2 + 1]
            self.z = z[dim.n1 : dim.n2 + 1]
            self.dx_1 = dx_1[dim.l1 : dim.l2 + 1]
            self.dy_1 = dy_1[dim.m1 : dim.m2 + 1]
            self.dz_1 = dz_1[dim.n1 : dim.n2 + 1]
            self.dx_tilde = dx_tilde[dim.l1 : dim.l2 + 1]
            self.dy_tilde = dy_tilde[dim.m1 : dim.m2 + 1]
            self.dz_tilde = dz_tilde[dim.n1 : dim.n2 + 1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.dx_1 = dx_1
            self.dy_1 = dy_1
            self.dz_1 = dz_1
            self.dx_tilde = dx_tilde
            self.dy_tilde = dy_tilde
            self.dz_tilde = dz_tilde

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.Lx = Lx
        self.Ly = Ly
        self.Lz = Lz
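
A minimal usage sketch for this grid reader (assuming the usual import alias
'pc' for the pencil package and a run directory containing 'data/'); the
attribute names follow the assignments above:

    import pencil as pc

    # Read the grid, trimming the ghost zones.
    grid = pc.read.grid(datadir="data", trim=True, quiet=True)
    print(grid.x.shape, grid.dx, grid.Lx)
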
Exemple #11
0
    def read(self, datadir="data", param=None, dim=None):
        """
        read(datadir='data', param=None, dim=None)

        Read Pencil Code index data from index.pro.

        Parameters
        ----------
        datadir : string
          Directory where the data is stored.

        param : obj
          Parameter object.

        dim : obj
          Dimension object.

        Returns
        -------
        Class containing the index information.
        """

        import os
        import re
        import numpy as np
        from pencil import read

        if param is None:
            param = read.param(datadir=datadir, quiet=True)
        if dim is None:
            dim = read.dim(datadir=datadir)

        if param.lwrite_aux:
            totalvars = dim.mvar + dim.maux
        else:
            totalvars = dim.mvar

        index_file = open(os.path.join(datadir, "index.pro"))
        ntestfield, ntestflow, ntestlnrho, ntestscalar = 0, 0, 0, 0
        for line in index_file.readlines():
            clean = line.strip()
            name = clean.split("=")[0].strip().replace("[",
                                                       "").replace("]", "")
            if clean.split("=")[1].strip().startswith("intarr(370)"):
                continue
            try:
                val = int(clean.split("=")[1].strip())
            except ValueError:
                val = np.arange(int(
                    re.search(r"\(([0-9]+)\)", clean).group(1)))[0] + int(
                        clean.split("=")[1].strip().split("+")[1])

            if (val != 0 and val <= totalvars and not name.startswith("i_")
                    and name.startswith("i")):
                name = name.lstrip("i")
                if name == "lnTT" and param.ltemperature_nolog:
                    name = "tt"
                if name == "aatest":
                    iaatest = val
                if name == "uutest":
                    iuutest = val
                if name == "hhtest":
                    ihhtest = val
                if name == "cctest":
                    icctest = val
                setattr(self, name, val)

            elif name == "ntestfield":
                ntestfield = val
            elif name == "ntestflow":
                ntestflow = val
            elif name == "ntestlnrho":
                ntestlnrho = val
            elif name == "ntestscalar":
                ntestscalar = val
        if ntestfield > 0:
            self.__delattr__("aatest")
            for i in range(1, ntestfield + 1):
                setattr(self, "aatest" + str(i), iaatest - 1 + i)
        if ntestflow > 0:
            self.__delattr__("uutest")
            for i in range(1, ntestflow + 1):
                setattr(self, "uutest" + str(i), iuutest - 1 + i)
        if ntestlnrho > 0:
            self.__delattr__("hhtest")
            for i in range(1, ntestlnrho + 1):
                setattr(self, "hhtest" + str(i), ihhtest - 1 + i)
        if ntestscalar > 0:
            self.__delattr__("cctest")
            for i in range(1, ntestscalar + 1):
                setattr(self, "cctest" + str(i), icctest - 1 + i)
Exemple #12
0
    def read(self, datadir="data", file_name="", quiet=False):
        """
        read(datadir='data', file_name='', quiet=False)
    
        Read the power spectra.
    
        Parameters
        ----------
        datadir : string
            Directory where the data is stored.
    
        file_name : string
            Filename to read.
            If a filename is given, only that power spectrum is read.
            By default it reads all the power spectrum files.
    
        quiet : bool
            Flag for switching off output.
    
        Returns
        -------
        Class containing the different power spectra as attributes.
    
        Notes
        -----
        Use the attribute keys to get a list of attributes.
    
        Examples
        --------
        >>> pw = pc.read.power()
        >>> pw.keys()
        t
        kin
        krms
        hel_kin
        """

        import os
        import os.path as op
        import numpy as np
        from pencil import read
        from pencil.util import ffloat

        # Note: this aliases the top-level matplotlib package (not pyplot);
        # only plt.cbook.flatten is used below.
        import matplotlib as plt
        import re

        power_list = []
        file_list = []

        if file_name:
            print("Reading only ", file_name)
            try:
                if op.isfile(op.join(datadir, file_name)):
                    # print("read one file")
                    if file_name[:5] == "power" and file_name[-4:] == ".dat":
                        if file_name[:6] == "power_":
                            power_list.append(file_name.split(".")[0][6:])
                            print("appending", file_name.split(".")[0][6:])
                        else:
                            power_list.append(file_name.split(".")[0][5:])
                            print("appending", file_name.split(".")[0][5:])
                        file_list.append(file_name)
                else:
                    print("File does not exist, exiting")
                    return
            except IOError:
                print("File does not exist, exiting")
                return

        else:

            # Find the existing power files.

            # power_list = []
            # file_list = []
            for file_name in os.listdir(datadir):
                if file_name[:5] == "power" and file_name[-4:] == ".dat":
                    if file_name[:6] == "power_":
                        power_list.append(file_name.split(".")[0][6:])
                    else:
                        power_list.append(file_name.split(".")[0][5:])
                    file_list.append(file_name)

        # Determine the file and data structure.
        dim = read.dim(datadir=datadir)
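        # Each spectrum block holds nxgrid/2 values written 8 per line,
        # plus one leading time line.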
        block_size = np.ceil(int(dim.nxgrid / 2) / 8.0) + 1

        # Read the power spectra.
        for power_idx, file_name in enumerate(file_list):
            # Read the raw file.
            infile = open(os.path.join(datadir, file_name), "r")
            line_list = infile.readlines()
            infile.close()

            # Extract the numbers from the file strings.
            n_blocks = int(len(line_list) / block_size)

            if not quiet:
                print(file_name)

            # For the moment, exclude some incompatible files.
            # if file_name == 'powero.dat' or file_name == 'poweru.dat' or \
            if (file_name == "powero.dat" or file_name == "powerb.dat"
                    or file_name == "powera.dat"):
                continue
            elif (file_name == "powerux_xy.dat"
                  or file_name == "poweruy_xy.dat"
                  or file_name == "poweruz_xy.dat"):
                # These files have a different number of k values.

                # They also contain the k vectors themselves and possibly
                # complex spectra.
                # Get k vectors:
                nk = 0
                if "k_x" in line_list[1]:
                    nkx = int(
                        line_list[1].split()[line_list[1].split().index("k_x")
                                             + 1].split(")")[0][1:])
                    ini = 2
                    kx = []
                    for i in range(ini, int(np.ceil(nkx / 8)) + ini):
                        kx.append([float(j) for j in line_list[i].split()])
                    kx = np.array(list(plt.cbook.flatten(kx)))
                    setattr(self, "kx", kx)
                    ini = i + 1
                    nk = max(nk, nkx)

                if "k_y" in line_list[1]:
                    nky = int(
                        line_list[1].split()[line_list[1].split().index("k_y")
                                             + 1].split(")")[0][1:])
                    ky = []
                    for i in range(ini, int(np.ceil(nky / 8)) + ini):
                        ky.append([float(j) for j in line_list[i].split()])
                    ky = np.array(list(plt.cbook.flatten(ky)))
                    setattr(self, "ky", ky)
                    ini = i + 1
                    nk = max(nk, nky)

                if "k_z" in line_list[1]:
                    nkz = int(
                        line_list[1].split()[line_list[1].split().index("k_z")
                                             + 1].split(")")[0][1:])
                    kz = []
                    for i in range(ini, int(np.ceil(nkz / 8)) + ini):
                        kz.append([float(j) for j in line_list[i].split()])
                    kz = np.array(list(plt.cbook.flatten(kz)))
                    setattr(self, "kz", kz)
                    ini = i + 1
                    nk = max(nk, nkz)
                # Now read z-positions, if any
                if "z-pos" in line_list[ini]:
                    print("More than 1 z-pos")
                    nzpos = int(re.search(r"\((\d+)\)", line_list[ini])[1])
                    ini += 1
                    zpos = np.array([float(j) for j in line_list[ini].split()])
                    ini += 1
                    setattr(self, "nzpos", nzpos)
                    setattr(self, "zpos", zpos)
                else:
                    nzpos = 1
                # If there is more than one z-position, the file concatenates
                # the results for all positions, so the block length increases
                # accordingly.

                # Now read the rest of the file
                # print('ini', ini)
                line_list = line_list[ini:]
                # I think this is not needed now
                # if line_list[0].strip() == "-Infinity":
                #    line_list = line_list[1:]
                # if line_list[0][0] == "z":
                #    line_list = line_list[2:]
                time = []
                power_array = []
                # print('nk', nk)
                # The power spectrum can be complex or real, hence len 8 or 16
                linelen = len(line_list[1].strip().split())

                # if linelen == 8:
                #    print("Reading a real power spectrum")
                #    block_size = np.ceil(int(nk*nzpos) / linelen) + 1

                # elif linelen == 16:
                #    print("Reading a complex power spectrum")
                #    block_size = np.ceil(int(nk *nzpos * 2) / linelen) + 1

                block_size = np.ceil(int(nk * nzpos) / 8) + 1
                # print(f"block size {block_size}")

                n_blocks = int(len(line_list) / block_size)

                for line_idx, line in enumerate(line_list):
                    if np.mod(line_idx, block_size) == 0:
                        # print(float(line.strip()))
                        time.append(float(line.strip()))
                        # print("line_idx", line_idx)
                    else:
                        # maxi = len(line.strip().split())
                        if linelen == 8:
                            for value_string in line.strip().split():
                                power_array.append(ffloat(value_string))

                        elif linelen == 16:
                            for j in range(0, linelen, 2):
                                a = line.strip().split()[j]

                                b = line.strip().split()[j + 1]

                                power_array.append(
                                    complex(real=ffloat(a), imag=ffloat(b)))

                time = np.array(time)
                if linelen == 8:
                    power_array = (np.array(power_array).reshape(
                        [n_blocks, int(nzpos), int(nk)]).astype(np.float32))

                if linelen == 16:
                    power_array = (np.array(power_array).reshape(
                        [n_blocks, int(nzpos), int(nk)]).astype(np.complex128))

                self.t = time.astype(np.float32)
                setattr(self, power_list[power_idx], power_array)

            elif (file_name == "poweruz_x.dat" or file_name == "powerux_x.dat"
                  or file_name == "poweruy_x.dat"):
                # This file may contain complex numbers, written as "( re, im)".

                time = []
                # print('complex reading of file ', file_name)
                power_array = []
                for line_idx, line in enumerate(line_list):
                    if np.mod(line_idx, block_size) == 0:
                        # print(float(line.strip()))
                        time.append(float(line.strip()))
                    else:
                        if (
                                line.find(",") == -1
                        ):  # if the line does not contain ',', assume it represents a series of real numbers.
                            for value_string in line.strip().split():
                                power_array.append(float(value_string))
                        else:  # Assume we have complex numbers.
                            for value_string in line.strip().split("( ")[1:]:
                                value_string = (value_string.replace(
                                    ")",
                                    "j").strip().replace(", ",
                                                         "").replace(" ", "+"))
                                power_array.append(complex(value_string))

                time = np.array(time)
                power_array = np.array(power_array).reshape(
                    [n_blocks, int(dim.nxgrid / 2)])
                self.t = time
                setattr(self, power_list[power_idx], power_array)

            elif file_name == "power_krms.dat":
                power_array = []
                for line_idx, line in enumerate(line_list):
                    if line_idx < block_size - 1:
                        for value_string in line.strip().split():
                            power_array.append(float(value_string))
                power_array = (np.array(power_array).reshape(
                    [int(dim.nxgrid / 2)]).astype(np.float32))
                setattr(self, power_list[power_idx], power_array)
            else:
                time = []
                power_array = []
                for line_idx, line in enumerate(line_list):
                    if np.mod(line_idx, block_size) == 0:
                        time.append(float(line.strip()))
                    else:
                        for value_string in line.strip().split():
                            power_array.append(ffloat(value_string))

                # Reformat into arrays.
                time = np.array(time)
                power_array = (np.array(power_array).reshape(
                    [n_blocks, int(dim.nxgrid / 2)]).astype(np.float32))
                self.t = time.astype(np.float32)
                setattr(self, power_list[power_idx], power_array)
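
A brief usage sketch matching the docstring above (attribute names such as
'kin' and 'krms' are derived from the power file names found in datadir, so
this assumes those files exist for the run at hand):

    import pencil as pc
    import matplotlib.pyplot as plt

    # Read all power spectra and plot the latest kinetic energy spectrum.
    pw = pc.read.power(datadir="data", quiet=True)
    plt.loglog(pw.krms, pw.kin[-1])
    plt.show()
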
Exemple #13
0
def slices2vtk(field='',
               extension='',
               datadir='data',
               destination='slices',
               proc=-1):
    """
    Convert slices from Pencil Code format to VTK.

    call signature::

      slices2vtk(field='', extension='', datadir='data', destination='slices', proc=-1)

    Read slice files specified by *field* and convert
    them into VTK format for the specified extensions.
    Write the result in *destination*.
    NB: You need to have called src/read_videofiles.x before using this script.

    Keyword arguments:

      *field*:
        All allowed fields which can be written as slice files, e.g. b2, uu1, lnrho, ...
        See the Pencil Code manual for more (chapter: "List of parameters for 'video.in'").

      *extension*:
        List of slice positions.

      *datadir*:
        Directory where the data is stored.

      *destination*:
        Prefix for the destination file names.

      *proc*:
        Processor which should be read. Set to -1 for all processors.
    """

    import sys
    import numpy as np
    from pencil import read

    # Convert single variable string into length 1 list of arrays.
    if (len(field) > 0):
        if (len(field[0]) == 1):
            field = [field]
    if (len(extension) > 0):
        if (len(extension[0]) == 1):
            extension = [extension]

    # Read the grid dimensions.
    grid = read.grid(datadir=datadir, proc=proc, trim=True, quiet=True)

    # Read the dimensions.
    dim = read.dim(datadir=datadir, proc=proc)

    # Read the user given parameters for the slice positions.
    params = read.param(quiet=True)

    # Read the slice file for all specified variables and extensions.
    slices = read.slices(field=field,
                         extension=extension,
                         datadir=datadir,
                         proc=proc)

    # Determine the position of the slices.
    if params.ix != -1:
        x0 = grid.x[params.ix]
    elif params.slice_position == 'm':
        x0 = grid.x[int(len(grid.x) / 2)]
    if params.iy != -1:
        y0 = grid.y[params.iy]
    elif params.slice_position == 'm':
        y0 = grid.y[int(len(grid.y) / 2)]
    if params.iz != -1:
        z0 = grid.z[params.iz]
    elif params.slice_position == 'm':
        z0 = grid.z[int(len(grid.z) / 2)]
    if params.iz2 != -1:
        z02 = grid.z[params.iz2]
    elif params.slice_position == 'm':
        z02 = grid.z[int(len(grid.z) / 2)]

    for t_idx, t in enumerate(slices.t):
        for ext in extension:
            # Open the destination file for writing.
            fd = open(destination + '_' + ext + '_' + str(t_idx) + '.vtk',
                      'wb')

            # Write the header.
            fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
            fd.write('slices {0}\n'.format(ext).encode('utf-8'))
            fd.write('BINARY\n'.encode('utf-8'))
            fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
            if ext == 'xy':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, dim.ny, 1).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], grid.y[0], z0).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, grid.dy, 1.).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.ny
            if ext == 'xy2':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, dim.ny, 1).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], grid.y[0], z02).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, grid.dy, 1.).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.ny
            if ext == 'xz':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    dim.nx, 1, dim.nz).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.x[0], y0, grid.z[0]).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    grid.dx, 1., grid.dz).encode('utf-8'))
                dim_p = dim.nx
                dim_q = dim.nz
            if ext == 'yz':
                fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                    1, dim.ny, dim.nz).encode('utf-8'))
                fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    x0, grid.y[0], grid.z[0]).encode('utf-8'))
                fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                    1., grid.dy, grid.dz).encode('utf-8'))
                dim_p = dim.ny
                dim_q = dim.nz
            fd.write('POINT_DATA {0:9}\n'.format(dim_p *
                                                 dim_q).encode('utf-8'))

            # Write the data.
            for fi in field:
                data = getattr(getattr(slices, ext), fi)
                fd.write(
                    ('SCALARS ' + ext + '_' + fi + ' float\n').encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
                if sys.byteorder == 'little':
                    data = data.astype(np.float32).byteswap()
                else:
                    data = data.astype(np.float32)
                fd.write(data[t_idx].tobytes())

            fd.close()
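
An example call (hedged: the slice files such as 'slice_lnrho.xy' must already
have been produced by src/read_videofiles.x, and 'lnrho' must be listed in
video.in):

    slices2vtk(field=['lnrho'], extension=['xy'], datadir='data',
               destination='slices', proc=-1)
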
Exemple #14
0
    def read(
        self,
        var_file="",
        datadir="data",
        proc=-1,
        ivar=-1,
        quiet=True,
        trimall=False,
        magic=None,
        sim=None,
        precision="d",
        lpersist=False,
        dtype=np.float64,
    ):
        """
        read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trimall=False, magic=None, sim=None, precision='d', lpersist=False)

        Read VAR files from Pencil Code. If proc < 0, then load all data
        and assemble, otherwise load VAR file from specified processor.

        The file format written by output() (and used, e.g., in var.dat)
        consists of the following Fortran records:
        1. data(mx, my, mz, nvar)
        2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
        Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
        for one vector field, 8 for var.dat in the case of MHD with entropy.
        Note that deltay(1) is only written if lshear is on, so the run
        parameters must be known in order to read the records correctly.


        Parameters
        ----------
         var_file : string
             Name of the VAR file.
             If not specified, use var.dat (which is the latest snapshot of the fields)

         datadir : string
             Directory where the data is stored.

         proc : int
             Processor to be read. If -1 read all and assemble to one array.

         ivar : int
           Index of the VAR file, if var_file is not specified.

         quiet : bool
             Flag for switching off output.

         trimall : bool
             Trim the data cube to exclude ghost zones.

         magic : bool
             Values to be computed from the data, e.g. B = curl(A).

         sim : pencil code simulation object
             Contains information about the local simulation.

         precision : string
             Float 'f', double 'd' or half 'half'.

         lpersist : bool
             Read the persistent variables if they exist

        Returns
        -------
        DataCube
            Instance of the pencil.read.var.DataCube class.
            All of the computed fields are imported as class members.

        Examples
        --------
        Read the latest var.dat file and print the shape of the uu array:
        >>> var = pc.read.var()
        >>> print(var.uu.shape)

        Read the VAR2 file, compute the magnetic field B = curl(A),
        the vorticity omega = curl(u) and remove the ghost zones:
        >>> var = pc.read.var(var_file='VAR2', magic=['bb', 'vort'], trimall=True)
        >>> print(var.bb.shape)
        """

        import os
        from scipy.io import FortranFile
        from pencil.math.derivatives import curl, curl2
        from pencil import read
        from pencil.sim import __Simulation__

        def persist(self, infile=None, precision="d", quiet=quiet):
            """An open Fortran file potentially containing persistent variables appended
            to the f array and grid data are read from the first proc data

            Record types provide the labels and id record for the peristent
            variables in the depricated fortran binary format
            """
            record_types = {}
            for key in read.record_types.keys():
                if read.record_types[key][1] == "d":
                    record_types[key] = (read.record_types[key][0], precision)
                else:
                    record_types[key] = read.record_types[key]

            try:
                tmp_id = infile.read_record("h")
            except Exception:
                # No persistent block present.
                return -1
            block_id = 0
            for i in range(2000):
                tmp_id = infile.read_record("h")
                block_id = tmp_id[0]
                if block_id == 2000:
                    break
                for key in record_types.keys():
                    if record_types[key][0] == block_id:
                        tmp_val = infile.read_record(record_types[key][1])
                        self.__setattr__(key, tmp_val[0])
                        if not quiet:
                            print(key, record_types[key][0],
                                  record_types[key][1], tmp_val)
            return self

        dim = None
        param = None
        index = None

        if isinstance(sim, __Simulation__):
            datadir = os.path.expanduser(sim.datadir)
            dim = sim.dim
            param = read.param(datadir=sim.datadir,
                               quiet=True,
                               conflicts_quiet=True)
            index = read.index(datadir=sim.datadir)
        else:
            datadir = os.path.expanduser(datadir)
            if dim is None:
                if var_file[0:2].lower() == "og":
                    dim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == "VARd":
                        dim = read.dim(datadir, proc, down=True)
                    else:
                        dim = read.dim(datadir, proc)
            if param is None:
                param = read.param(datadir=datadir,
                                   quiet=quiet,
                                   conflicts_quiet=True)
            if index is None:
                index = read.index(datadir=datadir)

        if param.lwrite_aux:
            total_vars = dim.mvar + dim.maux
        else:
            total_vars = dim.mvar

        if os.path.exists(os.path.join(datadir, "grid.h5")):
            #
            #  Read HDF5 files.
            #
            import h5py

            run2D = param.lwrite_2d

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            if not var_file:
                if ivar < 0:
                    var_file = "var.h5"
                else:
                    var_file = "VAR" + str(ivar) + ".h5"

            file_name = os.path.join(datadir, "allprocs", var_file)
            with h5py.File(file_name, "r") as tmp:
                for key in tmp["data"].keys():
                    self.f[index.__getattribute__(key) - 1, :] = dtype(
                        tmp["data/" + key][:])
                t = (tmp["time"][()]).astype(precision)
                x = (tmp["grid/x"][()]).astype(precision)
                y = (tmp["grid/y"][()]).astype(precision)
                z = (tmp["grid/z"][()]).astype(precision)
                dx = (tmp["grid/dx"][()]).astype(precision)
                dy = (tmp["grid/dy"][()]).astype(precision)
                dz = (tmp["grid/dz"][()]).astype(precision)
                if param.lshear:
                    deltay = (tmp["persist/shear_delta_y"][(
                        0)]).astype(precision)
                if lpersist:
                    for key in tmp["persist"].keys():
                        self.__setattr__(
                            key, (tmp["persist"][key][0]).astype(precision))
        else:
            #
            #  Read scattered Fortran binary files.
            #
            run2D = param.lwrite_2d

            if dim.precision == "D":
                read_precision = "d"
            else:
                read_precision = "f"

            if not var_file:
                if ivar < 0:
                    var_file = "var.dat"
                else:
                    var_file = "VAR" + str(ivar)

            if proc < 0:
                proc_dirs = self.__natural_sort(
                    filter(lambda s: s.startswith("proc"),
                           os.listdir(datadir)))
                if proc_dirs.count("proc_bounds.dat") > 0:
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ["allprocs"]
            else:
                proc_dirs = ["proc" + str(proc)]

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            x = np.zeros(dim.mx, dtype=precision)
            y = np.zeros(dim.my, dtype=precision)
            z = np.zeros(dim.mz, dtype=precision)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    if var_file[0:2].lower() == "og":
                        procdim = read.ogdim(datadir, proc)
                    else:
                        if var_file[0:4] == "VARd":
                            procdim = read.dim(datadir, proc, down=True)
                        else:
                            procdim = read.dim(datadir, proc)
                    if not quiet:
                        print("Reading data from processor" +
                              " {0} of {1} ...".format(proc, len(proc_dirs)))

                else:
                    # A collective IO strategy is being used
                    procdim = dim

                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the data.
                file_name = os.path.join(datadir, directory, var_file)
                infile = FortranFile(file_name)
                if not run2D:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
                else:
                    if dim.ny == 1:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, mzloc, mxloc))
                    else:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, myloc, mxloc))
                raw_etc = infile.read_record(dtype=read_precision)
                if lpersist:
                    persist(self,
                            infile=infile,
                            precision=read_precision,
                            quiet=quiet)
                infile.close()

                t = raw_etc[0]
                x_loc = raw_etc[1:mxloc + 1]
                y_loc = raw_etc[mxloc + 1:mxloc + myloc + 1]
                z_loc = raw_etc[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
                if param.lshear:
                    shear_offset = 1
                    deltay = raw_etc[-1]
                else:
                    shear_offset = 0

                dx = raw_etc[-3 - shear_offset]
                dy = raw_etc[-2 - shear_offset]
                dz = raw_etc[-1 - shear_offset]

                if len(proc_dirs) > 1:
                    # Calculate where the local processor will go in
                    # the global array.
                    #
                    # Don't overwrite ghost zones of processor to the
                    # left (and accordingly in y and z direction -- makes
                    # a difference on the diagonals)
                    #
                    # Recall that in NumPy, slicing is NON-INCLUSIVE on
                    # the right end, ie, x[0:4] will slice all of a
                    # 4-digit array, not produce an error like in idl.

                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0xloc = 0
                        i1xloc = procdim.mx
                    else:
                        i0x = procdim.ipx * procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0xloc = procdim.nghostx
                        i1xloc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0yloc = 0
                        i1yloc = procdim.my
                    else:
                        i0y = procdim.ipy * procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0yloc = procdim.nghosty
                        i1yloc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z + procdim.mz
                        i0zloc = 0
                        i1zloc = procdim.mz
                    else:
                        i0z = procdim.ipz * procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0zloc = procdim.nghostz
                        i1zloc = procdim.mz

                    x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                    y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                    z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                    if not run2D:
                        self.f[:, i0z:i1z, i0y:i1y,
                               i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                i0yloc:i1yloc, i0xloc:i1xloc]
                    else:
                        if dim.ny == 1:
                            self.f[:, i0z:i1z,
                                   i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                    i0xloc:i1xloc]
                        else:
                            self.f[:, i0y:i1y,
                                   i0x:i1x] = f_loc[:, i0yloc:i1yloc,
                                                    i0xloc:i1xloc]
                else:
                    self.f = f_loc
                    x = x_loc
                    y = y_loc
                    z = z_loc

        if magic is not None:
            if not np.all(param.lequidist):
                raise NotImplementedError(
                    "Magic functions are only implemented for equidistant grids."
                )
            if "bb" in magic:
                # Compute the magnetic field before doing trimall.
                aa = self.f[index.ax - 1:index.az, ...]
                self.bb = dtype(
                    curl(
                        aa,
                        dx,
                        dy,
                        dz,
                        x=x,
                        y=y,
                        run2D=run2D,
                        coordinate_system=param.coord_system,
                    ))
                if trimall:
                    self.bb = self.bb[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if "jj" in magic:
                # Compute the electric current field before doing trimall.
                aa = self.f[index.ax - 1:index.az, ...]
                self.jj = dtype(
                    curl2(aa,
                          dx,
                          dy,
                          dz,
                          x=x,
                          y=y,
                          coordinate_system=param.coord_system))
                if trimall:
                    self.jj = self.jj[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if "vort" in magic:
                # Compute the vorticity field before doing trimall.
                uu = self.f[index.ux - 1:index.uz, ...]
                self.vort = dtype(
                    curl(
                        uu,
                        dx,
                        dy,
                        dz,
                        x=x,
                        y=y,
                        run2D=run2D,
                        coordinate_system=param.coord_system,
                    ))
                if trimall:
                    if run2D:
                        if dim.nz == 1:
                            self.vort = self.vort[:, dim.m1:dim.m2 + 1,
                                                  dim.l1:dim.l2 + 1]
                        else:
                            self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                                  dim.l1:dim.l2 + 1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                              dim.m1:dim.m2 + 1,
                                              dim.l1:dim.l2 + 1, ]

        # Trim the ghost zones of the global f-array if asked.
        if trimall:
            self.x = x[dim.l1:dim.l2 + 1]
            self.y = y[dim.m1:dim.m2 + 1]
            self.z = z[dim.n1:dim.n2 + 1]
            if not run2D:
                self.f = self.f[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                dim.l1:dim.l2 + 1]
            else:
                if dim.ny == 1:
                    self.f = self.f[:, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1]
                else:
                    self.f = self.f[:, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.l1 = dim.l1
            self.l2 = dim.l2 + 1
            self.m1 = dim.m1
            self.m2 = dim.m2 + 1
            self.n1 = dim.n1
            self.n2 = dim.n2 + 1

        # Assign an attribute to self for each variable defined in
        # 'data/index.pro' so that e.g. self.ux is the x-velocity
        aatest = []
        uutest = []
        for key in index.__dict__.keys():
            if "aatest" in key:
                aatest.append(key)
            if "uutest" in key:
                uutest.append(key)
            if (key != "global_gg" and key != "keys" and "aatest" not in key
                    and "uutest" not in key):
                value = index.__dict__[key]
                setattr(self, key, self.f[value - 1, ...])
        # Special treatment for vector quantities.
        if hasattr(index, "uu"):
            self.uu = self.f[index.ux - 1:index.uz, ...]
        if hasattr(index, "aa"):
            self.aa = self.f[index.ax - 1:index.az, ...]
        if hasattr(index, "uu_sph"):
            self.uu_sph = self.f[index.uu_sphx - 1:index.uu_sphz, ...]
        if hasattr(index, "bb_sph"):
            self.bb_sph = self.f[index.bb_sphx - 1:index.bb_sphz, ...]
        # Special treatment for test method vector quantities.
        # Note index 1,2,3,...,0 last vector may be the zero field/flow
        if hasattr(index, "aatest1"):
            naatest = int(len(aatest) / 3)
            for j in range(0, naatest):
                key = "aatest" + str(np.mod(j + 1, naatest))
                value = index.__dict__["aatest1"] + 3 * j
                setattr(self, key, self.f[value - 1:value + 2, ...])
        if hasattr(index, "uutest1"):
            nuutest = int(len(uutest) / 3)
            for j in range(0, nuutest):
                key = "uutest" + str(np.mod(j + 1, nuutest))
                value = index.__dict__["uutest"] + 3 * j
                setattr(self, key, self.f[value - 1:value + 2, ...])

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        if param.lshear:
            self.deltay = deltay

        # Do the rest of magic after the trimall (i.e. no additional curl.)
        self.magic = magic
        if self.magic is not None:
            self.magic_attributes(param, dtype=dtype)
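
A short sketch using this reader with persistent variables (assuming
'import pencil as pc'; with lpersist=True the trailing persistent records
become attributes of the returned object):

    import pencil as pc

    # Read a snapshot, compute B = curl(A), and trim the ghost zones.
    var = pc.read.var(var_file="VAR2", magic=["bb"], trimall=True,
                      lpersist=True)
    print(var.t, var.bb.shape)
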
Exemple #15
0
def animate_slices(
    field="uu1",
    datadir="data/",
    proc=-1,
    extension="xz",
    format="native",
    tmin=0.0,
    tmax=1.0e38,
    wait=0.0,
    amin=0.0,
    amax=1.0,
    transform="",
    oldfile=False,
):
    """
    Read 2D slice files and assemble an animation.

    Options:

     field --- which variable to slice
     datadir --- path to data directory
     proc --- an integer giving the processor to read a slice from
     extension --- which plane: one of xy, xz, yz, Xz; for 2D runs this should be overridden.
     format --- endian. one of little, big, or native (default)
     tmin --- start time
     tmax --- end time
     amin --- minimum value for image scaling
     amax --- maximum value for image scaling
     transform --- insert arbitrary numerical code to modify the slice
     wait --- pause in seconds between animation slices
    """

    import os

    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.io import FortranFile
    from pencil import read

    datadir = os.path.expanduser(datadir)
    if proc < 0:
        filename = os.path.join(datadir, "slice_" + field + "." + extension)
    else:
        filename = os.path.join(datadir, "/proc" + str(proc),
                                "/slice_" + field + "." + extension)

    param = read.param(datadir)

    dim = read.dim(datadir, proc)
    if dim.precision == "D":
        precision = "d"
    else:
        precision = "f"

    # set up slice plane
    if extension == "xy" or extension == "Xy":
        hsize = dim.nx
        vsize = dim.ny
    if extension == "xz":
        hsize = dim.nx
        vsize = dim.nz
    if extension == "yz":
        hsize = dim.ny
        vsize = dim.nz
    plane = np.zeros((vsize, hsize), dtype=precision)

    infile = FortranFile(filename)

    ax = plt.axes()
    ax.set_xlabel("x")
    ax.set_ylabel("y")

    image = plt.imshow(plane, vmin=amin, vmax=amax)

    # for real-time image display
    manager = plt.get_current_fig_manager()
    manager.show()

    ifirst = True
    islice = 0
    while 1:
        try:
            raw_data = infile.read_record(dtype=precision)
        except (ValueError, TypeError):
            # End of file reached.
            break

        if oldfile:
            t = raw_data[-1]
            plane = raw_data[:-1].reshape(vsize, hsize)
        else:
            slice_z2pos = raw_data[-1]
            t = raw_data[-2]
            plane = raw_data[:-2].reshape(vsize, hsize)

        if transform:
            exec("plane = plane" + transform)

        if t > tmin and t < tmax:
            title = "t = %11.3e" % t
            ax.set_title(title)
            image.set_data(plane)
            manager.canvas.draw()

            if ifirst:
                print(
                    "----islice----------t---------min-------max-------delta")
            print("%10i %10.3e %10.3e %10.3e %10.3e" %
                  (islice, t, plane.min(), plane.max(),
                   plane.max() - plane.min()))

            ifirst = False
            islice += 1

            plt.pause(wait)

        if t > tmax:
            break

    infile.close()
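
An example invocation (hedged: requires slice files written by
src/read_videofiles.x; 'transform' is evaluated verbatim against each plane):

    animate_slices(field='lnrho', datadir='data/', extension='xz',
                   amin=-1.0, amax=1.0, wait=0.05)
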
Exemple #16
0
    def __read_2d_aver(
        self,
        plane,
        datadir,
        variables,
        aver_file_name,
        n_vars,
        l_h5=False,
        precision="f",
    ):
        """
        Read the xyaverages.dat, xzaverages.dat, yzaverages.dat
        Return the raw data and the time array.
        """

        import os
        import sys

        import numpy as np
        from pencil import read

        if l_h5:
            import h5py

            file_id = os.path.join(datadir, aver_file_name)
            print(file_id)
            sys.stdout.flush()
            with h5py.File(file_id, "r") as tmp:
                n_times = len(tmp.keys()) - 1
                # Determine the structure of the xy/xz/yz averages.
                for var in variables:
                    nw = tmp[str(0) + "/" + var.strip()].shape[0]
                    break
        else:
            # Determine the structure of the xy/xz/yz averages.
            if plane == "xy":
                nw = getattr(read.dim(datadir=datadir), "nz")
            if plane == "xz":
                nw = getattr(read.dim(datadir=datadir), "ny")
            if plane == "yz":
                nw = getattr(read.dim(datadir=datadir), "nx")
            file_id = open(os.path.join(datadir, aver_file_name))
            aver_lines = file_id.readlines()
            file_id.close()
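            # Averages are written 8 values per line: each time record is one
            # time line followed by entry_length data lines.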
            entry_length = int(np.ceil(nw * n_vars / 8.0))
            n_times = int(len(aver_lines) / (1.0 + entry_length))

        # Prepare the data arrays.
        t = np.zeros(n_times, dtype=precision)

        # Read the data
        if l_h5:
            raw_data = np.zeros([n_times, n_vars, nw], dtype=precision)
            with h5py.File(file_id, "r") as tmp:
                for t_idx in range(0, n_times):
                    t[t_idx] = tmp[str(t_idx) + "/time"][()]
                    raw_idx = 0
                    for var in variables:
                        raw_data[t_idx, raw_idx] = tmp[str(t_idx) + "/" +
                                                       var.strip()][()]
                        raw_idx += 1
        else:
            raw_data = np.zeros([n_times, n_vars * nw], dtype=precision)
            line_idx = 0
            t_idx = -1
            try:
                for current_line in aver_lines:
                    if line_idx % (entry_length + 1) == 0:
                        t_idx += 1
                        t[t_idx] = current_line
                        raw_idx = 0
                    else:
                        raw_data[t_idx, raw_idx * 8:(raw_idx * 8 + 8)] = list(
                            map(np.float32, current_line.split()))
                        raw_idx += 1
                    line_idx += 1
            except ValueError:
                print(
                    "Error: There was a problem reading {}.\nCalculated values: n_vars = {}, nw = {}.\nAre these correct?"
                    .format(aver_file_name, n_vars, nw))
                raise

            # Restructure the raw data and add it to the Averages object.
            raw_data = np.reshape(raw_data, [n_times, n_vars, nw])

        return t, raw_data
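
This helper is private; the public entry point is presumably the averages
reader of the pencil package (an assumption here, as is the plane_list
keyword):

    import pencil as pc

    # Hypothetical call reading the horizontal (xy) averages.
    av = pc.read.aver(datadir="data", plane_list=["xy"])
    print(av.t.shape)
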
Exemple #17
0
    def find_tracers(self, var_file='VAR0', datadir='data', trace_field='bb',
                     ti=-1, tf=-1):
        """
        Trace streamlines of the vector field 'trace_field' from z = z0 to
        z = z1 and integrate quantities 'int_q' along the lines. Creates a
        2d mapping as in 'streamlines.f90'.

        call signature:

        find_tracers(var_file='VAR0', datadir='data', trace_field='bb',
                     ti=-1, tf=-1)

        Keyword arguments:

        *var_file*:
          Varfile to be read.

        *datadir*:
          Directory where the data is stored.

        *trace_field*:
          Vector field used for the streamline tracing.

        *ti*:
          Initial VAR file index for tracer time sequences. Overrides 'var_file'.

        *tf*:
          Final VAR file index for tracer time sequences. Overrides 'var_file'.
        """

        import numpy as np
        import multiprocessing as mp
        from pencil import read
        from pencil import math

        # Write the tracing parameters.
        self.params.trace_field = trace_field
        self.params.datadir = datadir

        # Multi core setup.
        if not(np.isscalar(self.params.n_proc)) or (self.params.n_proc%1 != 0):
            print("error: invalid processor number")
            return -1
        queue = mp.Queue()

        # Read the data.
        magic = []
        if trace_field == 'bb':
            magic.append('bb')
        if trace_field == 'jj':
            magic.append('jj')
        if trace_field == 'vort':
            magic.append('vort')
        if self.params.int_q == 'ee':
            magic.append('bb')
            magic.append('jj')
        dim = read.dim(datadir=datadir)
        self.params.var_file = var_file

        # Check if user wants a tracer time series.
        if (ti%1 == 0) and (tf%1 == 0) and (ti >= 0) and (tf >= ti):
            series = True
            nTimes = tf-ti+1
        else:
            series = False
            nTimes = 1

        # Initialize the arrays.
        self.x0 = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
        self.y0 = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
        self.x1 = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
        self.y1 = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
        self.z1 = np.zeros([int(self.params.trace_sub*dim.nx),
                            int(self.params.trace_sub*dim.ny), nTimes])
        self.l = np.zeros([int(self.params.trace_sub*dim.nx),
                           int(self.params.trace_sub*dim.ny), nTimes])
        if self.params.int_q == 'curly_A':
            self.curly_A = np.zeros([int(self.params.trace_sub*dim.nx),
                                     int(self.params.trace_sub*dim.ny), nTimes])
        if self.params.int_q == 'ee':
            self.ee = np.zeros([int(self.params.trace_sub*dim.nx),
                                int(self.params.trace_sub*dim.ny), nTimes])
        self.mapping = np.zeros([int(self.params.trace_sub*dim.nx),
                                 int(self.params.trace_sub*dim.ny),
                                 nTimes, 3])
        self.t = np.zeros(nTimes)

        for t_idx in range(ti, tf+1):
            if series:
                var_file = 'VAR' + str(t_idx)

            # Read the data.
            var = read.var(var_file=var_file, datadir=datadir, magic=magic,
                           quiet=True, trimall=True)
            grid = read.grid(datadir=datadir, quiet=True, trim=True)
            param2 = read.param(datadir=datadir, quiet=True)
            self.t[t_idx] = var.t

            # Extract the requested vector trace_field.
            field = getattr(var, trace_field)
            if self.params.int_q == 'curly_A':
                self.aa = var.aa
            if self.params.int_q == 'ee':
                self.ee = var.jj*param2.eta - math.cross(var.uu, var.bb)

            # Get the simulation parameters.
            self.params.dx = var.dx
            self.params.dy = var.dy
            self.params.dz = var.dz
            self.params.Ox = var.x[0]
            self.params.Oy = var.y[0]
            self.params.Oz = var.z[0]
            self.params.Lx = grid.Lx
            self.params.Ly = grid.Ly
            self.params.Lz = grid.Lz
            self.params.nx = dim.nx
            self.params.ny = dim.ny
            self.params.nz = dim.nz

            # Initialize the tracers.
            for ix in range(int(self.params.trace_sub*dim.nx)):
                for iy in range(int(self.params.trace_sub*dim.ny)):
                    self.x0[ix, iy, t_idx] = grid.x[0] + grid.dx/self.params.trace_sub*ix
                    self.x1[ix, iy, t_idx] = self.x0[ix, iy, t_idx].copy()
                    self.y0[ix, iy, t_idx] = grid.y[0] + grid.dy/self.params.trace_sub*iy
                    self.y1[ix, iy, t_idx] = self.y0[ix, iy, t_idx].copy()
                    self.z1[ix, iy, t_idx] = grid.z[0]

            proc = []
            sub_data = []
            for i_proc in range(self.params.n_proc):
                proc.append(mp.Process(target=self.__sub_tracers,
                                       args=(queue, field, t_idx, i_proc, self.params.n_proc)))
            for i_proc in range(self.params.n_proc):
                proc[i_proc].start()
            for i_proc in range(self.params.n_proc):
                sub_data.append(queue.get())
            for i_proc in range(self.params.n_proc):
                proc[i_proc].join()
            for i_proc in range(self.params.n_proc):
                # Extract the data from the single cores. Mind the order.
                sub_proc = sub_data[i_proc][0]
                self.x1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][1]
                self.y1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][2]
                self.z1[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][3]
                self.l[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][4]
                self.mapping[sub_proc::self.params.n_proc, :, t_idx, :] = sub_data[i_proc][5]
                if self.params.int_q == 'curly_A':
                    self.curly_A[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][6]
                if self.params.int_q == 'ee':
                    self.ee[sub_proc::self.params.n_proc, :, t_idx] = sub_data[i_proc][7]
            for i_proc in range(self.params.n_proc):
                proc[i_proc].terminate()

        return 0
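A minimal usage sketch for the tracer routine above. It assumes a simulation directory with snapshots VAR0..VAR3 and that Tracers() initializes a default params object carrying the attributes used above; all values are illustrative.

from pencil.diag.tracers import Tracers

tracers = Tracers()
tracers.params.trace_sub = 1    # one tracer per grid cell
tracers.params.int_q = ''       # no quantity integrated along the field lines
tracers.params.n_proc = 4       # number of worker processes
tracers.find_tracers(var_file='VAR0', datadir='data', trace_field='bb',
                     ti=0, tf=3)
print(tracers.x1.shape)         # (trace_sub*nx, trace_sub*ny, nTimes)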
Example #18
def write_h5_grid(
    file_name="grid",
    datadir="data",
    precision="d",
    nghost=3,
    settings=None,
    param=None,
    grid=None,
    unit=None,
    quiet=True,
    driver=None,
    comm=None,
    overwrite=False,
    rank=0,
):
    """
    Write the grid information as hdf5.
    By default we assume that a simulation run directory has already been
    constructed but that 'start' has not been executed in h5 format, so the
    binary dim, grid and param files are still present in the simulation
    directory, or are provided as inputs from an old binary simulation
    source directory.

    call signature:

    write_h5_grid(file_name='grid', datadir='data', precision='d', nghost=3,
                  settings=None, param=None, grid=None, unit=None, quiet=True,
                  driver=None, comm=None, overwrite=False, rank=0)

    Keyword arguments:

    *file_name*:
      Prefix of the file name to be written, 'grid'.

    *datadir*:
      Directory where 'grid.h5' is stored.

    *precision*:
      Single 'f' or double 'd' precision.

    *nghost*:
      Number of ghost zones.

    *settings*:
      Optional dictionary of simulation settings (grid dimensions, processor
      layout, etc.), by default read from dim.dat.

    *param*:
      Optional Param object.

    *grid*:
      Optional Pencil Grid object of grid parameters.

    *unit*:
      Optional dictionary of simulation units.

    *quiet*:
      Option to print output.

    *driver*:
      HDF5 file io driver, either None or 'mpio'.

    *comm*:
      MPI communicator object.

    *overwrite*:
      Option to overwrite an existing 'grid.h5'.

    *rank*:
      Integer ID of the MPI process.
    """

    import sys
    from os.path import join
    import numpy as np

    from pencil import read
    from pencil.io import open_h5, group_h5, dataset_h5
    from pencil import is_sim_dir

    # test if simulation directory
    if not is_sim_dir():
        print("ERROR: Directory needs to be a simulation")
        sys.stdout.flush()
    #
    if settings is None:
        settings = {}
        skeys = [
            "l1",
            "l2",
            "m1",
            "m2",
            "n1",
            "n2",
            "nx",
            "ny",
            "nz",
            "mx",
            "my",
            "mz",
            "nprocx",
            "nprocy",
            "nprocz",
            "maux",
            "mglobal",
            "mvar",
            "precision",
        ]
        dim = read.dim()
        for key in skeys:
            settings[key] = dim.__getattribute__(key)
        settings["precision"] = precision.encode()
        settings["nghost"] = nghost
        settings["version"] = np.int32(0)
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    if grid is None:
        grid = read.grid(quiet=True)
    else:
        gd_err = False
        for key in gkeys:
            if not key in grid.__dict__.keys():
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
                gd_err = True
        if gd_err:
            print("ERROR: grid incomplete")
            sys.stdout.flush()
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    if param is None:
        param = read.param(quiet=True)
        param.__setattr__("unit_mass",
                          param.unit_density * param.unit_length**3)
        param.__setattr__("unit_energy",
                          param.unit_mass * param.unit_velocity**2)
        param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
        param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
        param.unit_system = param.unit_system.encode()

    # open file for writing data
    filename = join(datadir, file_name + ".h5")
    with open_h5(filename,
                 "w",
                 driver=driver,
                 comm=comm,
                 overwrite=overwrite,
                 rank=rank) as ds:
        # add settings
        sets_grp = group_h5(ds, "settings", status="w")
        for key in settings.keys():
            dataset_h5(sets_grp, key, status="w", data=(settings[key], ))
        # add grid
        grid_grp = group_h5(ds, "grid", status="w")
        for key in gkeys:
            dataset_h5(grid_grp,
                       key,
                       status="w",
                       data=(grid.__getattribute__(key)))
        dataset_h5(grid_grp,
                   "Ox",
                   status="w",
                   data=(param.__getattribute__("xyz0")[0], ))
        dataset_h5(grid_grp,
                   "Oy",
                   status="w",
                   data=(param.__getattribute__("xyz0")[1], ))
        dataset_h5(grid_grp,
                   "Oz",
                   status="w",
                   data=(param.__getattribute__("xyz0")[2], ))
        # add physical units
        unit_grp = group_h5(ds, "unit", status="w")
        for key in ukeys:
            if "system" in key:
                dataset_h5(
                    unit_grp,
                    key,
                    status="w",
                    data=(param.__getattribute__("unit_" + key), ),
                )
            else:
                dataset_h5(
                    unit_grp,
                    key,
                    status="w",
                    data=param.__getattribute__("unit_" + key),
                )
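A hedged usage sketch for write_h5_grid: run it from inside a binary simulation directory so that dim.dat, grid.dat and param.nml can be read; the path below is a placeholder.

import os
from pencil.io import write_h5_grid

os.chdir('/path/to/binary/simulation')   # placeholder path
write_h5_grid(file_name='grid', datadir='data', precision='d', nghost=3)
# data/grid.h5 now contains the 'settings', 'grid' and 'unit' groups.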
Example #19
def var2h5(
    newdir,
    olddir,
    allfile_names,
    todatadir,
    fromdatadir,
    snap_by_proc,
    precision,
    lpersist,
    quiet,
    nghost,
    settings,
    param,
    grid,
    x,
    y,
    z,
    lshear,
    lremove_old_snapshots,
    indx,
    trimall=False,
    l_mpi=False,
    driver=None,
    comm=None,
    rank=0,
    size=1,
):
    """
    Copy a simulation snapshot set written in Fortran binary to hdf5.

    call signature:

    var2h5(newdir, olddir, allfile_names, todatadir, fromdatadir, snap_by_proc,
           precision, lpersist, quiet, nghost, settings, param, grid,
           x, y, z, lshear, lremove_old_snapshots, indx,
           trimall=False, l_mpi=False, driver=None, comm=None, rank=0, size=1
          )

    Keyword arguments:

    *newdir*:
      String path to simulation destination directory.

    *olddir*:
      String path to simulation source directory.

    *allfile_names*:
      A list of names of the snapshot files to be written, e.g. VAR0.

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *snap_by_proc*:
      Read and write snapshots by procdir of the Fortran binary tree.

    *precision*:
      Single 'f' or double 'd' precision for new data.

    *lpersist*:
      Option to include persistent variables from snapshots.

    *quiet*:
      Option not to print output.

    *nghost*:
      Number of ghost zones.

    *settings*:
      Dictionary of simulation properties.

    *param*:
      Simulation Param object.

    *grid*:
      Simulation Grid object.

    *xyz*:
      xyz arrays of the domain with ghost zones.

    *lshear*:
      Flag for the shear.

    *lremove_old_snapshots*:
      If True the old snapshots will be deleted once the new snapshot has
      been saved.

    *indx*:
      List of variable indices in the f-array.

    *trimall*:
      Strip ghost zones from snapshots.

    *l_mpi*:
      Flag for MPI parallel processing.

    *driver*:
      HDF5 file io driver, either None or 'mpio'.

    *comm*:
      MPI communicator object.

    *rank*:
      Integer ID of the MPI process.

    *size*:
      Number of MPI processes.
    """
    import os
    from os.path import exists, join
    import numpy as np
    import glob
    from pencil import read
    from pencil import sim
    from pencil.io import write_h5_snapshot
    import sys
    import time
    import subprocess as sub

    if not isinstance(allfile_names, list):
        allfile_names = [allfile_names]
    # proceed to copy each snapshot in varfile_names
    nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
    if l_mpi:
        if not snap_by_proc:
            file_names = np.array_split(allfile_names, size)
            if "VARd1" in allfile_names:
                varfile_names = file_names[size - rank - 1]
            else:
                varfile_names = file_names[rank]
        else:
            os.chdir(olddir)
            if size > nprocs:
                nnames = len(allfile_names)
                if size > nnames * nprocs:
                    file_names = np.array_split(allfile_names, nnames)
                    varfile_names = file_names[np.mod(rank, nnames)]
                    nprocsplit = int(size / nnames)
                    iprocs = np.array_split(np.arange(nprocs), nprocs)
                    procs = iprocs[np.mod(rank, nprocs)]
                else:
                    file_names = np.array_split(allfile_names, nnames)
                    varfile_names = file_names[np.mod(rank, nnames)]
                    if nnames > size:
                        procs = np.arange(nprocs)
                    else:
                        nproc_per_fname = int(size / nnames)
                        isize = int(np.mod(rank, nnames) / nproc_per_fname)
                        if np.mod(isize, nproc_per_fname + 1) == 0:
                            npf = nproc_per_fname + 1
                            iprocs = np.array(
                                np.array_split(np.arange(nprocs), npf)).T
                        else:
                            npf = nproc_per_fname
                            iprocs = np.array(
                                np.array_split(np.arange(nprocs), npf)).T
                    procs = iprocs[np.mod(int((rank * nnames) / size), npf)]
            else:
                if np.mod(nprocs, size) > 0:
                    procs = np.arange(nprocs + size - np.mod(nprocs, size))
                    procs[-size + np.mod(nprocs, size):] = np.arange(
                        size - np.mod(nprocs, size))
                else:
                    procs = np.arange(nprocs)
                iprocs = np.array_split(procs, size)
                procs = iprocs[rank]
                varfile_names = allfile_names
            print("rank {} procs:".format(rank), procs)
            sys.stdout.flush()
    else:
        varfile_names = allfile_names
        procs = np.arange(nprocs)
    if len(varfile_names) > 0:
        for file_name in varfile_names:
            # load Fortran binary snapshot
            if not quiet:
                print("rank {}:".format(rank) + "saving " + file_name)
                sys.stdout.flush()
            if snap_by_proc:
                if len(procs) > 0:
                    proctime = time.time()
                    for proc in procs:
                        os.chdir(olddir)
                        if np.mod(proc, size) == size - 1:
                            print(
                                "rank {}:".format(rank) + "saving " +
                                file_name + " on proc{}\n".format(proc),
                                time.ctime(),
                            )
                            sys.stdout.flush()
                        procdim = read.dim(proc=proc)
                        var = read.var(
                            file_name,
                            datadir=fromdatadir,
                            quiet=quiet,
                            lpersist=lpersist,
                            trimall=trimall,
                            proc=proc,
                        )
                        # Shear is present if the snapshot carries deltay.
                        lshear = hasattr(var, "deltay")

                        if lpersist:
                            persist = {}
                            for key in read.record_types.keys():
                                try:
                                    persist[key] = var.__getattribute__(key)[()]
                                    if isinstance(persist[key][0], str):
                                        persist[key][0] = var.__getattribute__(
                                            key)[0].encode()
                                except Exception:
                                    pass
                        else:
                            persist = None
                        if np.mod(proc, size) == size - 1:
                            print("rank {}:".format(rank) + "loaded " +
                                  file_name +
                                  " on proc{} in {} seconds".format(
                                      proc,
                                      time.time() - proctime))
                            sys.stdout.flush()
                        # write data to h5
                        os.chdir(newdir)
                        write_h5_snapshot(
                            var.f,
                            file_name=file_name,
                            state="a",
                            datadir=todatadir,
                            precision=precision,
                            nghost=nghost,
                            persist=persist,
                            proc=proc,
                            procdim=procdim,
                            settings=settings,
                            param=param,
                            grid=grid,
                            lghosts=True,
                            indx=indx,
                            t=var.t,
                            x=x,
                            y=y,
                            z=z,
                            quiet=quiet,
                            rank=rank,
                            size=size,
                            lshear=lshear,
                            driver=driver,
                            comm=comm,
                        )
                        if np.mod(proc, size) == size - 1:
                            print("rank {}:".format(rank) + "written " +
                                  file_name +
                                  " on proc{} in {} seconds".format(
                                      proc,
                                      time.time() - proctime))
                            sys.stdout.flush()
                        proctime = time.time()
            else:
                var = read.var(
                    file_name,
                    datadir=fromdatadir,
                    quiet=quiet,
                    lpersist=lpersist,
                    trimall=trimall,
                )
                # Shear is present if the snapshot carries deltay.
                lshear = hasattr(var, "deltay")

                if lpersist:
                    persist = {}
                    for key in read.record_types.keys():
                        try:
                            persist[key] = var.__getattribute__(key)[()]
                            if isinstance(persist[key][0], str):
                                persist[key][0] = var.__getattribute__(
                                    key)[0].encode()
                        except Exception:
                            pass
                else:
                    persist = None
                # write data to h5
                os.chdir(newdir)
                write_h5_snapshot(
                    var.f,
                    file_name=file_name,
                    datadir=todatadir,
                    precision=precision,
                    nghost=nghost,
                    persist=persist,
                    settings=settings,
                    param=param,
                    grid=grid,
                    lghosts=True,
                    indx=indx,
                    t=var.t,
                    x=x,
                    y=y,
                    z=z,
                    lshear=lshear,
                    driver=None,
                    comm=None,
                )
            if lremove_old_snapshots:
                os.chdir(olddir)
                cmd = "rm -f " + join(olddir, fromdatadir, "proc*", file_name)
                os.system(cmd)
            del var
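var2h5 expects its settings, param, grid and index inputs pre-assembled; the sketch below mirrors how sim2h5 (Example #23) builds them before calling. Paths and the snapshot list are placeholders.

import numpy as np
from pencil import read

dim = read.dim()
skeys = ['l1', 'l2', 'm1', 'm2', 'n1', 'n2', 'nx', 'ny', 'nz',
         'mx', 'my', 'mz', 'nprocx', 'nprocy', 'nprocz',
         'maux', 'mglobal', 'mvar', 'precision']
settings = {key: np.array(getattr(dim, key)) for key in skeys}
settings['nghost'] = np.array(3)
settings['precision'] = 'd'.encode()

param = read.param(quiet=True)
# sim2h5 also attaches derived units (unit_mass etc.) to param at this point.
grid = read.grid(quiet=True)
indx = read.index()

var2h5('/path/new_sim', '/path/old_sim', ['VAR0'], 'data/allprocs', 'data',
       False, 'd', True, True, 3, settings, param, grid,
       grid.x, grid.y, grid.z, False, False, indx)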
Example #20
def animate_slices(field='uu1',
                   datadir='data/',
                   proc=-1,
                   extension='xz',
                   format='native',
                   tmin=0.,
                   tmax=1.e38,
                   wait=0.,
                   amin=0.,
                   amax=1.,
                   transform='',
                   oldfile=False):
    """
    read 2D slice files and assemble an animation.

    Options:

     field --- which variable to slice
     datadir --- path to data directory
     proc --- an integer giving the processor to read a slice from
     extension --- which plane of xy,xz,yz,Xz. for 2D this should be overwritten.
     format --- endian. one of little, big, or native (default)
     tmin --- start time
     tmax --- end time
     amin --- minimum value for image scaling
     amax --- maximum value for image scaling
     transform --- insert arbitrary numerical code to modify the slice
     wait --- pause in seconds between animation slices
    """

    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.io import FortranFile
    from time import sleep
    from pencil import read

    datadir = os.path.expanduser(datadir)
    if proc < 0:
        filename = datadir + '/slice_' + field + '.' + extension
    else:
        filename = datadir + '/proc' + str(
            proc) + '/slice_' + field + '.' + extension

    param = read.param(datadir)

    dim = read.dim(datadir, proc)
    if dim.precision == 'D':
        precision = 'd'
    else:
        precision = 'f'

    # set up slice plane
    if extension in ('xy', 'Xy'):
        hsize = dim.nx
        vsize = dim.ny
    elif extension == 'xz':
        hsize = dim.nx
        vsize = dim.nz
    elif extension == 'yz':
        hsize = dim.ny
        vsize = dim.nz
    else:
        raise ValueError("unknown slice extension '{}'".format(extension))
    plane = np.zeros((vsize, hsize), dtype=precision)

    infile = FortranFile(filename)

    ax = plt.axes()
    ax.set_xlabel('x')
    ax.set_ylabel('y')

    image = plt.imshow(plane, vmin=amin, vmax=amax)

    # for real-time image display
    manager = plt.get_current_fig_manager()
    manager.show()

    ifirst = True
    islice = 0
    while True:
        try:
            raw_data = infile.read_record(dtype=precision)
        except (ValueError, TypeError):
            # End of file reached.
            break

        if oldfile:
            t = raw_data[-1]
            plane = raw_data[:-1].reshape(vsize, hsize)
        else:
            slice_z2pos = raw_data[-1]
            t = raw_data[-2]
            plane = raw_data[:-2].reshape(vsize, hsize)

        if transform:
            # exec() cannot rebind a local variable in Python 3, so apply the
            # transform expression with eval() instead.
            plane = eval('plane' + transform)

        if (t > tmin and t < tmax):
            title = 't = %11.3e' % t
            ax.set_title(title)
            image.set_data(plane)
            manager.canvas.draw()

            if ifirst:
                print(
                    "----islice----------t---------min-------max-------delta")
            print("%10i %10.3e %10.3e %10.3e %10.3e" %
                  (islice, t, plane.min(), plane.max(),
                   plane.max() - plane.min()))

            ifirst = False
            islice += 1

            sleep(wait)

    infile.close()
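Illustrative call (all values are placeholders): animate the 'uu1' xz-slices between t=0 and t=10 with a fixed colour scale and a short pause per frame.

animate_slices(field='uu1', datadir='data/', extension='xz',
               tmin=0., tmax=10., amin=-0.1, amax=0.1, wait=0.05)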
Example #21
def aver2h5(
    newdir,
    olddir,
    todatadir="data/averages",
    fromdatadir="data",
    l2D=True,
    precision="d",
    quiet=True,
    lremove_old_averages=False,
    aver_by_proc=False,
    laver2D=False,
    l_mpi=False,
    driver=None,
    comm=None,
    rank=0,
    size=1,
):
    """
    Copy a simulation set of averages written in Fortran binary to hdf5.

    call signature:

    aver2h5(newdir, olddir,
            todatadir='data/averages', fromdatadir='data', l2D=True,
            precision='d', quiet=True, lremove_old_averages=False,
            aver_by_proc=False,
            laver2D=False, l_mpi=False, driver=None, comm=None, rank=0, size=1)

    Keyword arguments:

    *newdir*:
      String path to simulation destination directory.

    *olddir*:
      String path to simulation source directory.

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *l2D*:
      Option to include 2D averages if the file sizes are not too large.

    *precision*:
      Single 'f' or double 'd' precision for new data.

    *quiet*:
      Option not to print output.

    *lremove_old_averages*:
      If True the old averages data will be deleted once the new h5 data
      has been saved.

    *aver_by_proc*:
      Option to read old binary files by processor and write in
      parallel.

    *laver2D*:
      If True apply to each plane_list 'y', 'z' and load each variable
      sequentially.

    *l_mpi*:
      Flag for MPI parallel processing.

    *driver*:
      HDF5 file io driver, either None or 'mpio'.

    *comm*:
      MPI communicator object.

    *rank*:
      Integer ID of the MPI process.

    *size*:
      Number of MPI processes.
    """

    import os
    from os.path import exists, join
    import numpy as np
    from .. import read
    from .. import sim
    from . import write_h5_averages
    import sys
    import subprocess as sub

    if laver2D:
        os.chdir(olddir)
        for xl in ["y", "z"]:
            if exists(xl + "aver.in"):
                if exists(join(fromdatadir, "t2davg.dat")):
                    with open(join(fromdatadir, "t2davg.dat")) as f:
                        niter = int(f.readline().split(" ")[-1].strip("\n")) - 1
                else:
                    if not aver_by_proc:
                        av = read.aver(plane_list=xl, proc=0, var_index=0)
                        niter = av.t.size
                    else:
                        niter = None
                if aver_by_proc:
                    dim = read.dim()
                    if xl == "y":
                        nproc = dim.nprocz
                    if xl == "z":
                        nproc = dim.nprocy
                    all_list = np.array_split(np.arange(nproc), size)
                    proc_list = list(all_list[rank])
                    os.chdir(olddir)
                    if len(proc_list) > 0:
                        for proc in proc_list:
                            print("reading " + xl + "averages on proc", proc)
                            sys.stdout.flush()
                            av = read.aver(plane_list=xl, proc=proc)
                            procdim = read.dim(proc=proc)
                            write_h5_averages(
                                av,
                                file_name=xl,
                                datadir=todatadir,
                                nt=niter,
                                precision=precision,
                                append=True,
                                aver_by_proc=True,
                                nproc=nproc,
                                proc=proc,
                                dim=dim,
                                procdim=procdim,
                                quiet=quiet,
                                driver=driver,
                                comm=comm,
                                rank=rank,
                                size=size,
                            )
                        del av
                else:
                    all_list = np.array_split(np.arange(niter), size)
                    iter_list = list(all_list[rank])
                    os.chdir(olddir)
                    print("reading " + xl + "averages on rank", rank)
                    sys.stdout.flush()
                    av = read.aver(plane_list=xl, iter_list=iter_list)
                    os.chdir(newdir)
                    write_h5_averages(
                        av,
                        file_name=xl,
                        datadir=todatadir,
                        nt=niter,
                        precision=precision,
                        append=False,
                        indx=iter_list,
                        quiet=quiet,
                        driver=driver,
                        comm=comm,
                        rank=rank,
                        size=size,
                    )
                    del av
    else:
        # copy old 1D averages to new h5 sim
        os.chdir(olddir)
        plane_list = []
        for xl in ["xy", "xz", "yz"]:
            if exists(xl + "aver.in"):
                plane_list.append(xl)
        if rank == size - 1 or not l_mpi:
            if len(plane_list) > 0:
                av = read.aver(plane_list=plane_list)
                os.chdir(newdir)
                for key in av.__dict__.keys():
                    if key != "t":
                        write_h5_averages(
                            av,
                            file_name=key,
                            datadir=todatadir,
                            precision=precision,
                            quiet=quiet,
                            driver=driver,
                            comm=None,
                            rank=None,
                            size=size,
                        )
                del av
            if lremove_old_averages:
                os.chdir(olddir)
                cmd = "rm -f " + join(olddir, fromdatadir, "*averages.dat")
                process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                output, error = process.communicate()
                print(cmd, output, error)
                # os.system(cmd)
            if l2D:
                plane_list = []
                os.chdir(olddir)
                for xl in ["x", "y", "z"]:
                    if exists(xl + "aver.in"):
                        plane_list.append(xl)
                if len(plane_list) > 0:
                    for key in plane_list:
                        os.chdir(olddir)
                        av = read.aver(plane_list=key)
                        os.chdir(newdir)
                        write_h5_averages(
                            av,
                            file_name=key,
                            datadir=todatadir,
                            precision=precision,
                            quiet=quiet,
                            driver=None,
                            comm=None,
                        )
                    del av
    if lremove_old_averages:
        if l_mpi:
            comm.Barrier()
        os.chdir(olddir)
        cmd = "rm -f " + join(olddir, fromdatadir, "*averages.dat")
        if rank == 0:
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
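A hedged serial usage sketch: copy the 1D (and, file sizes permitting, 2D) averages of an existing binary simulation into a prepared h5 simulation; both paths are placeholders.

aver2h5('/path/to/new_h5_sim', '/path/to/old_binary_sim',
        todatadir='data/averages', fromdatadir='data',
        l2D=True, precision='d', quiet=False)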
Example #22
    def find_fixed(
        self,
        datadir="data",
        var_file="VAR0",
        trace_field="bb",
        ti=-1,
        tf=-1,
        tracer_file_name=None,
    ):
        """
        Find the fixed points to a snapshot or existing tracer file.

        call signature::

          find_fixed(datadir='data', var_file='VAR0', trace_field='bb',
                     ti=-1, tf=-1, tracer_file_name=None):

        Keyword arguments:

          *datadir*:
            Data directory.

         *var_file*:
           Varfile to be read.

         *trace_field*:
           Vector field used for the streamline tracing.

          *ti*:
            Initial VAR file index for tracer time sequences. Overrides 'var_file'.

          *tf*:
            Final VAR file index for tracer time sequences. Overrides 'var_file'.

         *tracer_file_name*
           Name of the tracer file to be read.
           If 'None' compute the tracers.
        """

        import numpy as np
        import multiprocessing as mp
        from pencil import read
        from pencil import math
        from pencil.diag.tracers import Tracers
        from pencil.calc.streamlines import Stream
        from pencil.math.interpolation import vec_int

        if self.params.int_q == "curly_A":
            self.curly_A = []
        if self.params.int_q == "ee":
            self.ee = []

        # Multi core setup.
        if not (np.isscalar(self.params.n_proc)) or (self.params.n_proc % 1 != 0):
            print("Error: invalid processor number")
            return -1
        queue = mp.Queue()

        # Make sure to read the var files with the correct magic.
        magic = []
        if trace_field == "bb":
            magic.append("bb")
        if trace_field == "jj":
            magic.append("jj")
        if trace_field == "vort":
            magic.append("vort")
        if self.params.int_q == "ee":
            magic.append("bb")
            magic.append("jj")
        dim = read.dim(datadir=datadir)

        # Check if user wants a tracer time series.
        if (ti % 1 == 0) and (tf % 1 == 0) and (ti >= 0) and (tf >= ti):
            series = True
            var_file = "VAR{0}".format(ti)
            n_times = tf - ti + 1
        else:
            series = False
            n_times = 1
        self.t = np.zeros(n_times)

        # Read the initial field.
        var = read.var(
            var_file=var_file, datadir=datadir, magic=magic, quiet=True, trimall=True
        )

        self.t[0] = var.t
        grid = read.grid(datadir=datadir, quiet=True, trim=True)
        field = getattr(var, trace_field)
        param2 = read.param(datadir=datadir, quiet=True)
        if self.params.int_q == "ee":
            ee = var.jj * param2.eta - math.cross(var.uu, var.bb)
        self.params.datadir = datadir
        self.params.var_file = var_file
        self.params.trace_field = trace_field

        # Get the simulation parameters.
        self.params.dx = var.dx
        self.params.dy = var.dy
        self.params.dz = var.dz
        self.params.Ox = var.x[0]
        self.params.Oy = var.y[0]
        self.params.Oz = var.z[0]
        self.params.Lx = grid.Lx
        self.params.Ly = grid.Ly
        self.params.Lz = grid.Lz
        self.params.nx = dim.nx
        self.params.ny = dim.ny
        self.params.nz = dim.nz

        tracers = Tracers()
        tracers.params = self.params
        # Create the mapping for all times.
        if not tracer_file_name:
            tracers.find_tracers(
                var_file=var_file,
                datadir=datadir,
                trace_field=trace_field,
                ti=ti,
                tf=tf,
            )
        else:
            tracers.read(datadir=datadir, file_name=tracer_file_name)
        self.tracers = tracers

        # Set some default values.
        self.t = np.zeros(n_times)
        self.fixed_index = np.zeros(n_times)
        self.poincare = np.zeros(
            [
                int(self.params.trace_sub * dim.nx),
                int(self.params.trace_sub * dim.ny),
                n_times,
            ]
        )
        ix0 = range(0, int(self.params.nx * self.params.trace_sub) - 1)
        iy0 = range(0, int(self.params.ny * self.params.trace_sub) - 1)
        self.fixed_points = []
        self.fixed_sign = []
        self.fixed_tracers = []

        # Start the parallelized fixed point finding.
        for tidx in range(n_times):
            if tidx > 0:
                var = read.var(
                    var_file="VAR{0}".format(tidx + ti),
                    datadir=datadir,
                    magic=magic,
                    quiet=True,
                    trimall=True,
                )
                field = getattr(var, trace_field)
                self.t[tidx] = var.t

            proc = []
            sub_data = []
            fixed = []
            fixed_sign = []
            fixed_tracers = []
            for i_proc in range(self.params.n_proc):
                proc.append(
                    mp.Process(
                        target=self.__sub_fixed,
                        args=(queue, ix0, iy0, field, self.tracers, tidx, var, i_proc),
                    )
                )
            for i_proc in range(self.params.n_proc):
                proc[i_proc].start()
            for i_proc in range(self.params.n_proc):
                sub_data.append(queue.get())
            for i_proc in range(self.params.n_proc):
                proc[i_proc].join()
            for i_proc in range(self.params.n_proc):
                # Extract the data from the single cores. Mind the order.
                sub_proc = sub_data[i_proc][0]
                fixed.extend(sub_data[i_proc][1])
                fixed_tracers.extend(sub_data[i_proc][2])
                fixed_sign.extend(sub_data[i_proc][3])
                self.fixed_index[tidx] += sub_data[i_proc][4]
                self.poincare[sub_proc :: self.params.n_proc, :, tidx] = sub_data[
                    i_proc
                ][5]
            for i_proc in range(self.params.n_proc):
                proc[i_proc].terminate()

            # Discard fixed points which lie too close to each other.
            fixed, fixed_tracers, fixed_sign = self.__discard_close_fixed_points(
                np.array(fixed), np.array(fixed_sign), np.array(fixed_tracers), var
            )
            if self.fixed_points is None:
                self.fixed_points = []
                self.fixed_sign = []
                self.fixed_tracers = []
            self.fixed_points.append(np.array(fixed))
            self.fixed_sign.append(np.array(fixed_sign))
            self.fixed_tracers.append(fixed_tracers)

        # Compute the traced quantities along the fixed point streamlines.
        if (self.params.int_q == "curly_A") or (self.params.int_q == "ee"):
            for t_idx in range(0, n_times):
                if self.params.int_q == "curly_A":
                    self.curly_A.append([])
                if self.params.int_q == "ee":
                    self.ee.append([])
                for fixed in self.fixed_points[t_idx]:
                    # Trace the stream line.
                    xx = np.array([fixed[0], fixed[1], self.params.Oz])
                    field_strength_z0 = vec_int(
                        xx,
                        field,
                        [var.dx, var.dy, var.dz],
                        [var.x[0], var.y[0], var.z[0]],
                        [len(var.x), len(var.y), len(var.z)],
                        interpolation=self.params.interpolation,
                    )
                    field_strength_z0 = np.sqrt(np.sum(field_strength_z0 ** 2))
                    time = np.linspace(0, 4 * self.params.Lz / field_strength_z0, 500)
                    stream = Stream(field, self.params, xx=xx, time=time)
                    # Do the field line integration.
                    if self.params.int_q == "curly_A":
                        curly_A = 0
                        for l in range(stream.iterations - 1):
                            aaInt = vec_int(
                                (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                                var.aa,
                                [var.dx, var.dy, var.dz],
                                [var.x[0], var.y[0], var.z[0]],
                                [len(var.x), len(var.y), len(var.z)],
                                interpolation=self.params.interpolation,
                            )
                            curly_A += np.dot(
                                aaInt, (stream.tracers[l + 1] - stream.tracers[l])
                            )
                        self.curly_A[-1].append(curly_A)
                    if self.params.int_q == "ee":
                        ee_p = 0
                        for l in range(stream.iterations - 1):
                            eeInt = vec_int(
                                (stream.tracers[l + 1] + stream.tracers[l]) / 2,
                                ee,
                                [var.dx, var.dy, var.dz],
                                [var.x[0], var.y[0], var.z[0]],
                                [len(var.x), len(var.y), len(var.z)],
                                interpolation=self.params.interpolation,
                            )
                            ee_p += np.dot(
                                eeInt, (stream.tracers[l + 1] - stream.tracers[l])
                            )
                        self.ee[-1].append(ee_p)
                if self.params.int_q == "curly_A":
                    self.curly_A[-1] = np.array(self.curly_A[-1])
                if self.params.int_q == "ee":
                    self.ee[-1] = np.array(self.ee[-1])

        return 0
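A usage sketch under the assumption that this method belongs to a FixedPoint-style diagnostic class (the container name is a guess; the params attributes follow the code above and all values are illustrative).

fixed = FixedPoint()                 # assumed container class
fixed.params.trace_field = 'bb'
fixed.params.trace_sub = 1
fixed.params.int_q = ''              # no line integrals
fixed.params.n_proc = 2
fixed.params.interpolation = 'trilinear'
fixed.find_fixed(datadir='data', var_file='VAR0', trace_field='bb')
print(fixed.fixed_points[0])         # fixed points of the first snapshot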
Example #23
def sim2h5(
    newdir=".",
    olddir=".",
    varfile_names=None,
    todatadir="data/allprocs",
    fromdatadir="data",
    precision="d",
    nghost=3,
    lpersist=True,
    x=None,
    y=None,
    z=None,
    lshear=False,
    snap_by_proc=False,
    aver_by_proc=False,
    lremove_old_snapshots=False,
    lremove_old_slices=False,
    lread_all_videoslices=False,
    vlarge=100000000,
    lremove_old_averages=False,
    execute=False,
    quiet=True,
    l2D=True,
    lvars=True,
    lvids=True,
    laver=True,
    laver2D=False,
    lremove_deprecated_vids=False,
    lsplit_slices=False,
):
    """
    Copy a simulation object written in Fortran binary to hdf5.
    The default is to copy all snapshots from/to the current simulation
    directory. Optionally the old files can be removed once the new h5
    data has been saved.

    call signature:

    sim2h5(newdir='.', olddir='.', varfile_names=None,
           todatadir='data/allprocs', fromdatadir='data',
           precision='d', nghost=3, lpersist=True,
           x=None, y=None, z=None, lshear=False,
           snap_by_proc=False, aver_by_proc=False,
           lremove_old_snapshots=False,
           lremove_old_slices=False, lread_all_videoslices=False,
           lremove_old_averages=False, execute=False, quiet=True,
           l2D=True, lvars=True, lvids=True, laver=True)

    Keyword arguments:

    *olddir*:
      String path to simulation source directory.
      Path may be relative or absolute.

    *newdir*:
      String path to simulation destination directory.
      Path may be relative or absolute.

    *varfile_names*:
      A list of names of the snapshot files to be written, e.g. VAR0
      If None all varfiles in olddir+'/data/proc0/' will be converted

    *todatadir*:
      Directory to which the data is stored.

    *fromdatadir*:
      Directory from which the data is collected.

    *precision*:
      Single 'f' or double 'd' precision for new data.

    *nghost*:
      Number of ghost zones.
      TODO: handle switching size of ghost zones.

    *lpersist*:
      Option to include persistent variables from snapshots.

    *xyz*:
      xyz arrays of the domain with ghost zones.
      This will normally be obtained from Grid object, but facility to
      redefine an alternative grid value.

    *lshear*:
      Flag for the shear.

    *execute*:
      Confirmation flag that must be set to True before any old files
      are removed.

    *lremove_old_snapshots*:
      If True the old snapshot data will be deleted once the new h5 data
      has been saved.

    *lremove_old_slices*:
      If True the old video slice data will be deleted once the new h5 data
      has been saved.

    *lremove_old_averages*:
      If True the old averages data will be deleted once the new h5 data
      has been saved.

    *aver_by_proc*:
      Option to read old binary files by processor and write in
      parallel.

    *laver2D*:
      If True apply to each plane_list 'y', 'z' and load each variable
      sequentially.
    """

    import glob
    import numpy as np
    import os
    from os.path import exists, join
    import subprocess as sub
    import sys

    from .. import read
    from .. import sim
    from . import write_h5_grid
    from pencil.util import is_sim_dir
    # Assumption: natural_sort (used below) is importable from pencil.util;
    # adjust the import if it lives elsewhere in your pencil version.
    from pencil.util import natural_sort

    try:
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        driver = "mpio"
        l_mpi = True
        l_mpi = l_mpi and (size != 1)
    except ImportError:
        comm = None
        driver = None
        rank = 0
        size = 1
        l_mpi = False
    if not l_mpi:
        comm = None
        driver = None
    print("rank {} and size {}".format(rank, size))
    sys.stdout.flush()
    if rank == size - 1:
        print("l_mpi", l_mpi)
        sys.stdout.flush()

    # test if simulation directories
    if newdir == ".":
        newdir = os.getcwd()
    if olddir == ".":
        olddir = os.getcwd()
    os.chdir(olddir)
    if not is_sim_dir():
        if rank == 0:
            print("ERROR: Directory (" + olddir + ") needs to be a simulation")
            sys.stdout.flush()
        return -1
    if newdir != olddir:
        if not exists(newdir):
            cmd = "pc_newrun -s " + newdir
            if rank == size - 1:
                process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                output, error = process.communicate()
                print(cmd, output, error)
                # os.system(cmd)
            if comm:
                comm.Barrier()
        os.chdir(newdir)
        if not is_sim_dir():
            if rank == 0:
                print("ERROR: Directory (" + newdir +
                      ") needs to be a simulation")
                sys.stdout.flush()
            return -1
    #
    lremove_old = lremove_old_snapshots or lremove_old_slices or lremove_old_averages
    if lremove_old:
        if not execute:
            os.chdir(olddir)
            if rank == 0:
                print("WARNING: Are you sure you wish to remove the Fortran" +
                      " binary files from \n" + os.getcwd() + ".\n" +
                      "Set execute=True to proceed.")
                sys.stdout.flush()
            return -1

    os.chdir(olddir)
    if lvars:
        if varfile_names is None:
            os.chdir(fromdatadir + "/proc0")
            lVARd = False
            varfiled_names = natural_sort(glob.glob("VARd*"))
            if len(varfiled_names) > 0:
                varfile_names = natural_sort(glob.glob("VAR*"))
                for iv in range(len(varfile_names) - 1, -1, -1):
                    if "VARd" in varfile_names[iv]:
                        varfile_names.remove(varfile_names[iv])
                lVARd = True
            else:
                varfile_names = natural_sort(glob.glob("VAR*"))
            os.chdir(olddir)
        else:
            lVARd = False
            if not isinstance(varfile_names, list):
                varfile_names = [varfile_names]
            varfiled_names = []
            tmp_names = []
            for varfile_name in varfile_names:
                if "VARd" in varfile_names:
                    varfiled_names.append(varfile_name)
                    lVARd = True
                else:
                    tmp_names.append(varfile_name)
            varfile_names = tmp_names
    gkeys = [
        "x",
        "y",
        "z",
        "Lx",
        "Ly",
        "Lz",
        "dx",
        "dy",
        "dz",
        "dx_1",
        "dy_1",
        "dz_1",
        "dx_tilde",
        "dy_tilde",
        "dz_tilde",
    ]
    grid = None
    if rank == size - 1:
        grid = read.grid(quiet=True)
    if l_mpi:
        grid = comm.bcast(grid, root=size - 1)
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    for key in gkeys:
        if not key in grid.__dict__.keys():
            if rank == 0:
                print("ERROR: key " + key + " missing from grid")
                sys.stdout.flush()
            return -1
    # obtain the settings from the old simulation
    settings = {}
    skeys = [
        "l1",
        "l2",
        "m1",
        "m2",
        "n1",
        "n2",
        "nx",
        "ny",
        "nz",
        "mx",
        "my",
        "mz",
        "nprocx",
        "nprocy",
        "nprocz",
        "maux",
        "mglobal",
        "mvar",
        "precision",
    ]
    if rank == 0:
        olddim = read.dim()
        for key in skeys:
            settings[key] = np.array(olddim.__getattribute__(key))
        olddim = None
        settings["nghost"] = np.array(nghost)
        settings["precision"] = precision.encode()
    if l_mpi:
        settings = comm.bcast(settings, root=0)
    if snap_by_proc:
        nprocs = settings["nprocx"] * settings["nprocy"] * settings["nprocz"]
        if np.mod(nprocs, size) != 0:
            print("WARNING: efficiency requires cpus to divide ncpus")
            sys.stdout.flush()
    if not quiet:
        print(rank, grid)
        sys.stdout.flush()
    # obtain physical units from old simulation
    ukeys = [
        "length",
        "velocity",
        "density",
        "magnetic",
        "time",
        "temperature",
        "flux",
        "energy",
        "mass",
        "system",
    ]
    param = read.param(quiet=True)
    param.__setattr__("unit_mass", param.unit_density * param.unit_length**3)
    param.__setattr__("unit_energy", param.unit_mass * param.unit_velocity**2)
    param.__setattr__("unit_time", param.unit_length / param.unit_velocity)
    param.__setattr__("unit_flux", param.unit_mass / param.unit_time**3)
    param.unit_system = param.unit_system.encode()
    # index list for variables in f-array
    if not quiet:
        print(rank, param)
        sys.stdout.flush()
    indx = None
    if rank == 0:
        indx = read.index()
    if l_mpi:
        indx = comm.bcast(indx, root=0)

    # check consistency between Fortran binary and h5 data
    os.chdir(newdir)
    dim = None
    if is_sim_dir():
        if rank == size - 1:
            if exists(join(newdir, "data", "dim.dat")):
                try:
                    dim = read.dim()
                except ValueError:
                    pass
        if l_mpi:
            dim = comm.bcast(dim, root=size - 1)
        if dim:
            if not quiet:
                print(rank, dim)
                sys.stdout.flush()
            if (dim.mvar != settings["mvar"] or dim.mx != settings["mx"]
                    or dim.my != settings["my"] or dim.mz != settings["mz"]):
                if rank == size - 1:
                    print("ERROR: new simulation dimensions do not match.")
                    sys.stdout.flush()
                return -1
            dim = None
    os.chdir(olddir)
    if rank == size - 1:
        print("precision is ", precision)
        sys.stdout.flush()
    if laver2D:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir="data",
            l2D=False,
            precision=precision,
            quiet=quiet,
            laver2D=laver2D,
            lremove_old_averages=False,
            aver_by_proc=aver_by_proc,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
        l2D = False
    # copy snapshots
    if lvars and len(varfile_names) > 0:
        var2h5(
            newdir,
            olddir,
            varfile_names,
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy downsampled snapshots if present
    if lvars and lVARd:
        var2h5(
            newdir,
            olddir,
            varfiled_names,
            todatadir,
            fromdatadir,
            False,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            trimall=True,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    if lvars:
        var2h5(
            newdir,
            olddir,
            [
                "var.dat",
            ],
            todatadir,
            fromdatadir,
            snap_by_proc,
            precision,
            lpersist,
            quiet,
            nghost,
            settings,
            param,
            grid,
            x,
            y,
            z,
            lshear,
            lremove_old_snapshots,
            indx,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy old video slices to new h5 sim
    if lvids:
        if lremove_deprecated_vids:
            for ext in [
                    "bb.", "uu.", "ux.", "uy.", "uz.", "bx.", "by.", "bz."
            ]:
                cmd = "rm -f " + join(olddir, fromdatadir, "proc*",
                                      "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
                cmd = "rm -f " + join(fromdatadir, "slice_" + ext + "*")
                if rank == 0:
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
        if comm:
            comm.Barrier()
        cmd = "src/read_all_videofiles.x"
        if rank == size - 1 and lread_all_videoslices:
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        if comm:
            comm.Barrier()
        slices2h5(
            newdir,
            olddir,
            grid,
            todatadir="data/slices",
            fromdatadir=fromdatadir,
            precision=precision,
            quiet=quiet,
            vlarge=vlarge,
            lsplit_slices=lsplit_slices,
            lremove_old_slices=lremove_old_slices,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # copy old averages data to new h5 sim
    if laver:
        aver2h5(
            newdir,
            olddir,
            todatadir="data/averages",
            fromdatadir=fromdatadir,
            l2D=l2D,
            precision=precision,
            quiet=quiet,
            aver_by_proc=False,
            lremove_old_averages=lremove_old_averages,
            l_mpi=l_mpi,
            driver=driver,
            comm=comm,
            rank=rank,
            size=size,
        )
    # check some critical sim files are present for new sim without start
    # construct grid.h5 sim information if required for new h5 sim
    os.chdir(newdir)
    if l_mpi:
        comm.Barrier()
    if rank == 0:
        write_h5_grid(
            file_name="grid",
            datadir="data",
            precision=precision,
            nghost=nghost,
            settings=settings,
            param=param,
            grid=grid,
            unit=None,
            quiet=quiet,
        )
        source_file = join(olddir, fromdatadir, "proc0/varN.list")
        target_file = join(newdir, todatadir, "varN.list")
        if exists(source_file):
            cmd = "cp " + source_file + " " + target_file
            process = sub.Popen(cmd.split(), stdout=sub.PIPE)
            output, error = process.communicate()
            print(cmd, output, error)
        items = [
            "def_var.pro",
            "index.pro",
            "jobid.dat",
            "param.nml",
            "particle_index.pro",
            "pc_constants.pro",
            "pointmass_index.pro",
            "pt_positions.dat",
            "sn_series.dat",
            "svnid.dat",
            "time_series.dat",
            "tsnap.dat",
            "tspec.dat",
            "tvid.dat",
            "t2davg.dat",
            "var.general",
            "variables.pro",
            "varname.dat",
        ]
        for item in items:
            source_file = join(olddir, fromdatadir, item)
            target_file = join(newdir, fromdatadir, item)
            if exists(source_file):
                if not exists(target_file):
                    cmd = "cp " + source_file + " " + target_file
                    process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                    output, error = process.communicate()
                    print(cmd, output, error)
    print("Simulation Fortran to h5 completed on rank {}.".format(rank))
    sys.stdout.flush()
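Typical invocation (hedged): convert the binary simulation in the current directory to HDF5 in place without deleting the Fortran files; with mpi4py installed the same call runs in parallel.

sim2h5(newdir='.', olddir='.', precision='d', nghost=3,
       lpersist=True, quiet=False)
# e.g. under MPI (assuming sim2h5 is exposed via pencil.io):
#   mpirun -np 8 python -c "import pencil as pc; pc.io.sim2h5()"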
Example #24
    def __read_2d_aver(self,
                       plane,
                       datadir,
                       variables,
                       aver_file_name,
                       n_vars,
                       l_h5=False,
                       precision='f'):
        """
        Read the xyaverages.dat, xzaverages.dat, yzaverages.dat
        Return the raw data and the time array.
        """

        import os
        import sys
        import numpy as np
        from pencil import read

        if l_h5:
            import h5py
            file_id = os.path.join(datadir, aver_file_name)
            print(file_id)
            sys.stdout.flush()
            with h5py.File(file_id, 'r') as tmp:
                n_times = len(tmp.keys()) - 1
                # Determine the structure of the xy/xz/yz averages.
                for var in variables:
                    nw = tmp[str(0) + '/' + var.strip()].shape[0]
                    break
        else:
            # Determine the structure of the xy/xz/yz averages.
            dim = read.dim()
            if plane == 'xy':
                nw = dim.nz
            if plane == 'xz':
                nw = dim.ny
            if plane == 'yz':
                nw = dim.nx
            file_id = open(os.path.join(datadir, aver_file_name))
            aver_lines = file_id.readlines()
            file_id.close()
            entry_length = int(np.ceil(nw * n_vars / 8.))
            n_times = int(len(aver_lines) / (1. + entry_length))

        # Prepare the data arrays.
        t = np.zeros(n_times, dtype=precision)

        # Read the data
        if l_h5:
            raw_data = np.zeros([n_times, n_vars, nw], dtype=precision)
            with h5py.File(file_id, 'r') as tmp:
                for t_idx in range(0, n_times):
                    t[t_idx] = tmp[str(t_idx) + '/time'][()]
                    raw_idx = 0
                    for var in variables:
                        raw_data[t_idx, raw_idx] = \
                                         tmp[str(t_idx) + '/' + var.strip()][()]
                        raw_idx += 1
        else:
            raw_data = np.zeros([n_times, n_vars * nw], dtype=precision)
            line_idx = 0
            t_idx = -1
            for current_line in aver_lines:
                if line_idx % (entry_length + 1) == 0:
                    t_idx += 1
                    t[t_idx] = float(current_line)
                    raw_idx = 0
                else:
                    raw_data[t_idx, raw_idx*8:(raw_idx*8+8)] = \
                        list(map(np.float32, current_line.split()))
                    raw_idx += 1
                line_idx += 1

            # Restructure the raw data and add it to the Averages object.
            raw_data = np.reshape(raw_data, [n_times, n_vars, nw])

        return t, raw_data
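A hedged usage sketch for a reader like the one above, assuming it backs the standard pc.read.aver entry point (plane_list and the resulting attribute names depend on the pencil version and on the contents of xyaver.in):

import pencil as pc

# Read the horizontal (xy) averages; av.t holds one entry per record and
# each variable listed in xyaver.in becomes an attribute of av.xy.
av = pc.read.aver(plane_list=['xy'], datadir='data')
print(av.t.shape)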
Exemple #25
    def read(self,
             datadir='data',
             proc=-1,
             quiet=False,
             precision='f',
             trim=False):
        """
        Read the grid data from the pencil code simulation.
        If proc < 0, then load all data and assemble.
        Otherwise, load grid from specified processor.

        call signature:

        grid(datadir='data', proc=-1, quiet=False, precision='f', trim=False)

        Keyword arguments:

        *datadir*:
          Directory where the data is stored.

        *proc*
          Processor to be read. If proc is -1, then read the 'global'
          grid. If proc is >=0, then read the grid.dat in the
          corresponding processor directory.

        *quiet*
          Flag for switching off output.

        *precision*
          Float (f), double (d) or half (half).

        *trim*
          Cuts off the ghost points.
        """

        import numpy as np
        import os
        from scipy.io import FortranFile
        from pencil import read

        if precision == 'f':
            dtype = np.float32
        elif precision == 'd':
            dtype = np.float64
        elif precision == 'half':
            dtype = np.float16
        else:
            print('read grid: unknown precision "{}", using "f"'.format(
                precision))
            dtype = np.float32

        if os.path.exists(os.path.join(datadir, 'grid.h5')):
            dim = read.dim(datadir, proc)
            import h5py

            with h5py.File(os.path.join(datadir, 'grid.h5'), 'r') as tmp:
                x = dtype(tmp['grid']['x'][()])
                y = dtype(tmp['grid']['y'][()])
                z = dtype(tmp['grid']['z'][()])
                dx_1 = dtype(tmp['grid']['dx_1'][()])
                dy_1 = dtype(tmp['grid']['dy_1'][()])
                dz_1 = dtype(tmp['grid']['dz_1'][()])
                dx_tilde = dtype(tmp['grid']['dx_tilde'][()])
                dy_tilde = dtype(tmp['grid']['dy_tilde'][()])
                dz_tilde = dtype(tmp['grid']['dz_tilde'][()])
                dx = dtype(tmp['grid']['dx'][()])
                dy = dtype(tmp['grid']['dy'][()])
                dz = dtype(tmp['grid']['dz'][()])
                Lx = dtype(tmp['grid']['Lx'][()])
                Ly = dtype(tmp['grid']['Ly'][()])
                Lz = dtype(tmp['grid']['Lz'][()])
                t = dtype(0.0)
        else:
            datadir = os.path.expanduser(datadir)
            dim = read.dim(datadir, proc)
            param = read.param(datadir=datadir,
                               quiet=True,
                               conflicts_quiet=True)
            if dim.precision == 'D':
                read_precision = 'd'
            else:
                read_precision = 'f'

            if proc < 0:
                proc_dirs = list(
                    filter(lambda string: string.startswith('proc'),
                           os.listdir(datadir)))
                if (proc_dirs.count("proc_bounds.dat") > 0):
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ['allprocs']
            else:
                proc_dirs = ['proc' + str(proc)]

            # Define the global arrays using the validated dtype.
            x = np.zeros(dim.mx, dtype=dtype)
            y = np.zeros(dim.my, dtype=dtype)
            z = np.zeros(dim.mz, dtype=dtype)
            dx_1 = np.zeros(dim.mx, dtype=dtype)
            dy_1 = np.zeros(dim.my, dtype=dtype)
            dz_1 = np.zeros(dim.mz, dtype=dtype)
            dx_tilde = np.zeros(dim.mx, dtype=dtype)
            dy_tilde = np.zeros(dim.my, dtype=dtype)
            dz_tilde = np.zeros(dim.mz, dtype=dtype)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    procdim = read.dim(datadir, proc)
                    if not quiet:
                        print("reading grid data from processor" +
                              " {0} of {1} ...".format(proc, len(proc_dirs)))
                else:
                    procdim = dim
                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the grid data.
                file_name = os.path.join(datadir, directory, 'grid.dat')
                infile = FortranFile(file_name, 'r')
                grid_raw = infile.read_record(dtype=read_precision)
                dx, dy, dz = tuple(infile.read_record(dtype=read_precision))
                Lx, Ly, Lz = tuple(infile.read_record(dtype=read_precision))
                dx_1_raw = infile.read_record(dtype=read_precision)
                dx_tilde_raw = infile.read_record(dtype=read_precision)
                infile.close()

                # Reshape the arrays.
                t = dtype(grid_raw[0])
                x_loc = grid_raw[1:mxloc + 1]
                y_loc = grid_raw[mxloc + 1:mxloc + myloc + 1]
                z_loc = grid_raw[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
                dx_1_loc = dx_1_raw[0:mxloc]
                dy_1_loc = dx_1_raw[mxloc:mxloc + myloc]
                dz_1_loc = dx_1_raw[mxloc + myloc:mxloc + myloc + mzloc]
                dx_tilde_loc = dx_tilde_raw[0:mxloc]
                dy_tilde_loc = dx_tilde_raw[mxloc:mxloc + myloc]
                dz_tilde_loc = dx_tilde_raw[mxloc + myloc:mxloc + myloc +
                                            mzloc]

                if len(proc_dirs) > 1:
                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0x_loc = 0
                        i1x_loc = procdim.mx
                    else:
                        i0x = procdim.ipx * procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0x_loc = procdim.nghostx
                        i1x_loc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0y_loc = 0
                        i1y_loc = procdim.my
                    else:
                        i0y = procdim.ipy * procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0y_loc = procdim.nghosty
                        i1y_loc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z + procdim.mz
                        i0z_loc = 0
                        i1z_loc = procdim.mz
                    else:
                        i0z = procdim.ipz * procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0z_loc = procdim.nghostz
                        i1z_loc = procdim.mz

                    x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                    y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                    z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                    dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                    dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                    dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                    dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                    dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                    dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]

                else:
                    x = dtype(x_loc)
                    y = dtype(y_loc)
                    z = dtype(z_loc)
                    dx_1 = dtype(dx_1_loc)
                    dy_1 = dtype(dy_1_loc)
                    dz_1 = dtype(dz_1_loc)
                    dx_tilde = dtype(dx_tilde_loc)
                    dy_tilde = dtype(dy_tilde_loc)
                    dz_tilde = dtype(dz_tilde_loc)

        if trim:
            self.x = x[dim.l1:dim.l2 + 1]
            self.y = y[dim.m1:dim.m2 + 1]
            self.z = z[dim.n1:dim.n2 + 1]
            self.dx_1 = dx_1[dim.l1:dim.l2 + 1]
            self.dy_1 = dy_1[dim.m1:dim.m2 + 1]
            self.dz_1 = dz_1[dim.n1:dim.n2 + 1]
            self.dx_tilde = dx_tilde[dim.l1:dim.l2 + 1]
            self.dy_tilde = dy_tilde[dim.m1:dim.m2 + 1]
            self.dz_tilde = dz_tilde[dim.n1:dim.n2 + 1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.dx_1 = dx_1
            self.dy_1 = dy_1
            self.dz_1 = dz_1
            self.dx_tilde = dx_tilde
            self.dy_tilde = dy_tilde
            self.dz_tilde = dz_tilde

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.Lx = Lx
        self.Ly = Ly
        self.Lz = Lz
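Usage sketch for this grid reader, matching the calls made elsewhere in this document (the tensor and movie examples below use read.grid the same way):

import pencil as pc

# Assemble the global grid and cut off the ghost points.
grid = pc.read.grid(datadir='data', trim=True, quiet=True)
print(grid.x.shape, grid.dx, grid.Lx)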
Exemple #26
    def calc(
            self,
            aver=[],
            datatopdir=".",
            lskip_zeros=False,
            proc=0,
            rank=0,
            rmfzeros=1,
            rmbzeros=1,
            iy=None,
            l_correction=False,
            t_correction=0.0,
            dim=None,
            timereducer=None,
            trargs=[],
            tindex=(0, None),
            imask=None,
    ):
        """object returns time dependent meridional tensors
        from Averages object aver.z. u, acoef and bcoef and aver.t

        For long DNS runs the 'zaverages.dat' file can be very large
        so MPI may be required and the data is loaded by processor
        as default.

        lskip_zeros=True identifies the resetting of the testfield
        and rmbzeros and rmfzeros number to exclude before and following
        By default none are removed.

        iy is the index array that is computed in this MPI process, which
        may be a subset of the array on this processor

        l_correction=True permits the pencil coefficients computed
        prior to the Pencil Code correction implemented after
        time=t_correction to be rescaled accordingly to match the new
        formulation.

        trargs contain optional arguments for the time treatments: mean,
        smoothing, etc.

        tindex is set to limit the range of the iterations loaded from
        Averages in zaverages.dat

        The index imask, excluding the resets, can be specified to
        ensure all processes use the same mask
        """
        import numpy as np
        import os
        from pencil import read

        os.chdir(datatopdir)  # return to working directory
        grid = read.grid(proc=proc, trim=True, quiet=True)
        # If iy is None or a scalar, convert it to a NumPy index array.
        if iy is None:
            iy = np.arange(grid.y.size)
        elif not hasattr(iy, "size"):
            iy = np.array(iy)
        if rank == 0:
            print("iy size is {0}".format(iy.shape))
        r, theta = np.meshgrid(grid.x, grid.y[iy], indexing="ij")
        del grid, theta  # conserve memory

        print("rank {0} calculating tensors for proc {1}".format(rank, proc))

        # string containers for zaverages.z keys
        uformat = "u{0}mxy"
        alpformat = "alp{0}{1}xy"
        etaformat = "eta{0}{1}{2}xy"

        # imask calculated once for MPI/processor consistency
        if rank == 0:
            print("Removing zeros")
        old_size = aver.t.shape

        # if imask is not provided, either exclude the zeros or use the full time series
        if imask is not None:
            print("imask shape is {}".format(imask.shape))
        else:
            if lskip_zeros:
                index = alpformat.format(1, 1)
                alp11 = getattr(aver.z, index)
                izero = np.where(
                    alp11[:, int(alp11.shape[-2] / 2),
                          int(alp11.shape[-1] / 2)] == 0)[0]
                rmfrange = np.arange(0, rmfzeros - 1)
                rmbrange = np.arange(0, rmbzeros - 1)
                rmpoints = np.array([], dtype=int)
                for zero in izero:
                    rmpoints = np.append(rmpoints, rmfrange + zero)
                    rmpoints = np.append(rmpoints, zero - rmbrange)
                if izero.size > 0:
                    imask = np.delete(np.where(aver.t), rmpoints)
                    if rank == 0:
                        print("Removed {0} zeros from {1} resets".format(
                            len(rmpoints), len(izero)))
                        print(
                            "Resets occurred at save points {0}".format(izero))
                else:
                    imask = np.where(aver.t)[0]
                del (rmpoints, rmbrange, rmfrange)
            else:
                imask = np.arange(aver.t.size)
                if rank == 0:
                    print("Skipped zero removals.")
        # update the time of the snapshots included
        self.t = aver.t[imask]

        # Correction to Pencil Code error may be required on old data
        if l_correction:
            if dim is None:
                dim = read.dim(quiet=True)
            itcorr = np.where(aver.t[imask] < t_correction)[0]
            corr = -dim.nprocz / (dim.nprocz - 2.0)
            indices = [alpformat.format(1, 3), etaformat.format(1, 1, 1),
                       etaformat.format(1, 1, 2)]
            for j in range(0, 3):
                indices.append(alpformat.format(3, j + 1))
                indices.append(etaformat.format(j + 1, 2, 1))
                indices.append(etaformat.format(j + 1, 2, 2))
            for index in indices:
                getattr(aver.z, index)[itcorr] *= corr

        # set up place holders for the Pencil Code tensor coefficients
        index = alpformat.format(1, 1)
        nz_a = getattr(aver.z, index).shape[-2]
        u = np.zeros([3, len(imask), nz_a, iy.size])
        alp = np.zeros([3, 3, len(imask), nz_a, iy.size])
        eta = np.zeros([3, 3, 3, len(imask), nz_a, iy.size])
        if rank == 0:
            print(u.shape, aver.z.__getattribute__(index)[imask, :, :].shape)
        # store the individual components in the z-averages as tensors
        for i, coord in zip(range(0, 3), ("x", "y", "z")):
            try:
                index = uformat.format(coord)
                if iy.size > 1:
                    tmp = aver.z.__getattribute__(index)[:, :, iy]
                    u[i, :, :, :] = tmp[imask]
                else:
                    u[i, :, :, 0] = aver.z.__getattribute__(index)[imask, :,
                                                                   iy]
            except AttributeError:
                pass
        for i in range(0, 3):
            for j in range(0, 3):
                index = alpformat.format(i + 1, j + 1)
                if iy.size > 1:
                    tmp = aver.z.__getattribute__(index)[:, :, iy]
                    alp[j, i, :, :, :] = tmp[imask]
                else:
                    alp[j, i, :, :,
                        0] = aver.z.__getattribute__(index)[imask, :, iy]
        for i in range(0, 3):
            for j in range(0, 3):
                index1 = etaformat.format(i + 1, j + 1, 1)
                index2 = etaformat.format(i + 1, j + 1, 2)
                # Sign difference with Schrinner + r correction
                if iy.size > 1:
                    tmp = aver.z.__getattribute__(index1)[:, :, iy]
                    # eta[0,j,i,:,:,:] = -tmp[imask]  # JOERN, no sign correction
                    eta[0, j, i, :, :, :] = tmp[imask]
                    tmp = aver.z.__getattribute__(index2)[:, :, iy]
                    # eta[1,j,i,:,:,:] = -tmp[imask]*r # JOERN, no sign correction
                    eta[1, j, i, :, :, :] = tmp[imask] * r
                    del tmp
                else:
                    # eta[0,j,i,:,:,0] = -aver.z.__getattribute__(index1)[imask,:,iy] # JOERN, no sign correction
                    # eta[1,j,i,:,:,0] = -aver.z.__getattribute__(index2)[imask,:,iy]*r[:,0] # JOERN, no sign correction
                    eta[0, j, i, :, :,
                        0] = aver.z.__getattribute__(index1)[imask, :, iy]
                    eta[1, j, i, :, :,
                        0] = (aver.z.__getattribute__(index2)[imask, :, iy] *
                              r[:, 0])

        # apply the specified averaging or smoothing: 'None' returns unprocessed arrays
        if callable(timereducer):
            u = timereducer(u, trargs)
            alp = timereducer(alp, trargs)
            eta = timereducer(eta, trargs)

        if rank == 0:
            print("Old time dimension has length: {0}".format(old_size))
            print("New time dimension has length: {0}".format(alp.shape[-3]))

        # Create output tensors
        datatype = alp.dtype
        datashape = [alp.shape[-3], alp.shape[-2], alp.shape[-1], 1]
        setattr(self, "utensor", np.zeros([3] + datashape, dtype=datatype))
        setattr(self, "alpha", np.zeros([3, 3] + datashape, dtype=datatype))
        setattr(self, "beta", np.zeros([3, 3] + datashape, dtype=datatype))
        setattr(self, "gamma", np.zeros([3] + datashape, dtype=datatype))
        setattr(self, "delta", np.zeros([3] + datashape, dtype=datatype))
        setattr(self, "kappa", np.zeros([3, 3, 3] + datashape, dtype=datatype))
        setattr(self, "acoef", np.zeros([3, 3] + datashape, dtype=datatype))
        setattr(self, "bcoef", np.zeros([3, 3, 3] + datashape, dtype=datatype))
        """
        All tensors need to be reordered nz,ny,nx,nt for efficient writing to disk
        """
        # Calculating a and b matrices
        self.acoef[:, :, :, :, :, 0] = np.copy(alp)
        self.acoef = np.swapaxes(self.acoef, -4, -1)
        self.acoef = np.swapaxes(self.acoef, -3, -2)
        self.bcoef[:, :, :, :, :, :, 0] = np.copy(eta)
        self.bcoef = np.swapaxes(self.bcoef, -4, -1)
        self.bcoef = np.swapaxes(self.bcoef, -3, -2)

        irr, ith, iph = 0, 1, 2

        # u-tensor
        print("Calculating utensor on rank {}".format(rank))
        # utensor[:,:,:,:,0] = u[:,:,:,:] - np.mean(u[:,:,:,:],axis=1,keepdims=True)
        self.utensor[:, :, :, :, 0] = u[:, :, :, :]
        self.utensor = np.swapaxes(self.utensor, -4, -1)
        self.utensor = np.swapaxes(self.utensor, -3, -2)
        # Alpha tensor
        print("Calculating alpha on rank {}".format(rank))
        self.alpha[irr, irr, :, :, :, 0] = (alp[irr, irr, :, :, :] -
                                            eta[ith, ith, irr, :, :, :] / r)
        self.alpha[irr, ith, :, :, :, 0] = 0.5 * (
            alp[ith, irr, :, :, :] + eta[ith, irr, irr, :, :, :] / r +
            alp[irr, ith, :, :, :] - eta[ith, ith, ith, :, :, :] / r)
        self.alpha[irr, iph, :, :, :,
                   0] = 0.5 * (alp[iph, irr, :, :, :] + alp[irr, iph, :, :, :]
                               - eta[ith, ith, iph, :, :, :] / r)
        self.alpha[ith, irr, :, :, :, 0] = self.alpha[irr, ith, :, :, :, 0]
        self.alpha[ith, ith, :, :, :, 0] = (alp[ith, ith, :, :, :] +
                                            eta[ith, irr, ith, :, :, :] / r)
        self.alpha[ith, iph, :, :, :,
                   0] = 0.5 * (alp[iph, ith, :, :, :] + alp[ith, iph, :, :, :]
                               + eta[ith, irr, iph, :, :, :] / r)
        self.alpha[iph, irr, :, :, :, 0] = self.alpha[irr, iph, :, :, :, 0]
        self.alpha[iph, ith, :, :, :, 0] = self.alpha[ith, iph, :, :, :, 0]
        self.alpha[iph, iph, :, :, :, 0] = alp[iph, iph, :, :, :]
        self.alpha = np.swapaxes(self.alpha, -4, -1)
        self.alpha = np.swapaxes(self.alpha, -3, -2)
        # Gamma vector
        print("Calculating gamma on rank {}".format(rank))
        self.gamma[irr, :, :, :,
                   0] = -0.5 * (alp[iph, ith, :, :, :] - alp[ith, iph, :, :, :]
                                - eta[ith, irr, iph, :, :, :] / r)
        self.gamma[ith, :, :, :,
                   0] = -0.5 * (alp[irr, iph, :, :, :] - alp[iph, irr, :, :, :]
                                - eta[ith, ith, iph, :, :, :] / r)
        self.gamma[iph, :, :, :, 0] = -0.5 * (
            alp[ith, irr, :, :, :] - alp[irr, ith, :, :, :] +
            eta[ith, irr, irr, :, :, :] / r + eta[ith, ith, ith, :, :, :] / r)
        self.gamma = np.swapaxes(self.gamma, -4, -1)
        self.gamma = np.swapaxes(self.gamma, -3, -2)
        # Beta tensor
        print("Calculating beta on rank {}".format(rank))
        self.beta[irr, irr, :, :, :, 0] = -0.5 * eta[ith, iph, irr, :, :, :]
        self.beta[irr, ith, :, :, :, 0] = 0.25 * (eta[irr, iph, irr, :, :, :] -
                                                  eta[ith, iph, ith, :, :, :])
        self.beta[irr, iph, :, :, :, 0] = 0.25 * (eta[ith, irr, irr, :, :, :] -
                                                  eta[ith, iph, iph, :, :, :] -
                                                  eta[irr, ith, irr, :, :, :])
        self.beta[ith, irr, :, :, :, 0] = self.beta[irr, ith, :, :, :, 0]
        self.beta[ith, ith, :, :, :, 0] = 0.5 * eta[irr, iph, ith, :, :, :]
        self.beta[ith, iph, :, :, :, 0] = 0.25 * (eta[ith, irr, ith, :, :, :] +
                                                  eta[irr, iph, iph, :, :, :] -
                                                  eta[irr, ith, ith, :, :, :])
        self.beta[iph, irr, :, :, :, 0] = self.beta[irr, iph, :, :, :, 0]
        self.beta[iph, ith, :, :, :, 0] = self.beta[ith, iph, :, :, :, 0]
        self.beta[iph, iph, :, :, :, 0] = 0.5 * (eta[ith, irr, iph, :, :, :] -
                                                 eta[irr, ith, iph, :, :, :])
        # Sign convention to match with meanfield_e_tensor
        # self.beta = -self.beta #JOERN, not needed
        self.beta = np.swapaxes(self.beta, -4, -1)
        self.beta = np.swapaxes(self.beta, -3, -2)
        # Delta vector
        print("Calculating delta on rank {}".format(rank))
        self.delta[irr, :, :, :, 0] = 0.25 * (eta[irr, ith, ith, :, :, :] -
                                              eta[ith, irr, ith, :, :, :] +
                                              eta[irr, iph, iph, :, :, :])
        self.delta[ith, :, :, :, 0] = 0.25 * (eta[ith, irr, irr, :, :, :] -
                                              eta[irr, ith, irr, :, :, :] +
                                              eta[ith, iph, iph, :, :, :])
        self.delta[iph, :, :, :, 0] = -0.25 * (eta[irr, iph, irr, :, :, :] +
                                               eta[ith, iph, ith, :, :, :])
        # Sign convention to match with meanfield_e_tensor
        # self.delta = -self.delta #JOERN, not needed
        self.delta = np.swapaxes(self.delta, -4, -1)
        self.delta = np.swapaxes(self.delta, -3, -2)
        # Kappa tensor
        print("Calculating kappa on rank {}".format(rank))
        for i in range(0, 3):
            self.kappa[irr, irr, i, :, :, :, 0] = -eta[irr, irr, i, :, :, :]
            self.kappa[ith, irr, i, :, :, :,
                       0] = -0.5 * (eta[ith, irr, i, :, :, :] +
                                    eta[irr, ith, i, :, :, :])
            self.kappa[iph, irr, i, :, :, :,
                       0] = -0.5 * eta[irr, iph, i, :, :, :]
            self.kappa[irr, ith, i, :, :, :, 0] = self.kappa[ith, irr,
                                                             i, :, :, :, 0]
            self.kappa[ith, ith, i, :, :, :, 0] = -eta[ith, ith, i, :, :, :]
            self.kappa[iph, ith, i, :, :, :,
                       0] = -0.5 * eta[ith, iph, i, :, :, :]
            self.kappa[irr, iph, i, :, :, :, 0] = self.kappa[iph, irr,
                                                             i, :, :, :, 0]
            self.kappa[ith, iph, i, :, :, :, 0] = self.kappa[iph, ith,
                                                             i, :, :, :, 0]
            # tiny placeholder, effectively zero; this diagonal component
            # is not derived from eta
            self.kappa[iph, iph, i, :, :, :, 0] = 1e-91
        # Sign convention to match with meanfield_e_tensor
        # self.kappa = -self.kappa #JOERN, not needed
        self.kappa = np.swapaxes(self.kappa, -4, -1)
        self.kappa = np.swapaxes(self.kappa, -3, -2)
        setattr(self, "imask", imask)
Exemple #27
    def read(self, var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trimall=False, magic=None, sim=None, precision='d',
             lpersist=False, dtype=np.float64):
        """
        Read VAR files from Pencil Code. If proc < 0, then load all data
        and assemble, otherwise load VAR file from specified processor.

        The file format written by output() (and used, e.g., in var.dat)
        consists of the following Fortran records:
        1. data(mx, my, mz, nvar)
        2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
        Here nvar denotes the number of slots, i.e. 1 for one scalar field,
        3 for one vector field, or 8 for var.dat in the case of MHD with
        entropy. Note that deltay(1) is only present if lshear is on, so the
        run parameters must be known in order to parse the record.

        call signature:

        var(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
            trimall=False, magic=None, sim=None, precision='d',
            lpersist=False, dtype=np.float64)

        Keyword arguments:
            var_file:   Name of the VAR file.
            datadir:    Directory where the data is stored.
            proc:       Processor to be read. If -1 read all and assemble to one array.
            ivar:       Index of the VAR file, if var_file is not specified.
            quiet:      Flag for switching off output.
            trimall:    Trim the data cube to exclude ghost zones.
            magic:      Values to be computed from the data, e.g. B = curl(A).
            sim:        Simulation sim object.
            precision:  Float (f), double (d) or half (half).
            dtype:      Precision of the constructed f array (default np.float64).
        """

        import os
        import numpy as np
        from scipy.io import FortranFile
        from pencil.math.derivatives import curl, curl2
        from pencil import read
        from pencil.sim import __Simulation__

        def persist(self, infile=None, precision='d', quiet=quiet):
            """Read persistent variables from an open Fortran file; they are
               appended after the f array and grid data, and are read from
               the first processor's data.

               Record types provide the labels and id records for the
               persistent variables in the deprecated Fortran binary format.
            """
            record_types = {}
            for key in read.record_types.keys():
                if read.record_types[key][1] == 'd':
                    record_types[key]=(read.record_types[key][0],
                                      precision)
                else:
                    record_types[key] = read.record_types[key]

            try:
                tmp_id = infile.read_record('h')
            except Exception:
                # No persistent block present.
                return -1
            block_id = 0
            for _ in range(2000):
                tmp_id = infile.read_record('h')
                block_id = tmp_id[0]
                if block_id == 2000:
                    # id 2000 marks the end of the persistent block
                    break
                for key in record_types.keys():
                    if record_types[key][0] == block_id:
                        tmp_val = infile.read_record(record_types[key][1])
                        self.__setattr__(key, tmp_val[0])
                        if not quiet:
                            print(key, record_types[key][0],
                                  record_types[key][1], tmp_val)
            return self

        dim = None
        param = None
        index = None

        if isinstance(sim, __Simulation__):
            datadir = os.path.expanduser(sim.datadir)
            dim = sim.dim
            param = read.param(datadir=sim.datadir, quiet=True,
                               conflicts_quiet=True)
            index = read.index(datadir=sim.datadir)
        else:
            datadir = os.path.expanduser(datadir)
            if dim is None:
                if var_file[0:2].lower() == 'og':
                    dim = read.ogdim(datadir, proc)
                else:
                    if var_file[0:4] == 'VARd':
                        dim = read.dim(datadir, proc, down=True)
                    else:
                        dim = read.dim(datadir, proc)
            if param is None:
                param = read.param(datadir=datadir, quiet=quiet,
                                   conflicts_quiet=True)
            if index is None:
                index = read.index(datadir=datadir)

        if param.lwrite_aux:
            total_vars = dim.mvar + dim.maux
        else:
            total_vars = dim.mvar

        if os.path.exists(os.path.join(datadir, 'grid.h5')):
#
#  Read HDF5 files.
#
            import h5py
            run2D = param.lwrite_2d

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            if not var_file:
                if ivar < 0:
                    var_file = 'var.h5'
                else:
                    var_file = 'VAR' + str(ivar) + '.h5'

            file_name = os.path.join(datadir, 'allprocs', var_file)
            with h5py.File(file_name, 'r') as tmp:
                for key in tmp['data'].keys():
                    self.f[index.__getattribute__(key)-1, :] = dtype(
                                                         tmp['data/'+key][:])
                t = (tmp['time'][()]).astype(precision)
                x = (tmp['grid/x'][()]).astype(precision)
                y = (tmp['grid/y'][()]).astype(precision)
                z = (tmp['grid/z'][()]).astype(precision)
                dx = (tmp['grid/dx'][()]).astype(precision)
                dy = (tmp['grid/dy'][()]).astype(precision)
                dz = (tmp['grid/dz'][()]).astype(precision)
                if param.lshear:
                    deltay = (tmp['persist/shear_delta_y'][0]).astype(precision)
                if lpersist:
                    for key in tmp['persist'].keys():
                        self.__setattr__(key, (tmp['persist'][key][0]).astype(precision))
        else:
#
#  Read scattered Fortran binary files.
#
            run2D = param.lwrite_2d

            if dim.precision == 'D':
                read_precision = 'd'
            else:
                read_precision = 'f'

            if not var_file:
                if ivar < 0:
                    var_file = 'var.dat'
                else:
                    var_file = 'VAR' + str(ivar)

            if proc < 0:
                proc_dirs = self.__natural_sort(
                    filter(lambda s: s.startswith('proc'),
                           os.listdir(datadir)))
                if (proc_dirs.count("proc_bounds.dat") > 0):
                    proc_dirs.remove("proc_bounds.dat")
                if param.lcollective_io:
                    # A collective IO strategy is being used
                    proc_dirs = ['allprocs']
            else:
                proc_dirs = ['proc' + str(proc)]

            # Set up the global array.
            if not run2D:
                self.f = np.zeros((total_vars, dim.mz, dim.my, dim.mx),
                                  dtype=dtype)
            else:
                if dim.ny == 1:
                    self.f = np.zeros((total_vars, dim.mz, dim.mx),
                                      dtype=dtype)
                else:
                    self.f = np.zeros((total_vars, dim.my, dim.mx),
                                      dtype=dtype)

            x = np.zeros(dim.mx, dtype=precision)
            y = np.zeros(dim.my, dtype=precision)
            z = np.zeros(dim.mz, dtype=precision)

            for directory in proc_dirs:
                if not param.lcollective_io:
                    proc = int(directory[4:])
                    if var_file[0:2].lower() == 'og':
                        procdim = read.ogdim(datadir, proc)
                    else:
                        if var_file[0:4] == 'VARd':
                            procdim = read.dim(datadir, proc, down=True)
                        else:
                            procdim = read.dim(datadir, proc)
                    if not quiet:
                        print("Reading data from processor"+
                              " {0} of {1} ...".format(proc, len(proc_dirs)))

                else:
                    # A collective IO strategy is being used
                    procdim = dim

                mxloc = procdim.mx
                myloc = procdim.my
                mzloc = procdim.mz

                # Read the data.
                file_name = os.path.join(datadir, directory, var_file)
                infile = FortranFile(file_name)
                if not run2D:
                    f_loc = dtype(infile.read_record(dtype=read_precision))
                    f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
                else:
                    if dim.ny == 1:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, mzloc, mxloc))
                    else:
                        f_loc = dtype(infile.read_record(dtype=read_precision))
                        f_loc = f_loc.reshape((-1, myloc, mxloc))
                raw_etc = infile.read_record(dtype=read_precision)
                if lpersist:
                    persist(self, infile=infile, precision=read_precision, quiet=quiet)
                infile.close()

                t = raw_etc[0]
                x_loc = raw_etc[1:mxloc+1]
                y_loc = raw_etc[mxloc+1:mxloc+myloc+1]
                z_loc = raw_etc[mxloc+myloc+1:mxloc+myloc+mzloc+1]
                if param.lshear:
                    shear_offset = 1
                    deltay = raw_etc[-1]
                else:
                    shear_offset = 0

                dx = raw_etc[-3-shear_offset]
                dy = raw_etc[-2-shear_offset]
                dz = raw_etc[-1-shear_offset]

                if len(proc_dirs) > 1:
                    # Calculate where the local processor will go in
                    # the global array.
                    #
                    # Don't overwrite ghost zones of processor to the
                    # left (and accordingly in y and z direction -- makes
                    # a difference on the diagonals)
                    #
                    # Recall that in NumPy, slicing is NON-INCLUSIVE on
                    # the right end, i.e. x[0:4] slices all of a
                    # 4-element array, rather than raising an error as IDL would.

                    if procdim.ipx == 0:
                        i0x = 0
                        i1x = i0x + procdim.mx
                        i0xloc = 0
                        i1xloc = procdim.mx
                    else:
                        i0x = procdim.ipx*procdim.nx + procdim.nghostx
                        i1x = i0x + procdim.mx - procdim.nghostx
                        i0xloc = procdim.nghostx
                        i1xloc = procdim.mx

                    if procdim.ipy == 0:
                        i0y = 0
                        i1y = i0y + procdim.my
                        i0yloc = 0
                        i1yloc = procdim.my
                    else:
                        i0y = procdim.ipy*procdim.ny + procdim.nghosty
                        i1y = i0y + procdim.my - procdim.nghosty
                        i0yloc = procdim.nghosty
                        i1yloc = procdim.my

                    if procdim.ipz == 0:
                        i0z = 0
                        i1z = i0z+procdim.mz
                        i0zloc = 0
                        i1zloc = procdim.mz
                    else:
                        i0z = procdim.ipz*procdim.nz + procdim.nghostz
                        i1z = i0z + procdim.mz - procdim.nghostz
                        i0zloc = procdim.nghostz
                        i1zloc = procdim.mz

                    x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                    y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                    z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                    if not run2D:
                        self.f[:, i0z:i1z, i0y:i1y, i0x:i1x] = f_loc[:, i0zloc:i1zloc,
                                                                i0yloc:i1yloc, i0xloc:i1xloc]
                    else:
                        if dim.ny == 1:
                            self.f[:, i0z:i1z, i0x:i1x] = f_loc[:, i0zloc:i1zloc, i0xloc:i1xloc]
                        else:
                            self.f[:, i0y:i1y, i0x:i1x] = f_loc[:, i0yloc:i1yloc,
                                                                i0xloc:i1xloc]
                else:
                    self.f = f_loc
                    x = x_loc
                    y = y_loc
                    z = z_loc

        if magic is not None:
            if 'bb' in magic:
                # Compute the magnetic field before doing trimall.
                aa = self.f[index.ax-1:index.az, ...]
                self.bb = dtype(curl(aa, dx, dy, dz, x=x, y=y, run2D=run2D,
                               coordinate_system=param.coord_system))
                if trimall:
                    self.bb = self.bb[:, dim.n1:dim.n2+1,
                                      dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            if 'jj' in magic:
                # Compute the electric current field before doing trimall.
                aa = self.f[index.ax-1:index.az, ...]
                self.jj = dtype(curl2(aa, dx, dy, dz, x=x, y=y,
                                coordinate_system=param.coord_system))
                if trimall:
                    self.jj = self.jj[:, dim.n1:dim.n2+1,
                                      dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            if 'vort' in magic:
                # Compute the vorticity field before doing trimall.
                uu = self.f[index.ux-1:index.uz, ...]
                self.vort = dtype(curl(uu, dx, dy, dz, x=x, y=y, run2D=run2D,
                                 coordinate_system=param.coord_system))
                if trimall:
                    if run2D:
                        if dim.nz == 1:
                            self.vort = self.vort[:, dim.m1:dim.m2+1,
                                                  dim.l1:dim.l2+1]
                        else:
                            self.vort = self.vort[:, dim.n1:dim.n2+1,
                                                  dim.l1:dim.l2+1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2+1,
                                              dim.m1:dim.m2+1,
                                              dim.l1:dim.l2+1]

        # Trim the ghost zones of the global f-array if asked.
        if trimall:
            self.x = x[dim.l1:dim.l2+1]
            self.y = y[dim.m1:dim.m2+1]
            self.z = z[dim.n1:dim.n2+1]
            if not run2D:
                self.f = self.f[:, dim.n1:dim.n2+1,
                                dim.m1:dim.m2+1, dim.l1:dim.l2+1]
            else:
                if dim.ny == 1:
                    self.f = self.f[:, dim.n1:dim.n2+1, dim.l1:dim.l2+1]
                else:
                    self.f = self.f[:, dim.m1:dim.m2+1, dim.l1:dim.l2+1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.l1 = dim.l1
            self.l2 = dim.l2 + 1
            self.m1 = dim.m1
            self.m2 = dim.m2 + 1
            self.n1 = dim.n1
            self.n2 = dim.n2 + 1

        # Assign an attribute to self for each variable defined in
        # 'data/index.pro' so that e.g. self.ux is the x-velocity
        aatest = []
        uutest = []
        for key in index.__dict__.keys():
            if 'aatest' in key:
                aatest.append(key)
            if 'uutest' in key:
                uutest.append(key)
            if key != 'global_gg' and key != 'keys' and 'aatest' not in key\
                                  and  'uutest' not in key:
                value = index.__dict__[key]
                setattr(self, key, self.f[value-1, ...])
        # Special treatment for vector quantities.
        if hasattr(index, 'uu'):
            self.uu = self.f[index.ux-1:index.uz, ...]
        if hasattr(index, 'aa'):
            self.aa = self.f[index.ax-1:index.az, ...]
        if hasattr(index, 'uu_sph'):
            self.uu_sph = self.f[index.uu_sphx-1:index.uu_sphz, ...]
        if hasattr(index, 'bb_sph'):
            self.bb_sph = self.f[index.bb_sphx-1:index.bb_sphz, ...]
        # Special treatment for test method vector quantities.
        #Note index 1,2,3,...,0 last vector may be the zero field/flow
        if hasattr(index, 'aatest1'):
            naatest = int(len(aatest)/3)
            for j in range(0,naatest):
                key = 'aatest'+str(np.mod(j+1,naatest))
                value = index.__dict__['aatest1'] + 3*j
                setattr(self, key, self.f[value-1:value+2, ...])
        if hasattr(index, 'uutest1'):
            nuutest = int(len(uutest)/3)
            for j in range(0,nuutest):
                key = 'uutest'+str(np.mod(j+1,nuutest))
                value = index.__dict__['uutest1'] + 3*j
                setattr(self, key, self.f[value-1:value+2, ...])

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        if param.lshear:
            self.deltay = deltay

        # Do the rest of magic after the trimall (i.e. no additional curl.)
        self.magic = magic
        if self.magic is not None:
            self.magic_attributes(param, dtype=dtype)
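A hedged usage sketch, assuming the standard pc.read.var entry point; magic=['bb'] triggers the curl branch shown above, which computes B = curl(A) before any trimming:

import pencil as pc

var = pc.read.var(trimall=True, magic=['bb'])
print(var.t, var.f.shape)   # trimmed f array: (nvar, nz, ny, nx)
print(var.bb.shape)         # (3, nz, ny, nx)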
Exemple #28
def make_movie(
        field="uu1",
        datadir="data/",
        proc=-1,
        extension="xz",
        format="native",
        tmin=0.0,
        tmax=1.0e38,
        amin=0.0,
        amax=1.0,
        transform="",
        oldfile=False,
        norm=None,
        save=None,
        figsize=(16, 4),
        fps=12,
):
    """
    read 2D slice files and assemble an animation in a mpg movie.

    Quickly written from the example at http://matplotlib.sourceforge.net/faq/howto_faq.html

    Options:

     field  --- which variable to slice
     datadir--- path to data directory
     proc   --- an integer giving the processor to read a slice from
     extension --- which plane of xy,xz,yz,Xz. for 2D this should be overwritten.
     format  --- endian. one of little, big, or native (default)
     tmin    --- start time
     tmax    --- end time
     amin    --- minimum value for image scaling
     amax    --- maximum value for image scaling
     transform --- insert arbitrary numerical code to modify the slice
     norm    --- scales calar data
     save    --- directory to save file
     figsize --- tuple containing the size of the figure
     fps     --- Frames per seconds for the video
    """
    import os
    from pencil.io import npfile
    from pencil import read
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import colors

    # Global configuration:
    # lines
    plt.rcParams["lines.linewidth"] = 2
    plt.rcParams["lines.color"] = "k"
    # font
    plt.rcParams["font.size"] = 30

    plt.rcParams["font.family"] = "serif"
    # legend
    plt.rcParams["legend.fontsize"] = 20
    plt.rcParams["legend.fancybox"] = False
    plt.rcParams["legend.numpoints"] = 2
    plt.rcParams["legend.shadow"] = False
    plt.rcParams["legend.frameon"] = False
    # latex
    plt.rc("text", usetex=True)
    plt.rcParams["text.latex.preamble"] = [r"\usepackage{amsmath}"]

    datadir = os.path.expanduser(datadir)
    if proc < 0:
        filename = os.path.join(datadir, "slice_" + field + "." + extension)
    else:
        filename = os.path.join(
            datadir, "proc" + str(proc) + "/slice_" + field + "." + extension)


    dim = read.dim(datadir, proc)

    if dim.precision == "D":
        precision = "d"
    else:
        precision = "f"

    grid = read.grid(datadir=datadir, trim=True)
    # set up slice plane
    if extension == "xy" or extension == "Xy":
        hsize = dim.nx
        vsize = dim.ny
        xlabel = "x"
        ylabel = "y"
        x = grid.x
        y = grid.y
    if extension == "xz":
        hsize = dim.nx
        vsize = dim.nz
        xlabel = "x"
        ylabel = "z"
        x = grid.x
        y = grid.z
    if extension == "yz":
        hsize = dim.ny
        vsize = dim.nz
        xlabel = "y"
        ylabel = "z"
        x = grid.y
        y = grid.z

    plane = np.zeros((vsize, hsize), dtype=precision)

    infile = npfile(filename, endian=format)

    files = []
    fig = plt.figure(figsize=figsize)
    fig.subplots_adjust(left=0.12,
                        bottom=0.1,
                        right=0.98,
                        top=0.96,
                        wspace=0.23,
                        hspace=0.2)
    ax = fig.add_subplot(111)

    ifirst = True
    islice = 0
    while True:
        try:
            raw_data = infile.fort_read(precision)
        except (ValueError, TypeError):
            # End of file or truncated record: stop reading slices.
            break

        if oldfile:
            t = raw_data[-1]
            plane = raw_data[:-1].reshape(vsize, hsize)
        else:
            slice_z2pos = raw_data[-1]
            t = raw_data[-2]
            plane = raw_data[:-2].reshape(vsize, hsize)

        if transform:
            # exec() cannot rebind a local variable in Python 3, so evaluate.
            plane = eval("plane" + transform)

        if t > tmin and t < tmax:
            ax.cla()
            title = "t = %11.3e" % t
            ax.set_title(title)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)

            ax.imshow(
                plane,
                origin="lower",
                vmin=amin,
                vmax=amax,
                norm=norm,
                cmap="hot",
                extent=[x[0], x[-1], y[0], y[-1]],
                aspect=1,
            )
            fname = "_tmp%03d.png" % islice
            print("Saving frame", fname)
            fig.savefig(fname)
            files.append(fname)

            if ifirst:
                print(
                    "----islice----------t---------min-------max-------delta")
            print("%10i %10.3e %10.3e %10.3e %10.3e" %
                  (islice, t, plane.min(), plane.max(),
                   plane.max() - plane.min()))

            ifirst = False
            islice += 1
        if t > tmax:
            break

    print("Making movie animation.mpg - this make take a while")
    os.system(
        f"mencoder 'mf://_tmp*.png' -mf type=png:fps={fps} -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg"
    )
    if save:
        os.system(f"mv _tmp*.png {save}")
        print(f"Moving files to {save}")
    else:
        os.system("rm _tmp*.png")
        print("Removing all files")
    infile.close()
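A hypothetical invocation; make_movie shells out to mencoder, which must be installed and on the PATH:

# Render the xz slices of uu1 between t=0 and t=10 at 12 frames per second.
make_movie(field="uu1", extension="xz", datadir="data/",
           tmin=0.0, tmax=10.0, amin=-0.1, amax=0.1, fps=12)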
Exemple #29
    def read(
            self,
            field="",
            extension="",
            datadir="data",
            proc=-1,
            old_file=False,
            precision="f",
            iter_list=list(),
            quiet=True,
            tstart=0,
            tend=None,
            downsample=1,
    ):
        """
        read(field='', extension='', datadir='data', proc=-1, old_file=False,
             precision='f', iter_list=list(), quiet=True,
             tstart=0, tend=None, downsample=1)

        Read Pencil Code slice data.

        Parameters
        ----------
        field : string or list of strings
            Name of the field(s) to be read.

        extension : string or list of strings
            Specifies the plane slice(s).

        datadir : string
            Directory where the data is stored.

        proc : int
            Processor to be read. If -1 read all and assemble to one array.

        old_file : bool
            Flag for reading old file format.

        precision : string
            Precision of the data. Either float 'f' or double 'd'.

        iter_list : list
            Iteration indices for which to sample the slices.

        quiet : bool
            Flag for switching off output.

        tstart : float
            Start time interval from which to sample slices.

        tend : float
            End time interval from which to sample slices.

        downsample : integer
            Sample rate to reduce slice array size.

        Returns
        -------
        Class containing the fields and slices as attributes.

        Notes
        -----
        Use the attribute keys to get a list of attributes.

        Examples
        --------
        >>> vsl = pc.read.slices()
        >>> vsl.keys()
        t
        xy
        xy2
        xz
        yz
        position
        coordinate
        """

        import os
        import sys
        import numpy as np
        from scipy.io import FortranFile
        from pencil import read

        if os.path.exists(os.path.join(datadir, "grid.h5")):
            l_h5 = True
            import h5py
        else:
            l_h5 = False
        if not isinstance(iter_list, list):
            if not isinstance(iter_list, int):
                print("iter_list must be an integer or integer list, ignoring")
                iter_list = list()
            else:
                iter_list = [iter_list]

        if l_h5:
            # Define the directory that contains the slice files.
            slice_dir = os.path.join(datadir, "slices")
            # Initialize the fields list.
            if field:
                if isinstance(field, list):
                    field_list = field
                else:
                    field_list = [field]
            else:
                # Find the existing fields.
                field_list = []
                for file_name in os.listdir(slice_dir):
                    field_list.append(file_name.split("_")[0])
                # Remove duplicates.
                field_list = list(set(field_list))
            # Initialize the extensions list.
            if extension:
                if isinstance(extension, list):
                    extension_list = extension
                else:
                    extension_list = [extension]
            else:
                # Find the existing extensions.
                extension_list = []
                for file_name in os.listdir(slice_dir):
                    extension_list.append(
                        file_name.split("_")[1].split(".")[0])
                # Remove duplicates.
                extension_list = list(set(extension_list))

            class Foo(object):
                # Bare namespace object holding slices, positions and indices.
                pass

            if len(iter_list) > 0:
                nt = len(iter_list)
                if tstart > 0 or tend:
                    print(
                        "read.slices: using iter_list.",
                        "If tstart or tend required set iter_list=None",
                    )
                tstart = 0
                tend = None
            else:
                nt = None
            pos_object = Foo()
            ind_object = Foo()
            for extension in extension_list:
                if not quiet:
                    print("Extension: " + str(extension))
                    sys.stdout.flush()
                # This one will store the data.
                ext_object = Foo()
                pos_list = []
                ind_list = []
                for field in field_list:
                    if not quiet:
                        print("  -> Field: " + str(field))
                        sys.stdout.flush()
                    # Compose the file name according to field & extension.
                    file_name = os.path.join(slice_dir,
                                             field + "_" + extension + ".h5")
                    with h5py.File(file_name, "r") as ds:
                        if not nt:
                            if not tend:
                                nt = len(ds.keys()) - 1
                                if tstart == 0:
                                    iter_list = list(np.arange(nt) + 1)
                                else:
                                    it = 1
                                    while it < ds["last"][0]:
                                        if ds[str(it) + "/time"][()] >= tstart:
                                            break
                                        it += 1
                                        if not quiet:
                                            print("iter_list: it={}, time={}".
                                                  format(
                                                      it, ds[str(it + 1) +
                                                             "/time"][()]))
                                    iter_list = list(
                                        np.arange(nt - it) + it + 1)
                            else:
                                it = 1
                                while it < ds["last"][0]:
                                    if ds[str(it) + "/time"][()] >= tstart:
                                        if ds[str(it) + "/time"][()] > tend:
                                            break
                                        iter_list.append(it)
                                        if not quiet:
                                            print("iter_list: it={}, time={}".
                                                  format(
                                                      it, ds[str(it) +
                                                             "/time"][()]))
                                    it += 1
                        nt = len(iter_list)
                        istart = 0
                        if not quiet:
                            print("iter_list, start", iter_list, istart)
                        downsample = max(1, int(downsample))
                        vsize = int(
                            np.ceil(ds["1/data"].shape[0] / float(downsample)))
                        hsize = int(
                            np.ceil(ds["1/data"].shape[1] / float(downsample)))
                        slice_series = np.zeros([nt, vsize, hsize],
                                                dtype=precision)
                        for it in iter_list:
                            if ds.__contains__(str(it)):
                                slice_series[istart] = ds[
                                    str(it) +
                                    "/data"][::downsample, ::downsample]
                            else:
                                print("No data for iteration {} in {}".
                                      format(it, file_name))
                            istart += 1
                        # Times/positions are gathered only once per
                        # extension (pos_list is still empty for the first
                        # field read).
                        add_pos = len(pos_list) == 0
                        if self.t.size == 0:
                            self.t = list()
                            for it in iter_list:
                                self.t.append(ds[str(it) + "/time"][()])
                                if add_pos:
                                    ind_list.append(ds[str(it) +
                                                       "/coordinate"][0])
                                    pos_list.append(ds[str(it) +
                                                       "/position"][()])
                            self.t = np.array(self.t).astype(precision)
                            setattr(pos_object, extension, np.array(pos_list))
                            setattr(ind_object, extension, np.array(ind_list))
                        else:
                            if add_pos:
                                for it in iter_list:
                                    ind_list.append(ds[str(it) +
                                                       "/coordinate"][0])
                                    pos_list.append(ds[str(it) +
                                                       "/position"][()])
                                setattr(pos_object, extension,
                                        np.array(pos_list))
                                setattr(ind_object, extension,
                                        np.array(ind_list))
                    setattr(ext_object, field, slice_series)

                setattr(self, extension, ext_object)
                setattr(self, "position", pos_object)
                setattr(self, "coordinate", ind_object)
        else:
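            # Fortran-binary route: slice_<field>.<extension> files are read
            # record by record, either from datadir or from a single
            # processor directory.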
            # Define the directory that contains the slice files.
            if proc < 0:
                slice_dir = datadir
            else:
                slice_dir = os.path.join(datadir, "proc{0}".format(proc))

            # Initialize the fields list.
            if field:
                if isinstance(field, list):
                    field_list = field
                else:
                    field_list = [field]
            else:
                # Find the existing fields.
                field_list = []
                for file_name in os.listdir(slice_dir):
                    if file_name[:6] == "slice_":
                        field_list.append(file_name.split(".")[0][6:])
                # Remove duplicates.
                field_list = list(set(field_list))
                try:
                    field_list.remove("position")
                except ValueError:
                    pass

            # Initialize the extensions list.
            if extension:
                if isinstance(extension, list):
                    extension_list = extension
                else:
                    extension_list = [extension]
            else:
                # Find the existing extensions.
                extension_list = []
                for file_name in os.listdir(slice_dir):
                    if file_name[:6] == "slice_":
                        extension_list.append(file_name.split(".")[1])
                # Remove duplicates.
                extension_list = list(set(extension_list))
                try:
                    extension_list.remove("dat")
                except ValueError:
                    pass

            class Foo(object):
                """Empty helper class used as a flexible attribute container."""

            if len(iter_list) > 0:
                nt = len(iter_list)
                if tstart > 0 or tend:
                    print(
                        "read.slices: using iter_list.",
                        "If tstart or tend required set iter_list=None",
                    )
                tstart = 0
                tend = None
            else:
                nt = None
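            # nt stays None when no iter_list is given; in that case every
            # record that passes the tstart/tend filter below is kept.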
            for extension in extension_list:
                if not quiet:
                    print("Extension: " + str(extension))
                    sys.stdout.flush()
                # Container that will hold the data for this extension.
                ext_object = Foo()

                for field in field_list:
                    if not quiet:
                        print("  -> Field: " + str(field))
                        sys.stdout.flush()
                    # Compose the file name according to field and extension.
                    datadir = os.path.expanduser(datadir)
                    if proc < 0:
                        file_name = os.path.join(
                            datadir, "slice_" + field + "." + extension)
                    else:
                        file_name = os.path.join(
                            datadir,
                            "proc{0}".format(proc),
                            "slice_" + field + "." + extension,
                        )

                    dim = read.dim(datadir, proc)
                    if dim.precision == "D":
                        read_precision = "d"
                    else:
                        read_precision = "f"

                    # Set up the slice plane dimensions.
                    if extension in ("xy", "Xy", "xy2"):
                        hsize = dim.nx
                        vsize = dim.ny
                    elif extension == "xz":
                        hsize = dim.nx
                        vsize = dim.nz
                    elif extension == "yz":
                        hsize = dim.ny
                        vsize = dim.nz
                    if extension == "r":
                        # Read grid size of radial slices by iterating to the last
                        # line of slice_position.dat. This will break if/when there
                        # are changes to slice_position.dat!
                        slicepos_fn = os.path.join(datadir,
                                                   "slice_position.dat")
                        slicepos = open(slicepos_fn, 'r')
                        for line in slicepos:
                            line = line.strip()
                        pars = line.split()
                        hsize = int(pars[1])
                        vsize = int(pars[2])
                        slicepos.close()

                    try:
                        infile = FortranFile(file_name)
                    except OSError:
                        # Skip fields that have no slice file for this
                        # extension.
                        continue

                    islice = 0
                    it = 0
                    self.t = list()
                    slice_series = list()

                    if not quiet:
                        print("  -> Reading... ", file_name)
                        sys.stdout.flush()
                    if not nt:
                        iter_list = list()
                    if not quiet:
                        print("Entering while loop")
                    while True:
                        try:
                            raw_data = infile.read_record(
                                dtype=read_precision).astype(precision)
                        except (ValueError, TypeError):
                            # End of file (or a truncated record).
                            break

                        # Keep time as a 1-element array in both formats so
                        # that self.t can be flattened uniformly below.
                        if old_file:
                            time = raw_data[-1:]
                        else:
                            time = raw_data[-2:-1]
                        if time >= tstart:
                            if tend:
                                if time <= tend:
                                    self.t.append(time)
                                    if old_file:
                                        slice_series.append(raw_data[:-1])
                                    else:
                                        slice_series.append(raw_data[:-2])
                                    islice += 1
                            elif it in iter_list or not nt:
                                self.t.append(time)
                                if old_file:
                                    slice_series.append(raw_data[:-1])
                                else:
                                    slice_series.append(raw_data[:-2])
                                islice += 1
                        it += 1
                    if not quiet:
                        print("  -> Done")
                        sys.stdout.flush()

                    # Convert to arrays: each stored time is a 1-element
                    # array ([:, 0] flattens it) and each record is a flat
                    # slice, reshaped into (islice, vsize, hsize).
                    if not quiet:
                        print("Reshaping array")
                        sys.stdout.flush()
                    self.t = np.array(self.t, dtype=precision)[:, 0]
                    slice_series = np.array(slice_series, dtype=precision)
                    slice_series = slice_series.reshape(islice, vsize, hsize)
                    # Slicing requires an integer step of at least 1.
                    downsample = max(1, int(downsample))
                    if downsample > 1:
                        tmp_series = list()
                        for iislice in range(islice):
                            tmp_series.append(slice_series[
                                iislice, ::downsample, ::downsample])
                        slice_series = np.array(tmp_series)
                    setattr(ext_object, field, slice_series)

                setattr(self, extension, ext_object)
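                # Minimal usage sketch (an illustrative addition, not part of
                # the original example; it assumes this method backs the
                # Pencil Code python reader and that an 'xy' slice of the
                # field 'uu1' exists):
                #     import pencil as pc
                #     sl = pc.read.slices(field="uu1", extension="xy",
                #                         datadir="data", downsample=2)
                #     sl.t         # times of the stored slices
                #     sl.xy.uu1    # array of shape (nt, vsize, hsize)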