Example #1
    def __read_2d_aver(self, plane, datadir, aver_file_name, n_vars):
        """
        Read the xyaverages.dat, xzaverages.dat, yzaverages.dat.
        Return the raw data and the time array.
        """

        import os
        import numpy as np
        from pencilnew import read

        # Determine the structure of the xy/xz/yz averages.
        if plane == 'xy':
            nw = getattr(read.dim(), 'nz')
        if plane == 'xz':
            nw = getattr(read.dim(), 'ny')
        if plane == 'yz':
            nw = getattr(read.dim(), 'nx')
        file_id = open(os.path.join(datadir, aver_file_name))
        aver_lines = file_id.readlines()
        file_id.close()
        entry_length = int(np.ceil(nw * n_vars / 8.))
        n_times = int(len(aver_lines) / (1. + entry_length))

        # Prepare the data arrays.
        t = np.zeros(n_times, dtype=np.float32)
        raw_data = np.zeros([n_times, n_vars * nw])

        # Read the data
        line_idx = 0
        t_idx = -1
        for current_line in aver_lines:
            if line_idx % (entry_length + 1) == 0:
                t_idx += 1
                t[t_idx] = np.float32(current_line)
                raw_idx = 0
            else:
                raw_data[t_idx, raw_idx*8:(raw_idx*8+8)] = \
                    list(map(np.float32, current_line.split()))
                raw_idx += 1
            line_idx += 1

        # Restructure the raw data and add it to the Averages object.
        raw_data = np.reshape(raw_data, [n_times, n_vars, nw])

        return t, raw_data
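
Usage note: this private helper is normally reached through the package-level averages reader rather than called directly. A minimal sketch, assuming a run directory containing xyaverages.dat and assuming the wrapper pencilnew.read.aver accepts a plane_list keyword (neither is shown in the snippet above):

    import pencilnew as pcn

    # Read the xy averages; the returned object holds the time array in .t
    # and one attribute per averaged variable.
    aver = pcn.read.aver(plane_list=['xy'], datadir='data')
    print(aver.t.shape)
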
Example #2
    def read(self, data_dir='data', param=None, dim=None):
        """
        Read Pencil Code index data from index.pro.

        call signature:

        read(self, data_dir='data', param=None, dim=None)

        Keyword arguments:

        *data_dir*:
          Directory where the data is stored.

        *param*
          Parameter object.

        *dim*
          Dimension object.
        """

        import os
        import pencilnew.read as read

        if param is None:
            param = read.param(data_dir=data_dir, quiet=True)
        if dim is None:
            dim = read.dim(data_dir=data_dir)

        if param.lwrite_aux:
            totalvars = dim.mvar + dim.maux
        else:
            totalvars = dim.mvar

        index_file = open(os.path.join(data_dir, 'index.pro'))
        for line in index_file.readlines():
            clean = line.strip()
            name = clean.split('=')[0].strip().replace('[',
                                                       '').replace(']', '')
            if (clean.split('=')[1].strip().startswith('intarr(370)')):
                continue
            val = int(clean.split('=')[1].strip())

            if (val != 0  and val <= totalvars \
                and not name.startswith('i_') and name.startswith('i')):
                name = name.lstrip('i')
                if (name == 'lnTT' and param.ltemperature_nolog):
                    name = 'tt'
                setattr(self, name, val)
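
Usage note: a minimal sketch of the index reader, assuming the method above is exposed through the wrapper pencilnew.read.index (hypothetical run directory 'data'):

    import pencilnew as pcn

    # Parse data/index.pro; every valid slot becomes an attribute, e.g. index.ux.
    index = pcn.read.index(data_dir='data')
    print(index.ux, index.uy, index.uz)
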
Example #3
    def update(self, hard=False, quiet=True):
        """Update simulation object:
            if not read in:
                - read param.nml
                - read grid and ghost grid

            Set hard=True to force update.
        """
        from os.path import exists
        from os.path import join
        from pencilnew.read import param, grid, dim

        REEXPORT = False

        if hard:
            self.param = False
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if self.param == False:
            try:
                if exists(join(self.datadir, 'param.nml')):
                    print('~ Reading param.nml.. ')
                    param = param(quiet=quiet, datadir=self.datadir)
                    self.param = {}  # read params into Simulation object
                    for key in dir(param):
                        if key.startswith('_') or key == 'read': continue
                        self.param[key] = getattr(param, key)
                    REEXPORT = True
                else:
                    if not quiet:
                        print(
                            '? WARNING: for ' + self.path +
                            '\n? Simulation has not run yet! Meaning: No param.nml found!'
                        )
                    REEXPORT = True
            except:
                print('! ERROR: while reading param.nml for ' + self.path)
                self.param = False
                REEXPORT = True

        if self.param != False and (self.grid == False
                                    or self.ghost_grid == False):
            try:  # read grid only if param is not False
                #import pencilnew as pcn; pcn.io.debug_breakpoint()
                print('~ Reading grid.. ')
                self.grid = grid(datadir=self.datadir, trim=True, quiet=True)
                print('~ Reading ghost_grid.. ')
                self.ghost_grid = grid(datadir=self.datadir,
                                       trim=False,
                                       quiet=True)
                print('~ Reading dim.. ')
                self.dim = dim(datadir=self.datadir)
                if not quiet:
                    print('# Updating grid and ghost_grid successful')
                REEXPORT = True
                # adding lx, dx etc to params
                self.param['Lx'] = self.grid.Lx
                self.param['Ly'] = self.grid.Ly
                self.param['Lz'] = self.grid.Lz
                self.param['lx'] = self.grid.Lx
                self.param['ly'] = self.grid.Ly
                self.param['lz'] = self.grid.Lz
                self.param['dx'] = self.grid.dx
                self.param['dy'] = self.grid.dy
                self.param['dz'] = self.grid.dz
            except:
                if not quiet:
                    print(
                        '? WARNING: Updating grid and ghost_grid was not successful since reading the grid raised an error.'
                    )
                if self.started() or (not quiet):
                    print("? WARNING: Couldn't load grid for " + self.path)
                self.grid = False
                self.ghost_grid = False
                self.dim = False
                REEXPORT = True
        elif self.param == False:
            if not quiet:
                print(
                    '? WARNING: Updating grid and ghost_grid was not successful since the run has not been started yet.'
                )
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if REEXPORT: self.export()
        #import pencilnew as pcn; pcn.io.debug_breakpoint()
        return self
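
Usage note: a minimal sketch of calling update() on a simulation object, assuming the package provides a get_sim helper that returns such an object (not shown above):

    import pencilnew as pcn

    # Load the simulation in the current directory and force a re-read of
    # param.nml, grid, ghost_grid and dim.
    sim = pcn.get_sim('.')
    sim.update(hard=True, quiet=False)
    print(sim.param['dx'], sim.grid.Lx)
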
Example #4
    def read(self,
             field='',
             extension='',
             datadir='data',
             proc=-1,
             old_file=False,
             precision='f'):
        """
        Read Pencil Code slice data.

        call signature:

        read(self, field='', extension='', datadir='data', proc=-1,
             old_file=False, precision='f')

        Keyword arguments:

        *field*:
          Name of the field(s) to be read.

        *extension*
          Specifies the slice(s).

        *datadir*:
          Directory where the data is stored.

        *proc*:
          Processor to be read. If -1 read all and assemble to one array.

        *old_file*
          Flag for reading old file format.

        *precision*:
          Precision of the data. Either float 'f' or double 'd'.
        """

        import os
        import numpy as np
        from scipy.io import FortranFile
        from pencilnew import read

        # Define the directory that contains the slice files.
        if proc < 0:
            slice_dir = datadir
        else:
            slice_dir = os.path.join(datadir, 'proc{0}'.format(proc))

        # Initialize the fields list.
        if field:
            if isinstance(field, list):
                field_list = field
            else:
                field_list = [field]
        else:
            # Find the existing fields.
            field_list = []
            for file_name in os.listdir(slice_dir):
                if (file_name[:6] == 'slice_'):
                    field_list.append(file_name.split('.')[0][6:])
            # Remove duplicates.
            field_list = list(set(field_list))

        # Initialize the extensions list.
        if extension:
            if isinstance(extension, list):
                extension_list = extension
            else:
                extension_list = [extension]
        else:
            # Find the existing extensions.
            extension_list = []
            for file_name in os.listdir(slice_dir):
                if (file_name[:6] == 'slice_'):
                    extension_list.append(file_name.split('.')[1])
            # Remove duplicates.
            extension_list = list(set(extension_list))

        class Foo():
            pass

        for extension in extension_list:
            # This one will store the data.
            ext_object = Foo()

            for field in field_list:
                # Compose the file name according to field and extension.
                datadir = os.path.expanduser(datadir)
                if proc < 0:
                    file_name = os.path.join(
                        datadir, 'slice_' + field + '.' + extension)
                else:
                    file_name = os.path.join(
                        datadir, 'proc{0}'.format(proc),
                        'slice_' + field + '.' + extension)

                dim = read.dim(datadir, proc)
                if dim.precision == 'D':
                    precision = 'd'
                else:
                    precision = 'f'

                # Set up slice plane.
                if (extension == 'xy' or extension == 'Xy'):
                    hsize = dim.nx
                    vsize = dim.ny
                if (extension == 'xz'):
                    hsize = dim.nx
                    vsize = dim.nz
                if (extension == 'yz'):
                    hsize = dim.ny
                    vsize = dim.nz

                infile = FortranFile(file_name)

                islice = 0
                self.t = np.zeros(1, dtype=precision)
                slice_series = np.zeros(1, dtype=precision)

                while True:
                    try:
                        raw_data = infile.read_record(dtype=precision)
                    except ValueError:
                        break
                    except TypeError:
                        break

                    if old_file:
                        self.t = np.concatenate((self.t, raw_data[-1:]))
                        slice_series = np.concatenate(
                            (slice_series, raw_data[:-1]))
                    else:
                        self.t = np.concatenate((self.t, raw_data[-2:-1]))
                        slice_series = np.concatenate(
                            (slice_series, raw_data[:-2]))
                    islice += 1

                # Reshape and remove first entry.
                self.t = self.t[1:]
                slice_series = slice_series[1:].reshape(islice, vsize, hsize)
                setattr(ext_object, field, slice_series)

            setattr(self, extension, ext_object)
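
Usage note: a minimal sketch of the slice reader, assuming it is exposed as pencilnew.read.slices (hedged name) and that slice_uu1.xy files exist in the run:

    import pencilnew as pcn

    # Read the xy slices of uu1; the result carries .t and one attribute per
    # extension, each holding the per-field slice series.
    slices = pcn.read.slices(field='uu1', extension='xy', datadir='data')
    print(slices.t.shape, slices.xy.uu1.shape)
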
Example #5
    def read(self, datadir='data', proc=-1, quiet=False,
             trim=False):
        """
        Read the grid data from the pencil code simulation.
        If proc < 0, then load all data and assemble.
        Otherwise, load grid from specified processor.

        call signature:

        read(self, datadir='data', proc=-1, quiet=False, trim=False)

        Keyword arguments:

        *datadir*:
          Directory where the data is stored.

        *proc*
          Processor to be read. If proc is -1, then read the 'global'
          grid. If proc is >=0, then read the grid.dat in the
          corresponding processor directory.

        *quiet*
          Flag for switching off output.

        *trim*
          Cuts off the ghost points.
        """

        import numpy as np
        import os
        from scipy.io import FortranFile
        import pencilnew.read as read

        datadir = os.path.expanduser(datadir)
        dim = read.dim(datadir, proc)
        if dim.precision == 'D':
            precision = 'd'
        else:
            precision = 'f'

        if proc < 0:
            proc_dirs = list(filter(lambda s: s.startswith('proc'),
                                    os.listdir(datadir)))
        else:
            proc_dirs = ['proc'+str(proc)]

        # Define the global arrays.
        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)
        dx_1 = np.zeros(dim.mx, dtype=precision)
        dy_1 = np.zeros(dim.my, dtype=precision)
        dz_1 = np.zeros(dim.mz, dtype=precision)
        dx_tilde = np.zeros(dim.mx, dtype=precision)
        dy_tilde = np.zeros(dim.my, dtype=precision)
        dz_tilde = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            proc = int(directory[4:])
            procdim = read.dim(datadir, proc)
            if not quiet:
                print("reading grid data from processor {0} of {1} ...".format(proc, len(proc_dirs)))

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the grid data.
            file_name = os.path.join(datadir, directory, 'grid.dat')
            infile = FortranFile(file_name, 'r')
            grid_raw = infile.read_record(dtype=precision)
            dx, dy, dz = tuple(infile.read_record(dtype=precision))
            Lx, Ly, Lz = tuple(infile.read_record(dtype=precision))
            dx_1_raw = infile.read_record(dtype=precision)
            dx_tilde_raw = infile.read_record(dtype=precision)
            infile.close()

            # Reshape the arrays.
            t = grid_raw[0]
            x_loc = grid_raw[1:mxloc+1]
            y_loc = grid_raw[mxloc+1:mxloc+myloc+1]
            z_loc = grid_raw[mxloc+myloc+1:mxloc+myloc+mzloc+1]
            dx_1_loc = dx_1_raw[0:mxloc]
            dy_1_loc = dx_1_raw[mxloc:mxloc+myloc]
            dz_1_loc = dx_1_raw[mxloc+myloc:mxloc+myloc+mzloc]
            dx_tilde_loc = dx_tilde_raw[0:mxloc]
            dy_tilde_loc = dx_tilde_raw[mxloc:mxloc+myloc]
            dz_tilde_loc = dx_tilde_raw[mxloc+myloc:mxloc+myloc+mzloc]

            if len(proc_dirs) > 1:
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0x_loc = 0
                    i1x_loc = procdim.mx
                else:
                    i0x = procdim.ipx*procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0x_loc = procdim.nghostx
                    i1x_loc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0y_loc = 0
                    i1y_loc = procdim.my
                else:
                    i0y = procdim.ipy*procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0y_loc = procdim.nghosty
                    i1y_loc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0z_loc = 0
                    i1z_loc = procdim.mz
                else:
                    i0z = procdim.ipz*procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0z_loc = procdim.nghostz
                    i1z_loc = procdim.mz

                x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]

            else:
                x = x_loc
                y = y_loc
                z = z_loc
                dx_1 = dx_1_loc
                dy_1 = dy_1_loc
                dz_1 = dz_1_loc
                dx_tilde = dx_tilde_loc
                dy_tilde = dy_tilde_loc
                dz_tilde = dz_tilde_loc

        if trim:
            self.x = x[dim.l1:dim.l2+1]
            self.y = y[dim.m1:dim.m2+1]
            self.z = z[dim.n1:dim.n2+1]
            self.dx_1 = dx_1[dim.l1:dim.l2+1]
            self.dy_1 = dy_1[dim.m1:dim.m2+1]
            self.dz_1 = dz_1[dim.n1:dim.n2+1]
            self.dx_tilde = dx_tilde[dim.l1:dim.l2+1]
            self.dy_tilde = dy_tilde[dim.m1:dim.m2+1]
            self.dz_tilde = dz_tilde[dim.n1:dim.n2+1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.dx_1 = dx_1
            self.dy_1 = dy_1
            self.dz_1 = dz_1
            self.dx_tilde = dx_tilde
            self.dy_tilde = dy_tilde
            self.dz_tilde = dz_tilde

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.Lx = Lx
        self.Ly = Ly
        self.Lz = Lz
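
Usage note: a minimal sketch of reading the assembled grid, assuming the wrapper pencilnew.read.grid constructs an object and calls the method above:

    import pencilnew as pcn

    # Read and assemble the global grid with the ghost zones trimmed.
    grid = pcn.read.grid(datadir='data', trim=True, quiet=True)
    print(grid.x.shape, grid.dx, grid.Lx)
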
Example #6
    def update(self, quiet=True):
        """Update simulation object:
                - read param.nml
                - read grid and ghost grid
            Someday this might also change the real simulation parameter.
        """
        from os.path import exists
        from os.path import join
        from pencilnew.read import param, grid, dim

        if self.param == False or self.param != param(quiet=quiet,
                                                      datadir=self.datadir):
            try:
                if exists(join(self.datadir, 'param.nml')):
                    param = param(quiet=quiet, datadir=self.datadir)
                    self.param = {}  # read params into Simulation object
                    for key in dir(param):
                        if key.startswith('_') or key == 'read': continue
                        self.param[key] = getattr(param, key)
                else:
                    if not quiet:
                        print(
                            '? WARNING: for ' + self.path +
                            '\n? Simulation has not run yet! Meaning: No param.nml found!'
                        )
            except:
                print('! ERROR: while reading param.nml for ' + self.path)
                self.param = False

        if self.param != False and (self.grid == False
                                    or self.ghost_grid == False):
            try:  # read grid only if param is not False
                #import pencilnew as pcn; pcn.io.debug_breakpoint()
                self.grid = grid(datadir=self.datadir, trim=True, quiet=True)
                self.ghost_grid = grid(datadir=self.datadir,
                                       trim=False,
                                       quiet=True)
                self.dim = dim(datadir=self.datadir)
                if not quiet:
                    print('# Updating grid and ghost_grid successful')
            except:
                if not quiet:
                    print(
                        '? WARNING: Updating grid and ghost_grid was not successful since reading the grid raised an error.'
                    )
                if self.started() or (not quiet):
                    print("? WARNING: Couldn't load grid for " + self.path)
                self.grid = False
                self.ghost_grid = False
                self.dim = False
        elif self.param == False:
            if not quiet:
                print(
                    '? WARNING: Updating grid and ghost_grid was not successful since the run has not been started yet.'
                )
            self.grid = False
            self.ghost_grid = False
            self.dim = False

        self.export()
        #import pencilnew as pcn; pcn.io.debug_breakpoint()
        return self
Example #7
    def __read_1d_aver(self, plane, datadir, aver_file_name, n_vars, proc):
        """
        Read the yaverages.dat, zaverages.dat.
        Return the raw data and the time array.
        """

        import os
        import numpy as np
        from scipy.io import FortranFile
        from pencilnew import read

        if proc < 0:
            proc_dirs = self.__natural_sort(
                filter(lambda s: s.startswith('proc'), os.listdir(datadir)))
        else:
            proc_dirs = ['proc' + str(proc)]

        dim = read.dim(datadir, proc)
        if dim.precision == 'S':
            dtype = np.float32
        if dim.precision == 'D':
            dtype = np.float64

        # Prepare the raw data.
        # This will be reformatted at the end.

        raw_data = []
        for directory in proc_dirs:
            proc = int(directory[4:])
            proc_dim = read.dim(datadir, proc)

            # Read the data.
            t = []
            proc_data = []
            file_id = FortranFile(
                os.path.join(datadir, directory, aver_file_name))
            while True:
                try:
                    t.append(file_id.read_record(dtype=dtype)[0])
                    proc_data.append(file_id.read_record(dtype=dtype))
                except:
                    # Finished reading.
                    break
            file_id.close()

            # Reshape the proc data into [len(t), pnu, pnv].
            if plane == 'y':
                pnu = proc_dim.nx
                pnv = proc_dim.nz
            if plane == 'z':
                pnu = proc_dim.nx
                pnv = proc_dim.ny
            proc_data = np.array(proc_data)
            print(proc_data.shape, len(t), n_vars, pnv, pnu)
            proc_data = proc_data.reshape([len(t), n_vars, pnv, pnu])

            # Add the proc_data (one proc) to the raw_data (all procs)
            if plane == 'y':
                nu = dim.nx
                nv = dim.nz
                idx_u = proc_dim.ipx * proc_dim.nx
                idx_v = proc_dim.ipz * proc_dim.nz
            if plane == 'z':
                nu = dim.nx
                nv = dim.ny
                idx_u = proc_dim.ipx * proc_dim.nx
                idx_v = proc_dim.ipy * proc_dim.ny
            if not isinstance(raw_data, np.ndarray):
                # Initialize the raw_data array with the right dimensions.
                raw_data = np.zeros([len(t), n_vars, nv, nu])
            raw_data[:, :, idx_v:idx_v + pnv,
                     idx_u:idx_u + pnu] = proc_data.copy()

        t = np.array(t)
        raw_data = np.swapaxes(raw_data, 2, 3)

        return t, raw_data
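
Usage note: as with the 2D helper in Example #1, this method is private; a hedged sketch of the public entry point, again assuming pencilnew.read.aver takes a plane_list keyword:

    import pencilnew as pcn

    # Read the y averages (yaverages.dat); one attribute per averaged variable.
    aver = pcn.read.aver(plane_list=['y'], datadir='data')
    print(aver.t.shape)
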
Example #8
    def read(self, datadir='data', proc=-1, quiet=False,
             trim=False):
        """
        Read the grid data from the pencil code simulation.
        If proc < 0, then load all data and assemble.
        Otherwise, load grid from specified processor.

        call signature:

        read(self, datadir='data', proc=-1, quiet=False, trim=False)

        Keyword arguments:

        *datadir*:
          Directory where the data is stored.

        *proc*
          Processor to be read. If proc is -1, then read the 'global'
          grid. If proc is >=0, then read the grid.dat in the
          corresponding processor directory.

        *quiet*
          Flag for switching off output.

        *trim*
          Cuts off the ghost points.
        """

        import numpy as np
        import os
        from scipy.io import FortranFile
        import pencilnew.read as read

        datadir = os.path.expanduser(datadir)
        dim = read.dim(datadir, proc)
        if dim.precision == 'D':
            precision = 'd'
        else:
            precision = 'f'

        if proc < 0:
            proc_dirs = list(filter(lambda string: string.startswith('proc'), os.listdir(datadir)))
        else:
            proc_dirs = ['proc' + str(proc)]

        # Define the global arrays.
        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)
        dx_1 = np.zeros(dim.mx, dtype=precision)
        dy_1 = np.zeros(dim.my, dtype=precision)
        dz_1 = np.zeros(dim.mz, dtype=precision)
        dx_tilde = np.zeros(dim.mx, dtype=precision)
        dy_tilde = np.zeros(dim.my, dtype=precision)
        dz_tilde = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            proc = int(directory[4:])
            procdim = read.dim(datadir, proc)
            if not quiet:
                print("reading grid data from processor {0} of {1} ...".format(proc, len(proc_dirs)))

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the grid data.
            file_name = os.path.join(datadir, directory, 'grid.dat')
            infile = FortranFile(file_name, 'r')
            grid_raw = infile.read_record(dtype=precision)
            dx, dy, dz = tuple(infile.read_record(dtype=precision))
            Lx, Ly, Lz = tuple(infile.read_record(dtype=precision))
            dx_1_raw = infile.read_record(dtype=precision)
            dx_tilde_raw = infile.read_record(dtype=precision)
            infile.close()

            # Reshape the arrays.
            t = grid_raw[0]
            x_loc = grid_raw[1:mxloc+1]
            y_loc = grid_raw[mxloc+1:mxloc+myloc+1]
            z_loc = grid_raw[mxloc+myloc+1:mxloc+myloc+mzloc+1]
            dx_1_loc = dx_1_raw[0:mxloc]
            dy_1_loc = dx_1_raw[mxloc:mxloc+myloc]
            dz_1_loc = dx_1_raw[mxloc+myloc:mxloc+myloc+mzloc]
            dx_tilde_loc = dx_tilde_raw[0:mxloc]
            dy_tilde_loc = dx_tilde_raw[mxloc:mxloc+myloc]
            dz_tilde_loc = dx_tilde_raw[mxloc+myloc:mxloc+myloc+mzloc]

            if len(proc_dirs) > 1:
                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0x_loc = 0
                    i1x_loc = procdim.mx
                else:
                    i0x = procdim.ipx*procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0x_loc = procdim.nghostx
                    i1x_loc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0y_loc = 0
                    i1y_loc = procdim.my
                else:
                    i0y = procdim.ipy*procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0y_loc = procdim.nghosty
                    i1y_loc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0z_loc = 0
                    i1z_loc = procdim.mz
                else:
                    i0z = procdim.ipz*procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0z_loc = procdim.nghostz
                    i1z_loc = procdim.mz

                x[i0x:i1x] = x_loc[i0x_loc:i1x_loc]
                y[i0y:i1y] = y_loc[i0y_loc:i1y_loc]
                z[i0z:i1z] = z_loc[i0z_loc:i1z_loc]
                dx_1[i0x:i1x] = dx_1_loc[i0x_loc:i1x_loc]
                dy_1[i0y:i1y] = dy_1_loc[i0y_loc:i1y_loc]
                dz_1[i0z:i1z] = dz_1_loc[i0z_loc:i1z_loc]
                dx_tilde[i0x:i1x] = dx_tilde_loc[i0x_loc:i1x_loc]
                dy_tilde[i0y:i1y] = dy_tilde_loc[i0y_loc:i1y_loc]
                dz_tilde[i0z:i1z] = dz_tilde_loc[i0z_loc:i1z_loc]

            else:
                x = x_loc
                y = y_loc
                z = z_loc
                dx_1 = dx_1_loc
                dy_1 = dy_1_loc
                dz_1 = dz_1_loc
                dx_tilde = dx_tilde_loc
                dy_tilde = dy_tilde_loc
                dz_tilde = dz_tilde_loc

        if trim:
            self.x = x[dim.l1:dim.l2+1]
            self.y = y[dim.m1:dim.m2+1]
            self.z = z[dim.n1:dim.n2+1]
            self.dx_1 = dx_1[dim.l1:dim.l2+1]
            self.dy_1 = dy_1[dim.m1:dim.m2+1]
            self.dz_1 = dz_1[dim.n1:dim.n2+1]
            self.dx_tilde = dx_tilde[dim.l1:dim.l2+1]
            self.dy_tilde = dy_tilde[dim.m1:dim.m2+1]
            self.dz_tilde = dz_tilde[dim.n1:dim.n2+1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.dx_1 = dx_1
            self.dy_1 = dy_1
            self.dz_1 = dz_1
            self.dx_tilde = dx_tilde
            self.dy_tilde = dy_tilde
            self.dz_tilde = dz_tilde

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.Lx = Lx
        self.Ly = Ly
        self.Lz = Lz
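
Usage note: the same reader can also target a single processor directory instead of assembling the global grid; a minimal sketch, again assuming the pencilnew.read.grid wrapper:

    import pencilnew as pcn

    # Read only data/proc0/grid.dat, keeping the ghost zones.
    grid0 = pcn.read.grid(datadir='data', proc=0, trim=False, quiet=True)
    print(grid0.x.shape)
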
Example #9
    def read(self,
             var_file='',
             sim=None,
             datadir='data',
             proc=-1,
             ivar=-1,
             quiet=True,
             trim_all=False,
             trimall=False,
             dim=None,
             param=None,
             index=None,
             magic=None):
        """
        Read VAR files from the Pencil Code. If proc < 0, then load all data
        and assemble them. Otherwise, load the VAR file from the specified
        processor.

        The file format written by output() (and used, e.g., in var.dat)
        consists of the following Fortran records:
        1. data(mx, my, mz, nvar)
        2. t(1), x(mx), y(my), z(mz), dx(1), dy(1), dz(1), deltay(1)
        Here nvar denotes the number of slots, i.e. 1 for one scalar field, 3
        for one vector field, 8 for var.dat in the case of MHD with entropy.
        Note that deltay(1) is only present if lshear is on, so the run
        parameters need to be known.

        call signature:

        read(var_file='', datadir='data', proc=-1, ivar=-1, quiet=True,
             trim_all=False, trimall=False, dim=None, param=None,
             index=None, magic=None, sim=None)

        Keyword arguments:
            var_file:   Name of the VAR file.
            sim:        Simulation sim object.
            magic:      Values to be computed from the data, e.g. B = curl(A).
            trimall:    Trim the data cube to exclude ghost zones.
            quiet:      Flag for switching off output.

            datadir:    Directory where the data is stored.
            proc:       Processor to be read. If -1 read all and assemble to one array.
            ivar:       Index of the VAR file, if var_file is not specified.
            dim:        Dimension object. Read from datadir if not given.
            param:      Parameter object. Read from datadir if not given.
            index:      Index object. Read from datadir if not given.
        """

        import numpy as np
        import os
        from scipy.io import FortranFile
        from pencilnew.math.derivatives import curl, curl2
        from pencilnew import read

        if sim is not None:
            datadir = os.path.expanduser(sim.datadir)
            dim = sim.dim
            param = read.param(datadir=sim.datadir, quiet=True)
            index = read.index(datadir=sim.datadir)
        else:
            datadir = os.path.expanduser(datadir)
            if dim is None:
                dim = read.dim(datadir, proc)
            if param is None:
                param = read.param(datadir=datadir, quiet=quiet)
            if index is None:
                index = read.index(datadir=datadir)

        run2D = param.lwrite_2d

        if dim.precision == 'D':
            precision = 'd'
        else:
            precision = 'f'

        if param.lwrite_aux:
            total_vars = dim.mvar + dim.maux
        else:
            total_vars = dim.mvar

        if not var_file:
            if ivar < 0:
                var_file = 'var.dat'
            else:
                var_file = 'VAR' + str(ivar)

        if proc < 0:
            proc_dirs = self.__natural_sort(
                filter(lambda s: s.startswith('proc'), os.listdir(datadir)))
        else:
            proc_dirs = ['proc' + str(proc)]

        if trimall != False: trim_all = trimall

        # Set up the global array.
        if not run2D:
            f = np.zeros((total_vars, dim.mz, dim.my, dim.mx), dtype=precision)
        else:
            if dim.ny == 1:
                f = np.zeros((total_vars, dim.mz, dim.mx), dtype=precision)
            else:
                f = np.zeros((total_vars, dim.my, dim.mx), dtype=precision)

        x = np.zeros(dim.mx, dtype=precision)
        y = np.zeros(dim.my, dtype=precision)
        z = np.zeros(dim.mz, dtype=precision)

        for directory in proc_dirs:
            proc = int(directory[4:])
            procdim = read.dim(datadir, proc)
            if not quiet:
                print("Reading data from processor {0} of {1} ...".format( \
                      proc, len(proc_dirs)))

            mxloc = procdim.mx
            myloc = procdim.my
            mzloc = procdim.mz

            # Read the data.
            file_name = os.path.join(datadir, directory, var_file)
            infile = FortranFile(file_name)
            if not run2D:
                f_loc = infile.read_record(dtype=precision)
                f_loc = f_loc.reshape((-1, mzloc, myloc, mxloc))
            else:
                if dim.ny == 1:
                    f_loc = infile.read_record(dtype=precision)
                    f_loc = f_loc.reshape((-1, mzloc, mxloc))
                else:
                    f_loc = infile.read_record(dtype=precision)
                    f_loc = f_loc.reshape((-1, myloc, mxloc))
            raw_etc = infile.read_record(precision)
            infile.close()

            t = raw_etc[0]
            x_loc = raw_etc[1:mxloc + 1]
            y_loc = raw_etc[mxloc + 1:mxloc + myloc + 1]
            z_loc = raw_etc[mxloc + myloc + 1:mxloc + myloc + mzloc + 1]
            if param.lshear:
                shear_offset = 1
                deltay = raw_etc[-1]
            else:
                shear_offset = 0

            dx = raw_etc[-3 - shear_offset]
            dy = raw_etc[-2 - shear_offset]
            dz = raw_etc[-1 - shear_offset]

            if len(proc_dirs) > 1:
                # Calculate where the local processor will go in
                # the global array.
                #
                # Don't overwrite ghost zones of processor to the left (and
                # accordingly in y and z direction -- makes a difference on the
                # diagonals)
                #
                # Recall that in NumPy, slicing is NON-INCLUSIVE on the right
                # end, i.e. x[0:4] slices all of a 4-element array rather than
                # raising an error as it would in IDL.

                if procdim.ipx == 0:
                    i0x = 0
                    i1x = i0x + procdim.mx
                    i0xloc = 0
                    i1xloc = procdim.mx
                else:
                    i0x = procdim.ipx * procdim.nx + procdim.nghostx
                    i1x = i0x + procdim.mx - procdim.nghostx
                    i0xloc = procdim.nghostx
                    i1xloc = procdim.mx

                if procdim.ipy == 0:
                    i0y = 0
                    i1y = i0y + procdim.my
                    i0yloc = 0
                    i1yloc = procdim.my
                else:
                    i0y = procdim.ipy * procdim.ny + procdim.nghosty
                    i1y = i0y + procdim.my - procdim.nghosty
                    i0yloc = procdim.nghosty
                    i1yloc = procdim.my

                if procdim.ipz == 0:
                    i0z = 0
                    i1z = i0z + procdim.mz
                    i0zloc = 0
                    i1zloc = procdim.mz
                else:
                    i0z = procdim.ipz * procdim.nz + procdim.nghostz
                    i1z = i0z + procdim.mz - procdim.nghostz
                    i0zloc = procdim.nghostz
                    i1zloc = procdim.mz

                x[i0x:i1x] = x_loc[i0xloc:i1xloc]
                y[i0y:i1y] = y_loc[i0yloc:i1yloc]
                z[i0z:i1z] = z_loc[i0zloc:i1zloc]

                if not run2D:
                    f[:, i0z:i1z, i0y:i1y, i0x:i1x] = \
                        f_loc[:, i0zloc:i1zloc, i0yloc:i1yloc, i0xloc:i1xloc]
                else:
                    if dim.ny == 1:
                        f[:, i0z:i1z, i0x:i1x] = \
                              f_loc[:, i0zloc:i1zloc, i0xloc:i1xloc]
                    else:
                        f[:, i0y:i1y, i0x:i1x] = \
                              f_loc[:, i0yloc:i1yloc, i0xloc:i1xloc]
            else:
                f = f_loc
                x = x_loc
                y = y_loc
                z = z_loc

        if magic is not None:
            if 'bb' in magic:
                # Compute the magnetic field before doing trim_all.
                aa = f[index.ax - 1:index.az, ...]
                self.bb = curl(aa, dx, dy, dz, run2D=run2D)
                if trim_all:
                    self.bb = self.bb[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if 'jj' in magic:
                # Compute the electric current field before doing trim_all.
                aa = f[index.ax - 1:index.az, ...]
                self.jj = curl2(aa, dx, dy, dz)
                if trim_all:
                    self.jj = self.jj[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                                      dim.l1:dim.l2 + 1]
            if 'vort' in magic:
                # Compute the vorticity field before doing trim_all.
                uu = f[index.ux - 1:index.uz, ...]
                self.vort = curl(uu, dx, dy, dz, run2D=run2D)
                if trim_all:
                    if run2D:
                        if dim.nz == 1:
                            self.vort = self.vort[:, dim.m1:dim.m2 + 1,
                                                  dim.l1:dim.l2 + 1]
                        else:
                            self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                                  dim.l1:dim.l2 + 1]
                    else:
                        self.vort = self.vort[:, dim.n1:dim.n2 + 1,
                                              dim.m1:dim.m2 + 1,
                                              dim.l1:dim.l2 + 1]

        # Trim the ghost zones of the global f-array if asked.
        if trim_all:
            self.x = x[dim.l1:dim.l2 + 1]
            self.y = y[dim.m1:dim.m2 + 1]
            self.z = z[dim.n1:dim.n2 + 1]
            if not run2D:
                self.f = f[:, dim.n1:dim.n2 + 1, dim.m1:dim.m2 + 1,
                           dim.l1:dim.l2 + 1]
            else:
                if dim.ny == 1:
                    self.f = f[:, dim.n1:dim.n2 + 1, dim.l1:dim.l2 + 1]
                else:
                    self.f = f[:, dim.m1:dim.m2 + 1, dim.l1:dim.l2 + 1]
        else:
            self.x = x
            self.y = y
            self.z = z
            self.f = f
            self.l1 = dim.l1
            self.l2 = dim.l2 + 1
            self.m1 = dim.m1
            self.m2 = dim.m2 + 1
            self.n1 = dim.n1
            self.n2 = dim.n2 + 1

        # Assign an attribute to self for each variable defined in
        # 'data/index.pro' so that e.g. self.ux is the x-velocity
        for key in index.__dict__.keys():
            if key != 'global_gg' and key != 'keys':
                value = index.__dict__[key]
                setattr(self, key, self.f[value - 1, ...])
        # Special treatment for vector quantities.
        if hasattr(index, 'uu'):
            self.uu = self.f[index.ux - 1:index.uz, ...]
        if hasattr(index, 'aa'):
            self.aa = self.f[index.ax - 1:index.az, ...]

        self.t = t
        self.dx = dx
        self.dy = dy
        self.dz = dz
        if param.lshear:
            self.deltay = deltay

        # Do the rest of magic after the trim_all (i.e. no additional curl...).
        self.magic = magic
        if self.magic is not None:
            self.magic_attributes(param)
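
Usage note: a minimal sketch of reading a snapshot, assuming the method above is exposed as pencilnew.read.var and that a VAR0 snapshot exists; the magic keyword and index.pro handling follow the code above:

    import pencilnew as pcn

    # Read VAR0, trim the ghost zones and compute B = curl(A) via magic.
    var = pcn.read.var(var_file='VAR0', datadir='data', trimall=True, magic=['bb'])
    print(var.t, var.f.shape, var.bb.shape)
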
Example #10
    def update(self, hard=False, quiet=True):
        """Update simulation object:
            if not read in:
                - read param.nml
                - read grid and ghost grid

            Set hard=True to force update.
        """
        from os.path import exists
        from os.path import join
        from pencilnew.read import param, grid, dim

        REEXPORT = False

        if hard:
            self.param = False
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if self.param == False:
            try:
                if exists(join(self.datadir,'param.nml')):
                    print('~ Reading param.nml.. ')
                    param = param(quiet=quiet, datadir=self.datadir)
                    self.param = {}                     # read params into Simulation object
                    for key in dir(param):
                        if key.startswith('_') or key == 'read': continue
                        self.param[key] = getattr(param, key)
                    REEXPORT = True
                else:
                    if not quiet: print('? WARNING: for '+self.path+'\n? Simulation has not run yet! Meaning: No param.nml found!')
                    REEXPORT = True
            except:
                print('! ERROR: while reading param.nml for '+self.path)
                self.param = False
                REEXPORT = True

        if self.param != False and (self.grid == False or self.ghost_grid == False):
            try:                                # read grid only if param is not False
                #import pencilnew as pcn; pcn.io.debug_breakpoint()
                print('~ Reading grid.. ')
                self.grid = grid(datadir=self.datadir, trim=True, quiet=True)
                print('~ Reading ghost_grid.. ')
                self.ghost_grid = grid(datadir=self.datadir, trim=False, quiet=True)
                print('~ Reading dim.. ')
                self.dim = dim(datadir=self.datadir)
                if not quiet: print('# Updating grid and ghost_grid successful')
                REEXPORT = True
                # adding lx, dx etc to params
                self.param['Lx'] = self.grid.Lx; self.param['Ly'] = self.grid.Ly; self.param['Lz'] = self.grid.Lz
                self.param['lx'] = self.grid.Lx; self.param['ly'] = self.grid.Ly; self.param['lz'] = self.grid.Lz
                self.param['dx'] = self.grid.dx; self.param['dy'] = self.grid.dy; self.param['dz'] = self.grid.dz
            except:
                if not quiet: print('? WARNING: Updating grid and ghost_grid was not successful since reading the grid raised an error.')
                if self.started() or (not quiet): print("? WARNING: Couldn't load grid for "+self.path)
                self.grid = False
                self.ghost_grid = False
                self.dim = False
                REEXPORT = True
        elif self.param == False:
            if not quiet: print('? WARNING: Updating grid and ghost_grid was not successful since the run has not been started yet.')
            self.grid = False
            self.ghost_grid = False
            self.dim = False
            REEXPORT = True

        if REEXPORT: self.export()
        #import pencilnew as pcn; pcn.io.debug_breakpoint()
        return self
Example #11
    def animate(self,
                field='uu1',
                extension='xz',
                datadir='data',
                proc=-1,
                interval=50.,
                amin=0.,
                amax=1.,
                transform='',
                old_file=False,
                precision='f',
                verbose=False):
        """
        Read and animate Pencil Code slice data.

        call signature:

        animate(self, field='uu1', extension='xz', datadir='data', proc=-1,
                interval=50., amin=0., amax=1., transform='',
                old_file=False, precision='f', verbose=False)

        Keyword arguments:

        *field*:
          Name of the field(s) to be read.

        *extension*
          Specifies the slice(s).

        *datadir*:
          Directory where the data is stored.

        *proc*:
          Processor to be read. If -1 read all and assemble to one array.

        *interval*:
          Delay between animation frames in milliseconds.

        *amin*:
          Minimum value for image scaling.

        *amax*:
          Maximum value for image scaling.

        *transform*:
          Insert arbitrary numerical code to modify the slice.

        *old_file*
          Flag for reading old file format.

        *precision*:
          Precision of the data. Either float 'f' or double 'd'.

        *verbose*:
          Print progress.
        """

        import os
        import numpy as np
        from pencilnew import read
        from scipy.io import FortranFile

        import matplotlib.pyplot as plt
        import matplotlib.animation as animation

        # Define the directory that contains the slice files.
        if proc < 0:
            slice_dir = datadir
        else:
            slice_dir = os.path.join(datadir, 'proc{0}'.format(proc))

        # Initialize the fields list.
        if field:
            if isinstance(field, list):
                field_list = field
            else:
                field_list = [field]
        else:
            # Find the existing fields.
            field_list = []
            for file_name in os.listdir(slice_dir):
                if file_name[:6] == 'slice_':
                    field_list.append(file_name.split('.')[0][6:])
            # Remove duplicates.
            field_list = list(set(field_list))

        # Initialize the extensions list.
        if extension:
            if isinstance(extension, list):
                extension_list = extension
            else:
                extension_list = [extension]
        else:
            # Find the existing extensions.
            extension_list = []
            for file_name in os.listdir(slice_dir):
                if file_name[:6] == 'slice_':
                    extension_list.append(file_name.split('.')[1])
            # Remove duplicates.
            extension_list = list(set(extension_list))

        class Foo(object):
            pass

        for extension in extension_list:
            if verbose:
                print('Extension: ' + str(extension))
            # This one will store the data.
            ext_object = Foo()

            for field in field_list:
                if verbose:
                    print('  -> Field: ' + str(field))
                # Compose the file name according to field and extension.
                datadir = os.path.expanduser(datadir)
                if proc < 0:
                    file_name = os.path.join(
                        datadir, 'slice_' + field + '.' + extension)
                else:
                    file_name = os.path.join(
                        datadir, 'proc{0}'.format(proc),
                        'slice_' + field + '.' + extension)

                dim = read.dim(datadir, proc)
                if dim.precision == 'D':
                    precision = 'd'
                else:
                    precision = 'f'

                # Set up slice plane.
                if extension == 'xy' or extension == 'Xy' or extension == 'xy2':
                    hsize = dim.nx
                    vsize = dim.ny
                if extension == 'xz':
                    hsize = dim.nx
                    vsize = dim.nz
                if extension == 'yz':
                    hsize = dim.ny
                    vsize = dim.nz

                infile = FortranFile(file_name)

                islice = 0
                self.t = [0]
                slice_series = [0]

                plane = np.zeros((vsize, hsize), dtype=precision)

                fig = plt.figure()

                # ims is a list of lists, each row is a list of artists to draw in the
                # current frame; here we are just animating one artist, the image, in
                # each frame
                ax = plt.axes()
                ax.set_xlabel('x')
                ax.set_ylabel('y')

                ims = []  # image series

                ifirst = True

                while True:
                    try:
                        if verbose:
                            print('  -> Reading... ')
                        raw_data = infile.read_record(dtype=precision)
                    except ValueError:
                        break
                    except TypeError:
                        break

                    if old_file:
                        self.t.append(list(raw_data[-1:]))
                        t_now = raw_data[-1]
                        slice_series.extend(list(raw_data[:-1]))
                        plane = raw_data[:-1]
                    else:
                        self.t.append(list(raw_data[-2:-1]))
                        t_now = raw_data[-2:-1]
                        slice_series.extend(list(raw_data[:-2]))
                        plane = raw_data[:-2]

                    if verbose:
                        print('  -> Done')

                    if ifirst:
                        print(
                            "----islice----------t---------min-------max-------delta"
                        )
                    print("%10i %10.3e %10.3e %10.3e %10.3e" %
                          (islice, t_now, plane.min(), plane.max(),
                           plane.max() - plane.min()))

                    ifirst = False

                    title = 't = %11.3e' % t_now
                    ax.set_title(title)
                    im = plt.imshow(plane, vmin=amin, vmax=amax, animated=True)
                    ims.append([im])  # Append each frame as its own single-artist list.

                    islice += 1
                # Reshape and remove first entry.
                ani = animation.ArtistAnimation(fig,
                                                ims,
                                                interval=interval,
                                                blit=True,
                                                repeat_delay=1000)
                ani.save('animate_' + extension + field + '.mp4')

                plt.show()

                if verbose:
                    print('Reshaping array')
                self.t = np.array(self.t[1:], dtype=precision)
                slice_series = np.array(slice_series, dtype=precision)
                slice_series = slice_series[1:].reshape(islice, vsize, hsize)
                setattr(ext_object, field, slice_series)

            setattr(self, extension, ext_object)
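
Usage note: animate() appears to belong to the same slice class as the read() method in Example #4; a hedged sketch assuming the pencilnew.read.slices wrapper returns such an instance (saving the mp4 also requires a matplotlib animation writer such as ffmpeg):

    import pencilnew as pcn

    # Read the xz slices of uu1, then animate them and save animate_xzuu1.mp4.
    slices = pcn.read.slices(field='uu1', extension='xz', datadir='data')
    slices.animate(field='uu1', extension='xz', datadir='data',
                   amin=-0.1, amax=0.1, interval=100, verbose=True)
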
Example #12
    def calc(self,
                     aver=[],
                     datatopdir='.',
                     lskip_zeros=False,
                     proc=0,
                     rank=0,
                     rmfzeros=1,
                     rmbzeros=1,
                     iy=None,
                     l_correction=False,
                     t_correction=0.,
                     dim=None,
                     timereducer=None,
                     trargs=[],
                     tindex=(0,None),
                     imask=None
                    ):
        """object returns time dependent meridional tensors
           from Averages object aver.z. u, acoef and bcoef and aver.t

           For long DNS runs the 'zaverages.dat' file can be very large
           so MPI may be required and the data is loaded by processor 
           as default.

           lskip_zeros=True identifies the resetting of the testfield
           and rmbzeros and rmfzeros number to exclude before and following
           By default none are removed.

           iy is the index array that is computed in this MPI process, which
           may be a subset of the array on this processor
            
           l_correction=True permits the pencil coefficients computed
           prior to the Pencil Code correction implemented after
           time=t_correction to be rescaled accordingly to match the new
           formulation.

           trargs contain optional arguments for the time treatments: mean,
           smoothing, etc.  

           tindex is set to limit the range of the iterations loaded from
           Averages in zaverages.dat
 
           The index imask, excluding the resets, can be specified to 
           ensure all processes use the same mask 
        """
        import numpy as np
        import os
        from pencilnew import read

        os.chdir(datatopdir)  # Work from the top directory of the data.
        grid = read.grid(proc=proc, trim=True, quiet=True)
        # If iy is None or a scalar, create a numpy index array.
        try:
            iy.size > 0
        except AttributeError:
            if iy is None:
                iy = np.arange(grid.y.size)
            else:
                iy = np.array(iy)
        if rank==0:
            print('iy size is {0}'.format(iy.shape))
        r, theta = np.meshgrid(grid.x,grid.y[iy],indexing='ij')
        del(grid,theta) #conserve memory

        print('rank {0} calculating tensors for proc {1}'.format(rank,proc))

        # string containers for zaverages.z keys
        uformat = 'u{0}mxy'
        alpformat = 'alp{0}{1}xy'
        etaformat = 'eta{0}{1}{2}xy'

        # imask calculated once for MPI/processor consistency
        if rank==0:
            print("Removing zeros")
        old_size = aver.t.shape

        # if imask is not provided either exclude the zeros or use the full time series
        try:
            imask.size>0
            print('imask shape is {}'.format(imask.shape))
        except AttributeError:
            if lskip_zeros:
                index = alpformat.format(1, 1)
                izero = np.array(np.where(aver.z.__getattribute__(index)[:,
                                 aver.z.__getattribute__(index).shape[-2]//2,
                                 aver.z.__getattribute__(index).shape[-1]//2] == 0))[0]
                rmfrange = np.arange(0,rmfzeros-1)
                rmbrange = np.arange(0,rmbzeros-1)
                rmpoints = np.array([],dtype=int)
                for zero in izero:
                    rmpoints = np.append(rmpoints, rmfrange + zero)
                    rmpoints = np.append(rmpoints, zero - rmbrange)
                if izero.size>0:
                    imask=np.delete(np.where(aver.t),rmpoints)
                    if rank==0:
                        print("Removed {0} zeros from {1} resets".format(len(rmpoints), len(izero)))
                        print("Resets occured at save points {0}".format(izero))
                else:
                    imask=np.where(aver.t)[0]
                del(rmpoints,rmbrange,rmfrange) 
            else:
                imask=np.arange(aver.t.size)
                if rank==0:
                    print("Skipped zero removals.")
        # update the time of the snapshots included 
        self.t=aver.t[imask]

        # Correction to Pencil Code error may be required on old data
        if l_correction:
            if dim is None:
                dim = read.dim(quiet=True)
            itcorr = np.where(aver.t[imask]<t_correction)[0]
            index = alpformat.format(1,3)
            aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            for j in range(0,3):
                index = alpformat.format(3,j+1)
                aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            index = etaformat.format(1,1,1)
            aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            for j in range(0,3):
                index = etaformat.format(j+1,2,1)
                aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            index = etaformat.format(1,1,2)
            aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            for j in range(0,3):
                index = etaformat.format(j+1,2,2)
                aver.z.__getattribute__(index)[itcorr] *=\
                                               -dim.nprocz/(dim.nprocz-2.)
            
        # set up place holders for the Pencil Code tensor coefficients
        index = alpformat.format(1,1)
        u  =np.zeros([3,    len(imask),aver.z.__getattribute__(index).shape[-2],iy.size])
        alp=np.zeros([3,3,  len(imask),aver.z.__getattribute__(index).shape[-2],iy.size])
        eta=np.zeros([3,3,3,len(imask),aver.z.__getattribute__(index).shape[-2],iy.size])
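        # Working-array layout: (tensor indices..., time, x, selected y), i.e.
        # alp has shape [3, 3, len(imask), nx, iy.size], with nx inferred from
        # the broadcasting against r below; the arrays are transposed to the
        # output layout (nz, ny, nx, nt) further down.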
        if rank==0:
            print(u.shape,aver.z.__getattribute__(index)[imask,:,:].shape)
        # store the individual components in the z-averages as tensors
        for i,coord in zip(range(0,3),('x','y','z')):
            try:
                index = uformat.format(coord)
                if iy.size>1:
                    tmp=aver.z.__getattribute__(index)[:,:,iy]
                    u[i,:,:,:] = tmp[imask]
                else:
                    u[i,:,:,0] = aver.z.__getattribute__(index)[imask,:,iy]
            except AttributeError:
                # velocity averages absent from the z-averages; leave zeros
                pass
        for i in range(0,3):
            for j in range(0,3):
                index = alpformat.format(i+1,j+1)
                if iy.size>1:
                    tmp=aver.z.__getattribute__(index)[:,:,iy]
                    alp[j,i,:,:,:] = tmp[imask]
                else:
                    alp[j,i,:,:,0] = aver.z.__getattribute__(index)[imask,:,iy]
        for i in range(0,3):
            for j in range(0,3):
                index1 = etaformat.format(i+1,j+1,1)
                index2 = etaformat.format(i+1,j+1,2)
                # Sign difference with Schrinner + r correction
                if iy.size>1:
                    tmp=aver.z.__getattribute__(index1)[:,:,iy]
                    eta[0,j,i,:,:,:] = -tmp[imask]
                    tmp=aver.z.__getattribute__(index2)[:,:,iy]
                    eta[1,j,i,:,:,:] = -tmp[imask]*r
                    del(tmp)
                else:
                    eta[0,j,i,:,:,0] = -aver.z.__getattribute__(index1)[imask,:,iy]
                    eta[1,j,i,:,:,0] = -aver.z.__getattribute__(index2)[imask,:,iy]*r[:,0]

        # apply the specified averaging or smoothing: 'None' returns unprocessed arrays
        if callable(timereducer):
            u=timereducer(u,trargs)
            alp=timereducer(alp,trargs)
            eta=timereducer(eta,trargs)
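        # Minimal sketch of a reducer compatible with the call above; the name
        # _example_time_mean and its handling of trargs are illustrative
        # assumptions, not part of the original API. It collapses the time axis
        # (-3) to length 1 while keeping it, so alp.shape[-3] below reports the
        # reduced time length.
        def _example_time_mean(arr, trargs=None):
            # Average over time, keeping the axis so the reshapes below still work.
            return np.mean(arr, axis=-3, keepdims=True)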
        
        if rank==0:
            print("Old time dimension has length: {0}".format(old_size))
            print("New time dimension has length: {0}".format(alp.shape[-3]))
        
        # Create output tensors
        datatype  = alp.dtype
        datashape = [alp.shape[-3], alp.shape[-2], alp.shape[-1], 1]
        setattr(self,'utensor', np.zeros([3]+datashape,dtype=datatype))
        setattr(self,'alpha'  , np.zeros([3,3]+datashape,dtype=datatype))
        setattr(self,'beta'   , np.zeros([3,3]+datashape,dtype=datatype))
        setattr(self,'gamma'  , np.zeros([3]+datashape,dtype=datatype))
        setattr(self,'delta'  , np.zeros([3]+datashape,dtype=datatype))
        setattr(self,'kappa'  , np.zeros([3,3,3]+datashape,dtype=datatype))
        setattr(self,'acoef'  , np.zeros([3,3]+datashape,dtype=datatype))
        setattr(self,'bcoef'  , np.zeros([3,3,3]+datashape,dtype=datatype))

        """
        All tensors need to be reordered nz,ny,nx,nt for efficient writing to disk
        """ 
        # Calculating a and b matrices
        self.acoef[:,:,:,:,:,0]   = np.copy(alp)
        self.acoef=np.swapaxes(self.acoef,-4,-1)
        self.acoef=np.swapaxes(self.acoef,-3,-2)
        self.bcoef[:,:,:,:,:,:,0] = np.copy(eta)
        self.bcoef=np.swapaxes(self.bcoef,-4,-1)
        self.bcoef=np.swapaxes(self.bcoef,-3,-2)
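        # Shape trace for the reordering above (nt = alp.shape[-3], ny = iy.size,
        # nx = alp.shape[-2]): acoef starts as [3, 3, nt, nx, ny, 1];
        # swapaxes(-4, -1) gives [3, 3, 1, nx, ny, nt] and swapaxes(-3, -2) then
        # gives [3, 3, 1, ny, nx, nt], i.e. the (nz, ny, nx, nt) ordering with
        # nz = 1 for z-averaged data.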

        irr, ith, iph = 0,1,2
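        # For reference: in the usual mean-field convention (cf. Schrinner et al.)
        # the mean EMF is decomposed roughly as
        #   E_i = alpha_ij B_j + (gamma x B)_i - beta_ij (curl B)_j
        #         - (delta x curl B)_i - kappa_ijk (grad B)_jk,
        # with alpha/gamma built from the symmetric/antisymmetric parts of the
        # a-coefficients and beta/delta/kappa from the b-coefficients; the sign
        # flips below match the meanfield_e_tensor convention used here.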
        
        # u-tensor
        print("Calculating utensor on rank {}".format(rank))
        #utensor[:,:,:,:,0] = u[:,:,:,:] - np.mean(u[:,:,:,:],axis=1,keepdims=True)
        self.utensor[:,:,:,:,0] = u[:,:,:,:]
        self.utensor=np.swapaxes(self.utensor,-4,-1)
        self.utensor=np.swapaxes(self.utensor,-3,-2)
        # Alpha tensor
        print("Calculating alpha on rank {}".format(rank))
        self.alpha[irr,irr,:,:,:,0]  = (alp[irr,irr,:,:,:]-eta[ith,ith,irr,:,:,:]/r)
        self.alpha[irr,ith,:,:,:,0]  = 0.5*(alp[ith,irr,:,:,:]+eta[ith,irr,irr,:,:,:]/r+alp[irr,ith,:,:,:]-eta[ith,ith,ith,:,:,:]/r)
        self.alpha[irr,iph,:,:,:,0]  = 0.5*(alp[iph,irr,:,:,:]+alp[irr,iph,:,:,:] - eta[ith,ith,iph,:,:,:]/r)
        self.alpha[ith,irr,:,:,:,0]  = self.alpha[irr,ith,:,:,:,0]
        self.alpha[ith,ith,:,:,:,0]  = (alp[ith,ith,:,:,:]+eta[ith,irr,ith,:,:,:]/r)
        self.alpha[ith,iph,:,:,:,0]  = 0.5*(alp[iph,ith,:,:,:]+alp[ith,iph,:,:,:]+eta[ith,irr,iph,:,:,:]/r)
        self.alpha[iph,irr,:,:,:,0]  = self.alpha[irr,iph,:,:,:,0]
        self.alpha[iph,ith,:,:,:,0]  = self.alpha[ith,iph,:,:,:,0]
        self.alpha[iph,iph,:,:,:,0]  = alp[iph,iph,:,:,:]
        self.alpha=np.swapaxes(self.alpha,-4,-1)
        self.alpha=np.swapaxes(self.alpha,-3,-2)
        # Gamma vector
        print("Calculating gamma on rank {}".format(rank))
        self.gamma[irr,:,:,:,0] = -0.5*(alp[iph,ith,:,:,:]-alp[ith,iph,:,:,:]-eta[ith,irr,iph,:,:,:]/r)
        self.gamma[ith,:,:,:,0] = -0.5*(alp[irr,iph,:,:,:]-alp[iph,irr,:,:,:]-eta[ith,ith,iph,:,:,:]/r)
        self.gamma[iph,:,:,:,0] = -0.5*(alp[ith,irr,:,:,:]-alp[irr,ith,:,:,:]+eta[ith,irr,irr,:,:,:]/r
                                                                             +eta[ith,ith,ith,:,:,:]/r)
        self.gamma=np.swapaxes(self.gamma,-4,-1)
        self.gamma=np.swapaxes(self.gamma,-3,-2)
        # Beta tensor
        print("Calculating beta on rank {}".format(rank))
        self.beta[irr,irr,:,:,:,0]   = -0.5* eta[ith,iph,irr,:,:,:]
        self.beta[irr,ith,:,:,:,0]   = 0.25*(eta[irr,iph,irr,:,:,:] - eta[ith,iph,ith,:,:,:])
        self.beta[irr,iph,:,:,:,0]   = 0.25*(eta[ith,irr,irr,:,:,:] - eta[ith,iph,iph,:,:,:] - eta[irr,ith,irr,:,:,:])
        self.beta[ith,irr,:,:,:,0]   = self.beta[irr,ith,:,:,:,0]
        self.beta[ith,ith,:,:,:,0]   = 0.5*eta[irr,iph,ith,:,:,:]
        self.beta[ith,iph,:,:,:,0]   = 0.25*(eta[ith,irr,ith,:,:,:] + eta[irr,iph,iph,:,:,:] - eta[irr,ith,ith,:,:,:])
        self.beta[iph,irr,:,:,:,0]   = self.beta[irr,iph,:,:,:,0]
        self.beta[iph,ith,:,:,:,0]   = self.beta[ith,iph,:,:,:,0]
        self.beta[iph,iph,:,:,:,0]   = 0.5*(eta[ith,irr,iph,:,:,:] - eta[irr,ith,iph,:,:,:])
        # Sign convention to match with meanfield_e_tensor
        self.beta = -self.beta
        self.beta=np.swapaxes(self.beta,-4,-1)
        self.beta=np.swapaxes(self.beta,-3,-2)
        # Delta vector
        print("Calculating delta on rank {}".format(rank))
        self.delta[irr,:,:,:,0]    = 0.25*(eta[irr,ith,ith,:,:,:] - eta[ith,irr,ith,:,:,:] + eta[irr,iph,iph,:,:,:])
        self.delta[ith,:,:,:,0]    = 0.25*(eta[ith,irr,irr,:,:,:] - eta[irr,ith,irr,:,:,:] + eta[ith,iph,iph,:,:,:])
        self.delta[iph,:,:,:,0]    = -0.25*(eta[irr,iph,irr,:,:,:] + eta[ith,iph,ith,:,:,:])
        # Sign convention to match with meanfield_e_tensor
        self.delta = -self.delta
        self.delta=np.swapaxes(self.delta,-4,-1)
        self.delta=np.swapaxes(self.delta,-3,-2)
        # Kappa tensor
        print("Calculating kappa on rank {}".format(rank))
        for i in range(0,3):
            self.kappa[irr,irr,i,:,:,:,0]=      -eta[irr,irr,i,:,:,:]
            self.kappa[ith,irr,i,:,:,:,0]= -0.5*(eta[ith,irr,i,:,:,:]+eta[irr,ith,i,:,:,:])
            self.kappa[iph,irr,i,:,:,:,0]= -0.5* eta[irr,iph,i,:,:,:]
            self.kappa[irr,ith,i,:,:,:,0]=     self.kappa[ith,irr,i,:,:,:,0]
            self.kappa[ith,ith,i,:,:,:,0]= -     eta[ith,ith,i,:,:,:]
            self.kappa[iph,ith,i,:,:,:,0]= -0.5* eta[ith,iph,i,:,:,:]
            self.kappa[irr,iph,i,:,:,:,0]=     self.kappa[iph,irr,i,:,:,:,0]
            self.kappa[ith,iph,i,:,:,:,0]=     self.kappa[iph,ith,i,:,:,:,0]
            self.kappa[iph,iph,i,:,:,:,0]= 1e-91  # tiny placeholder, effectively zero
        # Sign convention to match with meanfield_e_tensor
        self.kappa = -self.kappa
        self.kappa=np.swapaxes(self.kappa,-4,-1)
        self.kappa=np.swapaxes(self.kappa,-3,-2)
        setattr(self, 'imask', imask)
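        # Illustrative access pattern (the name alpha_rr_map is an assumption):
        # after the reordering each coefficient is indexed as [..., nz, ny, nx, nt],
        # so e.g.
        #   alpha_rr_map = self.alpha[0, 0, 0, :, :, 0]
        # gives the (ny, nx) map of alpha_rr at the first retained time.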