Code example #1
File: pc2vtk.py  Project: zzzxhahaha/pencil-code
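
The excerpts below omit the module-level imports of their source files. As a rough sketch (not copied verbatim from the projects), the pc2vtk.py, calc_tensors and fixed_points examples assume something like:

import os
import gc
import math as m
import struct
import multiprocessing as mp

import numpy as np
import pencil as pc   # legacy Pencil Code python module

pc2npz.py instead imports the readers it uses (ReadTimeSeries, read_dim, read_grid, read_var, read_pvar) directly, together with numpy as np.
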
def power2vtk(powerfiles=['power_mag.dat'],
              datadir='data/',
              destination='power',
              thickness=1):
    """
    Convert power spectra from PencilCode format to vtk.

    call signature::
    
      power2vtk(powerfiles = ['power_mag.dat'],
            datadir = 'data/', destination = 'power', thickness = 1):
    
    Read the power spectra stored in the power*.dat files
    and convert them into vtk format.
    Write the result in *destination*.
    
    Keyword arguments:
    
      *powerfiles*:
        The files containing the power spectra.
        
      *datadir*:
        Directory where the data is stored.
       
      *destination*:
        Destination file.
      
      *thickness*:
        Dimension in z-direction. Setting it to 2 will create an n*m*2
        dimensional array of data. This is useful in Paraview for visualizing
        the spectrum in 3 dimensions. Note that this simply doubles the amount
        of data.
               
    """

    # this should correct for the case where the user passes only one file name (a bare string)
    if (len(powerfiles) > 0):
        if (len(powerfiles[0]) == 1):
            powerfiles = [powerfiles]

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, trim=True, quiet=True)

    # leave k0 at 1 for now; will fix this later
    k0 = 1.
    # leave dk at 1 for now; will fix this later
    dk = 1.

    # open the destination file
    fd = open(destination + '.vtk', 'wb')

    # read the first power spectrum
    t, power = pc.read_power(datadir + powerfiles[0])

    fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
    fd.write('power spectra\n'.encode('utf-8'))
    fd.write('BINARY\n'.encode('utf-8'))
    fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
    if (thickness == 1):
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
            len(t), power.shape[1], 1).encode('utf-8'))
    else:
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
            len(t), power.shape[1], 2).encode('utf-8'))
    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(float(t[0]), k0,
                                                          0.).encode('utf-8'))
    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
        t[1] - t[0], dk, 1.).encode('utf-8'))
    if (thickness == 1):
        fd.write('POINT_DATA {0:9}\n'.format(power.shape[0] *
                                             power.shape[1]).encode('utf-8'))
    else:
        fd.write('POINT_DATA {0:9}\n'.format(power.shape[0] * power.shape[1] *
                                             2).encode('utf-8'))

    for powfile in powerfiles:
        # read the power spectrum
        t, power = pc.read_power(datadir + powfile)

        fd.write(('SCALARS ' + powfile[:-4] + ' float\n').encode('utf-8'))
        fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))

        if (thickness == 1):
            for j in range(power.shape[1]):
                for i in range(len(t)):
                    fd.write(struct.pack(">f", power[i, j]))
        else:
            for k in [1, 2]:
                for j in range(power.shape[1]):
                    for i in range(len(t)):
                        fd.write(struct.pack(">f", power[i, j]))

    fd.close()
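
A minimal usage sketch for power2vtk (it assumes the imports above and a run whose data/ directory contains the listed spectra; power_kin.dat is only an illustrative file name):

# convert two power spectra into a single file 'power.vtk';
# thickness=2 duplicates the data in z so Paraview can render it as a 3D block
power2vtk(powerfiles=['power_kin.dat', 'power_mag.dat'],
          datadir='data/', destination='power', thickness=2)
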
Code example #2
File: pc2npz.py  Project: canasmh/SIPencilScripts
def pc2npz(ivar=-1, datadir="data", files="all", quiet=True, trimall=True):
    """Convert pencil outputs to npz files.

    Pencil files can be extremely large, which makes it difficult to transfer them from a remote location to your local machine.
    This function helps mitigate that issue by reading these large files and converting them into npz files that are much
    easier to transfer.

    Inputs:
        ivar (int) -- The specific VAR and PVAR file you would like to read. Defaults to -1 (i.e., read var.dat, pvar.dat).
        datadir (str) -- Path to the data directory. Defaults to "data".
        files (str or list) -- Which outputs to convert: "ts", "dim", "grid", "ff", "fp", or "all". Defaults to "all".
        quiet (bool) -- Suppress most of the print output when reading the files. Defaults to True.
        trimall (bool) -- Trim the ghost zones from the f array. Defaults to True.
    
    Returns:
         None

    """

    datadir2 = datadir + "/"

    if ivar < 0:
        varfile  = "var.dat"
        pvarfile = "pvar.dat"
        ts_file = "ts.npz"
        dim_file = "dim.npz"
        grid_file = "grid.npz"
        ff_file = "ff.npz"
        fp_file = "fp.npz"
    else:
        varfile  = "VAR" + str(ivar)
        pvarfile = "PVAR" + str(ivar)
        ts_file = "ts{}.npz".format(ivar)
        dim_file = "dim{}.npz".format(ivar)
        grid_file = "grid{}.npz".format(ivar)
        ff_file = "ff{}.npz".format(ivar)
        fp_file = "fp{}.npz".format(ivar)

    if "ts" in files or "all" in files:
        print("Reading time series")
        print(" ")
        ts = ReadTimeSeries(datadir=datadir)
        print(" ")
        ts_vars = vars(ts)
        print("Saving time series as {}".format(ts_file))
        np.savez(ts_file, **ts_vars)
        print("...")
        print("...")

    if "dim" in files or "all" in files:
        print("Reading dim files")
        print(" ")
        dim = read_dim(datadir=datadir2)
        print(" ")
        dim_vars = vars(dim)
        print("Saving dim files as {}".format(dim_file))
        np.savez(dim_file, **dim_vars)
        print("...")
        print("...")

    if "grid" in files or "all" in files:

        print("Reading grid files")
        print(" ")
        grid = read_grid(datadir=datadir2, quiet=quiet)
        print(" ")
        grid_vars = vars(grid)
        print("Saving grid files as {}".format(grid_file))
        np.savez(grid_file, **grid_vars)
        print("...")
        print("...")
        print("Finished...")

    if "ff" in files or "all" in files:
        print("Reading {} (this might take a while) ...".format(varfile))
        print(" ")
        var = read_var(datadir=datadir, trimall=trimall, quiet=quiet, varfile=varfile)
        print(" ")
        var_vars = vars(var)
        print("Saving var files as {}".format(ff_file))
        np.savez(ff_file, **var_vars)
        print("...")
        print("...")

    if "fp" in files or "all" in files:
        print("Reading {} (this might take a while) ...".format(pvarfile))
        print(" ")
        pvar = read_pvar(datadir=datadir, varfile=pvarfile)
        print(" ")
        pvar_vars = vars(pvar)
        print("Saving pvar files as {}".format(fp_file))
        np.savez(fp_file, **pvar_vars)
        print("...")
        print("...")
Code example #3
File: pc2vtk.py  Project: zzzxhahaha/pencil-code
def aver2vtk(varfile='xyaverages.dat',
             datadir='data/',
             destination='xyaverages',
             quiet=1):
    """
    Convert average data from PencilCode format to vtk.

    call signature::
    
      aver2vtk(varfile = 'xyaverages.dat', datadir = 'data/',
            destination = 'xyaverages', quiet = 1):

    Read the average file specified in *varfile* and convert the data
    into vtk format.
    Write the result in *destination*.
    
    Keyword arguments:
    
      *varfile*:
        Name of the average file. This also specifies over which dimensions
        the averages are taken.
        
      *datadir*:
        Directory where the data is stored.
       
      *destination*:
        Destination file.
               
    """

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, trim=True, quiet=True)

    # read the specified average file
    if varfile[0:2] == 'xy':
        aver = pc.read_xyaver()
        line_len = int(np.round(grid.Lz / grid.dz))
        l0 = grid.z[int((len(grid.z) - line_len) / 2)]
        dl = grid.dz
    elif varfile[0:2] == 'xz':
        aver = pc.read_xzaver()
        line_len = int(np.round(grid.Ly / grid.dy))
        l0 = grid.y[int((len(grid.y) - line_len) / 2)]
        dl = grid.dy
    elif varfile[0:2] == 'yz':
        aver = pc.read_yzaver()
        line_len = int(np.round(grid.Lx / grid.dx))
        l0 = grid.x[int((len(grid.x) - line_len) / 2)]
        dl = grid.dx
    else:
        print("aver2vtk: ERROR: cannot determine average file\n")
        print(
            "aver2vtk: The name of the file has to be either xyaver.dat, xzaver.dat or yzaver.dat\n"
        )
        return -1
    keys = list(aver.__dict__.keys())
    t = aver.t
    keys.remove('t')

    # open the destination file
    fd = open(destination + '.vtk', 'wb')

    fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
    fd.write((varfile[0:2] + 'averages\n').encode('utf-8'))
    fd.write('BINARY\n'.encode('utf-8'))
    fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(len(t), line_len,
                                                     1).encode('utf-8'))
    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(float(t[0]), l0,
                                                          0.).encode('utf-8'))
    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
        t[1] - t[0], dl, 1.).encode('utf-8'))
    fd.write('POINT_DATA {0:9}\n'.format(len(t) * line_len).encode('utf-8'))

    # run through all variables
    for var in keys:
        fd.write(('SCALARS ' + var + ' float\n').encode('utf-8'))
        fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
        for j in range(line_len):
            for i in range(len(t)):
                fd.write(struct.pack(">f", aver.__dict__[var][i, j]))

    fd.close()
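
A usage sketch for aver2vtk (assuming the imports above and that the run wrote data/xyaverages.dat):

# convert the xy-averages into xyaverages.vtk
aver2vtk(varfile='xyaverages.dat', datadir='data/', destination='xyaverages')
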
Code example #4
File: pc2vtk.py  Project: zzzxhahaha/pencil-code
def slices2vtk(variables=['rho'],
               extensions=['xy', 'xy2', 'xz', 'yz'],
               datadir='data/',
               destination='slices',
               proc=-1,
               format='native'):
    """
    Convert slices from PencilCode format to vtk.

    call signature::
    
      slices2vtk(variables = ['rho'], extensions = ['xy', 'xy2', 'xz', 'yz'],
           datadir = 'data/', destination = 'slices', proc = -1,
           format = 'native'):
    
    Read slice files specified by *variables* and convert
    them into vtk format for the specified extensions.
    Write the result in *destination*.
    NB: You need to have called src/read_videofiles.x before using this script.
    
    Keyword arguments:
    
      *variables*:
        All allowed fields which can be written as slice files, e.g. b2, uu1, lnrho, ...
        See the pencil code manual for more (chapter: "List of parameters for `video.in'").
        
      *extensions*:
        List of slice positions.
      
      *datadir*:
        Directory where the data is stored.
       
      *destination*:
        Destination files.
        
      *proc*:
        Processor which should be read. Set to -1 for all processors.
      
      *format*:
        Endian, one of little, big, or native (default)
       
    """

    # this should correct for the case where the user passes only one variable (a bare string)
    if (len(variables) > 0):
        if (len(variables[0]) == 1):
            variables = [variables]
    # this should correct for the case where the user passes only one extension (a bare string)
    if (len(extensions) > 0):
        if (len(extensions[0]) == 1):
            extensions = [extensions]

    # read the grid dimensions
    grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True)

    # read the user given parameters for the slice positions
    params = pc.read_param(param2=True, quiet=True)

    # run through all specified variables
    for field in variables:
        # run through all specified extensions
        for ext in extensions:
            print("read " + field + ' ' + ext)
            slices, t = pc.read_slices(field=field,
                                       datadir=datadir,
                                       proc=proc,
                                       extension=ext,
                                       format=format)

            dim_p = slices.shape[2]
            dim_q = slices.shape[1]
            if ext[0] == 'x':
                d_p = (np.max(grid.x) - np.min(grid.x)) / (dim_p)
            else:
                d_p = (np.max(grid.y) - np.min(grid.y)) / (dim_p)
            if ext[1] == 'y':
                d_q = (np.max(grid.y) - np.min(grid.y)) / (dim_q)
            else:
                d_q = (np.max(grid.z) - np.min(grid.z)) / (dim_q)

            if params.ix != -1:
                x0 = grid.x[params.ix]
            elif params.slice_position == 'm':
                x0 = grid.x[int(len(grid.x) / 2)]
            if params.iy != -1:
                y0 = grid.y[params.iy]
            elif params.slice_position == 'm':
                y0 = grid.y[int(len(grid.y) / 2)]
            if params.iz != -1:
                z0 = grid.z[params.iz]
            elif params.slice_position == 'm':
                z0 = grid.z[int(len(grid.z) / 2)]

            for i in range(slices.shape[0]):
                # open the destination file for writing
                fd = open(
                    destination + '_' + field + '_' + ext + '_' + str(i) +
                    '.vtk', 'wb')

                # write the header
                fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
                fd.write((field + '_' + ext + '\n').encode('utf-8'))
                fd.write('BINARY\n'.encode('utf-8'))
                fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
                if ext[0:2] == 'xy':
                    x0 = grid.x[0]
                    y0 = grid.y[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        dim_p, dim_q, 1).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        grid.dx, grid.dy, 1.).encode('utf-8'))
                elif ext[0:2] == 'xz':
                    x0 = grid.x[0]
                    z0 = grid.z[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        dim_p, 1, dim_q).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    # spacing along the slice axes is (dx, 1, dz) for an xz slice
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        grid.dx, 1., grid.dz).encode('utf-8'))
                elif ext[0:2] == 'yz':
                    y0 = grid.y[0]
                    z0 = grid.z[0]
                    fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(
                        1, dim_p, dim_q).encode('utf-8'))
                    fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        x0, y0, z0).encode('utf-8'))
                    # spacing along the slice axes is (1, dy, dz) for a yz slice
                    fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
                        1., grid.dy, grid.dz).encode('utf-8'))
                fd.write('POINT_DATA {0:9}\n'.format(dim_p *
                                                     dim_q).encode('utf-8'))

                fd.write(('SCALARS ' + field + '_' + ext +
                          ' float\n').encode('utf-8'))
                fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
                for j in range(dim_q):
                    for k in range(dim_p):
                        fd.write(struct.pack(">f", slices[i, j, k]))

                fd.close()
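
A usage sketch for slices2vtk (assuming the imports above and that src/read_videofiles.x has already produced the slice files for the requested fields):

# write one vtk file per slice and time step, e.g. slices_rho_xy_0.vtk, ...
slices2vtk(variables=['rho'], extensions=['xy', 'xz'],
           datadir='data/', destination='slices', proc=-1, format='native')
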
Code example #5
File: pc2vtk.py  Project: zzzxhahaha/pencil-code
def pc2vtk_vid(ti=0,
               tf=1,
               datadir='data/',
               proc=-1,
               variables=['rho', 'uu', 'bb'],
               magic=[],
               b_ext=False,
               destination='animation',
               quiet=True):
    """
    Convert data from PencilCode format to vtk.

    call signature::
    
      pc2vtk_vid(ti = 0, tf = 1, datadir = 'data/', proc = -1,
           variables = ['rho','uu','bb'], magic = [],
           destination = 'animation')

    Read the snapshots VAR*ti* to VAR*tf* and convert their content into vtk
    format. Write the result for snapshot i in *destination*i.vtk.
    
    Keyword arguments:
    
      *ti*:
        Initial time.
        
      *tf*:
        Final time.
        
      *datadir*:
        Directory where the data is stored.
       
      *proc*:
        Processor which should be read. Set to -1 for all processors.
      
      *variables* = [ 'rho' , 'lnrho' , 'uu' , 'bb', 'b_mag', 'jj', 'j_mag', 'aa', 'ab', 'TT', 'lnTT', 'cc', 'lncc', 'ss', 'vort' ]
        Variables which should be written.
        
      *magic*: [ 'vort' , 'bb' ]
        Additional variables which should be written.
       
      *b_ext*:
        Add the external magnetic field.
        
      *destination*:
        Destination files without '.vtk' extension. 
        
      *quiet*:
        Keep quiet when reading the var files.
    """

    # this should correct for the case where the user passes only one variable (a bare string)
    if (len(variables) > 0):
        if (len(variables[0]) == 1):
            variables = [variables]
    # this should correct for the case where the user passes only one magic entry (a bare string)
    if (len(magic) > 0):
        if (len(magic[0]) == 1):
            magic = [magic]

    # avoid mutating the caller's list (or the mutable default argument)
    magic = list(magic)
    # make sure magic is set when writing 'vort', 'bb', 'b_mag', 'jj' or 'j_mag'
    if 'vort' in variables:
        magic.append('vort')
    if ('bb' in variables) or ('b_mag' in variables):
        magic.append('bb')
    if ('jj' in variables) or ('j_mag' in variables):
        magic.append('jj')

    for i in range(ti, tf + 1):
        varfile = 'VAR' + str(i)
        # reading pc variables and setting dimensions
        var = pc.read_var(varfile=varfile,
                          datadir=datadir,
                          proc=proc,
                          magic=magic,
                          trimall=True,
                          quiet=quiet)

        grid = pc.read_grid(datadir=datadir, proc=proc, trim=True, quiet=True)

        params = pc.read_param(param2=True, quiet=True)
        B_ext = np.array(params.b_ext)
        # add external magnetic field
        if (b_ext == True):
            var.bb[0, ...] += B_ext[0]
            var.bb[1, ...] += B_ext[1]
            var.bb[2, ...] += B_ext[2]

        dimx = len(grid.x)
        dimy = len(grid.y)
        dimz = len(grid.z)
        dim = dimx * dimy * dimz
        dx = (np.max(grid.x) - np.min(grid.x)) / (dimx - 1)
        dy = (np.max(grid.y) - np.min(grid.y)) / (dimy - 1)
        dz = (np.max(grid.z) - np.min(grid.z)) / (dimz - 1)

        #fd = open(destination + "{0:1.0f}".format(var.t*1e5) + '.vtk', 'wb')
        fd = open(destination + str(i) + '.vtk', 'wb')
        fd.write('# vtk DataFile Version 2.0\n'.encode('utf-8'))
        fd.write('density + magnetic field\n'.encode('utf-8'))
        fd.write('BINARY\n'.encode('utf-8'))
        fd.write('DATASET STRUCTURED_POINTS\n'.encode('utf-8'))
        fd.write('DIMENSIONS {0:9} {1:9} {2:9}\n'.format(dimx, dimy,
                                                         dimz).encode('utf-8'))
        fd.write('ORIGIN {0:8.12} {1:8.12} {2:8.12}\n'.format(
            grid.x[0], grid.y[0], grid.z[0]).encode('utf-8'))
        fd.write('SPACING {0:8.12} {1:8.12} {2:8.12}\n'.format(
            dx, dy, dz).encode('utf-8'))
        fd.write('POINT_DATA {0:9}\n'.format(dim).encode('utf-8'))

        try:
            index = variables.index('rho')
            print('writing rho')
            fd.write('SCALARS rho float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.rho[k, j, i]))
        except:
            pass

        try:
            index = variables.index('lnrho')
            print('writing lnrho')
            fd.write('SCALARS lnrho float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.lnrho[k, j, i]))
        except:
            pass

        try:
            index = variables.index('uu')
            print('writing uu')
            fd.write('VECTORS vfield float\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.uu[0, k, j, i]))
                        fd.write(struct.pack(">f", var.uu[1, k, j, i]))
                        fd.write(struct.pack(">f", var.uu[2, k, j, i]))
        except:
            pass

        try:
            index = variables.index('bb')
            print('writing bb')
            fd.write('VECTORS bfield float\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.bb[0, k, j, i]))
                        fd.write(struct.pack(">f", var.bb[1, k, j, i]))
                        fd.write(struct.pack(">f", var.bb[2, k, j, i]))
        except:
            pass

        try:
            index = variables.index('b_mag')
            b_mag = np.sqrt(pc.dot2(var.bb))
            print('writing b_mag')
            fd.write('SCALARS b_mag float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", b_mag[k, j, i]))
        except:
            pass

        try:
            index = variables.index('jj')
            print('writing jj')
            fd.write('VECTORS jfield float\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.jj[0, k, j, i]))
                        fd.write(struct.pack(">f", var.jj[1, k, j, i]))
                        fd.write(struct.pack(">f", var.jj[2, k, j, i]))
        except:
            pass

        try:
            index = variables.index('j_mag')
            j_mag = np.sqrt(pc.dot2(var.jj))
            print('writing j_mag')
            fd.write('SCALARS j_mag float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", j_mag[k, j, i]))
        except:
            pass

        try:
            index = variables.index('aa')
            print('writing aa')
            fd.write('VECTORS afield float\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.aa[0, k, j, i]))
                        fd.write(struct.pack(">f", var.aa[1, k, j, i]))
                        fd.write(struct.pack(">f", var.aa[2, k, j, i]))
        except:
            pass

        try:
            index = variables.index('ab')
            ab = pc.dot(var.aa, var.bb)
            print('writing ab')
            fd.write('SCALARS ab float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", ab[k, j, i]))
        except:
            pass

        try:
            index = variables.index('TT')
            print('writing TT')
            fd.write('SCALARS TT float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.TT[k, j, i]))
        except:
            pass

        try:
            index = variables.index('lnTT')
            print('writing lnTT')
            fd.write('SCALARS lnTT float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.lnTT[k, j, i]))
        except:
            pass

        try:
            index = variables.index('cc')
            print('writing cc')
            fd.write('SCALARS cc float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.cc[k, j, i]))
        except:
            pass

        try:
            index = variables.index('lncc')
            print('writing lncc')
            fd.write('SCALARS lncc float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.lncc[k, j, i]))
        except:
            pass

        try:
            index = variables.index('ss')
            print('writing ss')
            fd.write('SCALARS ss float\n'.encode('utf-8'))
            fd.write('LOOKUP_TABLE default\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.ss[k, j, i]))
        except:
            pass

        try:
            index = variables.index('vort')
            print('writing vort')
            fd.write('VECTORS vorticity float\n'.encode('utf-8'))
            for k in range(dimz):
                for j in range(dimy):
                    for i in range(dimx):
                        fd.write(struct.pack(">f", var.vort[0, k, j, i]))
                        fd.write(struct.pack(">f", var.vort[1, k, j, i]))
                        fd.write(struct.pack(">f", var.vort[2, k, j, i]))
        except:
            pass

        del (var)

        fd.close()
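
A usage sketch for pc2vtk_vid (assuming the imports above and snapshots VAR0 ... VAR10 in data/):

# write animation0.vtk ... animation10.vtk containing density, velocity and
# the magnetic field ('bb' is added to magic automatically because it is
# listed in variables)
pc2vtk_vid(ti=0, tf=10, datadir='data/', proc=-1,
           variables=['rho', 'uu', 'bb'],
           destination='animation', quiet=True)
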
Code example #6
def calc_tensors(datatopdir,
                 lskip_zeros=False,
                 datadir='data/',
                 rank=0,
                 size=1,
                 comm=None,
                 proc=[0],
                 l_mpi=True,
                 iuxmxy=0,
                 irhomxy=7,
                 iTTmxy=6,
                 first_alpha=9,
                 l_correction=False,
                 t_correction=0.,
                 fskip=2,
                 mskip=1,
                 trange=(0, None),
                 tindex=(0, None, 1),
                 yindex=[]):
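    """
    Calculate the turbulent transport coefficients (the alpha, beta, gamma,
    delta and kappa tensors) from test-field z-averages.

    The z-averages are read (optionally split across MPI ranks), an optional
    sign correction is applied to data computed under the old, incorrect
    test-field specification (l_correction), time is rescaled from code units
    to years, and the reset times of the test fields are masked out if
    lskip_zeros is set.

    Returns: alpha, beta, gamma, delta, kappa, time[imask], urmst, etat0.
    """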
    nt = None
    alltmp = 100000
    dim = pc.read_dim()
    gc.garbage
    if len(yindex) == 0:
        iy = np.arange(dim.ny)
    else:
        iy = yindex
    os.chdir(datatopdir)  # return to working directory
    av = []
    if l_mpi:
        from mpi4py import MPI
        if proc.size < dim.nprocz:
            print('rank {}: proc.size {} <  dim.nprocz {}'.format(
                rank, proc.size, dim.nprocz))
            yproc = proc[0] // dim.nprocz
            aav, time = pc.read_zaver(datadir,
                                      trange=trange,
                                      tindex=tindex,
                                      proc=yproc)
            tmp = time.size
        else:
            print('rank {}: proc.size {} >= dim.nprocz {}'.format(
                rank, proc.size, dim.nprocz))
            for iproc in range(0, proc.size, dim.nprocz):
                if iproc == 0:
                    aav, time = pc.read_zaver(datadir,
                                              trange=trange,
                                              tindex=tindex,
                                              proc=proc[iproc] // dim.nprocz)
                    tmp = time.size
                else:
                    aav, time = pc.read_zaver(datadir,
                                              proc=proc[iproc] // dim.nprocz)
                    tmp = min(time.size, tmp)
    else:
        av, time = pc.read_zaver(datadir, trange=trange, tindex=tindex)
    gc.garbage
    if l_mpi:
        print('rank {}: tmp {}'.format(rank, tmp))
        if rank != 0:
            comm.send(tmp, dest=0, tag=rank)
        else:
            for irank in range(1, size):
                tmp = comm.recv(source=irank, tag=irank)
                alltmp = min(alltmp, tmp)
        nt = comm.bcast(alltmp, root=0)
        print('rank {}: nt {}'.format(rank, nt))
        if proc.size < dim.nprocz:
            yndx = iy - yproc * (dim.nygrid // dim.nprocy)
            print('rank {}: yndx[0] {}'.format(rank, yndx[0]))
            av = aav[:nt, :, yndx, :]
        else:
            av = aav[:nt]
            for iproc in range(dim.nprocz, proc.size, dim.nprocz):
                aav, time = pc.read_zaver(datadir,
                                          tindex=(0, nt, 1),
                                          proc=proc[iproc] // dim.nprocz)
                av = np.concatenate((av, aav), axis=2)
        aav = []
    print('rank {}: loaded av'.format(rank))
    # where the test field was calculated under the old, incorrect spec, apply a correction
    gc.garbage
    if l_correction:
        itcorr = np.where(time < t_correction)[0]
        av[itcorr, first_alpha + 2] *= -dim.nprocz / (dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha + 5 + j] *= -dim.nprocz / (dim.nprocz - 2.)
        av[itcorr, first_alpha + 11] *= -dim.nprocz / (dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha + 14 + j] *= -dim.nprocz / (dim.nprocz - 2.)
        av[itcorr, first_alpha + 20] *= -dim.nprocz / (dim.nprocz - 2.)
        for j in range(0, 3):
            av[itcorr, first_alpha + 23 + j] *= -dim.nprocz / (dim.nprocz - 2.)
    #factor by which to rescale code time to years
    trescale = 0.62 / 2.7e-6 / (365. * 86400.)  #0.007281508
    time *= trescale
    grid = pc.read_grid(datadir, trim=True, quiet=True)
    r, theta = np.meshgrid(grid.x, grid.y[iy])
    gc.garbage

    # exclude zeros and the neighbouring points if resetting of the test fields is used:
    # trim the reset data and its neighbours, fskip points after each zero and mskip points before it
    if lskip_zeros:
        if l_mpi:
            if rank == 0:
                izer0 = np.where(av[:, 9, av.shape[2] // 2,
                                    av.shape[3] // 2] == 0)[0]
                for ii in range(1, fskip):
                    izer1 = np.where(av[:, 9, av.shape[2] // 2,
                                        av.shape[3] // 2] == 0)[0] + ii
                    izer0 = np.append(izer0, izer1)
                for ii in range(1, mskip):
                    izer1 = np.where(av[:, 9, av.shape[2] // 2,
                                        av.shape[3] // 2] == 0)[0] - ii
                    izer0 = np.append(izer0, izer1)
                if izer0.size > 0:
                    imask = np.delete(np.where(time), [izer0])
                else:
                    imask = np.where(time)[0]
            else:
                imask = None
            imask = comm.bcast(imask, root=0)
        else:
            izer0 = np.where(av[:, 9, av.shape[2] // 2,
                                av.shape[3] // 2] == 0)[0]
            for ii in range(1, fskip):
                izer1 = np.where(av[:, 9, av.shape[2] // 2,
                                    av.shape[3] // 2] == 0)[0] + ii
                izer0 = np.append(izer0, izer1)
            for ii in range(1, mskip):
                izer1 = np.where(av[:, 9, av.shape[2] // 2,
                                    av.shape[3] // 2] == 0)[0] - ii
                izer0 = np.append(izer0, izer1)
            if izer0.size > 0:
                imask = np.delete(np.where(time), [izer0])
            else:
                imask = np.where(time)[0]
    else:
        imask = np.arange(time.size)
    #if lskip_zeros:
    #    izer0=np.where(av[:,first_alpha,av.shape[2]/2,av.shape[3]/2]==0)[0]
    #    izer1=np.where(av[:,first_alpha,av.shape[2]/2,av.shape[3]/2]==0)[0]+1
    #    if izer0.size>0:
    #        imask=np.delete(np.where(time[:nt]),[izer0,izer1])
    #    else:
    #        imask=np.where(time[:nt])[0]
    #else:
    #    imask=np.arange(time[:nt].size)
    if rank == 0:
        print('rank {}: calculating alp'.format(rank))
    alp = np.zeros([3, 3, imask.size, av.shape[2], av.shape[3]])
    eta = np.zeros([3, 3, 3, imask.size, av.shape[2], av.shape[3]])
    urmst = np.zeros([3, 3, av.shape[2], av.shape[3]])
    etat0 = np.zeros([3, 3, 3, av.shape[2], av.shape[3]])
    #eta0 = np.zeros([3,3,3,imask.size,av.shape[2],av.shape[3]])
    Hp = np.zeros([av.shape[2], av.shape[3]])
    #compute rms velocity normalisation
    if rank == 0:
        print('rank {}: calculating urms'.format(rank))
    urms = np.sqrt(
        np.mean(av[imask, iuxmxy + 3, :, :] - av[imask, iuxmxy + 0, :, :]**2 +
                av[imask, iuxmxy + 4, :, :] - av[imask, iuxmxy + 1, :, :]**2 +
                av[imask, iuxmxy + 5, :, :] - av[imask, iuxmxy + 2, :, :]**2,
                axis=0))
    #compute turbulent diffusion normalisation
    cv, gm, alp_MLT = 0.6, 5. / 3, 5. / 3
    pp = np.mean(av[imask, iTTmxy, :, :] * av[imask, irhomxy, :, :] * cv *
                 (gm - 1),
                 axis=0)
    if rank == 0:
        print('rank {}: completed pressure'.format(rank))
    for i in range(0, av.shape[2]):
        Hp[i, :] = -1. / np.gradient(np.log(pp[i, :]), grid.dx)
    grid, pp = [], []
    for i in range(0, 3):
        for j in range(0, 3):
            alp[i, j, :, :, :] = av[imask, first_alpha + 3 * j + i, :, :]
            urmst[i, j, :, :] = urms / 3.
            for k in range(0, 3):
                etat0[i, j, k, :, :] = urms * alp_MLT * Hp / 3.
    #for i in range(0,imask.size):
    #    eta0[i,:,:,:,:,:] = etat0

    if rank == 0:
        print('rank {}: calculating eta'.format(rank))
    for j in range(0, 3):
        for k in range(0, 3):
            # Sign difference with Schrinner + r correction
            eta[j, k, 1, :, :, :] = -av[imask,
                                        first_alpha + 18 + 3 * k + j, :, :] * r
            eta[j, k,
                0, :, :, :] = -av[imask, first_alpha + 9 + 3 * k + j, :, :]
    nnt, ny, nx = imask.size, av.shape[2], av.shape[3]
    av = []
    irr, ith, iph = 0, 1, 2
    # Create output tensors
    if rank == 0:
        print('rank {}: setting alp'.format(rank))
    alpha = np.zeros([3, 3, nnt, ny, nx])
    beta = np.zeros([3, 3, nnt, ny, nx])
    gamma = np.zeros([3, nnt, ny, nx])
    delta = np.zeros([3, nnt, ny, nx])
    kappa = np.zeros([3, 3, 3, nnt, ny, nx])
    # Alpha tensor
    if rank == 0:
        print('rank {}: calculating alpha'.format(rank))
    alpha[irr, irr, :, :, :] = (alp[irr, irr, :, :, :] -
                                eta[irr, ith, ith, :, :, :] / r)
    alpha[irr, ith, :, :, :] = 0.5 * (
        alp[irr, ith, :, :, :] + eta[irr, irr, ith, :, :, :] / r +
        alp[ith, irr, :, :, :] - eta[ith, ith, ith, :, :, :] / r)
    alpha[irr, iph, :, :, :] = 0.5 * (alp[iph, irr, :, :, :] +
                                      alp[irr, iph, :, :, :] -
                                      eta[iph, ith, ith, :, :, :] / r)
    alpha[ith, irr, :, :, :] = alpha[irr, ith, :, :, :]
    alpha[ith, ith, :, :, :] = (alp[ith, ith, :, :, :] +
                                eta[ith, irr, ith, :, :, :] / r)
    alpha[ith, iph, :, :, :] = 0.5 * (alp[iph, ith, :, :, :] +
                                      alp[ith, iph, :, :, :] +
                                      eta[iph, irr, ith, :, :, :] / r)
    alpha[iph, irr, :, :, :] = alpha[irr, iph, :, :, :]
    alpha[iph, ith, :, :, :] = alpha[ith, iph, :, :, :]
    alpha[iph, iph, :, :, :] = alp[iph, iph, :, :, :]
    # Gamma vector
    gamma[irr, :, :, :] = -0.5 * (alp[ith, iph, :, :, :] -
                                  alp[iph, ith, :, :, :] -
                                  eta[iph, irr, ith, :, :, :] / r)
    gamma[ith, :, :, :] = -0.5 * (alp[iph, irr, :, :, :] -
                                  alp[irr, iph, :, :, :] -
                                  eta[iph, ith, ith, :, :, :] / r)
    gamma[iph, :, :, :] = -0.5 * (
        alp[irr, ith, :, :, :] - alp[ith, irr, :, :, :] +
        eta[irr, irr, ith, :, :, :] / r + eta[ith, ith, ith, :, :, :] / r)
    if rank == 0:
        print('rank {}: calculating beta'.format(rank))
    alp = []
    # Beta tensor
    beta[irr, irr, :, :, :] = -0.5 * eta[irr, iph, ith, :, :, :]
    beta[irr, ith, :, :, :] = 0.25 * (eta[irr, iph, irr, :, :, :] -
                                      eta[ith, iph, ith, :, :, :])
    beta[irr, iph, :, :, :] = 0.25 * (eta[irr, irr, ith, :, :, :] -
                                      eta[iph, iph, ith, :, :, :] -
                                      eta[irr, ith, irr, :, :, :])
    beta[ith, ith, :, :, :] = 0.5 * eta[ith, iph, irr, :, :, :]
    beta[ith, iph, :, :, :] = 0.25 * (eta[ith, irr, ith, :, :, :] +
                                      eta[iph, iph, irr, :, :, :] -
                                      eta[ith, ith, irr, :, :, :])
    beta[iph, iph, :, :, :] = 0.5 * (eta[iph, irr, ith, :, :, :] -
                                     eta[iph, ith, irr, :, :, :])
    beta[ith, irr, :, :, :] = beta[irr, ith, :, :, :]
    beta[iph, irr, :, :, :] = beta[irr, iph, :, :, :]
    beta[iph, ith, :, :, :] = beta[ith, iph, :, :, :]
    # Delta vector
    delta[irr, :, :, :] = 0.25 * (eta[ith, ith, irr, :, :, :] -
                                  eta[ith, irr, ith, :, :, :] +
                                  eta[iph, iph, irr, :, :, :])
    delta[ith, :, :, :] = 0.25 * (eta[irr, irr, ith, :, :, :] -
                                  eta[irr, ith, irr, :, :, :] +
                                  eta[iph, iph, ith, :, :, :])
    delta[iph, :, :, :] = -0.25 * (eta[irr, iph, irr, :, :, :] +
                                   eta[ith, iph, ith, :, :, :])
    # Kappa tensor
    if rank == 0:
        print('rank {}: calculating kappa'.format(rank))
    for i in range(0, 3):
        kappa[i, irr, irr, :, :, :] = -eta[i, irr, irr, :, :, :]
        kappa[i, irr, ith, :, :, :] = -0.5 * (eta[i, ith, irr, :, :, :] +
                                              eta[i, irr, ith, :, :, :])
        kappa[i, irr, iph, :, :, :] = -0.5 * eta[i, iph, irr, :, :, :]
        kappa[i, ith, irr, :, :, :] = kappa[i, irr, ith, :, :, :]
        kappa[i, ith, ith, :, :, :] = -eta[i, ith, ith, :, :, :]
        kappa[i, ith, iph, :, :, :] = -0.5 * eta[i, iph, ith, :, :, :]
        kappa[i, iph, irr, :, :, :] = kappa[i, irr, iph, :, :, :]
        kappa[i, iph, ith, :, :, :] = kappa[i, ith, iph, :, :, :]
        #for it in range(0,nnt):
        #    kappa[i,iph,iph,it,:,:]= 1e-9*etat0[i,0,0,:,:]
    eta = []
    return alpha, beta, gamma, delta, kappa,\
                          time[imask], urmst, etat0
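
A usage sketch for calc_tensors (assuming the imports above, a serial post-processing session without MPI, and z-averages that contain the test-field output starting at index first_alpha):

# run from the simulation directory; proc and comm are only used when l_mpi=True
alpha, beta, gamma, delta, kappa, time, urmst, etat0 = calc_tensors(
    datatopdir='.', lskip_zeros=True, datadir='data/',
    l_mpi=False, first_alpha=9)
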
Code example #7
def fixed_points(datadir='data/',
                 fileName='fixed_points_post.dat',
                 varfile='VAR0',
                 ti=-1,
                 tf=-1,
                 traceField='bb',
                 hMin=2e-3,
                 hMax=2e4,
                 lMax=500,
                 tol=1e-2,
                 interpolation='weighted',
                 trace_sub=1,
                 integration='simple',
                 nproc=1):
    """
    Find the fixed points.

    call signature::

      fixed = fixed_points(datadir = 'data/', fileName = 'fixed_points_post.dat', varfile = 'VAR0', ti = -1, tf = -1,
                 traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2,
                 interpolation = 'weighted', trace_sub = 1, integration = 'simple', nproc = 1)

    Finds the fixed points and returns their positions.

    Keyword arguments:

      *datadir*:
        Data directory.

      *fileName*:
        Name of the fixed points file.

     *varfile*:
       Varfile to be read.
       
      *ti*:
        Initial VAR file index for tracer time sequences. Overrides 'varfile'.
        
      *tf*:
        Final VAR file index for tracer time sequences. Overrides 'varfile'.        

     *traceField*:
       Vector field used for the streamline tracing.
        
     *hMin*:
       Minimum step length for an underflow to occur.
       
     *hMax*:
       Parameter for the initial step length.
       
     *lMax*:
       Maximum length of the streamline. Integration will stop if l >= lMax.
       
     *tol*:
       Tolerance for each integration step. Reduces the step length if error >= tol.
     
     *interpolation*:
       Interpolation of the vector field.
       'mean': takes the mean of the adjacent grid points.
       'weighted': weights the adjacent grid points according to their distance.
       
     *trace_sub*:
       Number of sub-grid cells for the seeds for the initial mapping.
       
     
      *integration*:
        Integration method.
        'simple': low order method.
        'RK6': Runge-Kutta 6th order.
       
     *nproc*:
       Number of cores for multi core computation.
    """
    class data_struct:
        def __init__(self):
            self.t = []
            self.fidx = []  # number of fixed points at this time
            self.x = []
            self.y = []
            self.q = []

    # Computes rotation along one edge.
    def edge(vv,
             p,
             sx,
             sy,
             diff1,
             diff2,
             phiMin,
             rec,
             hMin=hMin,
             hMax=hMax,
             lMax=lMax,
             tol=tol,
             interpolation=interpolation,
             integration=integration):
        dtot = m.atan2(diff1[0] * diff2[1] - diff2[0] * diff1[1],
                       diff1[0] * diff2[0] + diff1[1] * diff2[1])
        if ((abs(dtot) > phiMin) and (rec < 4)):
            xm = 0.5 * (sx[0] + sx[1])
            ym = 0.5 * (sy[0] + sy[1])
            # trace intermediate field line
            s = pc.stream(vv,
                          p,
                          hMin=hMin,
                          hMax=hMax,
                          lMax=lMax,
                          tol=tol,
                          interpolation=interpolation,
                          integration=integration,
                          xx=np.array([xm, ym, p.Oz]))
            tracer = np.concatenate(
                (s.tracers[0,
                           0:2], s.tracers[s.sl - 1, :], np.reshape(s.l, (1))))
            # discard any streamline which does not converge or hits the boundary
            if ((tracer[5] >= lMax) or (tracer[4] < p.Oz + p.Lz - p.dz)):
                dtot = 0.
            else:
                diffm = np.array(
                    [tracer[2] - tracer[0], tracer[3] - tracer[1]])
                if (sum(diffm**2) != 0):
                    diffm = diffm / np.sqrt(sum(diffm**2))
                dtot = edge(vv, p, [sx[0], xm], [sy[0], ym], diff1, diffm, phiMin, rec+1,
                             hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration)+ \
                       edge(vv, p, [xm, sx[1]], [ym, sy[1]], diffm, diff2, phiMin, rec+1,
                             hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, interpolation = interpolation, integration = integration)
        return dtot

    # Finds the Poincare index of this grid cell.
    def pIndex(vv,
               p,
               sx,
               sy,
               diff,
               phiMin,
               hMin=hMin,
               hMax=hMax,
               lMax=lMax,
               tol=tol,
               interpolation=interpolation,
               integration=integration):
        poincare = 0
        poincare += edge(vv,
                         p, [sx[0], sx[1]], [sy[0], sy[0]],
                         diff[0, :],
                         diff[1, :],
                         phiMin,
                         0,
                         hMin=hMin,
                         hMax=hMax,
                         lMax=lMax,
                         tol=tol,
                         interpolation=interpolation,
                         integration=integration)
        poincare += edge(vv,
                         p, [sx[1], sx[1]], [sy[0], sy[1]],
                         diff[1, :],
                         diff[2, :],
                         phiMin,
                         0,
                         hMin=hMin,
                         hMax=hMax,
                         lMax=lMax,
                         tol=tol,
                         interpolation=interpolation,
                         integration=integration)
        poincare += edge(vv,
                         p, [sx[1], sx[0]], [sy[1], sy[1]],
                         diff[2, :],
                         diff[3, :],
                         phiMin,
                         0,
                         hMin=hMin,
                         hMax=hMax,
                         lMax=lMax,
                         tol=tol,
                         interpolation=interpolation,
                         integration=integration)
        poincare += edge(vv,
                         p, [sx[0], sx[0]], [sy[1], sy[0]],
                         diff[3, :],
                         diff[0, :],
                         phiMin,
                         0,
                         hMin=hMin,
                         hMax=hMax,
                         lMax=lMax,
                         tol=tol,
                         interpolation=interpolation,
                         integration=integration)
        return poincare

    # fixed point finder for a subset of the domain
    def subFixed(queue,
                 ix0,
                 iy0,
                 vv,
                 p,
                 tracers,
                 iproc,
                 hMin=2e-3,
                 hMax=2e4,
                 lMax=500,
                 tol=1e-2,
                 interpolation='weighted',
                 integration='simple'):
        diff = np.zeros((4, 2))
        phiMin = np.pi / 8.
        x = []
        y = []
        q = []
        fidx = 0

        for ix in ix0:
            for iy in iy0:
                # compute Poincare index around this cell (!= 0 for potential fixed point)
                diff[0, :] = tracers[iy, ix, 0, 2:4] - tracers[iy, ix, 0, 0:2]
                diff[1, :] = tracers[iy, ix + 1, 0, 2:4] - tracers[iy, ix + 1,
                                                                   0, 0:2]
                diff[2, :] = tracers[iy + 1, ix + 1, 0,
                                     2:4] - tracers[iy + 1, ix + 1, 0, 0:2]
                diff[3, :] = tracers[iy + 1, ix, 0, 2:4] - tracers[iy + 1, ix,
                                                                   0, 0:2]
                if (sum(np.sum(diff**2, axis=1) != 0) == True):
                    diff = np.swapaxes(
                        np.swapaxes(diff, 0, 1) /
                        np.sqrt(np.sum(diff**2, axis=1)), 0, 1)
                poincare = pIndex(vv,
                                  p,
                                  tracers[iy, ix:ix + 2, 0, 0],
                                  tracers[iy:iy + 2, ix, 0, 1],
                                  diff,
                                  phiMin,
                                  hMin=hMin,
                                  hMax=hMax,
                                  lMax=lMax,
                                  tol=tol,
                                  interpolation=interpolation,
                                  integration=integration)

                if (abs(poincare) > 5
                    ):  # use 5 instead of 2pi to account for rounding errors
                    # subsample to get starting point for iteration
                    nt = 4
                    xmin = tracers[iy, ix, 0, 0]
                    ymin = tracers[iy, ix, 0, 1]
                    xmax = tracers[iy, ix + 1, 0, 0]
                    ymax = tracers[iy + 1, ix, 0, 1]
                    xx = np.zeros((nt**2, 3))
                    tracersSub = np.zeros((nt**2, 5))
                    i1 = 0
                    for j1 in range(nt):
                        for k1 in range(nt):
                            xx[i1, 0] = xmin + j1 / (nt - 1.) * (xmax - xmin)
                            xx[i1, 1] = ymin + k1 / (nt - 1.) * (ymax - ymin)
                            xx[i1, 2] = p.Oz
                            i1 += 1
                    for it1 in range(nt**2):
                        s = pc.stream(vv,
                                      p,
                                      hMin=hMin,
                                      hMax=hMax,
                                      lMax=lMax,
                                      tol=tol,
                                      interpolation=interpolation,
                                      integration=integration,
                                      xx=xx[it1, :])
                        tracersSub[it1, 0:2] = xx[it1, 0:2]
                        tracersSub[it1, 2:] = s.tracers[s.sl - 1, :]
                    min2 = 1e6
                    minx = xmin
                    miny = ymin
                    i1 = 0
                    for j1 in range(nt):
                        for k1 in range(nt):
                            diff2 = (tracersSub[i1, 2] - tracersSub[i1, 0]
                                     )**2 + (tracersSub[i1, 3] -
                                             tracersSub[i1, 1])**2
                            if (diff2 < min2):
                                min2 = diff2
                                minx = xmin + j1 / (nt - 1.) * (xmax - xmin)
                                miny = ymin + k1 / (nt - 1.) * (ymax - ymin)
                            i1 += 1

                    # get fixed point from this starting position using Newton's method
                    #TODO:
                    # step size for calculating the Jacobian by finite differences
                    dl = min(var.dx, var.dy) / 100.
                    it = 0
                    # tracers used to find the fixed point
                    tracersNull = np.zeros((5, 4))
                    point = np.array([minx, miny])
                    while True:
                        # trace field lines at original point and for Jacobian:
                        # (second order seems to be enough)
                        xx = np.zeros((5, 3))
                        xx[0, :] = np.array([point[0], point[1], p.Oz])
                        xx[1, :] = np.array([point[0] - dl, point[1], p.Oz])
                        xx[2, :] = np.array([point[0] + dl, point[1], p.Oz])
                        xx[3, :] = np.array([point[0], point[1] - dl, p.Oz])
                        xx[4, :] = np.array([point[0], point[1] + dl, p.Oz])
                        for it1 in range(5):
                            s = pc.stream(vv,
                                          p,
                                          hMin=hMin,
                                          hMax=hMax,
                                          lMax=lMax,
                                          tol=tol,
                                          interpolation=interpolation,
                                          integration=integration,
                                          xx=xx[it1, :])
                            tracersNull[it1, :2] = xx[it1, :2]
                            tracersNull[it1, 2:] = s.tracers[s.sl - 1, 0:2]

                        # check function convergence
                        ff = np.zeros(2)
                        ff[0] = tracersNull[0, 2] - tracersNull[0, 0]
                        ff[1] = tracersNull[0, 3] - tracersNull[0, 1]
                        #TODO:
                        if (sum(abs(ff)) <= 1e-4):
                            fixedPoint = np.array([point[0], point[1]])
                            break

                        # compute the Jacobian
                        fjac = np.zeros((2, 2))
                        fjac[0, 0] = (
                            (tracersNull[2, 2] - tracersNull[2, 0]) -
                            (tracersNull[1, 2] - tracersNull[1, 0])) / 2. / dl
                        fjac[0, 1] = (
                            (tracersNull[4, 2] - tracersNull[4, 0]) -
                            (tracersNull[3, 2] - tracersNull[3, 0])) / 2. / dl
                        fjac[1, 0] = (
                            (tracersNull[2, 3] - tracersNull[2, 1]) -
                            (tracersNull[1, 3] - tracersNull[1, 1])) / 2. / dl
                        fjac[1, 1] = (
                            (tracersNull[4, 3] - tracersNull[4, 1]) -
                            (tracersNull[3, 3] - tracersNull[3, 1])) / 2. / dl

                        # invert the Jacobian
                        fjin = np.zeros((2, 2))
                        det = fjac[0, 0] * fjac[1, 1] - fjac[0, 1] * fjac[1, 0]
                        #TODO:
                        if (abs(det) < dl):
                            fixedPoint = point
                            break
                        fjin[0, 0] = fjac[1, 1]
                        fjin[1, 1] = fjac[0, 0]
                        fjin[0, 1] = -fjac[0, 1]
                        fjin[1, 0] = -fjac[1, 0]
                        fjin = fjin / det
                        dpoint = np.zeros(2)
                        dpoint[0] = -fjin[0, 0] * ff[0] - fjin[0, 1] * ff[1]
                        dpoint[1] = -fjin[1, 0] * ff[0] - fjin[1, 1] * ff[1]
                        point += dpoint

                        # check root convergence
                        #TODO:
                        if (sum(abs(dpoint)) < 1e-4):
                            fixedPoint = point
                            break

                        if (it > 20):
                            fixedPoint = point
                            print("warning: Newton did not converged")
                            break

                        it += 1

                    # check if fixed point lies inside the cell
                    if ((fixedPoint[0] < tracers[iy, ix, 0, 0])
                            or (fixedPoint[0] > tracers[iy, ix + 1, 0, 0])
                            or (fixedPoint[1] < tracers[iy, ix, 0, 1])
                            or (fixedPoint[1] > tracers[iy + 1, ix, 0, 1])):
                        print("warning: fixed point lies outside the cell")
                    else:
                        x.append(fixedPoint[0])
                        y.append(fixedPoint[1])
                        #q.append()
                        fidx += 1

        queue.put((x, y, q, fidx, iproc))

    # multi core setup
    if (np.isscalar(nproc) == False) or (nproc % 1 != 0):
        print("error: invalid processor number")
        return -1
    queue = mp.Queue()
    proc = []

    # make sure to read the var files with the correct magic
    if (traceField == 'bb'):
        magic = 'bb'
    if (traceField == 'jj'):
        magic = 'jj'
    if (traceField == 'vort'):
        magic = 'vort'

    # read the cpu structure
    dim = pc.read_dim(datadir=datadir)
    if (dim.nprocz > 1):
        print("error: number of cores in z-direction > 1")

    var = pc.read_var(varfile=varfile,
                      datadir=datadir,
                      magic=magic,
                      quiet=True,
                      trimall=True)
    grid = pc.read_grid(datadir=datadir, quiet=True, trim=True)
    vv = getattr(var, traceField)

    # initialize the parameters
    p = pc.pClass()
    p.dx = var.dx
    p.dy = var.dy
    p.dz = var.dz
    p.Ox = var.x[0]
    p.Oy = var.y[0]
    p.Oz = var.z[0]
    p.Lx = grid.Lx
    p.Ly = grid.Ly
    p.Lz = grid.Lz
    p.nx = dim.nx
    p.ny = dim.ny
    p.nz = dim.nz

    # create the initial mapping
    tracers, mapping, t = pc.tracers(traceField='bb',
                                     hMin=hMin,
                                     hMax=hMax,
                                     lMax=lMax,
                                     tol=tol,
                                     interpolation=interpolation,
                                     trace_sub=trace_sub,
                                     varfile=varfile,
                                     integration=integration,
                                     datadir=datadir,
                                     destination='',
                                     nproc=nproc)

    # find fixed points
    fixed = pc.fixed_struct()
    xyq = []  # list of  return values from subFixed
    ix0 = range(0, p.nx * trace_sub - 1)  # set of grid indices for the cores
    iy0 = range(0, p.ny * trace_sub - 1)  # set of grid indices for the cores
    subFixedLambda = lambda queue, ix0, iy0, vv, p, tracers, iproc: \
        subFixed(queue, ix0, iy0, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol,
                 interpolation = interpolation, integration = integration)
    for iproc in range(nproc):
        proc.append(
            mp.Process(target=subFixedLambda,
                       args=(queue, ix0[iproc::nproc], iy0, vv, p, tracers,
                             iproc)))
    for iproc in range(nproc):
        proc[iproc].start()
    for iproc in range(nproc):
        xyq.append(queue.get())
    for iproc in range(nproc):
        proc[iproc].join()

    # put together return values from subFixed
    fixed.fidx = 0
    fixed.t = var.t
    for iproc in range(nproc):
        fixed.x.append(xyq[xyq[iproc][4]][0])
        fixed.y.append(xyq[xyq[iproc][4]][1])
        fixed.q.append(xyq[xyq[iproc][4]][2])
        fixed.fidx += xyq[xyq[iproc][4]][3]

    fixed.t = np.array(fixed.t)
    fixed.x = np.array(fixed.x)
    fixed.y = np.array(fixed.y)
    fixed.q = np.array(fixed.q)
    fixed.fidx = np.array(fixed.fidx)

    return fixed
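
A usage sketch for fixed_points (assuming the imports above and a snapshot VAR0 from which the magnetic field can be reconstructed via magic='bb'):

# locate the fixed points of the field-line mapping of VAR0 on four cores
fixed = fixed_points(datadir='data/', varfile='VAR0', traceField='bb',
                     hMin=2e-3, hMax=2e4, lMax=500, tol=1e-2,
                     interpolation='weighted', trace_sub=1,
                     integration='simple', nproc=4)
print(fixed.fidx)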