Example #1
    def bash(self, command, verbose="last100", logfile=False):
        """Executes command in simulation directory.
        This method will use your settings as defined in your .bashrc-file.
        A log file will be produced within 'self.path/pc'-folder

        Parameters
        ----------
        command : string or list of strings
            Command(s) to be executed.

        verbose : bool or string
            'lastN' = show last N lines of output afterwards
            False = no output
            True = all output
        """

        import subprocess
        from pencil import io
        from os.path import join, realpath

        timestamp = io.timestamp()
        io.mkdir(self.pc_dir)
        if not type(logfile) == type("string"):
            logfile = join(self.pc_dir, "bash_log_" + timestamp)

        commands = ["cd " + realpath(self.path)]
        # commands.append('source ~/.bashrc')
        # commands.append('shopt -s expand_aliases')

        if type(command) == type(["list"]):
            for c in command:
                commands.append(c)
        elif type(command) == type("string"):
            commands.append(command)
        else:
            print("! ERROR: Couldnt understand the command parameter: " +
                  str(command))

        with open(logfile, "w") as f:
            rc = subprocess.call(["/bin/bash", "-i", "-c", ";".join(commands)],
                                 stdout=f,
                                 stderr=f)

        if type(verbose) == type("string"):
            outputlength = -int(verbose.split("last")[-1])
            with open(logfile, "r") as f:
                strList = f.read().split("\n")[outputlength:]
                print("\n".join([s for s in strList if not s == ""]))
        elif verbose == True:
            with open(logfile, "r") as f:
                print(f.read())

        if rc == 0:
            return True
        else:
            print("! ERROR: Execution ended with error code " + str(rc) +
                  "!\n! Please check log file in")
            print("! " + logfile)
            return rc
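A minimal usage sketch (an assumption for illustration: 'sim' is a simulation object obtained via pencil.get_sim, and the path is made up):

import pencil as pc

sim = pc.get_sim("path/to/simulation")        # illustrative path
ok = sim.bash("make", verbose="last20")       # run 'make' in the run directory, show the last 20 log lines
if ok is not True:
    print("make failed, see the log file in <sim.path>/pc")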
Example #2
    def bash(self, command, verbose='last100', logfile=False):
        """Executes command in simulation directory.
        This method will use your settings as defined in your .bashrc-file.
        A log file will be produced within 'self.path/pc'-folder

        Args:
            - command:     command to be executed, can be a list of commands
            - verbose:     lastN = show last N lines of output afterwards
                           False = no output
                           True = all output
        """
        import subprocess
        from pencil import io
        from os.path import join, realpath

        timestamp = io.timestamp()
        io.mkdir(self.pc_dir)
        if not type(logfile) == type('string'):
            logfile = join(self.pc_dir, 'bash_log_' + timestamp)

        commands = ['cd ' + realpath(self.path)]
        #commands.append('source ~/.bashrc')
        #commands.append('shopt -s expand_aliases')

        if type(command) == type(['list']):
            for c in command:
                commands.append(c)
        elif type(command) == type('string'):
            commands.append(command)
        else:
            print('! ERROR: Could not understand the command parameter: ' +
                  str(command))

        with open(logfile, 'w') as f:
            rc = subprocess.call(['/bin/bash', '-i', '-c', ';'.join(commands)],
                                 stdout=f,
                                 stderr=f)

        if type(verbose) == type('string'):
            outputlength = -int(verbose.split('last')[-1])
            with open(logfile, 'r') as f:
                strList = f.read().split('\n')[outputlength:]
                print('\n'.join([s for s in strList if not s == '']))
        elif verbose == True:
            with open(logfile, 'r') as f:
                print(f.read())

        if rc == 0:
            return True
        else:
            print('! ERROR: Execution ended with error code ' + str(rc) +
                  '!\n! Please check log file in')
            print('! ' + logfile)
            return rc
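The command argument may also be a list of shell commands, executed in one bash session. A sketch under the same assumptions as above (path and commands are illustrative):

import pencil as pc

sim = pc.get_sim("path/to/simulation")
# run several commands in sequence inside the simulation directory
sim.bash(["pc_setupsrc", "make clean", "make"], verbose="last50")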
Example #3
def open_h5(
    filename,
    status,
    driver=None,
    comm=None,
    overwrite=False,
    size=1,
    rank=0,
    lfs=False,
    MB=1,
    count=1,
):
    """This function opens hdf5 file in serial or parallel.

    Keyword arguments:
        filename:  relative or absolute path string for name of hdf5 file.
        status:    open state of file 'w': write, 'r': read or 'a'/'r+': append.
        driver:    'mpio' is required for the parallel version; omit for serial.
        comm:      only present for parallel version of h5py.
        overwrite: flag to replace existing file.
        rank:      processor rank with root = 0.
    """
    if "/" in filename:
        fname = filename.split("/")[-1]
        path = filename.split(fname)[0]
    else:
        fname = filename
        path = "./"
    if not (".h5" == filename[-3:] or ".hdf5" == filename[-5:]):
        if np.mod(rank, size) == 0:
            print("Relabelling h5 " + fname + " to " +
                  str.strip(fname, ".dat") + ".h5 on path " + path)
        fname = str.strip(fname, ".dat") + ".h5"
    mkdir(path, rank=rank, lfs=lfs, MB=MB, count=count)
    if status == "w" and exists(join(path, fname)):
        if not overwrite:
            try:
                cmd = "mv " + join(path, fname) + " " + join(
                    path, fname + ".bak")
                process = sub.Popen(cmd.split(), stdout=sub.PIPE)
                output, error = process.communicate()
                print(cmd, output, error)
            except:
                while exists(join(path, fname)):
                    pass
    if comm:
        if not driver:
            driver = "mpio"
        dset = h5py.File(join(path, fname), status, driver=driver, comm=comm)
    else:
        dset = h5py.File(join(path, fname), status)

    return dset
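A usage sketch, assuming this function is exposed as pencil.io.open_h5 (file names are illustrative); the parallel branch additionally assumes a parallel build of h5py plus mpi4py:

from pencil.io import open_h5   # assumed import location

# serial write
f = open_h5("data/var.h5", "w", overwrite=True)
f.create_dataset("time", data=0.0)
f.close()

# parallel write with the 'mpio' driver (uncomment if parallel h5py is available)
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# f = open_h5("data/var.h5", "w", comm=comm, size=comm.size, rank=comm.rank)
# f.close()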
Example #4
def export_fig(fig,
               filepath,
               filename=False,
               PNG=True,
               PDF=False,
               EPS=False,
               DPI=300,
               EXPORT_BBOX_INCES='tight',
               transparent=True,
               verbose=True,
               timestamp=False):
    """Does a proper export of a figure handle to all kind of image files.
    """
    import datetime as dt
    from os.path import join, split
    from pencil.io import exists_file as exists
    from pencil.io import mkdir

    ######## parse filepath and filename
    if not filename:
        filepath = split(filepath)
        filename = filepath[-1]
        filepath = filepath[0]

    if filepath == '': filepath = '.'

    filename = filename.strip()
    filepath = filepath.strip()
    complete_filepath = join(filepath, filename)

    mkdir(filepath)

    ######## generate timestamp if demanded
    if timestamp == True:
        timestamp = str(dt.datetime.now())[:-7]
        timestamp = timestamp.replace(" ", "_").replace(":", "-")
        complete_filepath = complete_filepath + '_' + timestamp

    ######## do the export
    if PNG:
        fig.savefig(complete_filepath + '.png',
                    bbox_inches=EXPORT_BBOX_INCES,
                    dpi=DPI,
                    transparent=transparent)
        if verbose: print('~ .png saved')

    if PDF:
        fig.savefig(complete_filepath + '.pdf',
                    bbox_inches=EXPORT_BBOX_INCES,
                    dpi=DPI,
                    transparent=transparent)
        if verbose: print('~ .pdf saved')

    if EPS:
        fig.savefig(complete_filepath + '.eps',
                    bbox_inches=EXPORT_BBOX_INCES,
                    dpi=DPI,
                    transparent=transparent)
        if verbose: print('~ .eps saved')

    if not PNG and not PDF and not EPS:
        if verbose: print('? WARNING: NO OUTPUT FILE HAS BEEN PRODUCED !!')
    else:
        if verbose: print('~ Plots saved to ' + complete_filepath)

    return True
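A short sketch of exporting a matplotlib figure with this helper (output folder and file name are illustrative):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
# writes plots/growth.png and plots/growth.pdf
export_fig(fig, "plots", filename="growth", PNG=True, PDF=True, DPI=150)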
Example #5
    def resume_from_var(self, sim_source, varno, DEBUG=False):
        """
        Copies everything to resume a run from an older state.

        It uses VAR file number >varno< as the new VAR0 and var.dat.
        PVAR files are copied as well if available.

        Args:
            sim_source:  simulation from where to copy all the files
            varno:       var-file number # from which to copy (VAR#)
        """

        from os import listdir
        from os.path import exists, join, isdir
        import glob
        from pencil.math import is_int
        from pencil.io import mkdir

        def copyfile(src, dst, DEBUG=False):
            from shutil import copy2
            from os.path import exists

            if not exists(src): return False
            if DEBUG: print('< ' + src)
            if DEBUG: print('> ' + dst)
            copy2(src, dst)

        src = sim_source.datadir
        dst = self.datadir
        if is_int(varno): varno = 'VAR' + str(int(varno))

        if not exists(src):
            print('! ERROR: Source data directory does not exist: ' + str(src))
            return False
        if not exists(dst):
            print('! ERROR: Destination data directory does not exist: ' +
                  str(dst))
            return False
        if not varno in sim_source.get_varlist():
            print('! ERROR: Could not find ' + varno +
                  ' in procX folder of sim_source: ' + sim_source.name)
            return False

        data_folders = [p for p in listdir(src) if isdir(join(src, p))]
        procX_folder = [p for p in data_folders if p.startswith('proc')]
        for p in data_folders:
            mkdir(join(dst, p))

        # data/
        files = [
            'def_var.pro', 'dim.dat', 'index.pro', 'move-me.list',
            'particles_stalker_header.dat', 'params.log', 'pc_constants.pro',
            'pdim.dat', 'pencils.list', 'pvarname.dat', 'svnid.dat',
            'var.general', 'variables.pro', 'varname.dat'
        ]
        for f in files:
            copyfile(join(src, f), dst, DEBUG=DEBUG)

        # data/allprocs/
        files = ['grid.dat']
        for f in files:
            copyfile(join(src, 'allprocs', f),
                     join(dst, 'allprocs/'),
                     DEBUG=DEBUG)

        # data/procX
        files = ['dim.dat', 'grid.dat', 'proc_bounds.dat']
        for X in procX_folder:
            for f in files:
                copyfile(join(src, X, f), join(dst, X), DEBUG=DEBUG)
            copyfile(join(src, X, varno), join(dst, X, 'VAR0'), DEBUG=DEBUG)
            copyfile(join(src, X, 'P' + varno),
                     join(dst, X, 'PVAR0'),
                     DEBUG=DEBUG)
            copyfile(join(src, X, varno), join(dst, X, 'var.dat'), DEBUG=DEBUG)
            copyfile(join(src, X, 'P' + varno),
                     join(dst, X, 'pvar.dat'),
                     DEBUG=DEBUG)

        print('? WARNING: KNOWN ERRORS:')
        print(
            '? RUN MIGHT NOT START BECAUSE data/param.nml can get damaged in' +
            ' a run that crashes. This is not fixed by this routine.')
        print('? TRY AND START A SINGLE CORE RUN WITH THIS SETUP AND USE THE' +
              ' CREATED param.nml FOR YOUR PURPOSE INSTEAD.')
        print('? SAME FOR: - tstalk.dat')

        return True
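A sketch of resuming one run from another (both simulation paths and the VAR number are illustrative):

import pencil as pc

old = pc.get_sim("runs/crashed_run")      # source simulation
new = pc.get_sim("runs/restart_run")      # destination simulation
# use VAR12 (and PVAR12 if present) of the old run as VAR0/var.dat of the new one
new.resume_from_var(old, 12, DEBUG=True)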
Example #6
    def copy(self,
             path_root='.',
             name=False,
             start_optionals=False,
             optionals=True,
             quiet=True,
             rename_submit_script=False,
             OVERWRITE=False):
        """
        This method does a copy of the simulation object by creating a new
        directory 'name' in 'path_root' and copy all simulation components and
        optiona)
                ls to its directory.
        This method neither links/compiles the simulation.
        If start_optionals it creates data dir.
        It does not overwrite anything, unless OVERWRITE is True.

        Submit Script Rename:
            Name in submit scripts will be renamed if possible!
            Submit scripts will be identified by submit* plus appearenace of old
            simulation name inside, latter will be renamed!

        Args:
            path_root:      Path to new sim.-folder(sim.-name). This folder will
                            be created if not existing! Relative paths are
                            thought to be relative to the python current workdir
            name:     Name of new simulation, will be used as folder name.
                      Rename will also happen in submit script if found.
                      Simulation folders is not allowed to preexist!!
            optionals:      Add list of further files to be copied. Wildcasts
                            allowed according to glob module!
                            Set True to use self.optionals.
            start optionals:    Add list of further files to be copied.
                                Wildcasts allowed according to glob module!
                                Set True to use self.optionals.
            quiet:              Set True to suppress output.
            rename_submit_script:    Set False if no renames shall be performed
                                     in submit* files
            OVERWRITE:          Set True to overwrite no matter what happens!
        """
        from glob import glob
        from numpy import size
        import os
        from os import listdir, symlink
        from os.path import abspath, basename, exists, islink, join
        from shutil import copyfile

        from pencil import get_sim
        from pencil.io import mkdir, get_systemid, rename_in_submit_script, debug_breakpoint
        from pencil import is_sim_dir

        # set up paths
        if path_root == False or type(path_root) != type('string'):
            print('! ERROR: No path_root specified to copy the simulation to.')
            return False
        path_root = abspath(path_root)  # simulation root dir

        # name and folder of new simulation but keep name of old if sim with old
        # name is NOT existing in NEW directory
        if name == False:
            name = self.name
        if exists(join(path_root, name)) and OVERWRITE == False:
            name = name + '_copy'
            if exists(join(path_root, name)):
                name = name + str(
                    size([f
                          for f in listdir(path_root) if f.startswith(name)]))
            print(
                '? Warning: No name specified and simulation with that name ' +
                'already found! New simulation name now ' + name)
        path_newsim = join(path_root, name)  # simulation abspath
        path_newsim_src = join(path_newsim, 'src')
        if islink(join(path_root, self.name, 'data')):
            link_data = True
            oldtmp = os.path.realpath(join(path_root, self.name, 'data'))
            newtmp = join(str.strip(str.strip(oldtmp, 'data'), self.name),
                          name, 'data')
            if exists(newtmp) and OVERWRITE == False:
                raise ValueError(
                    'Data directory {0} already exists'.format(newtmp))
            else:
                path_newsim_data = newtmp
                path_newsim_data_link = join(path_newsim, 'data')
        else:
            link_data = False
            path_newsim_data = join(path_newsim, 'data')

        path_initial_condition = join(self.path, 'initial_condition')
        if exists(path_initial_condition):
            has_initial_condition_dir = True
            path_newsim_initcond = join(path_newsim, 'initial_condition')
        else:
            has_initial_condition_dir = False

        if type(optionals) == type(['list']):
            optionals = self.optionals + optionals  # optional files to be copied
        if optionals == True: optionals = self.optionals
        if type(optionals) == type('string'): optionals = [optionals]
        if type(optionals) != type(['list']):
            print('! ERROR: optionals must be of type list!')

        tmp = []
        for opt in optionals:
            files = glob(join(self.path, opt))
            for f in files:
                tmp.append(basename(f))
        optionals = tmp

        # optional files to be copied
        if type(start_optionals) == type(['list']):
            start_optionals = self.start_optionals + start_optionals
        if start_optionals == False: start_optionals = self.start_optionals
        if type(start_optionals) == type('string'):
            start_optionals = [start_optionals]
        if type(start_optionals) != type(['list']):
            print('! ERROR: start_optionals must be of type list!')

        tmp = []
        for opt in start_optionals:
            files = glob(join(self.datadir, opt))
            for f in files:
                tmp.append(basename(f))
        start_optionals = tmp
        ## check if the copy was already created
        if is_sim_dir(path_newsim) and OVERWRITE == False:
            if not quiet:
                print('? WARNING: Simulation already exists.' +
                      ' Returning with existing simulation.')
            return get_sim(path_newsim, quiet=quiet)

        ## expand list of optionals wildcasts

        # check existence of path_root+name, a reason to stop and not overwrite
        if OVERWRITE == False and exists(path_newsim):
            print(
                '! ERROR: Folder to copy simulation to already exists!\n! -> '
                + path_newsim)
            return False

        # check existence of self.components
        for comp in self.components:
            if not exists(join(self.path, comp)):
                print('! ERROR: Could not find component ' + comp +
                      ' from simulation ' + self.name + ' at location ' +
                      join(self.path, comp))
                return False

        # check existence of optionals
        for opt in optionals:
            if not exists(join(self.path, opt)):
                print('! ERROR: Could not find optional component ' + opt +
                      ' from simulation ' + self.name + ' at location ' +
                      join(self.path, opt))
                return False

        # check existence of self.start_components
        for comp in self.start_components:
            if not exists(join(self.datadir, comp)):
                print('! ERROR: Could not find component ' + comp +
                      ' from simulation ' + self.name + ' at location ' +
                      join(self.datadir, comp))
                return False

        # check existence of start_optionals
        for opt in start_optionals:
            if not exists(join(self.datadir, opt)):
                print('! ERROR: Could not find optional component ' + opt +
                      ' from simulation ' + self.name + ' at location ' +
                      join(self.datadir, opt))
                return False

        # create folders
        if mkdir(path_newsim) == False and OVERWRITE == False:
            print('! ERROR: Could not create new simulation directory ' +
                  path_newsim + ' !!')
            return False

        if mkdir(path_newsim_src) == False and OVERWRITE == False:
            print('! ERROR: Could not create new simulation src directory ' +
                  path_newsim_src + ' !!')
            return False

        if mkdir(path_newsim_data) == False and OVERWRITE == False:
            print('! ERROR: Could not create new simulation data directory ' +
                  path_newsim_data + ' !!')
            return False
        if link_data:
            symlink(path_newsim_data, path_newsim_data_link)

        # copy files
        files_to_be_copied = []
        for f in self.components + optionals:
            f_path = abspath(join(self.path, f))
            copy_to = abspath(join(path_newsim, f))
            if f_path == copy_to:
                print('!! ERROR: file path f_path equal to destination ' +
                      'copy_to. Debug this line manually!')
                debug_breakpoint()
            copyfile(f_path, copy_to)

        files_to_be_copied = []
        for f in self.start_components + start_optionals:
            f_path = abspath(join(self.datadir, f))
            copy_to = abspath(join(path_newsim_data, f))
            if f_path == copy_to:
                print('!! ERROR: file path f_path equal to destination ' +
                      'copy_to. Debug this line manually!')
                debug_breakpoint()
            copyfile(f_path, copy_to)

        # Organizes any personalized initial conditions
        if has_initial_condition_dir:
            if mkdir(path_newsim_initcond) == False and OVERWRITE == False:
                print(
                    '! ERROR: Could not create new simulation initial_condition'
                    + ' directory ' + path_newsim_initcond + ' !!')
                return False

            for f in listdir(path_initial_condition):
                f_path = abspath(join(path_initial_condition, f))
                copy_to = abspath(join(path_newsim_initcond, f))

                if f_path == copy_to:
                    print('!! ERROR: file path f_path equal to destination ' +
                          'copy_to. Debug this line manually!')
                    debug_breakpoint()
                copyfile(f_path, copy_to)

        # modify name in submit script files
        if rename_submit_script != False:
            if type(rename_submit_script) == type('STRING'):
                rename_in_submit_script(new_name=rename_submit_script,
                                        sim=get_sim(path_newsim))
            else:
                print('!! ERROR: Could not understand rename_submit_script=' +
                      str(rename_submit_script))

        # done
        return get_sim(path_newsim)
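A sketch of copying a simulation setup into a new folder (paths and the new name are illustrative):

import pencil as pc

sim = pc.get_sim("runs/template_run")
# creates runs/param_study/run_a with all components and optionals of the template
new_sim = sim.copy(path_root="runs/param_study", name="run_a",
                   rename_submit_script="run_a", quiet=False)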
Example #7
    def __init__(
        self,
        datadir=False,
        sim=False,
        tmin=0,
        tmax=-1,
        noutmax="-1",
        swap_endian=False,
        quiet=False,
        use_existing_pstalk_sav=False,
    ):
        """
        Read PSTALK files from Pencil Code using IDL.
        Uses the IDL<->Python bridge; this must be activated manually!

        Args:
            - datadir      specify datadir, default False
            - sim           specify simulation from which you want to read
            - swap_endian   change if needed to True, default False
            - quiet         verbosity, default False
            - use_existing_pstalk_sav
                            use existing <sim.datadir>/data/pc/tmp/pstalk.sav for speed up


        """

        import numpy as np
        import os
        from os.path import join
        from pencil import get_sim

        if datadir == False:
            if sim == False:
                sim = get_sim()
            datadir = sim.datadir

        if quiet == False:
            quiet = "0"
        else:
            quiet = "1"

        if swap_endian == False:
            swap_endian = "0"
        else:
            swap_endian = "1"

        if use_existing_pstalk_sav == True:
            from scipy.io.idl import readsav

            print("~ reading existing pstalk..")

            ps = readsav(join(sim.pc_datadir, "tmp", "pstalk.sav"))("pstalk")

            for key in set(ps.dtype.fields.keys()):
                if hasattr(self, key.lower()):
                    continue
                setattr(self, key.lower(), ps[key][0].T)

        else:
            try:
                cwd = os.getcwd()
                from idlpy import IDL

                os.chdir(cwd)

                print("~ reading pstalk in IDL..")

                idl_call = ", ".join(
                    [
                        "pc_read_pstalk",
                        "obj=pstalk",
                        'datadir="' + datadir + '"',
                        "it0=" + str(tmin),
                        "it1=" + str(tmax),
                        "quiet=" + quiet,
                        "swap_endian=" + swap_endian,
                        "noutmax=" + str(noutmax),
                    ]
                )

                IDL.run(idl_call)

                print("~ parsing pstalk from IDL to python..")
                ps = IDL.pstalk

                for key in set(ps.keys()):
                    if hasattr(self, key.lower()):
                        continue
                    setattr(self, key.lower(), ps[key].T)

            except:
                print(
                    "! ERROR: no idl<->python bridge found. Try what's written in the pstalk comment to fix this issue."
                )
                print("! ")
                print("! Use something like: (ensure you have IDL 8.5.1 or larger)")
                print(
                    "! export PYTHONPATH=$PYTHONPATH:$IDL_HOME/lib/bridges:$IDL_HOME/bin/bin.linux.x86_64"
                )
                print(
                    "! export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64:$IDL_HOME/bin/bin.linux.x86_64"
                )
                print("! in your .bashrc")
                print("! ")
                print(
                    "! If you have it already installed, try: from idlpy import IDL and check for errors"
                )
                print("! ")
                print("~ BACKUP SOLUTION: reading pstalk via pidly, starting IDL..")

                from pencil.backpack import pidly

                IDL = pidly.IDL(long_delay=0.05)  # start IDL engine
                from scipy.io.idl import readsav
                from pencil.io import mkdir

                ## read tstalk file
                print("## reading particle stalker file..")
                IDL(
                    'pc_read_pstalk, object=pstalk, datadir="'
                    + sim.datadir
                    + '"'
                    + ", quiet="
                    + quiet
                    + ", it0="
                    + str(tmin)
                    + ", it1="
                    + str(tmax)
                    + ", swap_endian="
                    + swap_endian
                )

                print("## transfering pstalk file from IDL to python..")
                mkdir(join(sim.pc_datadir, "tmp"))
                IDL(
                    'save, pstalk, filename="'
                    + join(
                        sim.pc_datadir,
                        "tmp",
                        "pstalk_" + str(tmin) + "_" + str(tmax) + ".sav",
                    )
                    + '"'
                )
                ps = readsav(
                    join(
                        sim.pc_datadir,
                        "tmp",
                        "pstalk_" + str(tmin) + "_" + str(tmax) + ".sav",
                    )
                )("pstalk")

                # from pc.io import debug_breakpoint; debug_breakpoint()

                for key in set(ps.dtype.fields.keys()):
                    if hasattr(self, key.lower()):
                        continue
                    setattr(self, key.lower(), ps[key][0].T)
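# Usage sketch (assumption: this __init__ belongs to the pstalk reader class that
# pencil exposes as pc.read.pstalk; the simulation path below is illustrative):
#
#     import pencil as pc
#     sim = pc.get_sim("path/to/simulation")
#     stalk = pc.read.pstalk(sim=sim, tmin=0, tmax=-1)
#     # stalked quantities become lower-case attributes, e.g. stalk.xp, stalk.vpx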
def gas_velo_at_particle_pos(
    varfiles="last4", sim=False, scheme="tsc", use_IDL=False, OVERWRITE=False
):
    """This script calulates the gas velocity at the particle position and stores this together
    with particle position, containing grid cell idicies, particle velocities, and particle index
    in a gas_velo_at_particle_pos file.

    Args:
      varfiles:       specify varfiles for calculation, e.g. 'last', 'first',
                          'all', 'VAR###', 'last4', 'first3'
      scheme:         possible are:
                          - ngp: nearest grid point
                          - cic: cloud in cell
                          - tsc: triangular shaped cloud
      OVERWRITE:      set to True to overwrite already calculated results
    """

    from pencil import get_sim
    from pencil import read
    from pencil import diag
    from pencil.io import mkdir, pkl_save
    from os import listdir
    from os.path import exists, join, dirname
    import numpy as np

    GAS_VELO_TAG = "gas_velo_at_particle_pos"

    if sim == False:
        sim = get_sim()
        if sim == False:
            print("! ERROR: Specify simulation object!")
            return False
    SIM = sim

    if use_IDL:
        print(
            "? WARNING: IDL VERSION OF THIS SCRIPT BY JOHANSEN, not recommended for 2D data"
        )
        from ...backpack import pidly

        print("## starting IDL engine..")
        IDL = pidly.IDL(long_delay=0.05)  # start IDL engine

        ## skip if nothing is new
        if (
            (not OVERWRITE)
            and (exists(join(SIM.pc_datadir, "sigma.pkl")))
            and (exists(join(SIM.pc_datadir, "zeta.pkl")))
        ):
            print(
                "~ "
                + SIM.name
                + " is already calculated and up-to-date! -> skipping it!"
            )

        else:
            ## start calculations
            print(
                '~ Calculating gas_velo_at_particle_pos for "'
                + SIM.name
                + '" in "'
                + SIM.path
                + '"'
            )
            IDL(
                ".COMPILE "
                + str(
                    join(
                        dirname(diag.particle.__file__), "gas_velo_at_particle_pos.pro"
                    )
                )
            )
            IDL.pro(
                "gas_velo_at_particle_pos",
                datadir=SIM.datadir,
                destination=GAS_VELO_TAG,
                doforthelastNvar=varfiles[4:],
            )
            files = [
                i.split("_")[-1].split(".sav")[0]
                for i in listdir(join(SIM.pc_datadir, GAS_VELO_TAG))
                if i.startswith(GAS_VELO_TAG)
                and i.endswith(".sav")
                or i.endswith(".pkl")
            ]
            if files == []:
                print(
                    "!! ERROR: No calc_gas_speed_at_particle_position-files found for "
                    + SIM.name
                    + "! Use idl script to produce them first!"
                )

        IDL.close()
        return True

    else:
        print(
            '~ Calculating gas_velo_at_particle_pos for "'
            + SIM.name
            + '" in "'
            + SIM.path
            + '"'
        )
        save_destination = join(SIM.pc_datadir, GAS_VELO_TAG)
        mkdir(save_destination)
        varlist = SIM.get_varlist(pos=varfiles, particle=False)
        pvarlist = SIM.get_varlist(pos=varfiles, particle=True)

        for f, p in zip(varlist, pvarlist):
            save_filename = GAS_VELO_TAG + "_" + scheme + "_" + f[3:]
            if not OVERWRITE and exists(join(save_destination, save_filename)):
                continue

            print("## Reading " + f + " ...")
            ff = read.var(datadir=SIM.datadir, varfile=f, quiet=True, trimall=False)
            pp = read.pvar(datadir=SIM.datadir, varfile=p)

            ## remove ghost zones from grid, call the reduced grid the "real grid"
            realgridx = ff.x[ff.l1 : ff.l2]
            realgridy = ff.y[ff.m1 : ff.m2]
            realgridz = ff.z[ff.n1 : ff.n2]
            nx = ff.l2 - ff.l1
            ny = ff.m2 - ff.m1
            nz = ff.n2 - ff.n1

            ## prepare list for all quantities
            l_ipars = pp.ipars  # particle number   KNOWN
            l_px = pp.xp
            l_py = pp.yp
            l_pz = pp.zp  # particle absolute position KNOWN
            l_vx = pp.vpx
            l_vy = pp.vpy
            l_vz = pp.vpz  # particle velocity KNOWN
            l_rix = []
            l_riy = []
            l_riz = []  # particle untrimmed realgrid index (grid index = l/m/n + realgrid index ???)
            l_ix = []
            l_iy = []
            l_iz = []  # particle grid index (in untrimmed grid)
            l_ux = []
            l_uy = []
            l_uz = []  # underlying gas velocity at position of particle

            ## get index of realgrid cell for each particle
            for i in range(len(l_ipars)):
                l_rix.append(np.abs(realgridx - l_px[i]).argmin())
                l_riy.append(np.abs(realgridy - l_py[i]).argmin())
                l_riz.append(np.abs(realgridz - l_pz[i]).argmin())

            ## convert into untrimmed grid
            l_ix = np.array(l_rix) + ff.l1
            l_iy = np.array(l_riy) + ff.m1
            l_iz = np.array(l_riz) + ff.n1

            ## NGP
            if scheme == "ngp" or scheme == "NGP":
                print("## Calculating gas velocities via " + scheme)
                l_ux = ff.ux[l_iz, l_iy, l_ix]
                l_uy = ff.uy[l_iz, l_iy, l_ix]
                l_uz = ff.uz[l_iz, l_iy, l_ix]

            ## CIC
            if scheme == "cic" or scheme == "CIC":
                print("## Calculating gas velocities via " + scheme)
                for ix0, iy0, iz0, px, py, pz in zip(
                    l_ix, l_iy, l_iz, l_px, l_py, l_pz
                ):  # for each particle
                    if ff.x[ix0] > px:
                        ix0 = ix0 - 1  # ix0 must be left to particle
                    if ff.y[iy0] > py:
                        iy0 = iy0 - 1  # iy0 must be below the particle
                    if ff.z[iz0] > pz:
                        iz0 = iz0 - 1  # iz0 must be under particle

                    ix1 = ix0
                    iy1 = iy0
                    iz1 = iz0  # if a dim. is zero, this is default, else:
                    if nx > 1:
                        ix1 = ix0 + 1
                        dx_1 = 1.0 / ff.dx  # if a dim is non-zero, adjust ix1 to the right-hand cell
                    if ny > 1:
                        iy1 = iy0 + 1
                        dy_1 = 1.0 / ff.dy  # if a dim is non-zero, adjust iy1 to the cell above
                    if nz > 1:
                        iz1 = iz0 + 1
                        dz_1 = 1.0 / ff.dz  # if a dim is non-zero, adjust iz1 to the cell above

                    ux = 0.0
                    uy = 0.0
                    uz = 0.0
                    for ix in [ix0, ix1]:
                        for iy in [iy0, iy1]:
                            for iz in [iz0, iz1]:
                                weight = 1.0
                                if nx > 1:
                                    weight = weight * (1.0 - abs(px - ff.x[ix]) * dx_1)
                                if ny > 1:
                                    weight = weight * (1.0 - abs(py - ff.y[iy]) * dy_1)
                                if nz > 1:
                                    weight = weight * (1.0 - abs(pz - ff.z[iz]) * dz_1)

                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]

                                if iz0 == iz1:
                                    break  # beware of degeneracy:
                            if iy0 == iy1:
                                break  # beware of degeneracy:
                        if ix0 == ix1:
                            break  # beware of degeneracy:

                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## TSC
            if scheme == "tsc" or scheme == "TSC":
                for ix0, iy0, iz0, px, py, pz in zip(
                    l_ix, l_iy, l_iz, l_px, l_py, l_pz
                ):  # for each particle
                    ixx0 = ix0
                    ixx1 = ix0  # beware of degeneracy
                    iyy0 = iy0
                    iyy1 = iy0
                    izz0 = iz0
                    izz1 = iz0

                    if nx > 1:
                        ixx0 = ix0 - 1
                        ixx1 = ix0 + 1
                        dx_1 = 1.0 / ff.dx
                        dx_2 = 1.0 / ff.dx ** 2
                    if ny > 1:
                        iyy0 = iy0 - 1
                        iyy1 = iy0 + 1
                        dy_1 = 1.0 / ff.dy
                        dy_2 = 1.0 / ff.dy ** 2
                    if nz > 1:
                        izz0 = iz0 - 1
                        izz1 = iz0 + 1
                        dz_1 = 1.0 / ff.dz
                        dz_2 = 1.0 / ff.dz ** 2

                    ux = 0.0
                    uy = 0.0
                    uz = 0.0
                    for ix in [ix0, ixx0, ixx1]:
                        weight_x = 0.0
                        if ix - ix0 == -1 or ix - ix0 == 1:
                            weight_x = (
                                1.125
                                - 1.5 * abs(px - ff.x[ix]) * dx_1
                                + 0.5 * abs(px - ff.x[ix]) ** 2 * dx_2
                            )
                        elif nx != 1:
                            weight_x = 0.75 - (px - ff.x[ix]) ** 2 * dx_2

                        for iy in [iy0, iyy0, iyy1]:
                            weight_y = 0.0
                            if iy - iy0 == -1 or iy - iy0 == 1:
                                weight_y = (
                                    1.125
                                    - 1.5 * abs(py - ff.y[iy]) * dy_1
                                    + 0.5 * abs(py - ff.y[iy]) ** 2 * dy_2
                                )
                            elif ny != 1:
                                weight_y = 0.75 - (py - ff.y[iy]) ** 2 * dy_2

                            for iz in [iz0, izz0, izz1]:
                                weight_z = 0.0
                                if iz - iz0 == -1 or iz - iz0 == 1:
                                    weight_z = (
                                        1.125
                                        - 1.5 * abs(pz - ff.z[iz]) * dz_1
                                        + 0.5 * abs(pz - ff.z[iz]) ** 2 * dz_2
                                    )
                                elif nz != 1:
                                    weight_z = 0.75 - (pz - ff.z[iz]) ** 2 * dz_2

                                weight = 1.0
                                if nx > 1:
                                    weight = weight * weight_x
                                if ny > 1:
                                    weight = weight * weight_y
                                if nz > 1:
                                    weight = weight * weight_z

                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]

                                if izz0 == izz1:
                                    break  # beware of degeneracy:
                            if iyy0 == iyy1:
                                break  # beware of degeneracy:
                        if ixx0 == ixx1:
                            break  # beware of degeneracy:

                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## Convert all information into a single record array
            data_set = np.core.records.fromarrays(
                [
                    l_ipars.astype("int"),
                    l_px,
                    l_py,
                    l_pz,
                    l_vx,
                    l_vy,
                    l_vz,
                    l_rix,
                    l_riy,
                    l_riz,
                    l_ix,
                    l_iy,
                    l_iz,
                    l_ux,
                    l_uy,
                    l_uz,
                ],
                names="ipar, ipx, ipy, ipz, vx, vy, vz, rix, riy, riz, ix, iy, iz, ux, uy, uz",
                formats="int, float, float, float, float, float, float, int, int, int, int, int, int, float, float, float",
            )
            gas_velo_at_particle_pos = np.sort(data_set, order=["ix", "iy", "iz"])

            Nix = int(gas_velo_at_particle_pos["rix"].max() + 1)
            Niy = int(gas_velo_at_particle_pos["riy"].max() + 1)
            Niz = int(gas_velo_at_particle_pos["riz"].max() + 1)

            Npar_arr = np.array(
                [
                    gas_velo_at_particle_pos["rix"],
                    gas_velo_at_particle_pos["riy"],
                    gas_velo_at_particle_pos["riz"],
                ]
            )
            # rgrid_edges = (grid.x[1:]-(grid.x[1:]-grid.x[:-1])/2)[2:-2]
            xrange = np.arange(0, float(gas_velo_at_particle_pos["rix"].max()) + 2)
            xrange = xrange - 0.5
            yrange = np.arange(0, float(gas_velo_at_particle_pos["riy"].max()) + 2)
            zrange = np.arange(0, float(gas_velo_at_particle_pos["riz"].max()) + 2)

            Npar_hist, edges = np.histogramdd(Npar_arr.T, bins=(xrange, yrange, zrange))
            Npar_hist, edges = np.histogramdd(Npar_arr.T, bins=(Nix, Niy, Niz))

            gas_velo_at_particle_pos = {
                "time": ff.t,
                "par_pos": np.array(
                    [
                        gas_velo_at_particle_pos["ipx"],
                        gas_velo_at_particle_pos["ipy"],
                        gas_velo_at_particle_pos["ipz"],
                    ]
                ),
                "par_velo": np.array(
                    [
                        gas_velo_at_particle_pos["vx"],
                        gas_velo_at_particle_pos["vy"],
                        gas_velo_at_particle_pos["vz"],
                    ]
                ),
                "par_idx": np.array(
                    [
                        gas_velo_at_particle_pos["rix"],
                        gas_velo_at_particle_pos["riy"],
                        gas_velo_at_particle_pos["riz"],
                    ]
                ),
                "npar": np.array(
                    Npar_hist[
                        gas_velo_at_particle_pos["rix"],
                        gas_velo_at_particle_pos["riy"],
                        gas_velo_at_particle_pos["riz"],
                    ]
                ),
                "gas_velo": np.array(
                    [
                        gas_velo_at_particle_pos["ux"],
                        gas_velo_at_particle_pos["uy"],
                        gas_velo_at_particle_pos["uz"],
                    ]
                ),
            }

            print("## Saving dataset into " + save_destination + "...")
            pkl_save(
                {"gas_velo_at_particle_pos": gas_velo_at_particle_pos, "t": ff.t},
                save_filename,
                folder=save_destination,
            )
        print("## Done!")
def get_value_from_file(
    filename,
    quantity,
    change_quantity_to=None,
    sim=False,
    filepath=False,
    DEBUG=False,
    silent=False,
):
    """Use to read in a quantity from
        - *.in
        - *.local
        - submit*, i.e. submit.sh or submit.csh files; only works if the computer is properly specified in pc.io.get_systemid

    Please add further functionality by yourself!

    Args:
        filename:   can be "run.in", "start.in", "cparam.local"; the path to that file is extracted from filepath or the sim object
        quantity:   variable to read in from the file
        sim:        put a simulation object here, the file will then be found by filename automatically
        filepath:   normally not needed; specify here where to find the file with filename, can be a list of paths if unsure
        DEBUG:      make a dry run, i.e. report what would be changed without changing anything!
        silent:     suppress certain output by setting True

    Return:
        Returns None if not successful
    """

    import os
    import numpy as np
    from os.path import join, abspath, exists, split, isfile
    from pencil import get_sim
    from pencil.math import is_number, is_float, is_int
    from pencil.io import timestamp, debug_breakpoint, mkdir
    import re
    import copy

    def string_to_tuple(s):
        q = s.split(",")

        if is_number(q[0]):
            q = np.array([float(t) for t in q])
            q_type = "TUPLE_FLOAT"
            return q, q_type

        if q[0] == "T" or q[0] == "F":
            q = np.array([bool(t == "T") for t in q])
            q_type = "TUPLE_BOOL"
            return q, q_type

        if type(q[0]) == type("string"):
            q = [t.replace('"', "").replace("'", "") for t in q]
            q_type = "TUPLE_STRING"
            return q, q_type

        print("! ERROR: Could not parse string " + s + " into a tuple!")
        print(
            "! DEBUG_BREAKPOINT AKTIVATED - check out the following variables: string s, tuple q, first entry in tuple q[0]"
        )
        debug_breakpoint()
        return None, None

    def tuple_to_string(t, q_type):
        return ",".join([str(a) for a in t])

    ######## prepare filename and quantity
    filename = filename.strip()  # get rid of whitespaces
    quantity = quantity.strip()
    q_type = False  # q_type will store the type of the quantity value once found and identified

    split_filename = split(filename)
    if sim == False and split_filename[0] != "" and filepath == False:
        filepath = split_filename[0]
        filename = split_filename[1]

    ######## find correct file
    # prepare search_path list to search filename in
    if filepath == False:
        if sim == False:
            sim = get_sim()
        else:
            filepath = sim.path
        search_paths = [
            sim.path,
            join(sim.path, "src"),
        ]  # add other search paths here!!

    elif type(filepath) == type("string"):
        if filepath.endswith(filename):
            # clean filepath if filename happens to be at its end
            filepath = filepath[:-len(filename)]
        search_paths = [abspath(filepath.strip())]  # correct path format

    elif type(filepath) == type(["list"]):
        search_paths = filepath

    else:
        print("! ERROR: Filename " + str(filename) +
              " could not be interprated or found!")
        return None

    absolute_filepath = None
    for search_path in search_paths:
        tmp_path = join(search_path, filename)
        if os.path.isfile(tmp_path):
            absolute_filepath = tmp_path
            break

    # Traps the case of not being able to find the file
    if absolute_filepath is None:
        if DEBUG:
            print("~ DEBUG: File {0} not found in {1}!".format(
                filename, search_paths))
        return None

    ######## open file
    # now having absolute filepath to file, lets check that file and find quantity inside!
    if DEBUG:
        print("~ DEBUG: Found file {0} in {1}".format(filename, filepath))

    with open(absolute_filepath, "r") as f:
        data_raw = f.readlines()

    ######## find line in file which quantity in
    line_matches = []
    # scan through file for differently for different files
    if (filename.endswith(".in") or "cparam.local" in filename
            or "Makefile.local" in filename):
        FILE_IS = "IN_LOCAL"
        SYM_COMMENT = "!"
        SYM_ASSIGN = "="
        SYM_SEPARATOR = ","

        for ii, line in enumerate(data_raw):
            if line.strip().startswith("&"):
                continue  # filter out lines with &something, e.g. &density_run_pars
            quantity_match_tmp = re.search(
                "[^0-9a-zA-Z_]*{0}[^0-9a-zA-Z_]".format(quantity),
                line.split(SYM_COMMENT)[0],
            )
            # Check if this substring occurs as a string.
            if quantity_match_tmp:
                quantity_match = quantity_match_tmp
                if (str.count(line[0:quantity_match.start()], "'") % 2 == 0
                        and str.count(line[0:quantity_match.start()], '"') % 2
                        == 0):
                    if ("run" in filename or "start" in filename
                            or ".local" in filename or "print" in filename):
                        if ("=" in quantity_match_tmp.
                                string[quantity_match_tmp.start() +
                                       2:quantity_match_tmp.end()]):
                            if line_matches:
                                line_matches[0] = ii
                            else:
                                line_matches.append(ii)
                        quantity_match = quantity_match_tmp

    elif filename.startswith("submit") and filename.split(".")[-1] in [
            "csh", "sh"
    ]:
        FILE_IS = "SUBMIT"
        SYM_COMMENT = False
        SYM_ASSIGN = "="
        SYM_SEPARATOR = ","

        for ii, line in enumerate(data_raw):
            if line.replace(" ", "").startswith("#@") and quantity in line:
                # submit scripts have no comment symbol to strip off (SYM_COMMENT is False)
                quantity_match_tmp = re.search(
                    "[^0-9a-zA-Z_]*{0}[^0-9a-zA-Z_]".format(quantity), line
                )
                if quantity_match_tmp:
                    quantity_match = quantity_match_tmp
                    if line_matches:
                        line_matches[0] = ii
                    else:
                        line_matches.append(ii)
    else:
        print(
            "! ERROR: Filename unknown! No parsing possible! Please enhance this function to work with "
            + filename)

    if len(line_matches) > 1:
        print('! ERROR: Found more than one line with keyword "' + quantity +
              '" inside!')
        return None
    if len(line_matches) == 0:
        if silent == False:
            print('! ERROR: Found no line with keyword "' + quantity +
                  '" inside ' + join(filepath, filename) + "!")
        return None

    filename = os.path.basename(filename)

    ######## get line with quantity inside
    line = data_raw[line_matches[0]]
    ######## do separation of quantity from rest of line, i.e. get rid of comments and other quantities defined in this line
    comment = ""
    if SYM_COMMENT:
        tmp = line.partition(SYM_COMMENT)  # strip away comment
        line = tmp[0]
        if tmp[-1] != "":
            comment = SYM_COMMENT + tmp[-1]  # and store for later

    #    line = line.replace(' ','').replace('\n', '')                               # do cleanup in this line

    # Find the position where the quantity is stored.
    pos_equal_sign_left = quantity_match.end() + str.find(
        line[quantity_match.end() - 1:], "=")
    #    pos_equal_sign_right = pos_equal_sign_left + str.find(line[pos_equal_sign_left:], ', *[0-9a-zA-Z][0-9a-zA-Z]* *= *[0-9a-zA-Z][0-9a-zA-Z]*')
    pos_equal_sign_right = pos_equal_sign_left + str.find(
        line[pos_equal_sign_left:], "=")
    if pos_equal_sign_right < pos_equal_sign_left:
        pos_equal_sign_right = -1
        pos_right_comma = -1
    else:
        pos_right_comma = str.rfind(line[:pos_equal_sign_right], ",")

    # Change the quantity in the line string.
    q = copy.copy(line[pos_equal_sign_left:pos_right_comma])
    #    qs = line.partition(quantity+SYM_ASSIGN)
    #    if SYM_ASSIGN in qs[-1]:
    #        qs = qs[:2]+qs[-1].partition(SYM_ASSIGN)
    #        #qs = qs[:2]+qs[-1].partition(SYM_ASSIGN)
    #        qs = qs[:2]+qs[2].rpartition(',')+qs[3:]
    #
    #    qs = list(qs)
    #    q = qs[2]

    while q.endswith("\t"):
        q = q[:-1]
        comment = "\t" + comment  # take care of trailing tabulator
    while q.endswith(","):
        q = q[:-1]  # remove trailing ,

    ######## do a cleanup of quantity value q and convert into string, float, int or array, also remember data type of q
    if q.startswith("'") and q.endswith(
            "'"):  # quantity q is string in form of 'STRING'
        q = q[1:-1]
        q_type = "STRING"

    elif q.startswith('"') and q.endswith(
            '"'):  # quantity q is string in form of "STRING"
        q = q[1:-1]
        q_type = "STRING"

    elif not is_number(
            q[0]):  # quantity q is a string in form of not being a number
        q = q.strip().replace('"', "").replace("'", "")
        q_type = "STRING"

    try:
        float(q)
        q_type = "FLOAT"
        if is_int(q):
            q = int(q)
            q_type = "INT"
    except:
        if type(q) == type("string") and "," in q:
            q, q_type = string_to_tuple(q)  # q is a TUPLE_something
            print("q = {0}, q_type = {1}".format(q, q_type))

        if type(q) == type("string") and q in ["F", "f"]:  # q is BOOL
            q = False
            q_type = "BOOL"

        if type(q) == type("string") and q in ["T", "t"]:
            q = True
            q_type = "BOOL"

        if type(q) == type("string"):
            if is_number(q[0]):
                q_type = "STRING"

    if q_type == False:  # catch if type of q was not recognized
        print(
            "! ERROR: Couldnt identify the data type of the quantity value: " +
            str(q))
        DEBUG = True
        debug_breakpoint()
    elif DEBUG:
        print("~ DEBUG: q_type = " + q_type)
    if q_type == "FLOAT":
        q = float(q)
    elif q_type == "INT":
        q = int(q)

    ######## if value of quantity has to be changed do:
    if change_quantity_to != None:

        ####### prepare change_quantity_to for string injection
        if q_type == "STRING":
            if not FILE_IS == "SUBMIT":
                if type(change_quantity_to) == list:
                    tmp = ""
                    for a in change_quantity_to:
                        tmp += "'" + a + "',"
                    change_quantity_to = tmp[:-1]
                elif type(change_quantity_to) == int:
                    change_quantity_to = str(change_quantity_to)
                else:
                    change_quantity_to = "'" + change_quantity_to + "'"

        elif q_type == "BOOL":
            change_quantity_to = bool(change_quantity_to in ["T", "t", True])
            if change_quantity_to == True:
                change_quantity_to = "T"
            elif change_quantity_to == False:
                change_quantity_to = "F"
            else:
                print("! ERROR: There is something deeply wrong here!" +
                      " change_quantity_to should be bool...")
                debug_breakpoint()
                return None

        elif q_type == "FLOAT":
            if type(change_quantity_to) == str:
                change_quantity_to = float(change_quantity_to)
            change_quantity_to = "%e" % change_quantity_to

        elif q_type == "INT":
            if type(change_quantity_to) == str:
                change_quantity_to = int(change_quantity_to)
            change_quantity_to = "%i" % change_quantity_to

        elif q_type.startswith("TUPLE"):
            if q_type.endswith("BOOL"):
                if type(change_quantity_to) == type(
                    ["list", "of", "bool", "or", "strings"]):
                    for ii, val in enumerate(change_quantity_to):
                        if val in ["T", "t", True]:
                            change_quantity_to[ii] = "T"
                        elif val in ["F", "f", False]:
                            change_quantity_to[ii] = "F"
                        else:
                            print(
                                "! ERROR: There is something deeply wrong here! change_quantity_to["
                                + str(ii) +
                                "] should be bool or string representation, but it is "
                                + str(change_quantity_to[ii]))
                            debug_breakpoint()
                            return None
                change_quantity_to = ",".join(
                    [str(t) for t in change_quantity_to])
            if q_type.endswith("FLOAT"):
                change_quantity_to = str(list(change_quantity_to))[1:-1]
            if q_type.endswith("STRING"):
                change_quantity_to = str(list(change_quantity_to))[1:-1]

        if DEBUG:
            print("~ DEBUG: Would change quantity " + quantity + " from " +
                  str(q) + " to " + str(change_quantity_to))
        q = str(change_quantity_to)

        ######## further formatting
        new_line = (line[:pos_equal_sign_left] + q + line[pos_right_comma:] +
                    "\t" + comment
                    )  # create new line and add comment stripped away before
        #        new_line = ''.join(qs).replace(SYM_SEPARATOR, SYM_SEPARATOR+' ')+'\t'+comment    # create new line and add comment stripped away before
        new_line = (new_line.rstrip()
                    )  # clean empty spaces on the right, no one needs that...
        if new_line[-1] != "\n":
            new_line = new_line + "\n"
        if FILE_IS == "SUBMIT":
            new_line = new_line.replace("#@", "#@ ").replace(
                "=", " = ")  # optimizing format of submit script

        if DEBUG:
            print("~ DEBUG: old line: " + str(data_raw[line_matches[0]])[:-1])
            print("~ DEBUG: new line: " + str(new_line)[:-1])

        if not DEBUG:
            ####### do backup of file before changing it
            if sim:
                from shutil import copyfile

                target = join(sim.path, "pc/backups/" + timestamp())
                mkdir(target)
                target = join(target, filename)
                copyfile(absolute_filepath, target)

            # replace line in raw data
            data_raw[line_matches[0]] = new_line

            # save on drive
            f.close()
            with open(absolute_filepath, "w") as f:
                for l in data_raw:
                    f.write(l)

    ######## DONE!
    return q


def gas_velo_at_particle_pos(varfiles='last4',
                             sim=False,
                             scheme='tsc',
                             use_IDL=False,
                             OVERWRITE=False):
    """This script calulates the gas velocity at the particle position and stores this together
  with particle position, containing grid cell idicies, particle velocities, and particle index
  in a gas_velo_at_particle_pos file.

  Args:
    varfiles:       specifiy varfiles for calculation, e.g. 'last', 'first',
                        'all', 'VAR###', 'last4', 'first3'
    scheme:         possible are:
                        - ngp: nearest grid point
                        - cic: cloud in cell
                        - tsc: triangular shaped cloud
    OVERWRITE:		set to True to overwrite already calculated results
  """

    import os
    from pencil import io
    from pencil import read
    from pencil import get_sim
    from os.path import exists
    import numpy as np

    GAS_VELO_TAG = 'gas_velo_at_particle_pos'

    if sim == False:
        sim = get_sim()
        if sim == False:
            print('! ERROR: Specify simulation object!')
            return False
    SIM = sim

    if use_IDL:
        print(
            '? WARNING: IDL VERSION OF THIS SCRIPT BY JOHANSEN, not recommended for 2D data'
        )
        from pencil.backpack import pidly
        print('## starting IDL engine..')
        IDL = pidly.IDL(long_delay=0.05)  # start IDL engine

        ## skip if nothing is new
        if (not OVERWRITE) and (exists(
                os.path.join(SIM.pc_datadir, 'sigma.pkl'))) and (exists(
                    os.path.join(SIM.pc_datadir, 'zeta.pkl'))):
            print('~ ' + SIM.name +
                  ' is already calculated and up-to-date! -> skipping it!')

        else:
            ## start calculations
            print('~ Calculating gas_velo_at_particle_pos for "' + SIM.name +
                  '" in "' + SIM.path + '"')
            IDL.pro('gas_velo_at_particle_pos',
                    datadir=SIM.datadir,
                    destination=GAS_VELO_TAG,
                    doforthelastNvar=varfiles[4:])
            files = [
                i.split('_')[-1].split('.sav')[0]
                for i in os.listdir(os.path.join(SIM.pendatadir, GAS_VELO_TAG))
                if i.startswith(GAS_VELO_TAG) and
                (i.endswith('.sav') or i.endswith('.pkl'))
            ]
            if files == []:
                print(
                    '!! ERROR: No calc_gas_speed_at_particle_position-files found for '
                    + SIM.name + '! Use idl script to produce them first!')

        IDL.close()
        return True

    else:
        print('~ Calculating gas_velo_at_particle_pos for "' + SIM.name +
              '" in "' + SIM.path + '"')
        save_destination = os.path.join(SIM.pc_datadir, GAS_VELO_TAG)
        io.mkdir(save_destination)
        varlist = SIM.get_varlist(pos=varfiles, particle=False)
        pvarlist = SIM.get_varlist(pos=varfiles, particle=True)

        for f, p in zip(varlist, pvarlist):
            save_filename = GAS_VELO_TAG + '_' + scheme + '_' + f[3:]
            # os.path.exists expects a single path; build it explicitly,
            # assuming io.pkl_save (used below) stores '<save_filename>.pkl'
            if not OVERWRITE and exists(
                    os.path.join(save_destination, save_filename + '.pkl')):
                continue

            print('## Reading ' + f + ' ...')
            ff = read.var(datadir=SIM.datadir,
                          varfile=f,
                          quiet=True,
                          trimall=False)
            pp = read.pvar(datadir=SIM.datadir, varfile=p)

            ## remove ghost zones from grid, call the reduced grid the "real grid"
            realgridx = ff.x[ff.l1:ff.l2]
            realgridy = ff.y[ff.m1:ff.m2]
            realgridz = ff.z[ff.n1:ff.n2]
            nx = ff.l2 - ff.l1
            ny = ff.m2 - ff.m1
            nz = ff.n2 - ff.n1

            ## prepare list for all quantities
            l_ipars = pp.ipars  # particle number   KNOWN
            l_px = pp.xp
            l_py = pp.yp
            l_pz = pp.zp  # particle absolute position KNOWN
            l_vx = pp.vpx
            l_vy = pp.vpy
            l_vz = pp.vpz  # particle velocity KNOWN
            l_rix = []
            l_riy = []
            l_riz = []  # particle index in the real (ghost-free) grid; untrimmed grid index = l1/m1/n1 + real-grid index
            l_ix = []
            l_iy = []
            l_iz = []  # particle grid index (in untrimmed grid)
            l_ux = []
            l_uy = []
            l_uz = []  # underlying gas velocity at position of particle

            ## get index of realgrid cell for each particle
            for i in range(len(l_ipars)):
                l_rix.append(np.abs(realgridx - l_px[i]).argmin())
                l_riy.append(np.abs(realgridy - l_py[i]).argmin())
                l_riz.append(np.abs(realgridz - l_pz[i]).argmin())

            ## convert into untrimmed grid
            l_ix = np.array(l_rix) + ff.l1
            l_iy = np.array(l_riy) + ff.m1
            l_iz = np.array(l_riz) + ff.n1
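            # e.g. a particle in the first real-grid cell (l_rix == 0) gets the
            # untrimmed index l_ix == ff.l1, i.e. it is shifted by the number of
            # lower ghost cells (typically 3 in the Pencil Code)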

            ## NGP
            if scheme == 'ngp' or scheme == 'NGP':
                print('## Calculating gas velocities via ' + scheme)
                l_ux = ff.ux[l_iz, l_iy, l_ix]
                l_uy = ff.uy[l_iz, l_iy, l_ix]
                l_uz = ff.uz[l_iz, l_iy, l_ix]

            ## CIC
            if scheme == 'cic' or scheme == 'CIC':
                print('## Calculating gas velocities via ' + scheme)
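                # CIC: each particle contributes to the two nearest cells per
                # dimension with linear weights 1 - |xp - x_cell|/dx.
                # 1D illustration (hypothetical numbers): dx = 1, cell centres
                # at x = 0 and x = 1, particle at xp = 0.3
                #   -> weight(cell 0) = 0.7, weight(cell 1) = 0.3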
                for ix0, iy0, iz0, px, py, pz in zip(
                        l_ix, l_iy, l_iz, l_px, l_py,
                        l_pz):  # for each particle
                    if ff.x[ix0] > px:
                        ix0 = ix0 - 1  # ix0 must be to the left of the particle
                    if ff.y[iy0] > py:
                        iy0 = iy0 - 1  # iy0 must be below the particle
                    if ff.z[iz0] > pz:
                        iz0 = iz0 - 1  # iz0 must be below the particle

                    ix1 = ix0
                    iy1 = iy0
                    iz1 = iz0  # if a dim. is zero, this is default, else:
                    if nx > 1:
                        ix1 = ix0 + 1
                        dx_1 = 1. / ff.dx  # if a dim is non-degenerate, adjust ix1 to the right-hand cell
                    if ny > 1:
                        iy1 = iy0 + 1
                        dy_1 = 1. / ff.dy  # if a dim is non-degenerate, adjust iy1 to the cell above
                    if nz > 1:
                        iz1 = iz0 + 1
                        dz_1 = 1. / ff.dz  # if a dim is non-degenerate, adjust iz1 to the cell above

                    ux = 0.
                    uy = 0.
                    uz = 0.
                    for ix in [ix0, ix1]:
                        for iy in [iy0, iy1]:
                            for iz in [iz0, iz1]:
                                weight = 1.
                                if nx > 1:
                                    weight = weight * (
                                        1. - abs(px - ff.x[ix]) * dx_1)
                                if ny > 1:
                                    weight = weight * (
                                        1. - abs(py - ff.y[iy]) * dy_1)
                                if nz > 1:
                                    weight = weight * (
                                        1. - abs(pz - ff.z[iz]) * dz_1)

                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]

                                if iz0 == iz1: break  # beware of degeneracy:
                            if iy0 == iy1: break  # beware of degeneracy:
                        if ix0 == ix1: break  # beware of degeneracy:

                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## TSC
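            # TSC: quadratic-spline weights over the three nearest cells per
            # dimension, as coded below:
            #   centre cell     : w = 0.75 - (d/dx)**2
            #   neighbour cells : w = 1.125 - 1.5*|d|/dx + 0.5*(d/dx)**2
            # with d the distance between particle and cell centre; the
            # per-dimension weights are multiplied to give the 3D weight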
            if scheme == 'tsc' or scheme == 'TSC':
                for ix0, iy0, iz0, px, py, pz in zip(
                        l_ix, l_iy, l_iz, l_px, l_py,
                        l_pz):  # for each particle
                    ixx0 = ix0
                    ixx1 = ix0  # beware of degeneracy
                    iyy0 = iy0
                    iyy1 = iy0
                    izz0 = iz0
                    izz1 = iz0

                    if nx > 1:
                        ixx0 = ix0 - 1
                        ixx1 = ix0 + 1
                        dx_1 = 1. / ff.dx
                        dx_2 = 1. / ff.dx**2
                    if ny > 1:
                        iyy0 = iy0 - 1
                        iyy1 = iy0 + 1
                        dy_1 = 1. / ff.dy
                        dy_2 = 1. / ff.dy**2
                    if nz > 1:
                        izz0 = iz0 - 1
                        izz1 = iz0 + 1
                        dz_1 = 1. / ff.dz
                        dz_2 = 1. / ff.dz**2

                    ux = 0.
                    uy = 0.
                    uz = 0.
                    for ix in [ix0, ixx0, ixx1]:
                        weight_x = 0.
                        if ix - ix0 == -1 or ix - ix0 == 1:
                            weight_x = 1.125 - 1.5 * abs(
                                px - ff.x[ix]) * dx_1 + 0.5 * abs(
                                    px - ff.x[ix])**2 * dx_2
                        elif nx != 1:
                            weight_x = 0.75 - (px - ff.x[ix])**2 * dx_2

                        for iy in [iy0, iyy0, iyy1]:
                            weight_y = 0.
                            if iy - iy0 == -1 or iy - iy0 == 1:
                                weight_y = 1.125 - 1.5 * abs(
                                    py - ff.y[iy]) * dy_1 + 0.5 * abs(
                                        py - ff.y[iy])**2 * dy_2
                            elif ny != 1:
                                weight_y = 0.75 - (py - ff.y[iy])**2 * dy_2

                            for iz in [iz0, izz0, izz1]:
                                weight_z = 0.
                                if iz - iz0 == -1 or iz - iz0 == 1:
                                    weight_z = 1.125 - 1.5 * abs(
                                        pz - ff.z[iz]) * dz_1 + 0.5 * abs(
                                            pz - ff.z[iz])**2 * dz_2
                                elif nz != 1:
                                    weight_z = 0.75 - (pz - ff.z[iz])**2 * dz_2

                                weight = 1.
                                if nx > 1: weight = weight * weight_x
                                if ny > 1: weight = weight * weight_y
                                if nz > 1: weight = weight * weight_z

                                ux = ux + weight * ff.ux[iz, iy, ix]
                                uy = uy + weight * ff.uy[iz, iy, ix]
                                uz = uz + weight * ff.uz[iz, iy, ix]

                                if izz0 == izz1: break  # beware of degeneracy:
                            if iyy0 == iyy1: break  # beware of degeneracy:
                        if ixx0 == ixx1: break  # beware of degeneracy:

                    l_ux.append(ux)
                    l_uy.append(uy)
                    l_uz.append(uz)

            ## Convert all information into a single record array
            data_set = np.core.records.fromarrays(
                [
                    l_ipars.astype('int'), l_px, l_py, l_pz, l_vx, l_vy, l_vz,
                    l_rix, l_riy, l_riz, l_ix, l_iy, l_iz, l_ux, l_uy, l_uz
                ],
                names=
                'ipar, ipx, ipy, ipz, vx, vy, vz, rix, riy, riz, ix, iy, iz, ux, uy, uz',
                formats=
                'int, float, float, float, float, float, float, int, int, int, int, int, int, float, float, float'
            )
            gas_velo_at_particle_pos = np.sort(data_set,
                                               order=['ix', 'iy', 'iz'])

            Nix = int(gas_velo_at_particle_pos['rix'].max() + 1)
            Niy = int(gas_velo_at_particle_pos['riy'].max() + 1)
            Niz = int(gas_velo_at_particle_pos['riz'].max() + 1)

            Npar_arr = np.array([
                gas_velo_at_particle_pos['rix'],
                gas_velo_at_particle_pos['riy'],
                gas_velo_at_particle_pos['riz']
            ])
            #rgrid_edges = (grid.x[1:]-(grid.x[1:]-grid.x[:-1])/2)[2:-2]
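            # Npar_hist[i, j, k] counts the particles found in real-grid cell
            # (i, j, k); it is later sampled at each particle's cell indices to
            # fill the 'npar' entry of the output dictionary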
            Npar_hist, edges = np.histogramdd(Npar_arr.T, bins=(Nix, Niy, Niz))

            gas_velo_at_particle_pos = {
                'time':
                ff.t,
                'par_pos':
                np.array([
                    gas_velo_at_particle_pos['ipx'],
                    gas_velo_at_particle_pos['ipy'],
                    gas_velo_at_particle_pos['ipz']
                ]),
                'par_velo':
                np.array([
                    gas_velo_at_particle_pos['vx'],
                    gas_velo_at_particle_pos['vy'],
                    gas_velo_at_particle_pos['vz']
                ]),
                'par_idx':
                np.array([
                    gas_velo_at_particle_pos['rix'],
                    gas_velo_at_particle_pos['riy'],
                    gas_velo_at_particle_pos['riz']
                ]),
                'npar':
                np.array(Npar_hist[gas_velo_at_particle_pos['rix'],
                                   gas_velo_at_particle_pos['riy'],
                                   gas_velo_at_particle_pos['riz']]),
                'gas_velo':
                np.array([
                    gas_velo_at_particle_pos['ux'],
                    gas_velo_at_particle_pos['uy'],
                    gas_velo_at_particle_pos['uz']
                ])
            }

            print('## Saving dataset into ' + save_destination + '...')
            io.pkl_save(
                {
                    'gas_velo_at_particle_pos': gas_velo_at_particle_pos,
                    't': ff.t
                },
                save_filename,
                folder=save_destination)
        print('## Done!')
Exemplo n.º 11
0
def rename_in_submit_script(new_name,
                            submit_script_path=False,
                            sim=False,
                            old_name=False):
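    """Rename the job name inside a simulation's submit script.

    The submit script is located via submit_script_path or, if not given, by
    searching the simulation directory for a single submit*.sh / submit*.csh
    file. If old_name is given, that string is replaced by new_name; otherwise
    the job-name line is identified via pencil.io.get_systemid(). A backup of
    the original script is stored under pc/backups/ before the file is
    rewritten. Returns True on success, False otherwise.
    """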
    import os
    from os.path import exists, join, abspath, dirname, basename
    from pencil.io import timestamp, get_systemid, mkdir

    if submit_script_path != False:
        path = dirname(abspath(submit_script_path))
        filename = basename(abspath(submit_script_path))

    # locate submit script file if not given
    if submit_script_path == False:
        # get the path of the simulation folder where the submit script should be located
        if sim == False:
            path = "."
        else:
            path = sim.path

        list_of_possible_submit_scripts = [
            f for f in os.listdir(path)
            if f.startswith("submit") and f.split(".")[-1] in ["sh", "csh"]
        ]

        # this only works if a single submit script can be identified; otherwise print an error and let the user specify it manually
        if len(list_of_possible_submit_scripts) == 0:
            print("!! ERROR: Could not find a submit script in " + str(path))
            return False
        elif len(list_of_possible_submit_scripts) > 1:
            print(
                "!! ERROR: Could not identify submit script, please specify manually:"
                + str(list_of_possible_submit_scripts))
            return False
        else:
            filename = list_of_possible_submit_scripts[0]

    # the path to the submit script should now be clear, but better check that it's a string
    if type(path) != type("STRING"):
        print(
            "!! ERROR: Could not identify submit script path, please check manually: "
            + str(path))
        return False

    path_filename = join(path, filename)

    # open submit script as f and read content into s
    with open(path_filename) as f:
        s = f.read()

    # if the old name is known, we can simply replace that string with the new name
    if old_name != False and type(old_name) == type("STRING"):
        if old_name in s:
            s = s.replace(old_name, new_name)
        else:
            print("?? ERROR: Could not find old_name " + str(old_name) +
                  " in submit script " + str(path_filename))
            return False

    # else we need to look for specific identifiers, which differ between queue systems and clusters
    else:
        # get submit name line identifier
        identify = get_systemid()[2]
        if identify == False:
            print(
                "!! ERROR: Could not identify a submit script name identifier, please update pc.io.get_systemid.py by adding your machine."
            )
            return False
        if identify in s:
            if s.count(identify) > 1:
                print(
                    "ERROR: Job name identifier has multiple appearances in submit script!"
                )
                return False
            s = s.split("\n")
            for ii, line in enumerate(s):
                if identify in line:
                    break
            s[ii] = identify + " " + new_name

            s = "\n".join(s)

        else:
            print(
                "!! ERROR: Could not find name identifier in submit script, identifier is "
                + str(identify))
            return False

    # s should now be updated; save it back to the submit script file, but back up the old one first
    # backup
    from shutil import copyfile

    target_dir = join(path, "pc/backups/")
    mkdir(target_dir)
    copyfile(path_filename,
             join(target_dir, filename + ".BAK" + str(timestamp())))

    # save new submit script
    with open(path_filename, "w") as f:
        f.write(s)

    # done
    return True
Exemplo n.º 12
0
def clone_sims_from_obj(simset,
                        simsdir="..",
                        template_sim=None,
                        specify_nml=True):
    """
    clone_sims_from_obj(simset, simsdir="..", template_sim=None, specify_nml=True):

    Create a set of simulation directories from a dictionary object, simset,
    which contains the list of parameters to combine across set of clones.
    This differs from the clone_sims function in that it is more configurable,
    and uses the native Python way of copying the sim.

    Parameters
    ----------
    simset : dictionary
        Parameters to combine across set of clones.

    simsdir : string
        Root directory for collection of clone simulations.

    template_sim : obj
        A pencil simulation object (returned by pc.get_sim).
        If not specified, the simulation in the current directory will be used.

    specify_nml : bool
        Whether, for files like run.in, you also specify the namelist in simset.
        If so, f90nml will be directly used to write the value. If not, the
        change_value_in_file function will be used even for these files.

    Returns
    -------
    Set of unexecuted simulation run directories with
    parameters updated.

    Example
    --------
    >>> template_sim = pc.get_sim("simulation_to_clone")
    >>> template_sim.optionals.append("job.pbs") #A job submission script which
    ... #one wants to copy along with the other simulation files.
    >>> params = pc.pipelines.parameter_table("example_filename.txt")
    >>> simset = pc.pipelines.make_sims_dict(params)
    >>> pc.pipelines.clone_sims_from_obj(simset, template_sim=template_sim, simsdir=".")
    """
    if not template_sim:
        template_sim = get_sim()
    if not os.path.isdir(simsdir):
        if os.path.exists(simsdir):
            raise RuntimeError(
                "simsdir ({}) exists but is not a directory!".format(simsdir))
        else:
            mkdir(simsdir)
    # For each set of simulation parameters create new simulation subdirectory
    for sim in simset:
        newdir = join(simsdir, sim)
        out = template_sim.copy(path_root=simsdir, name=sim)
        if out is False:
            raise RuntimeError("Copying sim failed")
        for filename in simset[sim]:
            if filename == "compile":
                if simset[sim]["compile"]:
                    print(
                        "Warning: clone_sims_from_obj: compilation not implemented yet, so not compiling."
                    )
                    ## KG: I am not sure if the following works as intended; it just hangs for me.
                    # new_sim = get_sim(join(simsdir, newdir))
                    # new_sim.compile()
            elif specify_nml and ("run.in" in filename
                                  or "start.in" in filename):
                # Use f90nml for these files
                pars = f90nml.read(filename)
                newpars = pars.copy()
                for group in simset[sim][filename]:
                    for item in simset[sim][filename][group]:
                        newpars[group][item] = simset[sim][filename][group][
                            item]
                newpars.write(join(newdir, filename), force=True)
            else:
                file_path = newdir
                if "local" in filename:
                    file_path = join(file_path, "src")
                for item in simset[sim][filename]:
                    change_value_in_file(
                        filename,
                        item,
                        simset[sim][filename][item],
                        filepath=file_path,
                    )
Exemplo n.º 13
0
def clone_sims(simset, simsdir=None):
    """
    clone_sims(simset, simsdir=None)

    Create a set of simulation directories from a dictionary object, simset,
    which contains the list of parameters to combine across set of clones.

    Parameters
    ----------
    simset : dictionary
        Parameters to combine across set of clones.

    simsdir : string
        Root directory for collection of clone simulations.

    Returns
    -------
    Set of uncompiled and unexecuted simulation run directories with
    parameters updated.

    Notes
    -----
    It is assumed that the user is working in the compiled source directory.

    Examples
    --------
    >>> simsdir = '/path/to/set_of_clones'
    >>> params = pencil.pipelines.parameter_table('example_filename.txt')
    >>> params = user_own_trim_table(params)#See pc.pipelines.trim_table
    >>> simset = pencil.pipelines.make_sims_dict(params)
    >>> clone_sims(simset,simsdir=simsdir)
    """

    # If the user provides no clone path, default to the parent of the current directory
    if not simsdir:
        simsdir = os.path.dirname(os.getcwd())
    mkdir(simsdir)
    # For each set of simulation parameters create new simulation subdirectory
    sourced = False
    for sim in simset:
        newdir = join(simsdir, sim)
        cmd = ["pc_newrun", "-s", newdir]
        # Only compile if makefile.local or cparam.local change
        if "compile" in simset[sim].keys():
            if not sourced:
                moduleinfo = "src/.moduleinfo"
                cmd = ["source " + moduleinfo]
                process = subprocess.Popen(cmd,
                                           shell=True,
                                           stdout=subprocess.PIPE)
                try:
                    outs, errs = process.communicate()
                except subprocess.TimeoutExpired:
                    process.kill()
                    outs, errs = process.communicate()
            if simset[sim]["compile"]:
                cmd = ["pc_newrun", newdir]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        process.communicate()
        for filename in simset[sim]:
            #'compile' flag only used above
            if not filename == "compile":
                # Files which are f90nml-compatible
                if "run.in" in filename or "start.in" in filename:
                    pars = f90nml.read(filename)
                    newpars = pars.copy()
                    for group in simset[sim][filename]:
                        for item in simset[sim][filename][group]:
                            newpars[group][item] = simset[sim][filename][
                                group][item]
                    newpars.write(join(newdir, filename), force=True)
                else:
                    file_path = newdir
                    if "local" in filename:
                        file_path = join(file_path, "src")
                    for item in simset[sim][filename]:
                        change_value_in_file(
                            filename,
                            item,
                            simset[sim][filename][item],
                            filepath=file_path,
                        )
Exemplo n.º 14
0
    def resume_from_var(self, sim_source, varno, DEBUG=False):
        """
        Copies everything to resume a run from an older state.

        It uses VAR-file number >varno< as new VAR0 and var.dat.
        Does copy PVAR as well if available.

        Parameters
        ----------
        sim_source : string
            Simulation from where to copy all the files.

        varno : int
            var-file number # from which to copy (VAR#).
        """

        from os import listdir
        from os.path import exists, join, isdir
        from pencil.math import is_int
        from pencil.io import mkdir

        def copyfile(src, dst, DEBUG=False):
            from shutil import copy2
            from os.path import exists

            if not exists(src):
                return False
            if DEBUG:
                print("< " + src)
            if DEBUG:
                print("> " + dst)
            copy2(src, dst)

        src = sim_source.datadir
        dst = self.datadir
        if is_int(varno):
            varno = "VAR" + str(int(varno))

        if not exists(src):
            print("! ERROR: Source data directory does not exist: " + str(src))
            return False
        if not exists(dst):
            print("! ERROR: Destination data directory does not exist: " +
                  str(dst))
            return False
        if not varno in sim_source.get_varlist():
            print("! ERROR: Could not find " + varno +
                  " in procX folder of sim_source: " + sim_source.name)
            return False

        data_folders = [p for p in listdir(src) if isdir(join(src, p))]
        procX_folder = [p for p in data_folders if p.startswith("proc")]
        for p in data_folders:
            mkdir(join(dst, p))

        # data/
        files = [
            "def_var.pro",
            "dim.dat",
            "index.pro",
            "move-me.list",
            "particles_stalker_header.dat",
            "params.log",
            "pc_constants.pro",
            "pdim.dat",
            "pencils.list",
            "pvarname.dat",
            "svnid.dat",
            "var.general",
            "variables.pro",
            "varname.dat",
        ]
        for f in files:
            copyfile(join(src, f), dst, DEBUG=DEBUG)

        # data/allprocs/
        files = ["grid.dat"]
        for f in files:
            copyfile(join(src, "allprocs", f),
                     join(dst, "allprocs/"),
                     DEBUG=DEBUG)

        # data/procX
        files = ["dim.dat", "grid.dat", "proc_bounds.dat"]
        for X in procX_folder:
            for f in files:
                copyfile(join(src, X, f), join(dst, X), DEBUG=DEBUG)
            copyfile(join(src, X, varno), join(dst, X, "VAR0"), DEBUG=DEBUG)
            copyfile(join(src, X, "P" + varno),
                     join(dst, X, "PVAR0"),
                     DEBUG=DEBUG)
            copyfile(join(src, X, varno), join(dst, X, "var.dat"), DEBUG=DEBUG)
            copyfile(join(src, X, "P" + varno),
                     join(dst, X, "pvar.dat"),
                     DEBUG=DEBUG)

        print("? WARNING: KNOWN ERRORS:")
        print(
            "? RUN MIGHT NOT START BECAUSE data/param.nml can get damaged in" +
            " a run that crashes. This is not fixed by this routine.")
        print("? TRY AND START A SINGLE CORE RUN WITH THIS SETUP AND USE THE" +
              " CREATED param.nml FOR YOUR PURPOSE INSTEAD.")
        print("? SAME FOR: - tstalk.dat")

        return True