예제 #1
0
    def __call__(self, binary_command, *args, may_wrap=True, **kwargs):
        """Run the wrapped binary under likwid-perfctr and persist results.

        Bug fix: the persist loop previously iterated the whole
        accumulated ``res`` list on every pass of the group loop, so with
        more than one measurement group earlier runs would be persisted
        again with the wrong group's measurements. Only the runs produced
        in the current iteration are persisted now.

        Args:
            binary_command: The plumbum command to wrap with likwid.
            *args: Positional arguments forwarded down the chain.
            may_wrap: Accepted for interface compatibility; unused here.
            **kwargs: Supports ``project_name`` to override the project
                name; everything else is forwarded down the chain.

        Returns:
            List of run infos collected over all measurement groups.
        """
        self.project.name = kwargs.get("project_name", self.project.name)

        likwid_f = self.project.name + ".txt"
        jobs = self.config['jobs']
        res = []
        for group in ["CLOCK"]:
            run_cmd = \
                likwid_perfctr["-O", "-o", likwid_f, "-m",
                               "-C", "0-{0:d}".format(jobs),
                               "-g", group, binary_command]

            with pb.local.env(POLLI_ENABLE_LIKWID=1):
                group_res = self.call_next(run_cmd, *args, **kwargs)
            res.extend(group_res)

            likwid_measurement = likwid.perfcounters(likwid_f)
            # Persist only this group's runs, not everything seen so far.
            for run_info in group_res:
                persist_likwid(run_info.db_run, run_info.session,
                               likwid_measurement)
                db.persist_config(run_info.db_run, run_info.session, {
                    "cores": str(jobs),
                    "likwid.group": group
                })
            rm("-f", likwid_f)
        return res
예제 #2
0
    def __call__(self,
                 command,
                 *args,
                 project=None,
                 rerun_on_error=True,
                 **kwargs):
        """Execute an adjusted compiler command and record the run.

        Extends ``command`` with ``-Qunused-arguments`` plus the
        project's CFLAGS/LDFLAGS, tracks its execution, and retries the
        unmodified command if the tracked run failed.
        """
        if project:
            self.project = project

        fallback_cmd = command[args]
        tracked_cmd = command["-Qunused-arguments"][args]
        tracked_cmd = tracked_cmd[self.project.cflags]
        tracked_cmd = tracked_cmd[self.project.ldflags]

        with track_execution(tracked_cmd, self.project, self.experiment,
                             **kwargs) as run:
            run_info = run()
            if self.config:
                # Log the experiment configuration and store it with
                # this run.
                LOG.info(
                    yaml.dump(self.config,
                              width=40,
                              indent=4,
                              default_flow_style=False))
                persist_config(run_info.db_run, run_info.session, self.config)

            if run_info.has_failed:
                # The adjusted command failed; retry untouched.
                with track_execution(fallback_cmd, self.project,
                                     self.experiment, **kwargs) as run:
                    LOG.warning("Fallback to: %s", str(fallback_cmd))
                    run_info = run()

        res = self.call_next(tracked_cmd, *args, **kwargs)
        res.append(run_info)
        return res
예제 #3
0
    def __call__(self, cc, *args, project=None, **kwargs):
        """Run the compiler, collect compile statistics and persist them.

        Bug fix: CFLAGS/LDFLAGS were read from the ``project`` keyword
        argument directly, which raises AttributeError whenever the
        caller does not pass ``project`` (it defaults to None). Read them
        from ``self.project``, matching the guard above and the sibling
        call handlers.

        Args:
            cc: The compiler command to run.
            *args: Positional arguments forwarded to the compiler.
            project: Optional project override; replaces ``self.project``.
            **kwargs: Forwarded to the fallback execution and call_next.
        """
        if project:
            self.project = project

        original_command = cc[args]
        clang = cc["-Qunused-arguments"]
        clang = clang[args]
        clang = clang[self.project.cflags]
        clang = clang[self.project.ldflags]
        clang = clang["-mllvm", "-stats"]

        run_config = self.config
        session = schema.Session()
        with u_run.track_execution(clang, self.project, self.experiment) as _run:
            run_info = _run()
            if run_config is not None:
                db.persist_config(run_info.db_run, session, run_config)

            if not run_info.has_failed:
                stats = []
                cls = ExtractCompileStats
                for stat in cls.get_compilestats(run_info.stderr):
                    compile_s = CompileStat()
                    compile_s.name = stat["desc"].rstrip()
                    compile_s.component = stat["component"].rstrip()
                    compile_s.value = stat["value"]
                    stats.append(compile_s)

                components = settings.CFG["cs"]["components"].value
                names = settings.CFG["cs"]["names"].value

                # Optional whitelists filter the collected stats.
                stats = [s for s in stats if str(s.component) in components] \
                    if components is not None else stats
                stats = [s for s in stats if str(s.name) in names] \
                    if names is not None else stats

                if stats:
                    for stat in stats:
                        LOG.info(" [%s] %s = %s", stat.component, stat.name,
                                 stat.value)
                    # NOTE(review): config is persisted with the local
                    # `session` but stats with `run_info.session` — verify
                    # both actually get committed.
                    db.persist_compilestats(run_info.db_run, run_info.session,
                                            stats)
                else:
                    LOG.info("No compilestats left, after filtering.")
                    LOG.warning("  Components: %s", components)
                    LOG.warning("  Names:      %s", names)
            else:
                with u_run.track_execution(original_command, self.project,
                                         self.experiment, **kwargs) as _run:
                    LOG.warning("Fallback to: %s", str(original_command))
                    run_info = _run()

        ret = self.call_next(cc, *args, **kwargs)
        ret.append(run_info)
        session.commit()
        return ret
예제 #4
0
def run_without_recompile(project, experiment, config, jobs, run_f, args,
                          **kwargs):
    """
    Run the given binary wrapped with time, with recompilation disabled.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
            may_wrap: Signals whether wrapping with time(1) is allowed.
    """
    from benchbuild.utils.run import track_execution, fetch_time_output
    from benchbuild.settings import CFG
    from benchbuild.utils.db import persist_time, persist_config

    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    timing_tag = "BB-JIT: "
    may_wrap = kwargs.get("may_wrap", True)

    cmd = local[run_f][args]
    if may_wrap:
        cmd = time["-f", timing_tag + "%U-%S-%e", cmd]

    with local.env(OMP_NUM_THREADS=str(jobs),
                   POLLI_LOG_FILE=CFG["slurm"]["extra_log"].value()):
        with track_execution(cmd, project, experiment) as run:
            ri = run()

        if may_wrap:
            # Recover user/system/elapsed timings from time(1)'s stderr.
            timings = fetch_time_output(timing_tag,
                                        timing_tag + "{:g}-{:g}-{:g}",
                                        ri.stderr.split("\n"))
            if timings:
                persist_time(ri.db_run, ri.session, timings)
    persist_config(
        ri.db_run, ri.session, {
            "cores": str(jobs - 1),
            "cores-config": str(jobs),
            "recompilation": "disabled"
        })
    return ri
예제 #5
0
def run_with_time(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given binary wrapped with time.

    Fix: use ``logging.warning`` — ``logging.warn`` is a deprecated alias.

    Args:
        project: The benchbuild project that has called us.
        experiment: The benchbuild experiment which we operate under.
        config: The benchbuild configuration we are running with.
        jobs: The number of cores we are allowed to use. This may differ
            from the actual amount of available cores, obey it.
            We should enforce this from the outside. However, at the moment we
            do not do this.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
            may_wrap:
                Project may signal that it they are not suitable for
                wrapping. Usually because they scan/parse the output, which
                may interfere with the output of the wrapper binary.
    """
    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    timing_tag = "BB-TIME: "

    may_wrap = kwargs.get("may_wrap", True)

    run_cmd = local[run_f]
    run_cmd = run_cmd[args]
    if may_wrap:
        run_cmd = time["-f", timing_tag + "%U-%S-%e", run_cmd]

    def handle_timing_info(ri):
        # Extract user/system/elapsed timings from the wrapper's stderr
        # and persist them; only possible when we were allowed to wrap.
        if may_wrap:
            timings = fetch_time_output(timing_tag,
                                        timing_tag + "{:g}-{:g}-{:g}",
                                        ri.stderr.split("\n"))
            if timings:
                persist_time(ri.db_run, ri.session, timings)
            else:
                logging.warning("No timing information found.")
        return ri

    with track_execution(run_cmd, project, experiment, **kwargs) as run:
        ri = handle_timing_info(run())
    persist_config(ri.db_run, ri.session, {"cores": str(jobs)})
    return ri
예제 #6
0
def run_with_likwid(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given file wrapped by likwid.

    Fixes: ``project_name`` was referenced as a bare name (NameError);
    the configured name lives on ``project.name``. Also, ``persist_likwid``
    and ``persist_config`` were handed the ``run`` callable instead of the
    database run object; pass ``ri.db_run`` like the other runners do.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this exection.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG as c
    from benchbuild.utils.run import guarded_exec, handle_stdin
    from benchbuild.utils.db import persist_likwid, persist_config
    from benchbuild.likwid import get_likwid_perfctr

    c.update(config)
    project.name = kwargs.get("project_name", project.name)
    likwid_f = project.name + ".txt"

    for group in ["CLOCK"]:
        likwid_path = path.join(c["likwiddir"], "bin")
        likwid_perfctr = local[path.join(likwid_path, "likwid-perfctr")]
        run_cmd = \
            likwid_perfctr["-O", "-o", likwid_f, "-m",
                           "-C", "0-{0:d}".format(jobs),
                           "-g", group, run_f]
        run_cmd = handle_stdin(run_cmd[args], kwargs)

        with local.env(POLLI_ENABLE_LIKWID=1):
            with guarded_exec(run_cmd, project, experiment) as run:
                ri = run()

        likwid_measurement = get_likwid_perfctr(likwid_f)
        persist_likwid(ri.db_run, ri.session, likwid_measurement)
        persist_config(ri.db_run, ri.session, {
            "cores": str(jobs),
            "likwid.group": group
        })
        rm("-f", likwid_f)
예제 #7
0
def run_with_likwid(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given file wrapped by likwid.

    Fix: ``persist_likwid`` and ``persist_config`` were handed the ``run``
    callable yielded by ``track_execution`` instead of the database run
    object; pass ``ri.db_run`` as the other runners do.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this exection.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG
    from benchbuild.utils.run import track_execution, handle_stdin
    from benchbuild.utils.db import persist_likwid, persist_config
    from benchbuild.likwid import get_likwid_perfctr

    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    likwid_f = project.name + ".txt"

    for group in ["CLOCK"]:
        likwid_path = path.join(CFG["likwiddir"], "bin")
        likwid_perfctr = local[path.join(likwid_path, "likwid-perfctr")]
        run_cmd = \
            likwid_perfctr["-O", "-o", likwid_f, "-m",
                           "-C", "0-{0:d}".format(jobs),
                           "-g", group, run_f]
        run_cmd = handle_stdin(run_cmd[args], kwargs)

        with local.env(POLLI_ENABLE_LIKWID=1):
            with track_execution(run_cmd, project, experiment) as run:
                ri = run()

        likwid_measurement = get_likwid_perfctr(likwid_f)
        persist_likwid(ri.db_run, ri.session, likwid_measurement)
        persist_config(ri.db_run, ri.session, {
            "cores": str(jobs),
            "likwid.group": group
        })
        rm("-f", likwid_f)
예제 #8
0
def run_with_perf(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given binary under ``perf record`` and render a flame graph.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG as c
    from benchbuild.utils.run import guarded_exec, handle_stdin
    from benchbuild.utils.db import persist_perf, persist_config
    from plumbum.cmd import perf

    c.update(config)
    project.name = kwargs.get("project_name", project.name)
    profiled_cmd = handle_stdin(local[run_f][args], kwargs)
    profiled_cmd = perf["record", "-q", "-F", 6249, "-g", profiled_cmd]

    with local.env(OMP_NUM_THREADS=str(jobs)):
        with guarded_exec(profiled_cmd, project, experiment) as run:
            ri = run(retcode=None)

        fg_path = path.join(c["src_dir"], "extern/FlameGraph")
        if path.exists(fg_path):
            # Post-process the perf data into an SVG flame graph.
            collapse = local[path.join(fg_path, "stackcollapse-perf.pl")]
            render = local[path.join(fg_path, "flamegraph.pl")]

            folded_f = run_f + ".folded"
            svg_f = run_f + ".svg"
            ((perf["script"] | collapse) > folded_f)()
            (render[folded_f] > svg_f)()
            persist_perf(ri.db_run, ri.session, svg_f)
            persist_config(ri.db_run, ri.session, {"cores": str(jobs)})
예제 #9
0
def run_with_perf(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Record the binary with perf and turn the profile into a flame graph.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG
    from benchbuild.utils.run import track_execution, handle_stdin
    from benchbuild.utils.db import persist_perf, persist_config
    from benchbuild.utils.cmd import perf

    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    wrapped_cmd = handle_stdin(local[run_f][args], kwargs)
    wrapped_cmd = perf["record", "-q", "-F", 6249, "-g", wrapped_cmd]

    with local.env(OMP_NUM_THREADS=str(jobs)):
        with track_execution(wrapped_cmd, project, experiment) as run:
            ri = run(retcode=None)

        flamegraph_dir = path.join(CFG["src_dir"], "extern/FlameGraph")
        if path.exists(flamegraph_dir):
            # FlameGraph checkout found: collapse stacks and render SVG.
            stack_collapse = local[path.join(flamegraph_dir,
                                             "stackcollapse-perf.pl")]
            flamegraph = local[path.join(flamegraph_dir, "flamegraph.pl")]

            folded = run_f + ".folded"
            svg = run_f + ".svg"
            ((perf["script"] | stack_collapse) > folded)()
            (flamegraph[folded] > svg)()
            persist_perf(ri.db_run, ri.session, svg)
            persist_config(ri.db_run, ri.session, {"cores": str(jobs)})
예제 #10
0
    def __call__(self, binary_command, *args, **kwargs):
        """Track a single run of `binary_command` and persist its config."""
        self.project.name = kwargs.get("project_name", self.project.name)

        bound_cmd = binary_command[args]
        with run.track_execution(bound_cmd, self.project, self.experiment,
                                 **kwargs) as _run:
            run_info = _run()
            if self.config:
                run_info.add_payload("config", self.config)
                LOG.info(
                    yaml.dump(self.config,
                              width=40,
                              indent=4,
                              default_flow_style=False))
                # Record whether this run is a baseline measurement.
                self.config['baseline'] = \
                    os.getenv("BB_IS_BASELINE", "False")
                db.persist_config(run_info.db_run, run_info.session,
                                  self.config)
        results = self.call_next(binary_command, *args, **kwargs)
        results.append(run_info)
        return results
예제 #11
0
def run_with_time(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Execute the given binary under time(1) and persist the measurements.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.utils.run import guarded_exec, handle_stdin, fetch_time_output
    from benchbuild.settings import CFG as c
    from benchbuild.utils.db import persist_time, persist_config

    c.update(config)
    project.name = kwargs.get("project_name", project.name)
    timing_tag = "BB-JIT: "

    timed_cmd = time["-f", timing_tag + "%U-%S-%e", run_f]
    timed_cmd = handle_stdin(timed_cmd[args], kwargs)

    with local.env(OMP_NUM_THREADS=str(jobs)):
        with guarded_exec(timed_cmd, project, experiment) as run:
            ri = run()
        timings = fetch_time_output(
            timing_tag, timing_tag + "{:g}-{:g}-{:g}", ri.stderr.split("\n"))
        if not timings:
            # No time(1) output found: nothing worth persisting.
            return

    persist_time(ri.db_run, ri.session, timings)
    persist_config(ri.db_run, ri.session, {"cores": str(jobs)})
예제 #12
0
파일: run.py 프로젝트: PolyJIT/benchbuild
    def __call__(self, binary_command, *args, **kwargs):
        """Run the wrapped binary once, tracked, and store its config."""
        self.project.name = kwargs.get("project_name", self.project.name)

        full_cmd = binary_command[args]
        with run.track_execution(full_cmd, self.project, self.experiment,
                                 **kwargs) as _run:
            run_info = _run()
            if self.config:
                run_info.add_payload("config", self.config)
                LOG.info(
                    yaml.dump(
                        self.config,
                        width=40,
                        indent=4,
                        default_flow_style=False))
                # Tag the stored configuration with the baseline flag
                # from the environment.
                self.config['baseline'] = \
                    os.getenv("BB_IS_BASELINE", "False")
                db.persist_config(run_info.db_run, run_info.session,
                                  self.config)
        chained = self.call_next(binary_command, *args, **kwargs)
        chained.append(run_info)
        return chained
예제 #13
0
파일: raw.py 프로젝트: clhunsen/benchbuild
def run_with_time(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Wrap the given binary with time(1), run it and store the timings.

    Args:
        project: The benchbuild project that has called us.
        experiment: The benchbuild experiment which we operate under.
        config: The benchbuild configuration we are running with.
        jobs: The number of cores we are allowed to use. This may differ
            from the actual amount of available cores, obey it.
            We should enforce this from the outside. However, at the moment we
            do not do this.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    timing_tag = "BB-TIME: "

    wrapped = time["-f", timing_tag + "%U-%S-%e", run_f]
    wrapped = handle_stdin(wrapped[args], kwargs)

    with local.env(OMP_NUM_THREADS=str(jobs)):
        with guarded_exec(wrapped, project, experiment) as run:
            ri = run()
        timings = fetch_time_output(
            timing_tag, timing_tag + "{:g}-{:g}-{:g}", ri.stderr.split("\n"))
        if not timings:
            # time(1) produced no parsable output; skip persistence.
            return

    persist_time(ri.db_run, ri.session, timings)
    persist_config(ri.db_run, ri.session, {"cores": str(jobs)})
예제 #14
0
    def __call__(self,
                 command,
                 *args,
                 project=None,
                 rerun_on_error=True,
                 **kwargs):
        """Track a flag-adjusted compile, falling back to the plain command.

        The command is extended with ``-Qunused-arguments`` and the
        project's CFLAGS/LDFLAGS; if that tracked run fails, the original
        command is executed instead.
        """
        if project:
            self.project = project

        plain_cmd = command[args]
        flagged_cmd = command["-Qunused-arguments"][args]
        flagged_cmd = flagged_cmd[self.project.cflags]
        flagged_cmd = flagged_cmd[self.project.ldflags]

        with run.track_execution(flagged_cmd, self.project, self.experiment,
                                 **kwargs) as _run:
            run_info = _run()
            if self.config:
                LOG.info(
                    yaml.dump(
                        self.config,
                        width=40,
                        indent=4,
                        default_flow_style=False))
                db.persist_config(run_info.db_run, run_info.session,
                                  self.config)

            if run_info.has_failed:
                # Adjusted flags broke the build; rerun untouched.
                with run.track_execution(plain_cmd, self.project,
                                         self.experiment, **kwargs) as _run:
                    LOG.warning("Fallback to: %s", str(plain_cmd))
                    run_info = _run()

        res = self.call_next(flagged_cmd, *args, **kwargs)
        res.append(run_info)
        return res
예제 #15
0
def run_with_papi(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given file with PAPI support.

    This just runs the project as PAPI support should be compiled in
    already. If not, this won't do a lot.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG as c
    from benchbuild.utils.run import guarded_exec, handle_stdin
    from benchbuild.utils.db import persist_config

    c.update(config)
    project.name = kwargs.get("project_name", project.name)
    papi_cmd = handle_stdin(local[run_f][args], kwargs)

    # PAPI instrumentation is toggled purely via the environment.
    with local.env(POLLI_ENABLE_PAPI=1, OMP_NUM_THREADS=jobs):
        with guarded_exec(papi_cmd, project, experiment) as run:
            ri = run()

    persist_config(ri.db_run, ri.session,
                   {"cores": str(jobs)})
예제 #16
0
def run_with_papi(project, experiment, config, jobs, run_f, args, **kwargs):
    """
    Run the given file with PAPI support.

    This just runs the project as PAPI support should be compiled in
    already. If not, this won't do a lot.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
    """
    from benchbuild.settings import CFG
    from benchbuild.utils.run import track_execution, handle_stdin
    from benchbuild.utils.db import persist_config

    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    cmd = handle_stdin(local[run_f][args], kwargs)

    # The environment flags enable in-binary PAPI instrumentation.
    with local.env(POLLI_ENABLE_PAPI=1, OMP_NUM_THREADS=jobs):
        with track_execution(cmd, project, experiment) as run:
            run_info = run()

    persist_config(run_info.db_run, run_info.session, {"cores": str(jobs)})
예제 #17
0
def time_polyjit_and_polly(project: Project, experiment: Experiment,
                           config: Configuration, jobs: int, run_f: str,
                           args: Iterable[str], **kwargs):
    """
    Run the given binary wrapped with time, once with and once without
    PolyJIT specialization.

    Args:
        project: The benchbuild.project.
        experiment: The benchbuild.experiment.
        config: The benchbuild.settings.config.
        jobs: Number of cores we should use for this execution.
        run_f: The file we want to execute.
        args: List of arguments that should be passed to the wrapped binary.
        **kwargs: Dictionary with our keyword args. We support the following
            entries:

            project_name: The real name of our project. This might not
                be the same as the configured project name, if we got wrapped
                with ::benchbuild.project.wrap_dynamic
            has_stdin: Signals whether we should take care of stdin.
            may_wrap: Signals whether wrapping with time(1) is allowed.
    """
    from benchbuild.utils.run import track_execution, fetch_time_output
    from benchbuild.settings import CFG
    from benchbuild.utils.db import persist_time, persist_config

    CFG.update(config)
    project.name = kwargs.get("project_name", project.name)
    timing_tag = "BB-JIT: "
    may_wrap = kwargs.get("may_wrap", True)

    run_cmd = local[run_f][args]
    if may_wrap:
        run_cmd = time["-f", timing_tag + "%U-%S-%e", run_cmd]

    def record_timings(run_info):
        # Parse time(1) output from stderr and persist it, when wrapped.
        if may_wrap:
            timings = fetch_time_output(timing_tag,
                                        timing_tag + "{:g}-{:g}-{:g}",
                                        run_info.stderr.split("\n"))
            if timings:
                persist_time(run_info.db_run, run_info.session, timings)
            else:
                logging.warning("No timing information found.")
        return run_info

    def one_measurement(specialization):
        # Run once; `specialization` ("enabled"/"disabled") toggles the
        # POLLI_DISABLE_SPECIALIZATION environment switch.
        env_vars = {
            "OMP_NUM_THREADS": str(jobs),
            "POLLI_LOG_FILE": CFG["slurm"]["extra_log"].value()
        }
        if specialization == "disabled":
            env_vars["POLLI_DISABLE_SPECIALIZATION"] = 1
        with track_execution(run_cmd, project, experiment) as run:
            with local.env(**env_vars):
                run_info = record_timings(run())
                persist_config(
                    run_info.db_run, run_info.session, {
                        "cores": str(jobs - 1),
                        "cores-config": str(jobs),
                        "recompilation": "enabled",
                        "specialization": specialization
                    })
        return run_info

    return one_measurement("enabled") + one_measurement("disabled")