Example #1
def DataFrameFromBundle(bundle):
    """Generates a DataFrame from a datreant bundle."""
    df = pd.DataFrame(
        columns=["module", "nodes", "ns/day", "run time [min]", "gpu", "host", "ncores"]
    )

    for i, sim in enumerate(bundle):
        # Older versions wrote a `version` category instead of `module`.
        # This ensures backwards compatibility.
        if "module" in sim.categories:
            module = sim.categories["module"]
        else:
            module = sim.categories["version"]
        # call the engine specific analysis functions
        engine = detect_md_engine(module)
        df.loc[i] = utils.analyze_run(engine=engine, sim=sim)

    if df.empty:
        console.error("There is no data for the given path.")

    # Sort values by host, module, run time, GPU usage and number of nodes
    df = df.sort_values(
        ["host", "module", "run time [min]", "gpu", "nodes"]
    ).reset_index(drop=True)

    return df
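As a usage sketch (not from the source): the bundle would typically come from `dtr.discover`, as in the later examples. The directory name here is an assumption.

import datreant as dtr

# Hypothetical directory containing previously generated benchmarks.
bundle = dtr.discover("benchmarks/")
df = DataFrameFromBundle(bundle)
print(df)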
Example #2
def ensure_correct_environment():
    old_packages = []
    import datreant as dtr

    try:
        version = dtr.__version__  # noqa: F401,F841
    except AttributeError:
        old_packages.append("datreant.core")

    try:
        import datreant.data  # noqa: F401

        old_packages.append("datreant.data")
    except ImportError:
        pass

    try:
        import mdsynthesis  # noqa: F401

        old_packages.append("mdsynthesis")
    except ImportError:
        pass

    if old_packages:
        console.error(MIGRATION_WARNING, ", ".join(old_packages),
                      DOCUMENTATION_URL)
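The `console.error` call above implies that `MIGRATION_WARNING` carries two `{}` placeholders, one for the package list and one for the documentation URL. A minimal sketch of such constants follows; the wording and URL are assumptions, not the package's actual text.

# Assumed constants, for illustration only.
MIGRATION_WARNING = (
    "Outdated packages found: {}. "
    "Please consult the migration guide at {}."
)
DOCUMENTATION_URL = "https://example.org/mdbenchmark-migration"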
Example #3
def prepare_benchmark(name, relative_path, *args, **kwargs):
    benchmark = kwargs["benchmark"]

    if kwargs["multidir"] != 1:
        console.error("The NAMD engine currently only supports '--multidir 1'.")

    if name.endswith(".namd"):
        name = name[:-5]

    namd = "{}.namd".format(name)
    psf = "{}.psf".format(name)
    pdb = "{}.pdb".format(name)

    namd_relpath = os.path.join(relative_path, namd)
    psf_relpath = os.path.join(relative_path, psf)
    pdb_relpath = os.path.join(relative_path, pdb)

    with open(namd_relpath) as fh:
        analyze_namd_file(fh)

    copyfile(namd_relpath, benchmark[namd].relpath)
    copyfile(psf_relpath, benchmark[psf].relpath)
    copyfile(pdb_relpath, benchmark[pdb].relpath)

    return name
Example #4
def parse_bundle(bundle, columns, sort_values_by, discard_performance=False):
    """Generates a DataFrame from a datreant.Bundle."""
    data = []

    with click.progressbar(
        bundle, length=len(bundle), label="Analyzing benchmarks", show_pos=True
    ) as bar:
        for treant in bar:
            module = treant.categories["module"]
            engine = detect_md_engine(module)
            row = utils.analyze_benchmark(engine=engine, benchmark=treant)

            version = 2
            if "version" in treant.categories:
                version = 3
            if version == 2:
                row.pop()  # multidir is not a category for version 2 data
            row += [version]

            if discard_performance:
                row = row[:2] + row[3:]

            data.append(row)

    df = pd.DataFrame(data, columns=columns)

    # Exit if no data is available
    if df.empty:
        console.error("There is no data for the given path.")

    # Sort values by the caller-provided sort order
    df = df.sort_values(sort_values_by).reset_index(drop=True)

    return df
Example #5
def get_batch_command():
    for p in PATHS:
        for b in BATCH_SYSTEMS.values():
            if glob(os.path.join(p, b)):
                return b
    console.error(
        "Was not able to find a batch system. Are you trying to use this "
        "package on a host with a queuing system?")
Example #6
def filter_dataframe_for_plotting(df, host_name, module_name, gpu, cpu):
    if gpu and cpu:
        console.info("Plotting GPU and CPU data.")
    elif gpu and not cpu:
        df = df[df.gpu]
        console.info("Plotting GPU data only.")
    elif cpu and not gpu:
        df = df[~df.gpu]
        console.info("Plotting CPU data only.")
    elif not cpu and not gpu:
        console.error("CPU and GPU not set. Nothing to plot. Exiting.")

    if df.empty:
        console.error("Your filtering led to an empty dataset. Exiting.")

    df_filtered_hosts = df[df["host"].isin(host_name)]
    df_unique_hosts = np.unique(df_filtered_hosts["host"])

    if df_unique_hosts.size != len(host_name):
        console.error(
            "Could not find all provided hosts. Available hosts are: {}".format(
                ", ".join(np.unique(df["host"]))
            )
        )

    if not host_name:
        console.info("Plotting all hosts in input file.")
    else:
        df = df_filtered_hosts
        console.info(
            "Data for the following hosts will be plotted: {}".format(
                ", ".join(df_unique_hosts)
            )
        )

    for module in module_name:
        if module in SUPPORTED_ENGINES:
            console.info("Plotting all modules for engine '{}'.", module)
        elif module in df["module"].tolist():
            console.info("Plotting module '{}'.", module)
        else:
            console.error(
                "The module '{}' does not exist in your data. Exiting.", module
            )

    if not module_name:
        console.info("Plotting all modules in your input data.")

    if module_name:
        df = df[df["module"].str.contains("|".join(module_name))]

    if df.empty:
        console.error(
            "Your selections contained no benchmarking information. "
            "Are you sure all your selections are correct?"
        )

    return df
Example #7
def test_console_error():
    """Test the output of console.error()."""
    fh = StringIO()

    with pytest.raises(SystemExit) as error:
        console.error("Does not compute.", filehandler=fh)
    assert fh.getvalue() == "ERROR Does not compute.\n"
    assert error.type == SystemExit
    assert error.value.code == 1
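This test pins down the observable contract of `console.error`: prefix the message with `ERROR `, write it to the given file handle, and exit with status 1. A minimal sketch that satisfies the test is shown below; the real helper also styles `{}` placeholder arguments, which is glossed over here.

import sys

def error(message, *args, filehandler=None):
    """Write an ERROR-prefixed message and exit with status 1 (sketch)."""
    if filehandler is None:
        filehandler = sys.stdout
    if args:
        message = message.format(*args)
    filehandler.write("ERROR {}\n".format(message))
    raise SystemExit(1)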
Example #8
def filter_dataframe_for_plotting(df, host_name, module_name, gpu, cpu):
    # gpu/cpu can be plotted together or separately
    if gpu and cpu:
        # If the user gives no flags, or sets both, everything is plotted.
        console.info("Plotting GPU and CPU data.")
    elif gpu and not cpu:
        df = df[df.gpu]
        console.info("Plotting GPU data only.")
    elif cpu and not gpu:
        df = df[~df.gpu]
        console.info("Plotting CPU data only.")
    elif not cpu and not gpu:
        console.error("CPU and GPU not set. Nothing to plot. Exiting.")

    if df.empty:
        console.error("Your filtering led to an empty dataset. Exiting.")

    df_filtered_hosts = df[df["host"].isin(host_name)]
    df_unique_hosts = np.unique(df_filtered_hosts["host"])

    if df_unique_hosts.size != len(host_name):
        console.error(
            "Could not find all provided hosts. Available hosts are: {}".
            format(", ".join(np.unique(df["host"]))))

    if not host_name:
        console.info("Plotting all hosts in input file.")
    else:
        df = df_filtered_hosts
        console.info("Data for the following hosts will be plotted: {}".format(
            ", ".join(df_unique_hosts)))

    for module in module_name:
        if module in ["gromacs", "namd"]:
            console.info("Plotting all modules for engine '{}'.", module)
        elif module in df["module"].tolist():
            console.info("Plotting module '{}'.", module)
        else:
            console.error(
                "The module '{}' does not exist in your data. Exiting.",
                module)

    if not module_name:
        console.info("Plotting all modules in your input data.")
    # This filter is safe because we verified above that all entered module
    # names exist.
    if module_name:
        df = df[df["module"].str.contains("|".join(module_name))]

    if df.empty:
        console.error("Your selections contained no benchmarking information. "
                      "Are you sure all your selections are correct?")

    return df
Example #9
def check_input_file_exists(name):
    """Check if the TPR file exists."""
    fn = name
    if fn.endswith(".tpr"):
        fn = name[:-4]

    tpr = fn + ".tpr"
    if not os.path.exists(tpr):
        console.error(
            "File {} does not exist, but is needed for GROMACS benchmarks.",
            tpr)

    return True
Example #10
def check_input_file_exists(name):
    """Check and append the correct file extensions for the NAMD module."""
    # Check whether the needed files are there.
    for extension in ["namd", "psf", "pdb"]:
        if name.endswith(".{}".format(extension)):
            name = name[:-1 - len(extension)]

        fn = "{}.{}".format(name, extension)
        if not os.path.exists(fn):
            console.error(
                "File {} does not exist, but is needed for NAMD benchmarks.",
                fn)

    return True
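Both `check_input_file_exists` variants accept the benchmark name with or without its extension. A brief illustration, assuming the input files exist:

# Hypothetical layout: protein.namd, protein.psf and protein.pdb are present.
check_input_file_exists("protein")       # checks protein.namd/.psf/.pdb
check_input_file_exists("protein.namd")  # the extension is stripped first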
Example #11
def do_analyze(directory, save_csv):
    """Analyze benchmarks."""
    bundle = dtr.discover(directory)
    version = VersionFactory(categories=bundle.categories).version_class

    df = parse_bundle(
        bundle,
        columns=version.analyze_categories,
        sort_values_by=version.analyze_sort,
    )

    # Remove the versions column from the DataFrame
    columns_to_drop = ["version"]
    df = df.drop(columns=columns_to_drop)

    if save_csv is not None:
        if not save_csv.endswith(".csv"):
            save_csv = "{}.csv".format(save_csv)
        df.to_csv(save_csv, index=False)

        console.success("Successfully wrote benchmark data to {}.", save_csv)

    # Warn about missing values before replacing them; after the replacement
    # below, `isnull()` could never find anything.
    # TODO: move this to the bundle function!
    if df.isnull().values.any():
        console.warn(
            "We were not able to gather information for all systems. "
            "Systems marked with question marks have either crashed or "
            "were not started yet.")

    # Reformat NaN values nicely into question marks.
    df = df.replace(np.nan, "?")

    # Warn the user before printing more than 50 benchmark results to the console
    if df.shape[0] > 50 and not click.confirm(
            "We are about to print the results of {} benchmarks to the console. Continue?"
            .format(click.style(str(df.shape[0]), bold=True))):
        console.error("Exiting.")

    # Print the data to the console
    print_dataframe(
        df,
        columns=map_columns(version.category_mapping,
                            version.analyze_printing),
    )
Example #12
def prepare_module_name(module, skip_validation=False):
    """Split the provided module name into its base MD engine and version.

    Currently we only try to split via the delimiter `/`, but this could be
    changed upon request or made configurable on a per-host basis.
    """
    try:
        basename, version = module.split("/")
    except (ValueError, AttributeError):
        if skip_validation:
            console.error(
                "Although you are using the {} option, you have to provide a valid "
                "MD engine name, e.g., {} or {}.",
                "--skip-validation",
                "gromacs/dummy",
                "namd/dummy",
            )
        console.error("We were not able to determine the module name.")

    return basename, version
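For illustration, the happy path of `prepare_module_name` with an assumed module string:

basename, version = prepare_module_name("gromacs/2018.3")
# basename == "gromacs", version == "2018.3"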
Example #13
def do_submit(directory, force_restart, yes):
    """Submit the benchmarks."""
    # Migrate from MDBenchmark<2 to MDBenchmark>=2
    mds_to_dtr.migrate_to_datreant(directory)

    bundle = dtr.discover(directory)

    # Exit if no bundles were found in the current directory.
    if not bundle:
        console.error("No benchmarks found.")

    grouped_bundles = bundle.categories.groupby("started")
    try:
        bundles_not_yet_started = grouped_bundles[False]
    except KeyError:
        bundles_not_yet_started = None
    if not bundles_not_yet_started and not force_restart:
        console.error(
            "All generated benchmarks were already started once. "
            "You can force a restart with {}.",
            "--force",
        )

    # Start all benchmark simulations if a restart was requested. Otherwise
    # only start the ones that were not run yet.
    bundles_to_start = bundle
    if not force_restart:
        bundles_to_start = bundles_not_yet_started

    df = DataFrameFromBundle(bundles_to_start)

    # Reformat NaN values nicely into question marks.
    df_to_print = df.replace(np.nan, "?")
    df_to_print = df_to_print.drop(columns=["ns/day", "ncores"])
    console.info("{}", "Benchmark Summary:")
    df_short = ConsolidateDataFrame(df_to_print)
    PrintDataFrame(df_short)

    # Ask the user to confirm whether they want to submit the benchmarks
    if yes:
        console.info("The above benchmarks will be submitted.")
    elif not click.confirm("The above benchmarks will be submitted. Continue?"):
        console.error("Exiting. No benchmarks submitted.")

    batch_cmd = get_batch_command()
    console.info("Submitting a total of {} benchmarks.", len(bundles_to_start))
    for sim in bundles_to_start:
        # Remove files generated by previous mdbenchmark run
        if force_restart:
            engine = detect_md_engine(sim.categories["module"])
            cleanup_before_restart(engine=engine, sim=sim)
        sim.categories["started"] = True
        os.chdir(sim.abspath)
        subprocess.call([batch_cmd, "bench.job"])
    console.info(
        "Submitted all benchmarks. Run {} once they are finished to get the results.",
        "mdbenchmark analyze",
    )
Example #14
def analyze_namd_file(fh):
    """ Check whether the NAMD config file has any relative imports or variables
    """
    lines = fh.readlines()

    for line in lines:
        # Continue if we do not need to do anything with the current line
        if (("parameters" not in line) and ("coordinates" not in line)
                and ("structure" not in line)):
            continue

        path = line.split()[1]
        if "$" in path:
            console.error(
                "Variable substitutions are not allowed in NAMD files!")
        if ".." in path:
            console.error("Relative file paths are not allowed in NAMD files!")
        if not path.startswith("/"):
            console.error("No absolute path detected in NAMD file!")
Example #15
def do_plot(
    csv,
    output_name,
    output_format,
    template,
    module,
    gpu,
    cpu,
    plot_cores,
    fit,
    font_size,
    dpi,
    xtick_step,
    watermark,
):
    """Creates plots of benchmarks."""
    if not csv:
        raise click.BadParameter(
            "You must specify at least one CSV file.", param_hint='"--csv"'
        )

    df = pd.concat([pd.read_csv(c) for c in csv])
    performance_column = "performance" if "performance" in df.columns else "ns/day"

    df = filter_dataframe_for_plotting(df, template, module, gpu, cpu)

    # Exit if there is no performance data
    if df[performance_column].isnull().all():
        console.error("There is no performance data to plot.")

    rcParams["font.size"] = font_size
    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax = plot_over_group(
        df=df,
        plot_cores=plot_cores,
        fit=fit,
        performance_column=performance_column,
        ax=ax,
    )

    # Update xticks
    selection = "ncores" if plot_cores else "nodes"
    min_x = df[selection].min() if plot_cores else 1
    max_x = df[selection].max()
    xticks_steps = min_x
    xticks = np.arange(min_x, max_x + min_x, xticks_steps)
    step = get_xsteps(xticks.size, min_x, plot_cores, xtick_step)

    ax.set_xticks(xticks[::step])
    xdiff = min_x * 0.5 * step
    ax.set_xlim(min_x - xdiff, max_x + xdiff)

    # Update yticks
    max_y = df[performance_column].max() or 50
    yticks_steps = max(1, int((max_y + 1) // 10))
    yticks = np.arange(0, max_y + (max_y * 0.25), yticks_steps)
    ax.set_yticks(yticks)
    ax.set_ylim(0, max_y + (max_y * 0.25))

    # Add watermark
    if watermark:
        ax.text(0.025, 0.925, "MDBenchmark", transform=ax.transAxes, alpha=0.3)

    legend = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.175))
    fig.tight_layout()

    if output_name is None and len(csv) == 1:
        csv_string = csv[0].split(".")[0]
        output_name = "{}.{}".format(csv_string, output_format)
    elif output_name is None and len(csv) != 1:
        output_name = generate_output_name(output_format)
    elif not output_name.endswith(".{}".format(output_format)):
        output_name = "{}.{}".format(output_name, output_format)

    fig.savefig(
        output_name,
        format=output_format,
        bbox_extra_artists=(legend,),
        bbox_inches="tight",
        dpi=dpi,
    )
    console.info("The plot was saved as '{}'.", output_name)
Example #16
def do_generate(
    name,
    cpu,
    gpu,
    module,
    host,
    min_nodes,
    max_nodes,
    time,
    skip_validation,
    job_name,
    yes,
    physical_cores,
    logical_cores,
    number_of_ranks,
    enable_hyperthreading,
    multidir,
):
    """Generate a bunch of benchmarks."""

    # Instantiate the version we are going to use
    benchmark_version = Version3Categories()

    # Validate the CPU and GPU flags
    validate_cpu_gpu_flags(cpu, gpu)

    # Validate the number of nodes
    validate_number_of_nodes(min_nodes=min_nodes, max_nodes=max_nodes)

    if physical_cores and logical_cores and logical_cores < physical_cores:
        console.error(
            "The number of logical cores cannot be smaller than the number of physical cores."
        )

    if physical_cores and not logical_cores:
        console.warn("Assuming logical_cores = 2 * physical_cores")
        logical_cores = 2 * physical_cores

    if physical_cores and logical_cores:
        processor = Processor(physical_cores=physical_cores,
                              logical_cores=logical_cores)
    else:
        processor = Processor()

    # Hyperthreading check
    if enable_hyperthreading and not processor.supports_hyperthreading:
        console.error(
            "The processor of this machine does not support hyperthreading.")

    if not number_of_ranks:
        number_of_ranks = (processor.physical_cores, )

    # Validate number of simulations
    validate_number_of_simulations(multidir, min_nodes, max_nodes,
                                   number_of_ranks)

    # Grab the template name for the host. This should always work because
    # click does the validation for us
    template = utils.retrieve_host_template(host)

    # Warn the user that NAMD support is still experimental.
    if any(["namd" in m for m in module]):
        console.warn(NAMD_WARNING, "--gpu")

    # Stop if we cannot find any modules. If the user specified multiple
    # modules, we will continue with only the valid ones.
    modules = mdengines.normalize_modules(module, skip_validation)
    if not modules:
        console.error("No requested modules available!")

    # Check if all needed files exist. Throw an error if they do not.
    validate_required_files(name=name, modules=modules)

    # Validate that we can use the number of ranks and threads.
    # We can continue, if no ValueError is thrown
    for ranks in number_of_ranks:
        try:
            processor.get_ranks_and_threads(
                ranks, with_hyperthreading=enable_hyperthreading)
        except ValueError as e:
            console.error(e)

    # Create all benchmark combinations and put them into a DataFrame
    data = construct_generate_data(
        name,
        job_name,
        modules,
        host,
        template,
        cpu,
        gpu,
        time,
        min_nodes,
        max_nodes,
        processor,
        number_of_ranks,
        enable_hyperthreading,
        multidir,
    )
    df = pd.DataFrame(data, columns=benchmark_version.generate_categories)

    # Consolidate the data by grouping on the number of nodes and print to the
    # user as an overview.
    consolidated_df = consolidate_dataframe(
        df, columns=benchmark_version.consolidate_categories)
    print_dataframe(
        consolidated_df[benchmark_version.generate_printing],
        columns=map_columns(
            map_dict=benchmark_version.category_mapping,
            columns=benchmark_version.generate_printing,
        ),
    )

    # Save the number of benchmarks for later printing
    number_of_benchmarks = df.shape[0]
    # Ask the user for confirmation to generate files.
    # If the user defined `--yes`, we will skip the confirmation immediately.
    if yes:
        benchmark_word = "benchmark" if number_of_benchmarks == 1 else "benchmarks"
        console.info(
            "We will generate {} " + benchmark_word + ".",
            number_of_benchmarks,
        )
    elif not click.confirm("We will generate {} benchmarks. Continue?".format(
            number_of_benchmarks)):
        console.error("Exiting. No benchmarks were generated.")

    # Generate the benchmarks
    with click.progressbar(
            df.iterrows(),
            length=number_of_benchmarks,
            show_pos=True,
            label="Generating benchmarks",
    ) as bar:
        for _, row in bar:
            relative_path, file_basename = os.path.split(row["name"])
            mappings = benchmark_version.generate_mapping
            kwargs = {"name": file_basename, "relative_path": relative_path}
            for key, value in mappings.items():
                kwargs[value] = row[key]

            write_benchmark(**kwargs)

    # Finish up by telling the user how to submit the benchmarks
    console.info(
        "Finished! You can submit the jobs with {}.",
        "mdbenchmark submit",
    )
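The `Processor` helper is only used through three members here: `physical_cores`, `supports_hyperthreading` and `get_ranks_and_threads`. A rough, hypothetical sketch of such a class, purely to make the calls above concrete; the real implementation detects the host CPU and may differ substantially:

class Processor:
    """Sketch of the Processor helper (assumed behavior, not the real class)."""

    def __init__(self, physical_cores=None, logical_cores=None):
        # The real implementation presumably detects the host CPU when no
        # values are given; here we simply store what we get.
        self.physical_cores = physical_cores
        self.logical_cores = logical_cores

    @property
    def supports_hyperthreading(self):
        # Assumption: hyperthreading means two hardware threads per core.
        return (
            self.physical_cores is not None
            and self.logical_cores == 2 * self.physical_cores
        )

    def get_ranks_and_threads(self, ranks, with_hyperthreading=False):
        # Distribute the available cores over the requested MPI ranks.
        cores = self.logical_cores if with_hyperthreading else self.physical_cores
        if cores % ranks != 0:
            raise ValueError(
                "Cannot evenly distribute {} cores over {} ranks.".format(cores, ranks)
            )
        return ranks, cores // ranks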
Example #17
def normalize_modules(modules, skip_validation):
    """Validate that the provided module names are available.

    We first check whether the requested MD engine is supported by the package.
    Next we try to discover all available modules on the host. If this is not
    possible, or if the user has used the `--skip-validation` option, we skip
    the check and notify the user.

    If the user requested modules that were not found on the system, we inform
    the user and show all modules for that corresponding MD engine that were
    found.
    """
    # Check whether the modules belong to supported MD engines
    engine_names = set()
    for m in modules:
        engine_name, _version = prepare_module_name(m, skip_validation)
        engine_names.add(engine_name)
    for engine_name in engine_names:
        if detect_md_engine(engine_name) is None:
            console.error(
                "There is currently no support for '{}'. " +
                "Supported MD engines are: {}.".format(", ".join(
                    sorted(SUPPORTED_ENGINES.keys()))),
                engine_name,
            )

    if skip_validation:
        console.warn("Not performing module name validation.")
        return modules

    available_modules = get_available_modules()
    if available_modules is None:
        console.warn(
            "Cannot locate modules available on this host. Not performing module name validation."
        )
        return modules

    good_modules = [
        m for m in modules if validate_module_name(m, available_modules)
    ]

    # Prepare to warn the user about missing modules
    missing_modules = set(modules).difference(good_modules)
    if missing_modules:
        d = defaultdict(list)
        for mm in sorted(missing_modules):
            engine_name, version = mm.split("/")
            d[engine_name].append(version)

        err = "We have problems finding all of your requested modules on this host.\n"
        args = []
        for engine_name in sorted(d.keys()):
            err += (
                "We were not able to find the following modules for MD engine {}: {}.\n"
            )
            args.append(engine_name)
            args.extend(d[engine_name])
            # Show all available modules that we found for the requested MD engine
            err += "Available modules are:\n{}\n"
            args.extend([
                "\n".join([
                    "{}/{}".format(engine_name, mde)
                    for mde in sorted(available_modules[engine_name])
                ])
            ])
        console.warn(err, bold=True, *args)

    return good_modules
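`normalize_modules` delegates to a `validate_module_name` helper that is not shown. A plausible sketch follows, assuming `available_modules` maps engine names to lists of version strings, which matches how it is indexed above; the real helper may differ:

def validate_module_name(module, available_modules):
    """Return True if `engine/version` is among the available modules (sketch)."""
    try:
        engine_name, version = module.split("/")
    except ValueError:
        return False
    return version in available_modules.get(engine_name, [])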
Example #18
def do_submit(directory, force_restart, yes):
    """Submit the benchmarks."""
    bundle = dtr.discover(directory)

    # Exit if no bundles were found in the current directory.
    if not bundle:
        console.error("No benchmarks found.")

    grouped_bundles = bundle.categories.groupby("started")
    try:
        bundles_not_yet_started = grouped_bundles[False]
    except KeyError:
        bundles_not_yet_started = None
    if not bundles_not_yet_started and not force_restart:
        console.error(
            "All generated benchmarks were already started once. "
            "You can force a restart with {}.",
            "--force",
        )

    # Start all benchmark simulations if a restart was requested. Otherwise
    # only start the ones that were not run yet.
    bundles_to_start = bundle
    if not force_restart:
        bundles_to_start = bundles_not_yet_started

    benchmark_version = VersionFactory(
        categories=bundles_to_start.categories).version_class

    df = parse_bundle(
        bundles_to_start,
        columns=benchmark_version.submit_categories,
        sort_values_by=benchmark_version.analyze_sort,
        discard_performance=True,
    )

    # Reformat NaN values nicely into question marks.
    df_to_print = df.replace(np.nan, "?")

    columns_to_drop = ["ncores", "version"]
    df_to_print = df_to_print.drop(columns=columns_to_drop)

    # Consolidate the data by grouping on the number of nodes and print to the
    # user as an overview.
    consolidated_df = consolidate_dataframe(
        df_to_print, columns=benchmark_version.consolidate_categories)
    print_dataframe(
        consolidated_df,
        columns=map_columns(
            map_dict=benchmark_version.category_mapping,
            columns=benchmark_version.generate_printing[1:],
        ),
    )

    # Ask the user to confirm whether they want to submit the benchmarks
    if yes:
        console.info("The above benchmarks will be submitted.")
    elif not click.confirm(
            "The above benchmarks will be submitted. Continue?"):
        console.error("Exiting. No benchmarks submitted.")

    batch_cmd = get_batch_command()
    console.info("Submitting a total of {} benchmarks.", len(bundles_to_start))
    for sim in bundles_to_start:
        # Remove files generated by previous mdbenchmark run
        if force_restart:
            engine = detect_md_engine(sim.categories["module"])
            cleanup_before_restart(engine=engine, sim=sim)
        sim.categories["started"] = True
        os.chdir(sim.abspath)
        subprocess.call([batch_cmd, "bench.job"])
    console.info(
        "Submitted all benchmarks. Run {} once they are finished to get the results.",
        "mdbenchmark analyze",
    )
Example #19
def do_generate(
    name,
    cpu,
    gpu,
    module,
    host,
    min_nodes,
    max_nodes,
    time,
    skip_validation,
    job_name,
    yes,
):
    """Generate a bunch of benchmarks."""
    # Validate the CPU and GPU flags
    validate_cpu_gpu_flags(cpu, gpu)

    # Validate the number of nodes
    validate_number_of_nodes(min_nodes=min_nodes, max_nodes=max_nodes)

    # Grab the template name for the host. This should always work because
    # click does the validation for us
    template = utils.retrieve_host_template(host)

    # Warn the user that NAMD support is still experimental.
    if any(["namd" in m for m in module]):
        console.warn(NAMD_WARNING, "--gpu")

    module = mdengines.normalize_modules(module, skip_validation)

    # If several modules were given and we cannot find some of them, we
    # continue with the modules that were found.
    if not module:
        console.error("No requested modules available!")

    df_overview = pd.DataFrame(columns=[
        "name",
        "job_name",
        "base_directory",
        "template",
        "engine",
        "module",
        "nodes",
        "run time [min]",
        "gpu",
        "host",
    ])

    i = 1
    for m in module:
        # Here we detect the MD engine (supported: GROMACS and NAMD).
        engine = mdengines.detect_md_engine(m)

        # Check if all needed files exist. Throw an error if they do not.
        engine.check_input_file_exists(name)

        gpu_cpu = {"cpu": cpu, "gpu": gpu}
        for pu, state in sorted(gpu_cpu.items()):
            if not state:
                continue

            directory = "{}_{}".format(host, m)
            gpu = False
            gpu_string = ""
            if pu == "gpu":
                gpu = True
                directory += "_gpu"
                gpu_string = " with GPUs"

            console.info("Creating benchmark system for {}.", m + gpu_string)

            base_directory = dtr.Tree(directory)

            for nodes in range(min_nodes, max_nodes + 1):
                df_overview.loc[i] = [
                    name,
                    job_name,
                    base_directory,
                    template,
                    engine,
                    m,
                    nodes,
                    time,
                    gpu,
                    host,
                ]
                i += 1

    console.info("{}", "Benchmark Summary:")

    df_short = ConsolidateDataFrame(df_overview)
    PrintDataFrame(df_short)

    if yes:
        console.info("Generating the above benchmarks.")
    elif not click.confirm(
            "The above benchmarks will be generated. Continue?"):
        console.error("Exiting. No benchmarks generated.")

    for _, row in df_overview.iterrows():
        relative_path, file_basename = os.path.split(row["name"])
        write_benchmark(
            engine=row["engine"],
            base_directory=row["base_directory"],
            template=row["template"],
            nodes=row["nodes"],
            gpu=row["gpu"],
            module=row["module"],
            name=file_basename,
            relative_path=relative_path,
            job_name=row["job_name"],
            host=row["host"],
            time=row["run time [min]"],
        )

    # Provide some output for the user
    console.info(
        "Finished generating all benchmarks.\n"
        "You can now submit the jobs with {}.",
        "mdbenchmark submit",
    )