def test_get_batch_command(cli_runner, monkeypatch, tmpdir):
    """Test that the get_engine_command works correctly.

    It should exit if no batching system was found.
    """
    # Build a minimal throwaway CLI so we can capture what
    # `get_batch_command()` prints via `click.echo`.
    # (See http://click.pocoo.org/6/testing/#basic-testing)
    @click.group()
    def test_cli():
        pass

    @test_cli.command()
    def test():
        get_batch_command()

    # Without any queuing system present the call must abort with an error.
    expected = (
        'ERROR Was not able to find a batch system. '
        'Are you trying to use this package on a host with a queuing system?\n'
    )
    result = cli_runner.invoke(test_cli, ['test'])
    assert result.exit_code == 1
    assert result.output == expected

    # Pretending that `qsub` exists should make the call succeed.
    monkeypatch.setattr('mdbenchmark.submit.glob', lambda x: ['qsub'])
    result = cli_runner.invoke(test_cli, ['test'])
    assert result.exit_code == 0
def test_submit_resubmit(cli_runner, monkeypatch, tmpdir, data):
    """Test that we cannot submit a benchmark system that was already
    submitted, unless we force it.
    """
    with tmpdir.as_cwd():
        # Pointing `submit` at a non-existent directory must fail.
        result = cli_runner.invoke(cli, ["submit", "--directory=look_here/"], "--yes")
        assert result.exit_code == 1
        assert result.output == "ERROR No benchmarks found.\n"

        # Benchmarks that already ran once cannot be submitted again
        # without the `--force` flag.
        result = cli_runner.invoke(
            cli,
            ["submit", "--directory={}".format(data["analyze-files-gromacs"]), "--yes"],
        )
        df = pd.read_csv(data["analyze-files-gromacs-consolidated.csv"], index_col=0)
        s = PrintDataFrame(df, False)
        expected = "ERROR All generated benchmarks were already started once. You can force a restart with --force.\n"
        assert result.exit_code == 1
        assert result.output == expected

        # With `--force` a restart is allowed. Stub out everything that
        # would touch the system or delete our own test files.
        monkeypatch.setattr("subprocess.call", lambda x: True)
        monkeypatch.setattr("mdbenchmark.cli.submit.get_batch_command", lambda: "sbatch")
        monkeypatch.setattr("mdbenchmark.cli.submit.detect_md_engine", lambda x: gromacs)
        monkeypatch.setattr(
            "mdbenchmark.submit.cleanup_before_restart", lambda engine, sim: True
        )
        expected = (
            "Benchmark Summary:\n"
            + s
            + "\nThe above benchmarks will be submitted.\n"
            + "Submitting a total of 5 benchmarks.\n"
            + "Submitted all benchmarks. Run mdbenchmark analyze once they are finished to get the results.\n"
        )
        result = cli_runner.invoke(
            cli,
            [
                "submit",
                "--directory={}".format(data["analyze-files-gromacs"]),
                "--force",
                "--yes",
            ],
        )
        assert result.exit_code == 0
        assert result.output == expected
def test_generate_console_messages(cli_runner, monkeypatch, tmpdir):
    """Test that the CLI for generate prints all error messages as expected."""
    with tmpdir.as_cwd():
        # monkeypatch the output of the available modules
        monkeypatch.setattr('mdbenchmark.mdengines.get_available_modules',
                            lambda: {'gromacs': ['2016']})

        # Test that we get an error when not supplying a file name
        result = cli_runner.invoke(
            cli.cli, ['generate', '--module=gromacs/2016', '--host=draco'])
        output = 'Usage: cli generate [OPTIONS]\n\nError: Invalid value for ' \
                 '"-n" / "--name": Please specifiy the name of your input files.'
        # NOTE(review): neither `result` nor `output` is asserted here before
        # both are overwritten below — this first case is effectively
        # untested; two assert statements appear to be missing.

        # Test error message if the TPR file does not exist
        result = cli_runner.invoke(
            cli.cli,
            ['generate', '--module=gromacs/2016', '--host=draco', '--name=md'])
        output = 'ERROR File md.tpr does not exist, but is needed for GROMACS benchmarks.\n'
        assert result.exit_code == 1
        assert result.output == output

        with open('protein.tpr', 'w') as fh:
            fh.write('This is a dummy tpr!')

        # Test that the minimal number of nodes must be smaller than the
        # maximal number (min-nodes=6 > max-nodes=4 must be rejected).
        result = cli_runner.invoke(cli.cli, [
            'generate', '--module=gromacs/2016', '--host=draco',
            '--name=protein', '--min-nodes=6', '--max-nodes=4'
        ])
        output = 'Usage: cli generate [OPTIONS]\n\nError: Invalid value for ' \
                 '"--min-nodes": The minimal number of nodes needs to be smaller ' \
                 'than the maximal number.\n'
        assert result.exit_code == 2
        assert result.output == output

        # Test error message if we pass an invalid template name
        result = cli_runner.invoke(cli.cli, [
            'generate', '--module=gromacs/2016', '--host=minerva',
            '--name=protein'
        ])
        output = 'Could not find template for host \'minerva\'.\n' \
                 'Available host templates:\n' \
                 'draco\n' \
                 'hydra\n'
        assert result.exit_code == 0
        assert result.output == output

        # Test error message if we do not pass any module name
        result = cli_runner.invoke(
            cli.cli, ['generate', '--host=draco', '--name=protein'])
        output = 'Usage: cli generate [OPTIONS]\n\nError: Invalid value for ' \
                 '"-m" / "--module": Please specify which MD engine module ' \
                 'to use for the benchmarks.\n'
        assert result.exit_code == 2
        assert result.output == output
def test_normalize_modules(cli_runner, monkeypatch, tmpdir):
    """Test that normalize modules works as expected."""
    @click.group()
    def test_cli():
        pass

    # Skipping the validation should only emit a warning.
    @test_cli.command()
    def skip_validation():
        normalize_modules(modules=['gromacs/2016.4'], skip_validation=True)

    result = cli_runner.invoke(test_cli, ['skip_validation'])
    assert result.exit_code == 0
    assert result.output == 'WARNING Not performing module name validation.\n'

    # Not skipping on a host without a module system also warns.
    @test_cli.command()
    def do_not_skip_validation():
        normalize_modules(modules=['gromacs/2016.4'], skip_validation=False)

    result = cli_runner.invoke(test_cli, ['do_not_skip_validation'])
    assert result.exit_code == 0
    assert result.output == (
        'WARNING Cannot locate modules available on this host. '
        'Not performing module name validation.\n'
    )

    @test_cli.command()
    def test_normalize():
        normalize_modules(modules=['gromacs/doesnotexist'], skip_validation=False)

    with tmpdir.as_cwd():
        # Create the fake module file tree that `get_available_modules`
        # will discover.
        for top, children in DIR_STRUCTURE.items():
            for child, files in children.items():
                os.makedirs(os.path.join(top, child))
                for fname in files:
                    open(os.path.join(top, child, fname), 'a').close()

        # Point MODULEPATH at the directories we just created.
        dirs = ':'.join(
            os.path.join(os.getcwd(), entry) for entry in os.listdir(os.getcwd())
        )
        monkeypatch.setenv('MODULEPATH', dirs)
        modules = get_available_modules()

        expected = (
            'WARNING We have problems finding all of your requested modules on this host.\n'
            'We were not able to find the following modules for MD engine gromacs: '
            'doesnotexist.\n'
            'Available modules are:\n'
            'gromacs/2016.4\n'
            'gromacs/2018.1\n'
            'gromacs/5.1.4-plumed2.3\n\n'
        )
        result = cli_runner.invoke(test_cli, ['test_normalize'])
        assert result.exit_code == 0
        assert result.output == expected
def test_analyze_namd_file(cli_runner, tmpdir):
    """Test that we check the `.namd` file as expected."""
    @click.group()
    def test_cli():
        pass

    @test_cli.command()
    def test():
        with open('md.namd') as fh:
            namd.analyze_namd_file(fh)

    with tmpdir.as_cwd():
        # Make sure that we do not throw any error when everything is fine!
        with open('md.namd', 'w') as fh:
            fh.write('dummy file')
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 0
        assert result.output == ''

        # Assert that we fail, when a relative path is given.
        with open('md.namd', 'w') as fh:
            fh.write('parameters ./relative/path/')
        output = 'ERROR No absolute path detected in NAMD file!\n'
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 1
        assert result.output == output

        # Fail if we do not give ANY absolute path.
        with open('md.namd', 'w') as fh:
            fh.write('parameters abc')
        output = 'ERROR No absolute path detected in NAMD file!\n'
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 1
        assert result.output == output

        # Fail when a parent-relative path ("../") is used.
        with open('md.namd', 'w') as fh:
            fh.write('coordinates ../another/relative/path')
        output = 'ERROR Relative file paths are not allowed in NAMD files!\n'
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 1
        assert result.output == output

        # Fail when a variable substitution ("$") appears in the file.
        with open('md.namd', 'w') as fh:
            fh.write('structure $')
        output = 'ERROR Variable Substitutions are not allowed in NAMD files!\n'
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 1
        assert result.output == output
def test_submit_resubmit(cli_runner, monkeypatch, tmpdir, data):
    """Test that we cannot submit a benchmark system that was already
    submitted, unless we force it.
    """
    # Dummy callable used to monkeypatch both `subprocess.call` and
    # `detect_md_engine`.
    def call(arg):
        return DummyEngine

    with tmpdir.as_cwd():
        # A non-existent directory must yield an error.
        result = cli_runner.invoke(cli.cli, [
            'submit',
            '--directory=look_here/',
        ])
        assert result.exit_code == 1
        assert result.output == 'ERROR No benchmarks found.\n'

        # Already-started benchmarks cannot be resubmitted without --force.
        result = cli_runner.invoke(cli.cli, [
            'submit',
            '--directory={}'.format(data['analyze-files-gromacs']),
        ])
        assert result.exit_code == 1
        assert result.output == (
            'ERROR All generated benchmarks were already'
            ' started once. You can force a restart with'
            ' --force.\n'
        )

        # A forced restart should succeed. Patch everything that would
        # touch the system, so nothing is actually submitted.
        monkeypatch.setattr('subprocess.call', call)
        monkeypatch.setattr('mdbenchmark.submit.get_batch_command', lambda: 'sbatch')
        monkeypatch.setattr('mdbenchmark.submit.detect_md_engine', call)
        expected = (
            'Submitting a total of 5 benchmarks.\n'
            'Submitted all benchmarks. Run mdbenchmark analyze once'
            ' they are finished to get the results.\n'
        )
        result = cli_runner.invoke(cli.cli, [
            'submit',
            '--directory={}'.format(data['analyze-files-gromacs']),
            '--force'
        ])
        assert result.exit_code == 0
        assert result.output == expected
def test_generate_simple_input(cli_runner, generate_output, module, extensions, tmpdir):
    """Test that we can generate benchmarks for all supported MD engines w/o module validation."""
    with tmpdir.as_cwd():
        # Create empty dummy input files for the engine under test.
        for suffix in extensions:
            open("protein.{}".format(suffix), "a").close()

        result = cli_runner.invoke(
            cli.cli,
            [
                "generate",
                "--module={}".format(module),
                "--host=draco",
                "--max-nodes=4",
                "--gpu",
                "--no-cpu",
                "--name=protein",
                "--yes",
            ],
        )

        expected = generate_output().format(module)
        expected = (
            "WARNING Cannot locate modules available on this host. "
            "Not performing module name validation.\n" + expected
        )
        if "namd" in module:
            expected = NAMD_WARNING_FORMATTED + expected

        # Test that we get a warning, if no module name validation is performed.
        assert result.exit_code == 0
        assert result.output == expected
def test_generate_unsupported_engine(cli_runner, monkeypatch, tmpdir):
    """Make sure we throw the correct error, when passed an unsupported MD engine."""
    with tmpdir.as_cwd():
        # Create the fake module file tree that
        # `mdengines.get_available_modules` will discover.
        for top, children in DIR_STRUCTURE.items():
            for child, files in children.items():
                os.makedirs(os.path.join(top, child))
                for fname in files:
                    open(os.path.join(top, child, fname), "a").close()

        # Point MODULEPATH at the directories we just created.
        dirs = ":".join(
            os.path.join(os.getcwd(), entry) for entry in os.listdir(os.getcwd())
        )
        monkeypatch.setenv("MODULEPATH", dirs)

        supported_engines = ", ".join(sorted(SUPPORTED_ENGINES))
        expected = (
            "ERROR There is currently no support for 'doesnotexist'. "
            "Supported MD engines are: {}.\n".format(supported_engines)
        )
        result = cli_runner.invoke(
            cli.cli,
            [
                "generate",
                "--module=doesnotexist/version",
                "--host=draco",
                "--name=protein",
                "--yes",
            ],
        )
        assert result.exit_code == 1
        assert result.output == expected
def test_generate_namd_experimental_warning(cli_runner, monkeypatch, tmpdir):
    """Test that we print the NAMD experimental warning."""
    with tmpdir.as_cwd():
        # NAMD needs all three input files to be present.
        for fname in ["md.namd", "md.psf", "md.pdb"]:
            open(fname, "a").close()

        # monkeypatch the output of the available modules
        monkeypatch.setattr(
            "mdbenchmark.mdengines.get_available_modules", lambda: {"namd": ["123"]}
        )

        result = cli_runner.invoke(
            cli, ["generate", "--module=namd/123", "--host=draco", "--name=md", "--yes"]
        )

        warning = (
            "WARNING NAMD support is experimental. "
            "All input files must be in the current directory. "
            "Parameter paths must be absolute. Only crude file checks are performed! "
            "If you use the --gpu option make sure you use the GPU compatible NAMD module!\n"
            "Creating benchmark system for namd/123.\n"
        )
        # Build the expected summary table from the generated benchmarks.
        bundle = dtr.discover()
        df = DataFrameFromBundle(bundle)
        df = ConsolidateDataFrame(df)
        summary = "Benchmark Summary:\n" + PrintDataFrame(df, False) + "\n"
        trailer = (
            "Generating the above benchmarks.\n"
            "Finished generating all benchmarks.\nYou can "
            "now submit the jobs with mdbenchmark submit.\n"
        )
        expected = warning + summary + trailer

        assert result.exit_code == 0
        assert result.output == expected
def test_plot_module_only(cli_runner, tmpdir, module, data):
    """Test plotting function with one given engine or module."""
    with tmpdir.as_cwd():
        # Bare engine names and full module names print different messages
        # on the third line; the rest of the output is identical.
        if module in ["gromacs", "namd"]:
            module_line = "Plotting all modules for engine '{}'.\n".format(module)
        else:
            module_line = "Plotting module '{}'.\n".format(module)
        expected = (
            "Plotting GPU and CPU data.\n"
            "Plotting all hosts in input file.\n"
            + module_line
            + "Your file was saved as 'testpng.png' in the working directory.\n"
        )
        result = cli_runner.invoke(
            cli.cli,
            [
                "plot",
                "--csv={}".format(data["test.csv"]),
                "--module={}".format(module),
                "--output-name=testpng",
            ],
        )
        assert result.exit_code == 0
        assert result.output == expected
        assert os.path.exists("testpng.png")
def test_plot_output_type(cli_runner, tmpdir, data, output_type):
    """Check whether output types are constructed correctly.

    Invokes ``plot`` with ``--output-format`` and verifies both the console
    messages and the exit code for the given ``output_type``.
    """
    with tmpdir.as_cwd():
        # A previous revision built a first `output` string here ("All
        # modules will be plotted. ...") that was immediately overwritten
        # and never asserted; that dead assignment has been removed.
        output = ("Plotting GPU and CPU data.\n"
                  "Plotting all hosts in input file.\n"
                  "Plotting all modules in your input data.\n"
                  "Your file was saved as 'testfile.{}' in the working "
                  "directory.\n".format(output_type))
        result = cli_runner.invoke(
            cli.cli,
            [
                "plot",
                "--csv={}".format(data["test.csv"]),
                "--output-name=testfile",
                "--output-format={}".format(output_type),
            ],
        )
        assert result.output == output
        assert result.exit_code == 0
def test_guess_ncores(cli_runner, monkeypatch):
    """Test that we can guess the correct number of cores on the supported
    systems.
    """
    def dummy(arg):
        return 'ABC'

    # Patch the platform and the /proc/cpuinfo helper for the Linux path.
    monkeypatch.setattr('mdbenchmark.utils.sys.platform', 'linux')
    monkeypatch.setattr(
        'mdbenchmark.utils._cat_proc_cpuinfo_grep_query_sort_uniq', dummy)
    assert utils.guess_ncores() == 9

    # On macOS the guess is half of the reported logical CPU count.
    monkeypatch.setattr('mdbenchmark.utils.sys.platform', 'darwin')
    monkeypatch.setattr('mdbenchmark.utils.mp.cpu_count', lambda: 10)
    assert utils.guess_ncores() == 5

    @click.group()
    def test_cli():
        pass

    @test_cli.command()
    def test():
        utils.guess_ncores()

    # Unknown platforms fall back to one core and print a warning.
    monkeypatch.setattr('mdbenchmark.utils.sys.platform', 'starlord')
    expected = (
        'WARNING Could not guess number of physical cores. '
        'Assuming there is only 1 core per node.\n'
    )
    result = cli_runner.invoke(test_cli, ['test'])
    assert result.exit_code == 0
    assert result.output == expected
def test_analze_namd(cli_runner, tmpdir, data): with tmpdir.as_cwd(): result = cli_runner.invoke( cli.cli, ['analyze', '--directory={}'.format(data['analyze-files-namd'])]) assert result.exit_code == 0 assert result.output == """ module nodes ns/day run time [min] gpu host ncores
def test_plot_gpu(cli_runner, tmpdir, data):
    """Test the --gpu flag without any host or module."""
    with tmpdir.as_cwd():
        expected = (
            "Plotting GPU and CPU data.\n"
            "Plotting all hosts in input file.\n"
            "Plotting all modules in your input data.\n"
            "Your file was saved as 'testpng.png' in the working directory.\n"
        )
        result = cli_runner.invoke(
            cli.cli,
            [
                "plot",
                "--csv={}".format(data["test.csv"]),
                "--gpu",
                "--output-name=testpng",
                "--output-format=png",
            ],
        )
        assert result.exit_code == 0
        assert result.output == expected
        assert os.path.exists("testpng.png")
def test_plot_host_only(cli_runner, tmpdir, host, data):
    """Test plotting function with one given host."""
    with tmpdir.as_cwd():
        expected = (
            "Plotting GPU and CPU data.\n"
            "Data for the following hosts will be plotted: {}\n"
            "Plotting all modules in your input data.\n"
            "Your file was saved as 'testpng.png' in the working directory.\n"
        ).format(host)
        result = cli_runner.invoke(
            cli.cli,
            [
                "plot",
                "--csv={}".format(data["test.csv"]),
                "--host={}".format(host),
                "--output-name=testpng",
                "--output-format=png",
            ],
        )
        assert result.exit_code == 0
        assert result.output == expected
        assert os.path.exists("testpng.png")
def test_analyze_console_messages(cli_runner, tmpdir):
    """Test that the CLI for analyze prints all error messages as expected."""
    with tmpdir.as_cwd():
        # Analyzing a non-existent directory must abort with an error.
        result = cli_runner.invoke(cli, ["analyze", "--directory=look_here/"])
        assert result.exit_code == 1
        assert result.output == "ERROR There is no data for the given path.\n"
def test_aliasedgroup_unknown_command(cli_runner):
    """Test that we return an error, when invoking an unknown command."""
    result = cli_runner.invoke(cli, ["unknown_command"])
    assert result.exit_code == 2
    expected = (
        "Usage: cli [OPTIONS] COMMAND [ARGS]...\n"
        'Try "cli --help" for help.\n\n'
        "Error: Sub command unknown: unknown_command\n"
    )
    assert result.output == expected
def test_analyze_gromacs(cli_runner, tmpdir, data): """Test that the output is OK when all outputs are fine.""" with tmpdir.as_cwd(): result = cli_runner.invoke(cli.cli, [ 'analyze', '--directory={}'.format(data['analyze-files-gromacs']), ]) assert result.exit_code == 0 assert result.output == """ module nodes ns/day run time [min] gpu host ncores
def test_prepare_module_name(cli_runner):
    """Test that prepare_module_name works as expected."""
    @click.group()
    def test_cli():
        pass

    # A module name without a slash cannot be split into engine/version.
    @test_cli.command()
    def test_wrong_module_name():
        prepare_module_name('gromacs-2016.4')

    # The canonical `engine/version` form must be accepted.
    @test_cli.command()
    def test_correct_module_name():
        prepare_module_name('gromacs/2016.4')

    result = cli_runner.invoke(test_cli, ['test_wrong_module_name'])
    assert result.exit_code == 1
    assert result.output == 'ERROR We were not able to determine the module name.\n'

    result = cli_runner.invoke(test_cli, ['test_correct_module_name'])
    assert result.exit_code == 0
def test_analyze_gromacs(cli_runner, tmpdir, data):
    """Test that the output is OK when all outputs are fine."""
    with tmpdir.as_cwd():
        result = cli_runner.invoke(
            cli, ["analyze", "--directory={}".format(data["analyze-files-gromacs"])]
        )
        # The CLI output must match the reference CSV rendered as a table.
        frame = pd.read_csv(data["analyze-files-gromacs.csv"])
        expected = PrintDataFrame(frame, False) + "\n"
        assert result.exit_code == 0
        assert result.output == expected
def test_analyze_namd(cli_runner, tmpdir, data):
    """Check the `analyze` output against the NAMD reference data."""
    with tmpdir.as_cwd():
        result = cli_runner.invoke(
            cli, ["analyze", "--directory={}".format(data["analyze-files-namd"])]
        )
        # Build the expected table straight from the test data bundle.
        frame = DataFrameFromBundle(dtr.discover(data["analyze-files-namd"]))
        expected = PrintDataFrame(frame, False) + "\n"
        assert result.exit_code == 0
        assert result.output == expected
def test_analyze_with_errors(cli_runner, tmpdir, data): """Test that we warn the user of errors in the output files. Also test that we show a question mark instead of a float in the corresponding cell. """ with tmpdir.as_cwd(): result = cli_runner.invoke(cli.cli, [ 'analyze', '--directory={}'.format(data['analyze-files-w-errors']), ]) assert result.exit_code == 0 assert result.output == """WARNING We were not able to gather informations for all systems. Systems marked with question marks have either crashed or were not started yet.
def test_analyze_many_rows(cli_runner, tmpdir, datafiles):
    """Test that pandas does not limit the number of printed rows."""
    with tmpdir.as_cwd():
        open("protein.tpr", "a").close()
        # Generate a large benchmark set (up to 64 nodes), then analyze it.
        result = cli_runner.invoke(
            cli.cli,
            [
                "generate",
                "--module=gromacs/2016.3",
                "--host=draco",
                "--max-nodes=64",
                "--name=protein",
                "--yes",
            ],
        )
        result = cli_runner.invoke(cli.cli, ["analyze", "--directory=draco_gromacs"])
        # Every row of the reference CSV must appear in the output.
        frame = pd.read_csv(datafiles["analyze-many-rows.csv"], index_col=0)
        expected = PrintDataFrame(frame, False) + "\n"
        assert result.exit_code == 0
        assert result.output == expected
def test_check_file_extension(cli_runner, tmpdir):
    """Test that we check for all files needed to run NAMD benchmarks."""
    @click.group()
    def test_cli():
        pass

    @test_cli.command()
    def test():
        namd.check_input_file_exists('md')

    # Without any input files present the check must abort.
    expected = 'ERROR File md.namd does not exist, but is needed for NAMD benchmarks.\n'
    result = cli_runner.invoke(test_cli, ['test'])
    assert result.exit_code == 1
    assert result.output == expected

    NEEDED_FILES = ['md.namd', 'md.psf', 'md.pdb']
    with tmpdir.as_cwd():
        # Once all required files exist, the check passes.
        for fname in NEEDED_FILES:
            with open(fname, 'w') as fh:
                fh.write('dummy file')

        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 0
def test_analyze_plot(cli_runner, tmpdir, data):
    """Run `analyze` on the GROMACS test data and compare the printed table."""
    with tmpdir.as_cwd():
        # NOTE(review): '--plot' is passed as a *second argument* to
        # `str.format()` below, where it is silently ignored — the flag
        # never reaches the CLI. It was probably meant to be a separate
        # list element; confirm intent before fixing, since adding the
        # flag may change the asserted output.
        result = cli_runner.invoke(cli.cli, [
            'analyze',
            '--directory={}'.format(data['analyze-files-gromacs'], '--plot'),
        ])
        assert result.exit_code == 0
        # NOTE(review): this expected string appears whitespace-mangled
        # (the table rows were presumably on separate lines originally).
        assert result.output == """ module nodes ns/day run time [min] gpu host ncores 0 gromacs/2016.3 1 98.147 15 False draco 32 1 gromacs/2016.3 2 178.044 15 False draco 64 2 gromacs/2016.3 3 226.108 15 False draco 96 3 gromacs/2016.3 4 246.973 15 False draco 128 4 gromacs/2016.3 5 254.266 15 False draco 160 """
        # NOTE(review): the return value of `os.path.isfile` is discarded;
        # this line checks nothing — an `assert` is likely missing.
        os.path.isfile("runtimes.pdf")
def test_analyze_with_errors(cli_runner, tmpdir, data):
    """Test that we warn the user of errors in the output files. Also test that we
    show a question mark instead of a float in the corresponding cell.
    """
    with tmpdir.as_cwd():
        result = cli_runner.invoke(cli, [
            "analyze", "--directory={}".format(data["analyze-files-w-errors"])
        ])
        # Failed/missing values come back as NaN; the CLI renders them as "?".
        frame = DataFrameFromBundle(dtr.discover(data["analyze-files-w-errors"]))
        frame = frame.replace(np.nan, "?")
        expected = PrintDataFrame(frame, False) + "\n"
        assert result.exit_code == 0
        assert result.output == expected
def test_analyze_plot(cli_runner, tmpdir, data):
    """Run `analyze` on the GROMACS test data and compare the printed table."""
    with tmpdir.as_cwd():
        # NOTE(review): '--plot' is passed as a *second argument* to
        # `str.format()` below, where it is silently ignored — the flag
        # never reaches the CLI. It was probably meant to be a separate
        # list element; confirm intent before fixing, since adding the
        # flag may change the asserted output.
        result = cli_runner.invoke(
            cli.cli,
            [
                "analyze",
                "--directory={}".format(data["analyze-files-gromacs"], "--plot"),
            ],
        )
        bundle = dtr.discover(data["analyze-files-gromacs"])
        df = DataFrameFromBundle(bundle)
        test_output = PrintDataFrame(df, False) + "\n"
        assert result.exit_code == 0
        assert result.output == test_output
        # NOTE(review): the return value of `os.path.isfile` is discarded;
        # this line checks nothing — an `assert` is likely missing.
        os.path.isfile("runtimes.pdf")
def test_generate_simple_input_with_cpu_gpu(
    cli_runner,
    generate_output_create,
    generate_output_finish,
    generate_output_table,
    module,
    extensions,
    tmpdir,
):
    """Test that we can generate benchmarks for CPUs and GPUs at once."""
    with tmpdir.as_cwd():
        # Create empty dummy input files for the engine under test.
        for suffix in extensions:
            open("protein.{}".format(suffix), "a").close()

        result = cli_runner.invoke(
            cli.cli,
            [
                "generate",
                "--module={}".format(module),
                "--template=draco",
                "--max-nodes=4",
                "--gpu",
                "--name=protein",
                "--yes",
            ],
        )

        # Expected output: validation warning, CPU creation message, GPU
        # creation message, the summary table and the closing message.
        expected = generate_output_create(gpu=False).format(module)
        expected = (
            "WARNING Cannot locate modules available on this host. "
            "Not performing module name validation.\n" + expected
        )
        expected += generate_output_create(gpu=True).format(module)
        expected += generate_output_table(True)
        expected += generate_output_finish
        if "namd" in module:
            expected = NAMD_WARNING_FORMATTED + expected

        # Test that we get a warning, if no module name validation is performed.
        assert result.exit_code == 0
        assert result.output == expected
def test_validation(monkeypatch, tmpdir, cli_runner):
    """Test that we retrieve the correct module versions.

    Names are retrieved from a given path and the module names and
    versions are validated.
    """
    @click.group()
    def test_cli():
        pass

    @test_cli.command()
    def test():
        validate_module_name('wrong-format')

    with tmpdir.as_cwd():
        # Create the fake module file tree for `get_available_modules`.
        for top, children in DIR_STRUCTURE.items():
            for child, files in children.items():
                os.makedirs(os.path.join(top, child))
                for fname in files:
                    open(os.path.join(top, child, fname), 'a').close()

        # Point MODULEPATH at the directories we just created.
        dirs = ':'.join(
            os.path.join(os.getcwd(), entry) for entry in os.listdir(os.getcwd())
        )
        monkeypatch.setenv('MODULEPATH', dirs)
        modules = get_available_modules()

        # Assert that the correct modules and their versions are returned.
        assert set(modules['gromacs']) == {'2016.4', '5.1.4-plumed2.3', '2018.1'}
        assert set(modules['namd']) == {'123', '456'}

        # Make sure we return a boolean if the module is available or not.
        assert not validate_module_name('gromacs/123', modules)
        assert validate_module_name('gromacs/2018.1', modules)

        # A malformed module name aborts with an error message.
        result = cli_runner.invoke(test_cli, ['test'])
        assert result.exit_code == 1
        assert result.output == 'ERROR We were not able to determine the module name.\n'
def test_submit_test_prompt_no(cli_runner, tmpdir, data):
    """Test whether prompt answer no works."""
    with tmpdir.as_cwd():
        result = cli_runner.invoke(
            cli.cli,
            ["submit", "--directory={}".format(data["analyze-files-gromacs"])],
            input="n\n",
        )
        # Answering "n" at the prompt must abort without submitting anything.
        frame = pd.read_csv(data["analyze-files-gromacs-consolidated.csv"], index_col=0)
        summary = PrintDataFrame(frame, False)
        expected = (
            "Benchmark Summary:\n"
            + summary
            + "\nThe above benchmarks will be submitted. Continue? [y/N]: n\n"
            + "ERROR Exiting. No benchmarks submitted.\n"
        )
        assert result.exit_code == 1
        assert result.output == expected