Example #1
    def test_invalid_dataset(self):
        # An unknown dataset name should be rejected with a
        # click.BadParameter error that mentions the bad value.
        with pytest.raises(click.BadParameter, match=r"invalid_dataset"):
            run([
                str(DUMMY_BENCHMARK), '-l', '-d', 'invalid_dataset', '-s',
                'pgd'
            ],
                'benchopt',
                standalone_mode=False)
Example #2
    @classmethod
    def setup_class(cls):
        """Make sure at least one result file is available."""

        out = SuppressStd()
        with out:
            run([
                str(DUMMY_BENCHMARK), '-l', '-d', SELECT_ONE_SIMULATED, '-s',
                'pgd*False', '-n', '1', '-r', '1', '-p', '0.1', '--no-plot'
            ],
                'benchopt',
                standalone_mode=False)
        result_files = re.findall(r'Saving result in: (.*\.csv)', out.output)
        assert len(result_files) == 1, out.output
        cls.result_file = result_files[0]
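The `setup_class` hook above caches the produced CSV path on the class so that every test in the class can reuse the same benchmark run. A minimal, hypothetical follow-up test illustrating that pattern (the method name and assertions are illustrative only, not taken from the original suite):

    def test_result_file_is_created(self):
        # Hypothetical sketch: any test method of the class can read the
        # path stored by setup_class instead of re-running the benchmark.
        from pathlib import Path
        assert Path(self.result_file).exists()
        assert self.result_file.endswith('.csv')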
Example #3
    def test_benchopt_run(self):
        # Run the dummy benchmark on a single simulated dataset with one
        # PGD solver and check what is reported on stdout.
        with CaptureRunOutput() as out:
            run([
                str(DUMMY_BENCHMARK), '-l', '-d', SELECT_ONE_SIMULATED, '-f',
                'pgd*False', '-n', '1', '-r', '1', '-p', '0.1', '--no-plot'
            ],
                'benchopt',
                standalone_mode=False)

        out.check_output('Simulated', repetition=1)
        out.check_output('Dummy Sparse Regression', repetition=1)
        out.check_output(r'Python-PGD\[use_acceleration=False\]', repetition=2)
        out.check_output(r'Python-PGD\[use_acceleration=True\]', repetition=0)

        # Make sure the results were saved in a result file
        assert len(out.result_files) == 1, out.output
Example #4
    def test_benchopt_run_in_env(self, test_env_name):
        # Same kind of run, but executed inside the dedicated conda
        # environment `test_env_name`.
        with CaptureRunOutput() as out:
            with pytest.raises(SystemExit, match='False'):
                run([
                    str(DUMMY_BENCHMARK_PATH), '--env-name', test_env_name,
                    '-d', SELECT_ONE_SIMULATED, '-f', SELECT_ONE_PGD, '-n',
                    '1', '-r', '1', '-p', '0.1', '--no-plot'
                ],
                    'benchopt',
                    standalone_mode=False)

        out.check_output(f'conda activate {test_env_name}')
        out.check_output('Simulated', repetition=1)
        out.check_output('Dummy Sparse Regression', repetition=1)
        out.check_output(r'Python-PGD\[step_size=1\]', repetition=2)
        out.check_output(r'Python-PGD\[step_size=1.5\]', repetition=0)

        # Make sure the results were saved in a result file
        assert len(out.result_files) == 1, out.output
Example #5
    def test_benchopt_caching(self):
        # Run the same command twice: the second run should hit the cache
        # instead of re-running the solver.
        n_rep = 2
        run_cmd = [
            str(DUMMY_BENCHMARK), '-l', '-d', SELECT_ONE_SIMULATED, '-s',
            'pgd*False', '-n', '1', '-r',
            str(n_rep), '-p', '0.1', '--no-plot'
        ]

        # Make a first run that should be put in cache
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        # Now check that the cache is hit when running the benchmark a
        # second time without force
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[use_acceleration=False\]', repetition=1)

        # Make sure that -f option forces the re-run for the solver
        run_cmd[4] = '-f'  # replace the '-s' flag with '-f'
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[use_acceleration=False\]',
                         repetition=n_rep + 1)
Example #6
    def test_invalid_benchmark(self, invalid_benchmark, match):
        # `invalid_benchmark` and `match` are supplied by pytest
        # parametrization (or fixtures) defined in the original test module.
        with pytest.raises(click.BadParameter, match=match):
            run([invalid_benchmark], 'benchopt', standalone_mode=False)
Example #7
    def test_invalid_benchmark(self):
        with pytest.raises(click.BadParameter, match=r"invalid_benchmark"):
            run(['invalid_benchmark'], 'benchopt', standalone_mode=False)
Example #8
def test_invalid_benchmark():
    # In standalone mode (click's default), the bad argument makes click
    # exit with the usage-error code 2 instead of raising BadParameter.
    with pytest.raises(SystemExit, match=r"2"):
        run(['invalid_benchmark'], 'benchopt')
Example #9
def test_invalid_dataset():
    with pytest.raises(SystemExit, match=r"2"):
        run(['lasso', '-d', 'invalid_dataset', '-s', 'baseline'], 'benchopt')
Example #10
def test_invalid_benchmark():
    with pytest.raises(AssertionError, match=r"{'fake_test'} is not"):
        run(['fake_test'], [], '')
Example #11
    def test_invalid_solver(self):
        with pytest.raises(click.BadParameter, match=r"invalid_solver"):
            run([str(DUMMY_BENCHMARK_PATH), '-l', '-s', 'invalid_solver'],
                'benchopt',
                standalone_mode=False)
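All of the snippets above rely on a shared module preamble that the excerpts omit. The sketch below lists the imports and helpers they appear to assume; the exact module paths are assumptions inferred from the identifiers used in the examples, not verified against the benchopt source.

# Assumed preamble -- module paths are guesses based on the names used in
# the snippets above, not confirmed imports.
import re

import click
import pytest

from benchopt.cli.main import run                   # click command under test (assumed path)
from benchopt.tests import (                        # test constants (assumed path)
    DUMMY_BENCHMARK, DUMMY_BENCHMARK_PATH,          # dummy benchmark used by the tests
    SELECT_ONE_SIMULATED, SELECT_ONE_PGD,           # dataset / solver selection strings
)
from benchopt.tests.utils import CaptureRunOutput   # captures stdout and result files (assumed)
from benchopt.utils.stream_redirection import SuppressStd  # suppresses and records stdout (assumed)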