Example #1: setup_class fixture that cleans both test benchmarks, then runs the dummy benchmark twice to produce two separate .parquet result files.
    def setup_class(cls):
        "Make sure two result files are available"
        with SuppressStd() as out:
            clean([str(DUMMY_BENCHMARK_PATH)],
                  'benchopt', standalone_mode=False)
            clean([str(REQUIREMENT_BENCHMARK_PATH)],
                  'benchopt', standalone_mode=False)
            run([
                str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
                '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1', '-o',
                SELECT_ONE_OBJECTIVE, '--no-plot'
            ], 'benchopt', standalone_mode=False)
            time.sleep(1)  # Ensure the two runs produce separate result files
            run([
                str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
                '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1', '-o',
                SELECT_ONE_OBJECTIVE, '--no-plot'
            ], 'benchopt', standalone_mode=False)
        result_files = re.findall(r'Saving result in: (.*\.parquet)',
                                  out.output)
        assert len(result_files) == 2, out.output
        cls.result_files = result_files
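These snippets come from a pytest class and omit their imports. A minimal sketch of the names they rely on, assuming the module layout of benchopt's own test suite; the exact paths may differ between benchopt versions, so treat them as a starting point rather than a definitive list.

# Assumed imports for the snippets on this page (module paths are best guesses).
import re
import time
import tempfile
from pathlib import Path

import click
import pytest

from benchopt.cli.main import run, install      # click commands under test
from benchopt.cli.helpers import clean
from benchopt.tests import (
    # Paths and selector strings for the benchmarks bundled with the tests.
    DUMMY_BENCHMARK_PATH, REQUIREMENT_BENCHMARK_PATH, REQUIREMENT_BENCHMARK,
    SELECT_ONE_SIMULATED, SELECT_ONE_PGD, SELECT_ONE_OBJECTIVE,
)
from benchopt.tests.utils import CaptureRunOutput   # captures stdout and saved result files
from benchopt.utils.stream_redirection import SuppressStd
# Example #11 also clears joblib's private _FUNCTION_HASHES cache; its import
# path is version dependent and is left out here.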
Example #2: run the dummy benchmark with dataset, solver, and objective parameter grids and check which configurations show up in the output.
    def test_benchopt_run_custom_parameters(self):
        SELECT_DATASETS = r'simulated[n_features=[100, 200]]'
        SELECT_SOLVERS = r'python-pgd-with-cb[use_acceleration=[True, False]]'
        SELECT_OBJECTIVES = r'dummy*[0.1, 0.2]'

        with CaptureRunOutput() as out:
            run([
                str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_DATASETS, '-f',
                SELECT_SOLVERS, '-n', '1', '-r', '1', '-o', SELECT_OBJECTIVES,
                '--no-plot'
            ],
                'benchopt',
                standalone_mode=False)

        out.check_output(r'Simulated\[n_features=100,', repetition=1)
        out.check_output(r'Simulated\[n_features=200,', repetition=1)
        out.check_output(r'Simulated\[n_features=5000,', repetition=0)
        out.check_output(r'Dummy Sparse Regression\[reg=0.1\]', repetition=2)
        out.check_output(r'Dummy Sparse Regression\[reg=0.2\]', repetition=2)
        out.check_output(r'Dummy Sparse Regression\[reg=0.05\]', repetition=0)
        out.check_output(r'--Python-PGD\[', repetition=0)
        out.check_output(r'--Python-PGD-with-cb\[use_acceleration=False\]:',
                         repetition=28)
        out.check_output(r'--Python-PGD-with-cb\[use_acceleration=True\]:',
                         repetition=28)
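The bracketed lists in the selector strings expand into a grid of configurations: two dataset sizes times two solver variants in the test above. A pared-down sketch of the same mechanism, reusing the assumed imports and constants from the note under Example #1:

# Sweep two dataset sizes and both values of a solver parameter in one call;
# each bracketed list multiplies out into separate configurations.
run([
    str(DUMMY_BENCHMARK_PATH), '-l',
    '-d', 'simulated[n_features=[100, 200]]',
    '-s', 'python-pgd-with-cb[use_acceleration=[True, False]]',
    '-n', '1', '-r', '1', '--no-plot',
], 'benchopt', standalone_mode=False)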
Example #3: an unknown dataset name must raise click.BadParameter.
    def test_invalid_dataset(self):
        with pytest.raises(click.BadParameter, match=r"invalid_dataset"):
            run([
                str(DUMMY_BENCHMARK_PATH), '-l', '-d', 'invalid_dataset',
                '-s', 'pgd'
            ], 'benchopt', standalone_mode=False)
Example #4: setup_class fixture that runs the dummy benchmark once and records the resulting .csv file.
    def setup_class(cls):
        "Make sure at least one result file is available"
        with SuppressStd() as out:
            run([str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
                 '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1', '-o',
                 SELECT_ONE_OBJECTIVE, '--no-plot'],
                'benchopt', standalone_mode=False)
        result_files = re.findall(r'Saving result in: (.*\.csv)', out.output)
        assert len(result_files) == 1, out.output
        cls.result_file = result_files[0]
Example #5: basic run on one dataset, solver, and objective, checking the captured output and that a single result file is saved.
    def test_benchopt_run(self):
        with CaptureRunOutput() as out:
            run([str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
                 '-f', SELECT_ONE_PGD, '-n', '1', '-r', '1', '-o',
                 SELECT_ONE_OBJECTIVE], 'benchopt', standalone_mode=False)

        out.check_output('Simulated', repetition=1)
        out.check_output('Dummy Sparse Regression', repetition=1)
        out.check_output(r'Python-PGD\[step_size=1\]:', repetition=3)
        out.check_output(r'Python-PGD\[step_size=1.5\]:', repetition=0)

        # Make sure the results were saved in a result file
        assert len(out.result_files) == 1, out.output
Example #6: the --output option sets the result file name; a second run with the same name gets a _1 suffix.
    def test_changing_output_name(self):
        command = [
            str(DUMMY_BENCHMARK_PATH), '-l', '-s', SELECT_ONE_PGD, '-d',
            SELECT_ONE_SIMULATED, '-n', '1', '--output', 'unique_name',
            '--no-plot'
        ]
        with CaptureRunOutput() as out:
            run(command, 'benchopt', standalone_mode=False)
            run(command, 'benchopt', standalone_mode=False)

        result_files = re.findall(r'Saving result in: (.*\.parquet)',
                                  out.output)
        names = [Path(result_file).stem for result_file in result_files]
        assert names[0] == 'unique_name' and names[1] == 'unique_name_1'
Example #7: the --profile option enables line profiling of the solver's run method.
    def test_benchopt_run_profile(self):
        with CaptureRunOutput() as out:
            run_cmd = [str(DUMMY_BENCHMARK_PATH),
                       '-d', SELECT_ONE_SIMULATED, '-f', SELECT_ONE_PGD,
                       '-n', '1', '-r', '1', '-o', SELECT_ONE_OBJECTIVE,
                       '--profile', '--no-plot']
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output('Using profiling', repetition=1)
        out.check_output("File: .*benchopt/tests/test_benchmarks/"
                         "dummy_benchmark/solvers/python_pgd.py", repetition=1)
        out.check_output(r'\s+'.join([
            "Line #", "Hits", "Time", "Per Hit", "% Time", "Line Contents"
        ]), repetition=1)
        out.check_output(r"def run\(self, n_iter\):", repetition=1)
Example #8: run the benchmark inside a dedicated conda environment selected with --env-name.
    def test_benchopt_run_in_env(self, test_env_name):
        with CaptureRunOutput() as out:
            with pytest.raises(SystemExit, match='False'):
                run([str(DUMMY_BENCHMARK_PATH), '--env-name', test_env_name,
                     '-d', SELECT_ONE_SIMULATED, '-f', SELECT_ONE_PGD,
                     '-n', '1', '-r', '1', '-o', SELECT_ONE_OBJECTIVE,
                     '--no-plot'], 'benchopt', standalone_mode=False)

        out.check_output(f'conda activate {test_env_name}')
        out.check_output('Simulated', repetition=1)
        out.check_output('Dummy Sparse Regression', repetition=1)
        out.check_output(r'Python-PGD\[step_size=1\]:', repetition=3)
        out.check_output(r'Python-PGD\[step_size=1.5\]:', repetition=0)

        # Make sure the results were saved in a result file
        assert len(out.result_files) == 1, out.output
Example #9: computation caching; a second identical run hits the cache and -f forces the solver to re-run.
    def test_benchopt_caching(self):
        # Check that the computation caching is working properly.

        n_rep = 2
        run_cmd = [
            str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED, '-s',
            SELECT_ONE_PGD, '-n', '1', '-r',
            str(n_rep), '-p', '0.1', '--no-plot'
        ]

        # Make a first run that should be put in cache
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        # Now check that the cache is hit when running the benchmark a
        # second time without force
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[step_size=1\]:', repetition=1)

        # Make sure that -f option forces the re-run for the solver
        run_cmd[4] = '-f'
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[step_size=1\]:',
                         repetition=2 * n_rep + 1)
Example #10: install a benchmark with extra requirements into a test environment, then run it there.
    def test_benchopt_install_in_env_with_requirements(self, test_env_name):
        objective = REQUIREMENT_BENCHMARK.get_benchmark_objective()
        out = 'already installed but failed to import.'
        if not objective.is_installed(env_name=test_env_name):
            with CaptureRunOutput() as out:
                install([
                    str(REQUIREMENT_BENCHMARK_PATH), '--env-name',
                    test_env_name
                ],
                        'benchopt',
                        standalone_mode=False)
        assert objective.is_installed(env_name=test_env_name), out
        # XXX: run the bench

        with CaptureRunOutput() as out:
            with pytest.raises(SystemExit, match='False'):
                run_cmd = [
                    str(REQUIREMENT_BENCHMARK_PATH), '--env-name',
                    test_env_name, '-n', '10', '-r', '1', '--no-plot'
                ]
                run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r"done \(not enough run\)", repetition=1)
Example #11: caching after clean, including a cache hit on a parallel run (-j) and a forced re-run (-f).
    def test_benchopt_caching(self, n_rep):
        clean([str(DUMMY_BENCHMARK_PATH)], 'benchopt', standalone_mode=False)

        # XXX - remove once this is fixed upstream with joblib/joblib#1289
        _FUNCTION_HASHES.clear()

        # Check that the computation caching is working properly.
        run_cmd = [
            str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED, '-s',
            SELECT_ONE_PGD, '-n', '1', '-r',
            str(n_rep), '-o', SELECT_ONE_OBJECTIVE, '--no-plot'
        ]

        # Make a first run that should be put in cache
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        # Check that this run was properly done. If only one is detected, this
        # could indicate that the clean command does not work properly.
        out.check_output(r'Python-PGD\[step_size=1\]:',
                         repetition=5 * n_rep + 1)

        # Now check that the cache is hit when running the benchmark a
        # second time without force
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[step_size=1\]:', repetition=1)

        # Check that the cache is also hit when running in parallel
        with CaptureRunOutput() as out:
            run(run_cmd + ['-j', 2], 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[step_size=1\]:', repetition=1)

        # Make sure that -f option forces the re-run for the solver
        run_cmd[4] = '-f'
        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Python-PGD\[step_size=1\]:',
                         repetition=5 * n_rep + 1)
Example #12: run options from a YAML --config file; invalid keys are rejected and CLI options take precedence over the file.
    def test_benchopt_run_config_file(self):
        tmp = tempfile.NamedTemporaryFile(mode="w+")
        tmp.write("some_unknown_option: 0")
        tmp.flush()
        with pytest.raises(ValueError, match="Invalid config file option"):
            run(f'{str(DUMMY_BENCHMARK_PATH)} --config {tmp.name}'.split(),
                'benchopt',
                standalone_mode=False)

        config = f"""
        objective-filter:
          - {SELECT_ONE_OBJECTIVE}
        dataset:
          - {SELECT_ONE_SIMULATED}
        n-repetitions: 2
        max-runs: 1
        force-solver:
          - python-pgd[step_size=[2, 3]]
          - Test-Solver
        """
        tmp = tempfile.NamedTemporaryFile(mode="w+")
        tmp.write(config)
        tmp.flush()

        run_cmd = [
            str(DUMMY_BENCHMARK_PATH), '--config', tmp.name, '--no-plot'
        ]

        with CaptureRunOutput() as out:
            run(run_cmd, 'benchopt', standalone_mode=False)

        out.check_output(r'Test-Solver:', repetition=11)
        out.check_output(r'Python-PGD\[step_size=2\]:', repetition=11)
        out.check_output(r'Python-PGD\[step_size=3\]:', repetition=11)

        # test that CLI options take precedence
        with CaptureRunOutput() as out:
            run(run_cmd + ['-f', 'Test-Solver'],
                'benchopt',
                standalone_mode=False)

        out.check_output(r'Test-Solver:', repetition=11)
        out.check_output(r'Python-PGD\[step_size=1.5\]:', repetition=0)
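The test builds its configuration in a temporary file, but the same options can live in a standalone YAML file. A minimal sketch, assuming the constants from the earlier snippets; the file name is an arbitrary placeholder:

# Write a YAML run configuration to disk and point the run command at it with
# --config. The keys mirror the ones exercised in the test above.
config_path = Path('run_config.yml')
config_path.write_text(
    f"objective-filter:\n"
    f"  - {SELECT_ONE_OBJECTIVE}\n"
    f"dataset:\n"
    f"  - {SELECT_ONE_SIMULATED}\n"
    "n-repetitions: 2\n"
    "max-runs: 1\n"
    "force-solver:\n"
    "  - python-pgd[step_size=[2, 3]]\n"
)
run([str(DUMMY_BENCHMARK_PATH), '--config', str(config_path), '--no-plot'],
    'benchopt', standalone_mode=False)

As in the test, options passed on the command line (for instance -f Test-Solver) still take precedence over what the file specifies.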
Example #13: an invalid benchmark path must raise click.BadParameter.
    def test_invalid_benchmark(self, invalid_benchmark, match):
        with pytest.raises(click.BadParameter, match=match):
            run([invalid_benchmark], 'benchopt', standalone_mode=False)