Example #1
import click


def validate_benchmark(benchmark):
    """Raise a CLI error if `benchmark` is not a valid benchmark folder."""
    try:
        get_benchmark_objective(benchmark)
    except RuntimeError:
        raise click.BadParameter(
            f"The folder '{benchmark}' does not contain `objective.py`.\n"
            "Make sure you provide the path to a valid benchmark.")
Example #2
import numpy as np


def test_benchmark_objective(benchmark_dataset_simu):
    """Check that the objective function and the datasets are well defined."""
    benchmark_name, dataset_class = benchmark_dataset_simu
    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class.get_instance()

    dataset = dataset_class.get_instance()
    scale, data = dataset.get_data()
    objective.set_data(**data)

    # check that the reported scale is correct and that the result of
    # the objective function is a scalar
    beta_hat = np.zeros(scale)
    objective_value = objective(beta_hat)
    assert np.isscalar(objective_value), (
        "The output of the objective function should be a scalar.")
Example #3
import numpy as np


def test_benchmark_objective(benchmark_name, dataset_class):
    """Check that the objective function and the datasets are well defined."""
    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class()

    parameters = {}
    dataset = dataset_class(**parameters)
    scale, data = dataset.get_data()
    objective.set_data(**data)

    # check that the reported scale is correct and that the result of
    # the objective function is a scalar
    beta_hat = np.zeros(scale)
    objective_value = objective(beta=beta_hat)
    assert np.isscalar(objective_value), (
        "The output of the objective function should be a scalar."
    )
Example #4
import sys

import numpy as np
import pytest


def test_solver(benchmark_solver):
    """Check that the solver runs and converges on the simulated dataset."""
    benchmark_name, solver_class = benchmark_solver
    if not solver_class.is_installed():
        pytest.skip("Solver is not installed")

    # Skip test_solver for julia on OSX as it throws a segfault.
    # See issue #64.
    if 'julia' in solver_class.name.lower() and sys.platform == 'darwin':
        pytest.skip('Julia causes segfault on OSX for now.')

    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class.get_instance()

    datasets = list_benchmark_datasets(benchmark_name)
    simulated_dataset = [d for d in datasets if d.name.lower() == 'simulated']

    assert len(simulated_dataset) == 1, (
        "All benchmarks need to implement a simulated dataset for "
        "testing purposes")

    dataset_class = simulated_dataset[0]
    dataset = dataset_class.get_instance()

    scale, data = dataset.get_data()
    objective.set_data(**data)

    solver = solver_class.get_instance()
    solver.set_objective(**objective.to_dict())
    # `stop_val` is an iteration budget for solvers stopped by iteration
    # count, and a tolerance otherwise.
    stop_val = 5000 if solver_class.stop_strategy == 'iteration' else 1e-15
    solver.run(stop_val)
    beta_hat_i = solver.get_result()

    assert beta_hat_i.shape == (scale, )

    val_star = objective(beta_hat_i)

    # check that beta_hat_i is a local minimum: small random perturbations
    # should only increase the objective value
    for _ in range(100):
        eps = 1e-5 * np.random.randn(scale)
        val_eps = objective(beta_hat_i + eps)
        diff = val_eps - val_star
        assert diff > 0
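The solver interface exercised above can be summarized with a small stand-in. This is a sketch under stated assumptions (the class and its gradient-descent update are illustrative; only the attributes and methods the test touches are modeled):

import numpy as np


class GradientDescentSolver:
    """Hypothetical solver modeling the tested interface."""

    name = 'GD'
    stop_strategy = 'iteration'

    @classmethod
    def is_installed(cls):
        return True

    @classmethod
    def get_instance(cls):
        return cls()

    def set_objective(self, X, y):
        # Illustrative: assumes the objective's to_dict() exposes X and y.
        self.X, self.y = X, y

    def run(self, stop_val):
        # For 'iteration' solvers, `stop_val` is the number of iterations.
        beta = np.zeros(self.X.shape[1])
        step = 1.0 / np.linalg.norm(self.X, ord=2) ** 2
        for _ in range(int(stop_val)):
            beta -= step * self.X.T @ (self.X @ beta - self.y)
        self.beta = beta

    def get_result(self):
        # Shape (scale,), matching the assertion in the test.
        return self.beta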
Example #5
import numpy as np
import pytest


def test_solver(benchmark_name, solver_class):
    """Check that the solver runs and converges on the simulated dataset."""
    if solver_class.install_cmd == 'pip':
        # For pip-installed solvers, skip if any required import is missing.
        for package in solver_class.requirements_import:
            pytest.importorskip(package)
    elif not solver_class.is_installed():
        pytest.skip("Solver is not installed")

    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class()

    datasets = list_benchmark_datasets(benchmark_name)
    simulated_dataset = [d for d in datasets if d.name.lower() == 'simulated']

    assert len(simulated_dataset) == 1, (
        "All benchmarks need to implement a simulated dataset for "
        "testing purposes")

    dataset_class = simulated_dataset[0]
    dataset = dataset_class()

    scale, data = dataset.get_data()
    objective.set_data(**data)

    solver = solver_class()
    solver.set_objective(**objective.to_dict())
    # As in the previous example: an iteration budget or a tolerance,
    # depending on the solver's strategy.
    sample = 1000 if solver_class.sampling_strategy == 'iteration' else 1e-15
    solver.run(sample)
    beta_hat_i = solver.get_result()

    assert beta_hat_i.shape == (scale, )

    val_star = objective(beta_hat_i)

    for _ in range(100):
        eps = 1e-9 * np.random.randn(scale)
        val_eps = objective(beta_hat_i + eps)
        diff = val_eps - val_star
        assert diff > 0
Example #6
import numpy as np


def test_benchmark_objective(benchmark_dataset_simu):
    """Check that the objective function and the datasets are well defined."""
    benchmark_name, dataset_class = benchmark_dataset_simu
    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class.get_instance()

    dataset = dataset_class.get_instance()
    scale, data = dataset.get_data()
    objective.set_data(**data)

    # check that the reported scale is correct and that the result of
    # the objective function is a dictionary containing a scalar value for
    # `objective_value`.
    beta_hat = np.zeros(scale)
    objective_dict = objective(beta_hat)

    assert 'objective_value' in objective_dict, (
        'When the output of objective is a dict, it should at least contain '
        'a value associated with `objective_value`, which will be used to '
        'detect the convergence of the algorithm.')
    assert np.isscalar(objective_dict['objective_value']), (
        "The output of the objective function should be a scalar, or a dict "
        "containing a scalar associated with `objective_value`.")