Example 1
def test_gradient_optionality(simulated_problem: SimulatedProblemFixture, scipy_method: str) -> None:
    """Test that the option of not computing the gradient for simulated data does not affect estimates when the gradient
    isn't used.
    """
    simulation, _, problem, solve_options, _ = simulated_problem

    # define a custom optimization method that doesn't use gradients
    def custom_method(
            initial: Array, bounds: List[Tuple[float, float]], objective_function: Callable,
            _: Any) -> Tuple[Array, bool]:
        """Optimize without gradients."""
        wrapper = lambda x: objective_function(x)[0]
        results = scipy.optimize.minimize(wrapper, initial, method=scipy_method, bounds=bounds)
        return results.x, results.success

    # solve the problem when not using gradients and when not computing them
    updated_solve_options1 = solve_options.copy()
    updated_solve_options2 = solve_options.copy()
    updated_solve_options1['optimization'] = Optimization(custom_method)
    updated_solve_options2['optimization'] = Optimization(scipy_method, compute_gradient=False)
    results1 = problem.solve(**updated_solve_options1)
    results2 = problem.solve(**updated_solve_options2)

    # test that all arrays are essentially identical
    for key, result1 in results1.__dict__.items():
        if isinstance(result1, np.ndarray) and result1.dtype != np.object_:
            np.testing.assert_allclose(result1, getattr(results2, key), atol=1e-14, rtol=0, err_msg=key)
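For intuition, here is a minimal standalone sketch (not part of the test suite) of the property this test checks: a gradient-free SciPy method only ever sees the objective value, so its minimizer should essentially match one found with the analytic gradient, and skipping gradient computation cannot move the estimates. The toy quadratic, starting point, and tolerance below are illustrative assumptions rather than pyblp code.

import numpy as np
import scipy.optimize

def toy_objective(x):
    """Illustrative toy function (not from pyblp) that returns an (objective, gradient) pair."""
    value = (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2
    gradient = np.array([2 * (x[0] - 1.0), 2 * (x[1] + 2.0)])
    return value, gradient

# a gradient-free method only uses the objective value ...
no_gradient = scipy.optimize.minimize(lambda x: toy_objective(x)[0], np.zeros(2), method='nelder-mead')

# ... so its solution should be essentially the same as a gradient-based one
with_gradient = scipy.optimize.minimize(toy_objective, np.zeros(2), method='bfgs', jac=True)
np.testing.assert_allclose(no_gradient.x, with_gradient.x, atol=1e-3, rtol=0)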
Example 2
def test_objective_gradient(
        simulated_problem: SimulatedProblemFixture, eliminate: bool, demand: bool, supply: bool, micro: bool) -> None:
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, solve_options, problem_results = simulated_problem

    # skip some redundant tests
    if supply and problem.K3 == 0:
        return pytest.skip("The problem does not have supply-side moments to test.")
    if micro and not solve_options['micro_moments']:
        return pytest.skip("The problem does not have micro moments to test.")
    if not demand and not supply and not micro:
        return pytest.skip("There are no moments to test.")

    # configure the options used to solve the problem
    updated_solve_options = solve_options.copy()
    updated_solve_options.update({k: 0.9 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})

    # optionally include linear parameters in theta
    if not eliminate:
        if problem.K1 > 0:
            updated_solve_options['beta'][-1] = 0.9 * simulation.beta[-1]
        if problem.K3 > 0:
            updated_solve_options['gamma'] = np.full_like(simulation.gamma, np.nan)
            updated_solve_options['gamma'][-1] = 0.9 * simulation.gamma[-1]

    # zero out weighting matrix blocks to only test individual contributions of the gradient
    updated_solve_options['W'] = problem_results.W.copy()
    if not demand:
        updated_solve_options['W'][:problem.MD, :problem.MD] = 0
    if not supply and problem.K3 > 0:
        updated_solve_options['W'][problem.MD:problem.MD + problem.MS, problem.MD:problem.MD + problem.MS] = 0
    if not micro and updated_solve_options['micro_moments']:
        MM = len(updated_solve_options['micro_moments'])
        updated_solve_options['W'][-MM:, -MM:] = 0

    # use a restrictive iteration tolerance
    updated_solve_options['iteration'] = Iteration('squarem', {'atol': 1e-14})

    # compute the analytic gradient
    updated_solve_options['optimization'] = Optimization('return')
    exact = problem.solve(**updated_solve_options).gradient

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(theta: Array, _: Any, objective_function: Callable, __: Any) -> Tuple[Array, bool]:
        estimated = np.zeros_like(exact)
        change = 10 * np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] - objective_function(theta2)[0]) / change
        np.testing.assert_allclose(estimated, exact, atol=1e-10, rtol=1e-3)
        return theta, True

    # test the gradient
    updated_solve_options['optimization'] = Optimization(test_finite_differences, compute_gradient=False)
    problem.solve(**updated_solve_options)
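A note on the differencing scheme above: each gradient component is approximated with a central difference,

    estimated_i = [q(theta + (h / 2) * e_i) - q(theta - (h / 2) * e_i)] / h,    h = 10 * sqrt(machine epsilon) ≈ 1.5e-7,

whose truncation error is O(h^2). With the tight squarem tolerance keeping the inner fixed point from adding noise, the analytic gradient should agree with this estimate to roughly the rtol=1e-3 asserted in test_finite_differences.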
Example 3
def test_entropy(lb: float, ub: float, method: str, method_options: Options,
                 compute_gradient: bool, universal_display: bool) -> None:
    """Test that solutions to the entropy maximization problem from Berger, Pietra, and Pietra (1996) are reasonably
    close to the exact solution (this is based on a subset of testing methods from scipy.optimize.tests.test_optimize).
    """
    def objective_function(x: Array) -> ObjectiveResults:
        """Evaluate the objective."""
        K = np.array([1, 0.3, 0.5])
        F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])
        log_Z = np.log(np.exp(F @ x).sum())
        p = np.exp(F @ x - log_Z)
        objective = log_Z - K @ x
        gradient = F.T @ p - K if compute_gradient else None
        return objective, gradient

    # simple methods do not accept an analytic gradient
    if compute_gradient and method in {'nelder-mead', 'powell'}:
        return pytest.skip("This method does not support an analytic gradient.")

    # Newton-CG requires an analytic gradient
    if not compute_gradient and method == 'newton-cg':
        return pytest.skip("This method requires an analytic gradient.")

    # skip optimization methods that haven't been configured properly
    try:
        optimization = Optimization(method, method_options, compute_gradient,
                                    universal_display)
    except OSError as exception:
        return pytest.skip(
            f"Failed to use the {method} method in this environment: {exception}."
        )

    # test that the configuration can be formatted
    assert str(optimization)

    # define the exact solution
    exact_values = np.array([0, -0.524869316, 0.487525860])

    # estimate the solution (use the exact values if the optimization routine will just return them)
    start_values = exact_values if method == 'return' else np.zeros_like(exact_values)
    bounds = 3 * [(lb, ub)]
    estimated_values, converged = optimization._optimize(
        start_values, bounds, lambda x, *_: objective_function(x))[:2]
    assert converged

    # test that the estimated objective is reasonably close to the exact objective
    exact = objective_function(exact_values)[0]
    estimated = objective_function(estimated_values)[0]
    np.testing.assert_allclose(estimated, exact, rtol=1e-5, atol=0)
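For context on why log_Z - K @ x is the right objective: it is the convex dual of the maximum entropy problem in Berger, Pietra, and Pietra (1996). Choosing probabilities p over the five rows of F to maximize entropy subject to matching the feature expectations F.T @ p = K leads to the exponential family p_i(x) = exp((F @ x)_i - log Z(x)) with Z(x) = sum(exp(F @ x)) and to minimizing

    q(x) = log Z(x) - K @ x,    with gradient F.T @ p(x) - K,

which vanishes exactly when the fitted expectations match K. This is the gradient that objective_function returns when compute_gradient is True.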
Example 4
def test_entropy(lb, ub, method, method_options, compute_gradient,
                 universal_display):
    """Test that solutions to the entropy maximization problem from Berger, Pietra, and Pietra (1996) are reasonably
    close to the exact solution. Based on a subset of testing methods from scipy.optimize.tests.test_optimize.
    """

    # simple methods do not accept an analytic gradient
    if compute_gradient is True and method in {'nelder-mead', 'powell'}:
        return

    # Newton CG requires an analytic gradient
    if compute_gradient is False and method == 'newton-cg':
        return

    # the custom method needs to know if an analytic gradient will be computed
    if callable(method):
        method_options['jac'] = compute_gradient

    # define the objective function
    K = np.array([1, 0.3, 0.5])
    F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])

    def objective_function(x):
        log_Z = np.log(np.exp(F @ x).sum())
        p = np.exp(F @ x - log_Z)
        objective = log_Z - K @ x
        gradient = F.T @ p - K
        return (objective, gradient) if compute_gradient else objective

    # estimate the solution
    optimization = Optimization(method, method_options, compute_gradient,
                                universal_display)
    start_values = np.array([0, 0, 0], options.dtype)
    bounds = 3 * [(lb, ub)]
    estimated_values, converged = optimization._optimize(
        start_values, bounds, lambda x, *_: objective_function(x))[:2]
    assert converged

    # test that the estimated objective is reasonably close to the exact objective
    exact_values = np.array([0, -0.524869316, 0.487525860], options.dtype)
    exact_results = objective_function(exact_values)
    estimated_results = objective_function(estimated_values)
    exact_objective = exact_results[0] if compute_gradient else exact_results
    estimated_objective = estimated_results[0] if compute_gradient else estimated_results
    np.testing.assert_allclose(estimated_objective, exact_objective, rtol=1e-5, atol=0)
Example 5
def test_return(simulated_problem: SimulatedProblemFixture) -> None:
    """Test that using a trivial optimization and fixed point iteration routines that just return initial values yield
    results that are the same as the specified initial values.
    """
    simulation, simulation_results, problem, solve_options, _ = simulated_problem

    # specify initial values and the trivial routines
    initial_values = {
        'sigma': simulation.sigma,
        'pi': simulation.pi,
        'rho': simulation.rho,
        'beta': simulation.beta,
        'gamma': simulation.gamma if problem.K3 > 0 else None,
        'delta': simulation_results.delta
    }
    updated_solve_options = solve_options.copy()
    updated_solve_options.update({
        'optimization': Optimization('return'),
        'iteration': Iteration('return'),
        **initial_values
    })

    # obtain problem results and test that initial values are the same
    results = problem.solve(**updated_solve_options)
    for key, initial in initial_values.items():
        if initial is not None:
            np.testing.assert_allclose(initial,
                                       getattr(results, key),
                                       atol=1e-14,
                                       rtol=0,
                                       err_msg=key)
Example 6
def test_objective_gradient(simulated_problem, solve_options):
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, _ = simulated_problem

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(*args):
        theta, _, objective_function, _ = args
        exact = objective_function(theta)[1]
        estimated = np.zeros_like(exact)
        change = np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] -
                                objective_function(theta2)[0]) / change
        np.testing.assert_allclose(exact, estimated, atol=0, rtol=0.001)
        return theta, True

    # test the gradient at parameter values slightly different from the true ones so that the objective is sizable
    problem.solve(
        0.9 * simulation.sigma,
        0.9 * simulation.pi if simulation.pi is not None else None,
        steps=1,
        linear_costs=simulation.linear_costs,
        optimization=Optimization(test_finite_differences),
        iteration=Iteration('squarem', {
            'tol': 1e-15 if solve_options.get('linear_fp') is False else 1e-14
        }),
        **solve_options)
Example 7
def test_objective_gradient(simulated_problem: SimulatedProblemFixture,
                            eliminate: bool,
                            solve_options_update: Options) -> None:
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, solve_options, _ = simulated_problem

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(theta: Array, _: Any,
                                objective_function: Callable,
                                __: Any) -> Tuple[Array, bool]:
        exact = objective_function(theta)[1]
        estimated = np.zeros_like(exact)
        change = np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] -
                                objective_function(theta2)[0]) / change
        np.testing.assert_allclose(exact, estimated, atol=0, rtol=0.001)
        return theta, True

    # test the gradient at parameter values slightly different from the true ones so that the objective is sizable
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(solve_options_update)
    updated_solve_options.update({k: 0.9 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
    updated_solve_options.update({
        'method': '1s',
        'optimization': Optimization(test_finite_differences),
        'iteration': Iteration('squarem', {
            'atol': 1e-16 if solve_options_update.get('fp_type') == 'nonlinear' else 1e-14
        })
    })

    # optionally include linear parameters in theta
    if not eliminate:
        if problem.K1 > 0:
            updated_solve_options['beta'][-1] = 0.9 * simulation.beta[-1]
        if problem.K3 > 0:
            updated_solve_options['gamma'] = np.full_like(
                simulation.gamma, np.nan)
            updated_solve_options['gamma'][-1] = 0.9 * simulation.gamma[-1]

    # test the gradient
    problem.solve(**updated_solve_options)
Example 8
def test_knittel_metaxoglou_2014(knittel_metaxoglou_2014):
    """Replicate estimates created by replication code for Knittel and Metaxoglou (2014)."""
    results = knittel_metaxoglou_2014['problem'].solve(
        knittel_metaxoglou_2014.get('initial_sigma'),
        knittel_metaxoglou_2014.get('initial_pi'),
        optimization=Optimization('knitro', {
            'opttol': 1e-8,
            'xtol': 1e-8
        }),
        iteration=Iteration('simple', {'tol': 1e-12}),
        steps=1)

    # test closeness of primary results
    for key, expected in knittel_metaxoglou_2014.items():
        computed = getattr(results, key, None)
        if isinstance(computed, np.ndarray):
            np.testing.assert_allclose(expected,
                                       computed,
                                       atol=1e-8,
                                       rtol=1e-5,
                                       err_msg=key)

    # structure post-estimation outputs
    elasticities = results.compute_elasticities()
    changed_prices = results.solve_approximate_merger()
    changed_shares = results.compute_shares(changed_prices)
    post_estimation = {
        'elasticities': elasticities,
        'costs': results.compute_costs(),
        'changed_prices': changed_prices,
        'changed_shares': changed_shares,
        'own_elasticities': results.extract_diagonals(elasticities),
        'profits': results.compute_profits(),
        'changed_profits': results.compute_profits(changed_prices, changed_shares),
        'consumer_surpluses': results.compute_consumer_surpluses(),
        'changed_consumer_surpluses': results.compute_consumer_surpluses(changed_prices)
    }

    # test closeness of post-estimation outputs
    for key, computed in post_estimation.items():
        expected = knittel_metaxoglou_2014[key]
        np.testing.assert_allclose(expected,
                                   computed,
                                   atol=1e-8,
                                   rtol=1e-4,
                                   err_msg=key)
Example 9
def test_gradient_optionality(simulated_problem, scipy_method):
    """Test that the option of not computing the gradient for simulated data does not affect estimates when the gradient
    isn't used.
    """
    simulation, _, problem, _ = simulated_problem

    # define a custom optimization method that doesn't use gradients
    def custom_method(initial, bounds, objective_function, _):
        wrapper = lambda x: objective_function(x)[0]
        results = scipy.optimize.minimize(wrapper, initial, method=scipy_method, bounds=bounds)
        return results.x, results.success

    # solve the problem when not using gradients and when not computing them
    optimization1 = Optimization(custom_method)
    optimization2 = Optimization(scipy_method, compute_gradient=False)
    results1 = problem.solve(simulation.sigma, simulation.pi, steps=1, optimization=optimization1)
    results2 = problem.solve(simulation.sigma, simulation.pi, steps=1, optimization=optimization2)

    # test that all arrays are essentially identical
    for key, result1 in results1.__dict__.items():
        if isinstance(result1, np.ndarray) and result1.dtype != np.object_:
            result2 = getattr(results2, key)
            np.testing.assert_allclose(result1, result2, atol=1e-14, rtol=0, err_msg=key)
Example 10
def simulated_problem(request: Any) -> SimulatedProblemFixture:
    """Configure and solve a simulated problem, either with or without supply-side data. Preclude overflow with rho
    bounds that are more conservative than the default ones.
    """
    name, supply = request.param
    simulation, simulation_results, simulated_micro_moments = request.getfixturevalue(
        f'{name}_simulation')
    micro_moments = []
    if simulated_micro_moments:
        micro_values = simulation_results.compute_micro(
            simulated_micro_moments)
        for moment, value in zip(simulated_micro_moments, micro_values):
            micro_moments.append(
                ProductsAgentsCovarianceMoment(moment.X2_index,
                                               moment.demographics_index,
                                               value, moment.market_ids))
    problem = simulation_results.to_problem(
        simulation.product_formulations[:2 + int(supply)])
    solve_options = {
        'sigma': simulation.sigma,
        'pi': simulation.pi,
        'rho': simulation.rho,
        'beta': np.where(simulation._parameters.alpha_index, simulation.beta if supply else np.nan, np.nan),
        'rho_bounds': (np.zeros_like(simulation.rho), np.minimum(0.9, 1.5 * simulation.rho)),
        'costs_type': simulation.costs_type,
        'method': '1s',
        'check_optimality': 'gradient',
        'optimization': Optimization('slsqp', {'ftol': 1e-10}),
        'micro_moments': micro_moments
    }
    problem_results = problem.solve(**solve_options)
    return simulation, simulation_results, problem, solve_options, problem_results
Example 11
def test_entropy(lb: float, ub: float, method: Union[str, Callable],
                 method_options: Options, compute_gradient: bool,
                 universal_display: bool) -> None:
    """Test that solutions to the entropy maximization problem from Berger, Pietra, and Pietra (1996) are reasonably
    close to the exact solution (this is based on a subset of testing methods from scipy.optimize.tests.test_optimize).
    """
    def objective_function(x: Array) -> Union[Array, Tuple[Array, Array]]:
        """Evaluate the objective."""
        K = np.array([1, 0.3, 0.5])
        F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])
        log_Z = np.log(np.exp(F @ x).sum())
        p = np.exp(F @ x - log_Z)
        objective = log_Z - K @ x
        gradient = F.T @ p - K
        return (objective, gradient) if compute_gradient else objective

    # simple methods do not accept an analytic gradient
    if compute_gradient is True and method in {'nelder-mead', 'powell'}:
        return

    # Newton CG requires an analytic gradient
    if compute_gradient is False and method == 'newton-cg':
        return

    # the custom method needs to know if an analytic gradient will be computed
    if callable(method):
        method_options = method_options.copy()
        method_options['jac'] = compute_gradient

    # skip optimization methods that haven't been configured properly
    try:
        optimization = Optimization(method, method_options, compute_gradient,
                                    universal_display)
    except OSError as exception:
        return pytest.skip(
            f"Failed to use the {method} method in this environment: {exception}."
        )

    # test that the configuration can be formatted
    assert str(optimization)

    # define the exact solution
    exact_values = np.array([0, -0.524869316, 0.487525860], options.dtype)

    # estimate the solution (use the exact values if the optimization routine will just return them)
    start_values = exact_values if method == 'return' else np.array([0, 0, 0], options.dtype)
    bounds = 3 * [(lb, ub)]
    estimated_values, converged = optimization._optimize(
        start_values, bounds, lambda x, *_: objective_function(x))[:2]
    assert converged

    # test that the estimated objective is reasonably close to the exact objective
    exact_results = objective_function(exact_values)
    estimated_results = objective_function(estimated_values)
    exact_objective = exact_results[0] if compute_gradient else exact_results
    estimated_objective = estimated_results[0] if compute_gradient else estimated_results
    np.testing.assert_allclose(estimated_objective, exact_objective, rtol=1e-5, atol=0)
Example 12
def test_bounds(simulated_problem: SimulatedProblemFixture,
                method: str) -> None:
    """Test that non-binding bounds on parameters in simulated problems do not affect estimates and that binding bounds
    are respected. Forcing parameters to be far from their optimal values creates instability problems, so this is also
    a test of how well estimation handles unstable problems.
    """
    simulation, _, problem, solve_options, _ = simulated_problem

    # skip optimization methods that haven't been configured properly
    updated_solve_options = solve_options.copy()
    try:
        updated_solve_options['optimization'] = Optimization(method)
    except OSError as exception:
        return pytest.skip(
            f"Failed to use the {method} method in this environment: {exception}."
        )

    # solve the problem when unbounded
    unbounded_solve_options = updated_solve_options.copy()
    unbounded_solve_options.update({
        'sigma_bounds': (np.full_like(simulation.sigma, -np.inf), np.full_like(simulation.sigma, +np.inf)),
        'pi_bounds': (np.full_like(simulation.pi, -np.inf), np.full_like(simulation.pi, +np.inf)),
        'rho_bounds': (np.full_like(simulation.rho, -np.inf), np.full_like(simulation.rho, +np.inf)),
        'beta_bounds': (np.full_like(simulation.beta, -np.inf), np.full_like(simulation.beta, +np.inf)),
        'gamma_bounds': (np.full_like(simulation.gamma, -np.inf), np.full_like(simulation.gamma, +np.inf))
    })
    unbounded_results = problem.solve(**unbounded_solve_options)

    # choose a parameter from each set and identify its estimated value
    sigma_index = pi_index = rho_index = beta_index = gamma_index = None
    sigma_value = pi_value = rho_value = beta_value = gamma_value = None
    if problem.K2 > 0:
        sigma_index = (simulation.sigma.nonzero()[0][0], simulation.sigma.nonzero()[1][0])
        sigma_value = unbounded_results.sigma[sigma_index]
    if problem.D > 0:
        pi_index = (simulation.pi.nonzero()[0][0], simulation.pi.nonzero()[1][0])
        pi_value = unbounded_results.pi[pi_index]
    if problem.H > 0:
        rho_index = (simulation.rho.nonzero()[0][0], simulation.rho.nonzero()[1][0])
        rho_value = unbounded_results.rho[rho_index]
    if problem.K1 > 0:
        beta_index = (simulation.beta.nonzero()[0][-1], simulation.beta.nonzero()[1][-1])
        beta_value = unbounded_results.beta[beta_index]
    if problem.K3 > 0:
        gamma_index = (simulation.gamma.nonzero()[0][-1], simulation.gamma.nonzero()[1][-1])
        gamma_value = unbounded_results.gamma[gamma_index]

    # use different types of binding bounds
    for lb_scale, ub_scale in [(-0.1, +np.inf), (+1, -0.1), (0, 0)]:
        binding_sigma_bounds = (np.full_like(simulation.sigma, -np.inf), np.full_like(simulation.sigma, +np.inf))
        binding_pi_bounds = (np.full_like(simulation.pi, -np.inf), np.full_like(simulation.pi, +np.inf))
        binding_rho_bounds = (np.full_like(simulation.rho, -np.inf), np.full_like(simulation.rho, +np.inf))
        binding_beta_bounds = (np.full_like(simulation.beta, -np.inf), np.full_like(simulation.beta, +np.inf))
        binding_gamma_bounds = (np.full_like(simulation.gamma, -np.inf), np.full_like(simulation.gamma, +np.inf))
        if problem.K2 > 0:
            binding_sigma_bounds[0][sigma_index] = sigma_value - lb_scale * np.abs(sigma_value)
            binding_sigma_bounds[1][sigma_index] = sigma_value + ub_scale * np.abs(sigma_value)
        if problem.D > 0:
            binding_pi_bounds[0][pi_index] = pi_value - lb_scale * np.abs(pi_value)
            binding_pi_bounds[1][pi_index] = pi_value + ub_scale * np.abs(pi_value)
        if problem.H > 0:
            binding_rho_bounds[0][rho_index] = rho_value - lb_scale * np.abs(rho_value)
            binding_rho_bounds[1][rho_index] = rho_value + ub_scale * np.abs(rho_value)
        if problem.K1 > 0:
            binding_beta_bounds[0][beta_index] = beta_value - lb_scale * np.abs(beta_value)
            binding_beta_bounds[1][beta_index] = beta_value + ub_scale * np.abs(beta_value)
        if problem.K3 > 0:
            binding_gamma_bounds[0][gamma_index] = gamma_value - lb_scale * np.abs(gamma_value)
            binding_gamma_bounds[1][gamma_index] = gamma_value + ub_scale * np.abs(gamma_value)

        # update options with the binding bounds
        binding_solve_options = updated_solve_options.copy()
        binding_solve_options.update({
            'sigma': np.clip(binding_solve_options['sigma'], *binding_sigma_bounds),
            'pi': np.clip(binding_solve_options['pi'], *binding_pi_bounds),
            'rho': np.clip(binding_solve_options['rho'], *binding_rho_bounds),
            'sigma_bounds': binding_sigma_bounds,
            'pi_bounds': binding_pi_bounds,
            'rho_bounds': binding_rho_bounds,
            'beta_bounds': binding_beta_bounds,
            'gamma_bounds': binding_gamma_bounds
        })
        if problem.K1 > 0:
            binding_solve_options['beta'] = binding_solve_options.get(
                'beta', np.full_like(simulation.beta, np.nan))
            binding_solve_options['beta'][beta_index] = beta_value
            with np.errstate(invalid='ignore'):
                binding_solve_options['beta'] = np.clip(
                    binding_solve_options['beta'], *binding_beta_bounds)
        if problem.K3 > 0:
            binding_solve_options['gamma'] = binding_solve_options.get(
                'gamma', np.full_like(simulation.gamma, np.nan))
            binding_solve_options['gamma'][gamma_index] = gamma_value
            with np.errstate(invalid='ignore'):
                binding_solve_options['gamma'] = np.clip(
                    binding_solve_options['gamma'], *binding_gamma_bounds)

        # solve the problem and test that the bounds are respected
        binding_results = problem.solve(**binding_solve_options)
        assert_array_less = lambda a, b: np.testing.assert_array_less(
            a, b + 1e-14, verbose=True)
        if problem.K2 > 0:
            assert_array_less(binding_sigma_bounds[0], binding_results.sigma)
            assert_array_less(binding_results.sigma, binding_sigma_bounds[1])
        if problem.D > 0:
            assert_array_less(binding_pi_bounds[0], binding_results.pi)
            assert_array_less(binding_results.pi, binding_pi_bounds[1])
        if problem.H > 0:
            assert_array_less(binding_rho_bounds[0], binding_results.rho)
            assert_array_less(binding_results.rho, binding_rho_bounds[1])
        if problem.K1 > 0:
            assert_array_less(binding_beta_bounds[0], binding_results.beta)
            assert_array_less(binding_results.beta, binding_beta_bounds[1])
        if problem.K3 > 0:
            assert_array_less(binding_gamma_bounds[0], binding_results.gamma)
            assert_array_less(binding_results.gamma, binding_gamma_bounds[1])
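To make the binding-bound construction above concrete, here is a small sketch with a hypothetical unbounded estimate of 0.5 (the value is illustrative, not taken from the tests), showing how each (lb_scale, ub_scale) pair forces the parameter away from, or pins it at, its unbounded estimate.

import numpy as np

value = 0.5  # hypothetical unbounded estimate of a single parameter
for lb_scale, ub_scale in [(-0.1, +np.inf), (+1, -0.1), (0, 0)]:
    lb = value - lb_scale * np.abs(value)
    ub = value + ub_scale * np.abs(value)
    print((lb_scale, ub_scale), '->', (lb, ub))

# (-0.1, inf) -> (0.55, inf): the lower bound sits above the old estimate
# (1, -0.1)   -> (0.0, 0.45): the upper bound sits below the old estimate
# (0, 0)      -> (0.5, 0.5):  the parameter is pinned at the old estimate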
Example 13
def test_bounds(simulated_problem, method):
    """Test that non-binding bounds on parameters in simulated problems do not affect estimates and that binding bounds
    are respected.
    """
    simulation, _, problem, _ = simulated_problem

    # all problems will be solved with the same optimization method starting as close to the true parameters as possible
    solve = lambda s, p: problem.solve(
        np.minimum(np.maximum(simulation.sigma, s[0]), s[1]),
        np.minimum(np.maximum(simulation.pi, p[0]), p[1])
        if simulation.pi is not None else None,
        sigma_bounds=s,
        pi_bounds=p,
        steps=1,
        optimization=Optimization(method))

    # solve the problem when unbounded
    unbounded_sigma_bounds = (np.full_like(simulation.sigma, -np.inf),
                              np.full_like(simulation.sigma, +np.inf))
    unbounded_pi_bounds = None
    if simulation.pi is not None:
        unbounded_pi_bounds = (np.full_like(simulation.pi, -np.inf),
                               np.full_like(simulation.pi, +np.inf))
    unbounded_results = solve(unbounded_sigma_bounds, unbounded_pi_bounds)

    # choose an element in each parameter matrix and identify its estimated value
    sigma_index = (simulation.sigma.nonzero()[0][0],
                   simulation.sigma.nonzero()[1][0])
    sigma_value = unbounded_results.sigma[sigma_index]
    pi_index = None
    if simulation.pi is not None:
        pi_index = (simulation.pi.nonzero()[0][0],
                    simulation.pi.nonzero()[1][0])
        pi_value = unbounded_results.pi[pi_index]

    # use different types of binding bounds and skip types that fix all parameters
    for lb_scale, ub_scale in [(+np.inf, -0.1), (-0.1, +np.inf), (+1, -0.1), (-0.1, +1), (0, 0)]:
        binding_sigma_bounds = (np.full_like(simulation.sigma, -np.inf), np.full_like(simulation.sigma, +np.inf))
        binding_sigma_bounds[0][sigma_index] = sigma_value - lb_scale * np.abs(sigma_value)
        binding_sigma_bounds[1][sigma_index] = sigma_value + ub_scale * np.abs(sigma_value)
        if simulation.pi is None:
            binding_pi_bounds = None
            if np.array_equal(*map(np.abs, binding_sigma_bounds)):
                continue
        else:
            binding_pi_bounds = (np.full_like(simulation.pi, -np.inf), np.full_like(simulation.pi, +np.inf))
            binding_pi_bounds[0][pi_index] = pi_value - lb_scale * np.abs(pi_value)
            binding_pi_bounds[1][pi_index] = pi_value + ub_scale * np.abs(pi_value)
            if np.array_equal(*map(np.abs, binding_sigma_bounds)) and np.array_equal(*map(np.abs, binding_pi_bounds)):
                continue

        # solve the problem with binding bounds and test that they are essentially respected
        binding_results = solve(binding_sigma_bounds, binding_pi_bounds)
        assert_array_less = lambda a, b: np.testing.assert_array_less(
            a, b + 1e-14, verbose=True)
        assert_array_less(binding_sigma_bounds[0], binding_results.sigma)
        assert_array_less(binding_results.sigma, binding_sigma_bounds[1])
        if simulation.pi is not None:
            assert_array_less(binding_pi_bounds[0], binding_results.pi)
            assert_array_less(binding_results.pi, binding_pi_bounds[1])

    # for methods other than TNC, which works differently with bounds, test that non-binding bounds furnish results that
    #   are similar to their unbounded counterparts
    if method != 'tnc':
        unbinding_sigma_bounds = (
            simulation.sigma - 1e10 * np.abs(simulation.sigma),
            simulation.sigma + 1e10 * np.abs(simulation.sigma)
        )
        unbinding_pi_bounds = None
        if simulation.pi is not None:
            unbinding_pi_bounds = (
                simulation.pi - 1e10 * np.abs(simulation.pi),
                simulation.pi + 1e10 * np.abs(simulation.pi)
            )
        unbinding_results = solve(unbinding_sigma_bounds, unbinding_pi_bounds)
        np.testing.assert_allclose(unbounded_results.sigma,
                                   unbinding_results.sigma,
                                   atol=0,
                                   rtol=0.1)
        if simulation.pi is not None:
            np.testing.assert_allclose(unbounded_results.pi,
                                       unbinding_results.pi,
                                       atol=0,
                                       rtol=0.1)