Example #1
def test_optimal_instruments(simulated_problem: SimulatedProblemFixture,
                             compute_options: Options) -> None:
    """Test that starting parameters that are half their true values also give rise to errors of less than 10% under
    optimal instruments.
    """
    simulation, _, problem, solve_options, problem_results = simulated_problem

    # compute optimal instruments and update the problem (only use a few draws to speed up the test)
    compute_options = compute_options.copy()
    compute_options.update({'draws': 5, 'seed': 0})
    new_problem = problem_results.compute_optimal_instruments(
        **compute_options).to_problem()

    # update the default options and solve the problem
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(
        {k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
    new_results = new_problem.solve(**updated_solve_options)

    # test the accuracy of the estimated parameters
    keys = ['beta', 'sigma', 'pi', 'rho']
    if problem.K3 > 0:
        keys.append('gamma')
    for key in keys:
        np.testing.assert_allclose(getattr(simulation, key),
                                   getattr(new_results, key),
                                   atol=0,
                                   rtol=0.1,
                                   err_msg=key)
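A note on the tolerance above: with atol=0 and rtol=0.1, np.testing.assert_allclose requires each true value to lie within 10% of the corresponding estimate. A minimal standalone sketch (the arrays here are hypothetical, not from the test suite):

import numpy as np

true_values = np.array([1.0, -2.0, 0.5])
estimates = np.array([1.05, -1.95, 0.52])  # hypothetical estimates, each within 10%
np.testing.assert_allclose(true_values, estimates, atol=0, rtol=0.1)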
Example #2
def test_optimal_instruments(simulated_problem: SimulatedProblemFixture,
                             compute_options: Options) -> None:
    """Test that starting parameters that are half their true values also give rise to errors of less than 10% under
    optimal instruments.
    """
    simulation, product_data, problem, solve_options, problem_results = simulated_problem

    # make product data mutable
    product_data = {k: product_data[k] for k in product_data.dtype.names}

    # split apart the full set of demand-side instruments so they can be included in formulations
    ZD_names: List[str] = []
    for index, instrument in enumerate(problem.products.ZD.T):
        name = f'ZD{index}'
        product_data[name] = instrument
        ZD_names.append(name)

    # without a supply side, compute expected prices with a reduced form regression on all instruments
    expected_prices = None
    if problem.K3 == 0:
        ZD_formula = ' + '.join(ZD_names)
        expected_prices = compute_fitted_values(
            product_data['prices'], Formulation(f'0 + {ZD_formula}'),
            product_data)

    # compute optimal instruments and update the problem (only use a few draws to speed up the test)
    compute_options = compute_options.copy()
    compute_options.update({
        'draws': 5,
        'seed': 0,
        'expected_prices': expected_prices
    })
    new_problem = problem_results.compute_optimal_instruments(
        **compute_options).to_problem()

    # update the default options and solve the problem
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(
        {k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
    new_results = new_problem.solve(**updated_solve_options)

    # test the accuracy of the estimated parameters
    keys = ['beta', 'sigma', 'pi', 'rho']
    if problem.K3 > 0:
        keys.append('gamma')
    for key in keys:
        np.testing.assert_allclose(getattr(simulation, key),
                                   getattr(new_results, key),
                                   atol=0,
                                   rtol=0.1,
                                   err_msg=key)
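The compute_fitted_values helper used above is assumed to run a reduced-form least squares regression and return the fitted values. A hypothetical stand-in (the name and raw-matrix signature are illustrative; the suite's actual helper takes a Formulation and a data mapping):

import numpy as np

def reduced_form_fitted_values(y: np.ndarray, X: np.ndarray) -> np.ndarray:
    """Regress y on X by ordinary least squares and return the fitted values."""
    # hypothetical stand-in for the test suite's compute_fitted_values helper
    coefficients, *_ = np.linalg.lstsq(X, y, rcond=None)
    return X @ coefficients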
Example #3
def test_objective_gradient(simulated_problem: SimulatedProblemFixture,
                            eliminate: bool,
                            solve_options_update: Options) -> None:
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, solve_options, _ = simulated_problem

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(theta: Array, _: Any,
                                objective_function: Callable,
                                __: Any) -> Tuple[Array, bool]:
        exact = objective_function(theta)[1]
        estimated = np.zeros_like(exact)
        change = np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] -
                                objective_function(theta2)[0]) / change
        np.testing.assert_allclose(exact, estimated, atol=0, rtol=0.001)
        return theta, True

    # test the gradient at parameter values slightly different from the true ones so that the objective is sizable
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(solve_options_update)
    updated_solve_options.update(
        {k: 0.9 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
    atol = 1e-16 if solve_options_update.get('fp_type') == 'nonlinear' else 1e-14
    updated_solve_options.update({
        'method': '1s',
        'optimization': Optimization(test_finite_differences),
        'iteration': Iteration('squarem', {'atol': atol}),
    })

    # optionally include linear parameters in theta
    if not eliminate:
        if problem.K1 > 0:
            updated_solve_options['beta'][-1] = 0.9 * simulation.beta[-1]
        if problem.K3 > 0:
            updated_solve_options['gamma'] = np.full_like(
                simulation.gamma, np.nan)
            updated_solve_options['gamma'][-1] = 0.9 * simulation.gamma[-1]

    # test the gradient
    problem.solve(**updated_solve_options)
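The same central finite difference logic, as a self-contained sketch applied to a function with a known gradient (the step size sqrt(eps) matches the test above):

import numpy as np

def check_gradient(f, gradient, theta):
    """Compare an analytic gradient with central finite difference estimates."""
    change = np.sqrt(np.finfo(np.float64).eps)
    estimated = np.zeros_like(theta)
    for index in range(theta.size):
        theta1, theta2 = theta.copy(), theta.copy()
        theta1[index] += change / 2
        theta2[index] -= change / 2
        estimated[index] = (f(theta1) - f(theta2)) / change
    np.testing.assert_allclose(gradient(theta), estimated, atol=0, rtol=0.001)

# a simple quadratic with analytic gradient 2x
theta = np.array([1.0, -2.0, 3.0])
check_gradient(lambda x: (x ** 2).sum(), lambda x: 2 * x, theta)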
Example #4
def test_scipy(method: Union[str, Callable], method_options: Options, tol: float) -> None:
    """Test that the solution to the example fixed point problem from scipy.optimize.fixed_point is reasonably close to
    the exact solution. Also verify that the configuration can be formatted.
    """

    # test that the configuration can be formatted
    if method != 'return':
        method_options = method_options.copy()
        method_options['tol'] = tol
    iteration = Iteration(method, method_options)
    assert str(iteration)

    # test that the solution is reasonably close (use the exact values if the iteration routine will just return them)
    def contraction(x: Array) -> Array:
        return np.sqrt(np.array([10, 12]) / (x + np.array([3, 5])))

    exact_values = np.array([1.4920333, 1.37228132])
    start_values = exact_values if method == 'return' else np.ones_like(exact_values)
    computed_values = iteration._iterate(start_values, contraction)[0]
    np.testing.assert_allclose(exact_values, computed_values, rtol=0, atol=10 * tol)
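For context, plain fixed-point iteration on the same contraction converges to the exact values without pyblp's Iteration wrapper (a standalone sketch):

import numpy as np

x = np.ones(2)
for _ in range(100):
    x_next = np.sqrt(np.array([10, 12]) / (x + np.array([3, 5])))
    if np.abs(x_next - x).max() < 1e-12:
        break
    x = x_next

np.testing.assert_allclose(x_next, [1.4920333, 1.37228132], rtol=0, atol=1e-7)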
Example #5
def test_entropy(lb: float, ub: float, method: Union[str, Callable],
                 method_options: Options, compute_gradient: bool,
                 universal_display: bool) -> None:
    """Test that solutions to the entropy maximization problem from Berger, Pietra, and Pietra (1996) are reasonably
    close to the exact solution (this is based on a subset of testing methods from scipy.optimize.tests.test_optimize).
    """
    def objective_function(x: Array) -> Union[Array, Tuple[Array, Array]]:
        """Evaluate the objective."""
        K = np.array([1, 0.3, 0.5])
        F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])
        log_Z = np.log(np.exp(F @ x).sum())
        p = np.exp(F @ x - log_Z)
        objective = log_Z - K @ x
        gradient = F.T @ p - K
        return (objective, gradient) if compute_gradient else objective

    # simple methods do not accept an analytic gradient
    if compute_gradient and method in {'nelder-mead', 'powell'}:
        return

    # Newton CG requires an analytic gradient
    if not compute_gradient and method == 'newton-cg':
        return

    # the custom method needs to know if an analytic gradient will be computed
    if callable(method):
        method_options = method_options.copy()
        method_options['jac'] = compute_gradient

    # skip optimization methods that haven't been configured properly
    try:
        optimization = Optimization(method, method_options, compute_gradient,
                                    universal_display)
    except OSError as exception:
        pytest.skip(
            f"Failed to use the {method} method in this environment: {exception}."
        )

    # test that the configuration can be formatted
    assert str(optimization)

    # define the exact solution
    exact_values = np.array([0, -0.524869316, 0.487525860], options.dtype)

    # estimate the solution (use the exact values if the optimization routine will just return them)
    start_values = exact_values if method == 'return' else np.zeros_like(exact_values)
    bounds = 3 * [(lb, ub)]
    estimated_values, converged = optimization._optimize(
        start_values, bounds, lambda x, *_: objective_function(x))[:2]
    assert converged

    # test that the estimated objective is reasonably close to the exact objective
    exact_results = objective_function(exact_values)
    estimated_results = objective_function(estimated_values)
    exact_objective = exact_results[0] if compute_gradient else exact_results
    estimated_objective = (
        estimated_results[0] if compute_gradient else estimated_results
    )
    np.testing.assert_allclose(estimated_objective,
                               exact_objective,
                               rtol=1e-5,
                               atol=0)
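As a cross-check on the exact solution used above, the analytic gradient of the entropy objective should be numerically zero there (a standalone sketch using the same K and F):

import numpy as np

K = np.array([1, 0.3, 0.5])
F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])
x = np.array([0, -0.524869316, 0.487525860])
p = np.exp(F @ x - np.log(np.exp(F @ x).sum()))

# at the optimum, expected features F.T @ p match the targets K
np.testing.assert_allclose(F.T @ p - K, 0, atol=1e-7)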