Example #1
def test_return(simulated_problem: SimulatedProblemFixture) -> None:
    """Test that using a trivial optimization and fixed point iteration routines that just return initial values yield
    results that are the same as the specified initial values.
    """
    simulation, simulation_results, problem, solve_options, _ = simulated_problem

    # specify initial values and the trivial routines
    initial_values = {
        'sigma': simulation.sigma,
        'pi': simulation.pi,
        'rho': simulation.rho,
        'beta': simulation.beta,
        'gamma': simulation.gamma if problem.K3 > 0 else None,
        'delta': simulation_results.delta
    }
    updated_solve_options = solve_options.copy()
    updated_solve_options.update({
        'optimization': Optimization('return'),
        'iteration': Iteration('return'),
        **initial_values
    })

    # obtain problem results and test that initial values are the same
    results = problem.solve(**updated_solve_options)
    for key, initial in initial_values.items():
        if initial is not None:
            np.testing.assert_allclose(initial,
                                       getattr(results, key),
                                       atol=1e-14,
                                       rtol=0,
                                       err_msg=key)
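
The built-in 'return' routines above simply hand back the values they are given. For reference only, here is a minimal sketch (not part of the test suite) of a custom callable with the same four-argument signature used by the finite-difference tests later in this listing; it mimics Optimization('return') by returning its starting values and reporting convergence. The name return_initial_values is hypothetical.

def return_initial_values(theta, _, objective_function, __):
    # hand back the starting parameter values unchanged and report success
    return theta, True

# hypothetical usage: optimization=Optimization(return_initial_values, compute_gradient=False)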
Example #2
def test_objective_gradient(simulated_problem, solve_options):
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are within 1% of estimated values.
    """
    simulation, _, problem, _ = simulated_problem

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(*args):
        theta, _, objective_function, _ = args
        exact = objective_function(theta)[1]
        estimated = np.zeros_like(exact)
        change = np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] -
                                objective_function(theta2)[0]) / change
        np.testing.assert_allclose(exact, estimated, atol=0, rtol=0.001)
        return theta, True

    # test the gradient at parameter values slightly different from the true ones so that the objective is sizable
    problem.solve(
        0.9 * simulation.sigma,
        0.9 * simulation.pi if simulation.pi is not None else None,
        steps=1,
        linear_costs=simulation.linear_costs,
        optimization=Optimization(test_finite_differences),
        iteration=Iteration('squarem', {
            'tol': 1e-15 if solve_options.get('linear_fp') is False else 1e-14
        }),
        **solve_options)
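
The custom routine above estimates each gradient entry by perturbing one element of theta by plus and minus change / 2 and scaling the resulting change in the objective. Below is a self-contained sketch of the same central finite-difference recipe, applied to a quadratic whose gradient is known exactly (only NumPy is assumed; the function and names are illustrative, not from the tests).

import numpy as np

def central_differences(f, x, change=np.sqrt(np.finfo(np.float64).eps)):
    """Approximate the gradient of f at x one coordinate at a time."""
    gradient = np.zeros_like(x)
    for index in range(x.size):
        x1, x2 = x.copy(), x.copy()
        x1[index] += change / 2
        x2[index] -= change / 2
        gradient[index] = (f(x1) - f(x2)) / change
    return gradient

# the gradient of f(x) = x'x is 2x, so the estimate should agree to well within 0.1%
x = np.array([1.0, -2.0, 3.0])
np.testing.assert_allclose(central_differences(lambda v: v @ v, x), 2 * x, atol=0, rtol=0.001)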
Example #3
def test_scipy(method: str, method_options: Options, compute_jacobian: bool, use_weights: bool) -> None:
    """Test that the solution to the example fixed point problem from scipy.optimize.fixed_point is reasonably close to
    the exact solution. Also verify that the configuration can be formatted.
    """
    def contraction(x: Array) -> ContractionResults:
        """Evaluate the contraction."""
        c1 = np.array([10, 12])
        c2 = np.array([3, 5])
        x0, x = x, np.sqrt(c1 / (x + c2))
        weights = np.ones_like(x) if use_weights else None
        jacobian = -0.5 * np.eye(2) * x / (x0 + c2) if compute_jacobian else None
        return x, weights, jacobian

    # simple methods do not accept an analytic Jacobian
    if compute_jacobian and method not in {'hybr', 'lm'}:
        return pytest.skip("This method does not accept an analytic Jacobian.")

    # initialize the configuration and test that it can be formatted
    iteration = Iteration(method, method_options, compute_jacobian)
    assert str(iteration)

    # define the exact solution
    exact_values = np.array([1.4920333, 1.37228132])

    # test that the solution is reasonably close (use the exact values if the iteration routine will just return them)
    start_values = exact_values if method == 'return' else np.ones_like(exact_values)
    computed_values, stats = iteration._iterate(start_values, contraction)
    assert stats.converged
    np.testing.assert_allclose(exact_values, computed_values, rtol=0, atol=1e-5)
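
The exact_values above match the worked example in the scipy.optimize.fixed_point documentation. As a quick cross-check (a sketch that assumes SciPy is installed), the same numbers can be reproduced directly with SciPy's accelerated fixed point solver:

import numpy as np
from scipy.optimize import fixed_point

# the contraction x -> sqrt(c1 / (x + c2)) from the SciPy documentation example
contraction = lambda x, c1, c2: np.sqrt(c1 / (x + c2))
solution = fixed_point(contraction, np.ones(2), args=(np.array([10.0, 12.0]), np.array([3.0, 5.0])))
np.testing.assert_allclose(solution, [1.4920333, 1.37228132], rtol=0, atol=1e-5)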
Example #4
def test_objective_gradient(
        simulated_problem: SimulatedProblemFixture, eliminate: bool, demand: bool, supply: bool, micro: bool) -> None:
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, solve_options, problem_results = simulated_problem

    # skip some redundant tests
    if supply and problem.K3 == 0:
        return pytest.skip("The problem does not have supply-side moments to test.")
    if micro and not solve_options['micro_moments']:
        return pytest.skip("The problem does not have micro moments to test.")
    if not demand and not supply and not micro:
        return pytest.skip("There are no moments to test.")

    # configure the options used to solve the problem
    updated_solve_options = solve_options.copy()
    updated_solve_options.update({k: 0.9 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})

    # optionally include linear parameters in theta
    if not eliminate:
        if problem.K1 > 0:
            updated_solve_options['beta'][-1] = 0.9 * simulation.beta[-1]
        if problem.K3 > 0:
            updated_solve_options['gamma'] = np.full_like(simulation.gamma, np.nan)
            updated_solve_options['gamma'][-1] = 0.9 * simulation.gamma[-1]

    # zero out weighting matrix blocks to only test individual contributions of the gradient
    updated_solve_options['W'] = problem_results.W.copy()
    if not demand:
        updated_solve_options['W'][:problem.MD, :problem.MD] = 0
    if not supply and problem.K3 > 0:
        updated_solve_options['W'][problem.MD:problem.MD + problem.MS, problem.MD:problem.MD + problem.MS] = 0
    if not micro and updated_solve_options['micro_moments']:
        MM = len(updated_solve_options['micro_moments'])
        updated_solve_options['W'][-MM:, -MM:] = 0

    # use a restrictive iteration tolerance
    updated_solve_options['iteration'] = Iteration('squarem', {'atol': 1e-14})

    # compute the analytic gradient
    updated_solve_options['optimization'] = Optimization('return')
    exact = problem.solve(**updated_solve_options).gradient

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(theta: Array, _: Any, objective_function: Callable, __: Any) -> Tuple[Array, bool]:
        estimated = np.zeros_like(exact)
        change = 10 * np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] - objective_function(theta2)[0]) / change
        np.testing.assert_allclose(estimated, exact, atol=1e-10, rtol=1e-3)
        return theta, True

    # test the gradient
    updated_solve_options['optimization'] = Optimization(test_finite_differences, compute_gradient=False)
    problem.solve(**updated_solve_options)
Example #5
def test_objective_gradient(simulated_problem: SimulatedProblemFixture,
                            eliminate: bool,
                            solve_options_update: Options) -> None:
    """Implement central finite differences in a custom optimization routine to test that analytic gradient values
    are close to estimated values.
    """
    simulation, _, problem, solve_options, _ = simulated_problem

    # define a custom optimization routine that tests central finite differences around starting parameter values
    def test_finite_differences(theta: Array, _: Any,
                                objective_function: Callable,
                                __: Any) -> Tuple[Array, bool]:
        exact = objective_function(theta)[1]
        estimated = np.zeros_like(exact)
        change = np.sqrt(np.finfo(np.float64).eps)
        for index in range(theta.size):
            theta1 = theta.copy()
            theta2 = theta.copy()
            theta1[index] += change / 2
            theta2[index] -= change / 2
            estimated[index] = (objective_function(theta1)[0] -
                                objective_function(theta2)[0]) / change
        np.testing.assert_allclose(exact, estimated, atol=0, rtol=0.001)
        return theta, True

    # test the gradient at parameter values slightly different from the true ones so that the objective is sizable
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(solve_options_update)
    updated_solve_options.update({k: 0.9 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
    updated_solve_options.update({
        'method': '1s',
        'optimization': Optimization(test_finite_differences),
        'iteration': Iteration('squarem', {
            'atol': 1e-16 if solve_options_update.get('fp_type') == 'nonlinear' else 1e-14
        })
    })

    # optionally include linear parameters in theta
    if not eliminate:
        if problem.K1 > 0:
            updated_solve_options['beta'][-1] = 0.9 * simulation.beta[-1]
        if problem.K3 > 0:
            updated_solve_options['gamma'] = np.full_like(
                simulation.gamma, np.nan)
            updated_solve_options['gamma'][-1] = 0.9 * simulation.gamma[-1]

    # test the gradient
    problem.solve(**updated_solve_options)
Example #6
def test_knittel_metaxoglou_2014(knittel_metaxoglou_2014):
    """Replicate estimates created by replication code for Knittel and Metaxoglou (2014)."""
    results = knittel_metaxoglou_2014['problem'].solve(
        knittel_metaxoglou_2014.get('initial_sigma'),
        knittel_metaxoglou_2014.get('initial_pi'),
        optimization=Optimization('knitro', {
            'opttol': 1e-8,
            'xtol': 1e-8
        }),
        iteration=Iteration('simple', {'tol': 1e-12}),
        steps=1)

    # test closeness of primary results
    for key, expected in knittel_metaxoglou_2014.items():
        computed = getattr(results, key, None)
        if isinstance(computed, np.ndarray):
            np.testing.assert_allclose(expected,
                                       computed,
                                       atol=1e-8,
                                       rtol=1e-5,
                                       err_msg=key)

    # structure post-estimation outputs
    elasticities = results.compute_elasticities()
    changed_prices = results.solve_approximate_merger()
    changed_shares = results.compute_shares(changed_prices)
    post_estimation = {
        'elasticities': elasticities,
        'costs': results.compute_costs(),
        'changed_prices': changed_prices,
        'changed_shares': changed_shares,
        'own_elasticities': results.extract_diagonals(elasticities),
        'profits': results.compute_profits(),
        'changed_profits': results.compute_profits(changed_prices, changed_shares),
        'consumer_surpluses': results.compute_consumer_surpluses(),
        'changed_consumer_surpluses': results.compute_consumer_surpluses(changed_prices)
    }

    # test closeness of post-estimation outputs
    for key, computed in post_estimation.items():
        expected = knittel_metaxoglou_2014[key]
        np.testing.assert_allclose(expected,
                                   computed,
                                   atol=1e-8,
                                   rtol=1e-4,
                                   err_msg=key)
Example #7
def test_hasselblad(scheme):
    """Test that the solution to the fixed point problem from Hasselblad (1969) is reasonably close to the exact
    solution and that SQUAREM takes at least an order of magnitude fewer fixed point evaluations than does simple
    iteration. This same problem is used in an original SQUAREM unit test and is the first one discussed in Varadhan and
    Roland (2008).
    """
    method_options = {'tol': 1e-8, 'max_evaluations': 100, 'scheme': scheme}

    # define the frequency data and the contraction mapping
    y = np.array([162, 267, 271, 185, 111, 61, 27, 8, 3, 1])

    def contraction(x):
        i = np.arange(y.size)
        z = np.divide(
            x[0] * np.exp(-x[1]) * x[1]**i,
            x[0] * np.exp(-x[1]) * x[1]**i + (1 - x[0]) * np.exp(-x[2]) * x[2]**i
        )
        return np.array([
            (y * z).sum() / y.sum(),
            (y * i * z).sum() / (y * z).sum(),
            (y * i * (1 - z)).sum() / (y * (1 - z)).sum()
        ])

    # solve the problem with SQUAREM and verify that the solution is reasonably close to the true solution
    initial_values = np.array([0.2, 2.5, 1.5])
    exact_values = np.array([0.6401146029910, 2.6634043566619, 1.2560951012662])
    computed_values = Iteration('squarem', method_options)._iterate(initial_values, contraction)[0]
    np.testing.assert_allclose(exact_values, computed_values, rtol=0, atol=1e-5)

    # verify that many more iterations would be needed to solve the problem with simple iteration
    del method_options['scheme']
    method_options['max_evaluations'] *= 10
    converged = Iteration('simple', method_options)._iterate(initial_values, contraction)[1]
    assert not converged
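
The contraction above is the EM update for a two-component Poisson mixture fit to the frequency data y from Hasselblad (1969). A bare-bones sketch of plain fixed point iteration on the same mapping (NumPy only, independent of the Iteration class) makes the slow linear convergence that SQUAREM accelerates easy to see:

import numpy as np

y = np.array([162, 267, 271, 185, 111, 61, 27, 8, 3, 1])

def contraction(x):
    # one EM update for the mixing weight and the two Poisson means
    i = np.arange(y.size)
    z = x[0] * np.exp(-x[1]) * x[1]**i / (
        x[0] * np.exp(-x[1]) * x[1]**i + (1 - x[0]) * np.exp(-x[2]) * x[2]**i)
    return np.array([
        (y * z).sum() / y.sum(),
        (y * i * z).sum() / (y * z).sum(),
        (y * i * (1 - z)).sum() / (y * (1 - z)).sum()
    ])

# iterate until successive values agree to 1e-8 or a generous cap is hit
x = np.array([0.2, 2.5, 1.5])
for evaluations in range(1, 10001):
    x_next = contraction(x)
    if np.max(np.abs(x_next - x)) < 1e-8:
        break
    x = x_next

# the test above caps SQUAREM at 100 evaluations, while simple iteration still has not
# converged after 1,000 evaluations at the same tolerance
print(evaluations, x_next)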
Example #8
def test_scipy(method, method_options, tol):
    """Test that the solution to the example fixed point problem from scipy.optimize.fixed_point is reasonably close to
    the exact solution.
    """
    method_options['tol'] = tol
    contraction = lambda x: np.sqrt(np.array([10, 12]) / (x + np.array([3, 5])))
    exact_values = [1.4920333, 1.37228132]
    computed_values = Iteration(method, method_options)._iterate(np.ones(2), contraction)[0]
    np.testing.assert_allclose(exact_values, computed_values, rtol=0, atol=10 * tol)
Example #9
def test_scipy(method: Union[str, Callable], method_options: Options, tol: float) -> None:
    """Test that the solution to the example fixed point problem from scipy.optimize.fixed_point is reasonably close to
    the exact solution. Also verify that the configuration can be formatted.
    """

    # test that the configuration can be formatted
    if method != 'return':
        method_options = method_options.copy()
        method_options['tol'] = tol
    iteration = Iteration(method, method_options)
    assert str(iteration)

    # test that the solution is reasonably close (use the exact values if the iteration routine will just return them)
    contraction = lambda x: np.sqrt(np.array([10, 12]) / (x + np.array([3, 5])))
    exact_values = np.array([1.4920333, 1.37228132])
    start_values = exact_values if method == 'return' else np.ones_like(exact_values)
    computed_values = iteration._iterate(start_values, contraction)[0]
    np.testing.assert_allclose(exact_values, computed_values, rtol=0, atol=10 * tol)
Example #10
    pytest.param(
        {
            'center_moments': False,
            'W_type': 'unadjusted',
            'se_type': 'clustered'
        },
        id="complex covariances"),
    pytest.param({'delta_behavior': 'last'},
                 id="faster starting delta values"),
    pytest.param({'fp_type': 'linear'}, id="non-safe linear fixed point"),
    pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
    pytest.param({'fp_type': 'nonlinear'},
                 id="non-safe nonlinear fixed point"),
    pytest.param(
        {
            'iteration': Iteration('hybr', {'xtol': 1e-12},
                                   compute_jacobian=True)
        },
        id="linear Newton fixed point"),
    pytest.param(
        {
            'fp_type': 'safe_nonlinear',
            'iteration': Iteration('hybr', {'xtol': 1e-12},
                                   compute_jacobian=True)
        },
        id="nonlinear Newton fixed point")
])
def test_accuracy(simulated_problem: SimulatedProblemFixture,
                  solve_options_update: Options) -> None:
    """Test that starting parameters that are half their true values give rise to errors of less than 10%."""
    simulation, _, problem, solve_options, _ = simulated_problem
Example #11
    pytest.param(0, 1, None, id="1 supply-side FE, default method"),
    pytest.param(
        1, 1, 'simple',
        id="1 demand- and 1 supply-side FE, simple de-meaning"),
    pytest.param(2, 0, None, id="2 demand-side FEs, default method"),
    pytest.param(2, 0, 'memory', id="2 demand-side FEs, memory"),
    pytest.param(2, 2, 'speed', id="2 demand- and 2 supply-side FEs, speed"),
    pytest.param(3, 0, None, id="3 demand-side FEs"),
    pytest.param(0, 3, None, id="3 supply-side FEs"),
    pytest.param(
        3, 3, None, id="3 demand- and 3 supply-side FEs, default method"),
    pytest.param(
        2, 1, None, id="2 demand- and 1 supply-side FEs, default method"),
    pytest.param(1,
                 2,
                 Iteration('simple', {'tol': 1e-12}),
                 id="1 demand- and 2 supply-side FEs, iteration")
])
def test_fixed_effects(simulated_problem: SimulatedProblemFixture, ED: int,
                       ES: int,
                       absorb_method: Optional[Union[str, Iteration]]) -> None:
    """Test that absorbing different numbers of demand- and supply-side fixed effects gives rise to essentially
    identical first-stage results as does including indicator variables. Also test that optimal instruments results
    and marginal costs remain unchanged.
    """
    simulation, product_data, problem, solve_options, problem_results = simulated_problem

    # there cannot be supply-side fixed effects if there isn't a supply side
    if problem.K3 == 0:
        ES = 0
    if ED == ES == 0:
Example #12
    for key in keys:
        result1 = getattr(results1, key)
        if result1 is not None:
            result2 = getattr(results2, key)
            if 'beta' in key or 'gamma' in key:
                result2 = result2[:result1.size]
            np.testing.assert_allclose(result1,
                                       result2,
                                       atol=1e-8,
                                       rtol=1e-5,
                                       err_msg=key)


@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options', [
    pytest.param({'iteration': Iteration('simple')},
                 id="configured iteration"),
    pytest.param({'processes': 2}, id="multiprocessing")
])
def test_merger(simulated_problem, solve_options):
    """Test that prices and shares simulated under changed firm IDs are reasonably close to prices and shares computed
    from the results of a solved problem. In particular, test that unchanged prices and shares are farther from their
    simulated counterparts than those computed by approximating a merger, which in turn are farther from their simulated
    counterparts than those computed by fully solving a merger. Also test that simple acquisitions increase HHI. These
    inequalities are only guaranteed because of the way in which the simulations are configured.
    """
    simulation, _, _, results = simulated_problem

    # get changed prices and shares
    changed_product_data = simulation.solve(firms_index=1)
Example #13
from pyblp import Formulation, Iteration, Optimization, Problem, build_ownership, parallel
from pyblp.utilities.basics import Array, Options
from .conftest import SimulatedProblemFixture


@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
    pytest.param({'method': '2s'}, id="two-step"),
    pytest.param({'center_moments': False, 'W_type': 'unadjusted', 'se_type': 'clustered'}, id="complex covariances"),
    pytest.param({'delta_behavior': 'last'}, id="faster starting delta values"),
    pytest.param({'fp_type': 'linear'}, id="non-safe linear fixed point"),
    pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
    pytest.param({'fp_type': 'nonlinear'}, id="non-safe nonlinear fixed point"),
    pytest.param(
        {'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
        id="linear Newton fixed point"
    ),
    pytest.param(
        {'fp_type': 'safe_nonlinear', 'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
        id="nonlinear Newton fixed point"
    )
])
def test_accuracy(simulated_problem: SimulatedProblemFixture, solve_options_update: Options) -> None:
    """Test that starting parameters that are half their true values give rise to errors of less than 10%."""
    simulation, _, problem, solve_options, _ = simulated_problem

    # update the default options and solve the problem
    updated_solve_options = solve_options.copy()
    updated_solve_options.update(solve_options_update)
    updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
Example #14
File: test_blp.py | Project: yuminliut/pyblp
    results = []
    for solve_options in [solve_options1, solve_options2]:
        results.append(problem.solve(
            simulation.sigma, simulation.pi, steps=1, linear_costs=simulation.linear_costs, **solve_options
        ))

    # test that all arrays in the results are essentially identical
    for key, result1 in results[0].__dict__.items():
        if isinstance(result1, np.ndarray) and result1.dtype != np.object_:
            result2 = getattr(results[1], key)
            np.testing.assert_allclose(result1, result2, atol=1e-14, rtol=0, err_msg=key)


@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options', [
    pytest.param({'iteration': Iteration('simple')}, id="configured iteration"),
    pytest.param({'processes': 2}, id="multiprocessing")
])
def test_merger(simulated_problem, solve_options):
    """Test that prices and shares simulated under changed firm IDs are reasonably close to prices and shares computed
    from the results of a solved problem. In particular, test that unchanged prices and shares are farther from their
    simulated counterparts than those computed by approximating a merger, which in turn are farther from their simulated
    counterparts than those computed by fully solving a merger. Also test that simple acquisitions increase HHI. These
    inequalities are only guaranteed because of the way in which the simulations are configured.
    """
    simulation, _, _, results = simulated_problem

    # get unchanged and changed prices and shares
    product_data = simulation.solve()
    changed_product_data = simulation.solve(firms_index=1)