示例#1
0
    def test_5(self):
        """ Exercise the command-line script wrappers end to end.

        Repeatedly simulates a random dataset, runs a zero-iteration
        estimation as a baseline, and then drives the scripts_* entry
        points with randomly drawn options.
        """
        # Constraints that ensure that two alternative initialization files
        # can be used for the same simulated data.
        for _ in range(10):
            constr = dict()
            constr['periods'] = np.random.randint(1, 4)
            constr['agents'] = np.random.randint(5, 100)
            constr['is_estimation'] = True
            constr['edu'] = (7, 15)

            # Simulate a dataset
            generate_init(constr)
            respy_obj = RespyCls('test.respy.ini')
            simulate(respy_obj)

            # Create output to process a baseline.
            respy_obj.unlock()
            respy_obj.set_attr('maxfun', 0)
            respy_obj.lock()

            estimate(respy_obj)

            # Potentially evaluate at different points.
            generate_init(constr)

            init_file = 'test.respy.ini'
            file_sim = 'sim.respy.dat'

            gradient = np.random.choice([True, False])
            single = np.random.choice([True, False])
            resume = np.random.choice([True, False])
            update = np.random.choice([True, False])

            action = np.random.choice(['fix', 'free', 'value'])
            num_draws = np.random.randint(1, 20)

            # The set of identifiers is a little complicated as we only allow
            # sampling of the diagonal terms of the covariance matrix. Otherwise,
            # we sometimes run into the problem of very ill conditioned matrices
            # resulting in a failed Cholesky decomposition.
            set_ = list(range(16)) + [16, 18, 21, 25]

            identifiers = np.random.choice(set_, num_draws, replace=False)
            values = np.random.uniform(size=num_draws)

            scripts_estimate(resume, single, init_file, gradient)
            scripts_update(init_file)

            # The error can occur as the RESPY package is actually running an
            # estimation step that can result in very ill-conditioned covariance
            # matrices.
            try:
                scripts_simulate(update, init_file, file_sim, None)
                scripts_modify(identifiers, values, action, init_file)
            # BUGFIX: np.linalg.linalg is a deprecated private alias that was
            # removed in NumPy 2.0; catch the public exception instead.
            except np.linalg.LinAlgError:
                pass
示例#2
0
    def test_5(self):
        """ Exercise the command-line script wrappers end to end.

        Repeatedly simulates a random dataset, runs a zero-iteration
        estimation as a baseline, and then drives the scripts_* entry
        points with randomly drawn options.
        """
        # Constraints that ensure that two alternative initialization files
        # can be used for the same simulated data.
        for _ in range(10):
            constr = dict()
            constr['periods'] = np.random.randint(1, 4)
            constr['agents'] = np.random.randint(5, 100)
            constr['is_estimation'] = True
            constr['edu'] = (7, 15)

            # Simulate a dataset
            generate_init(constr)
            respy_obj = RespyCls('test.respy.ini')
            simulate(respy_obj)

            # Create output to process a baseline.
            respy_obj.unlock()
            respy_obj.set_attr('maxfun', 0)
            respy_obj.lock()

            estimate(respy_obj)

            # Potentially evaluate at different points.
            generate_init(constr)

            init_file = 'test.respy.ini'
            file_sim = 'sim.respy.dat'

            gradient = np.random.choice([True, False])
            single = np.random.choice([True, False])
            resume = np.random.choice([True, False])
            update = np.random.choice([True, False])

            action = np.random.choice(['fix', 'free', 'value'])
            num_draws = np.random.randint(1, 20)

            # The set of identifiers is a little complicated as we only allow
            # sampling of the diagonal terms of the covariance matrix. Otherwise,
            # we sometimes run into the problem of very ill conditioned matrices
            # resulting in a failed Cholesky decomposition.
            set_ = list(range(16)) + [16, 18, 21, 25]

            identifiers = np.random.choice(set_, num_draws, replace=False)
            values = np.random.uniform(size=num_draws)

            scripts_estimate(resume, single, init_file, gradient)
            scripts_update(init_file)

            # The error can occur as the RESPY package is actually running an
            # estimation step that can result in very ill-conditioned covariance
            # matrices.
            try:
                scripts_simulate(update, init_file, file_sim, None)
                scripts_modify(identifiers, values, action, init_file)
            # BUGFIX: np.linalg.linalg is a deprecated private alias that was
            # removed in NumPy 2.0; catch the public exception instead.
            except np.linalg.LinAlgError:
                pass
示例#3
0
def run(spec_dict, fname, grid_slaves):
    """ Run an estimation task that allows to get a sense of the scalability
    of the code.

    Creates a working directory named after the initialization file, then
    runs one estimation per entry of ``grid_slaves`` in its own numbered
    subdirectory, recording the wall-clock duration of each run.

    NOTE(review): ``duration_baseline`` and ``num_evals`` are only assigned
    in the iteration where ``num_slaves == min(grid_slaves)``, yet both are
    passed to ``record_information`` on every iteration. If ``grid_slaves``
    is not iterated in ascending order this raises ``UnboundLocalError`` on
    the first iteration — confirm callers always pass an ascending grid.
    """
    dirname = fname.replace('.ini', '')

    os.mkdir(dirname)
    os.chdir(dirname)

    respy_obj = respy.RespyCls(SPEC_DIR + fname)

    # Apply the requested deviations from the baseline specification.
    respy_obj.unlock()
    respy_obj.set_attr('is_debug', False)

    respy_obj.set_attr('file_est', '../data.respy.dat')
    for key_ in spec_dict.keys():
        respy_obj.set_attr(key_, spec_dict[key_])
    respy_obj.lock()

    maxfun = respy_obj.get_attr('maxfun')
    min_slave = min(grid_slaves)

    # Simulate the baseline dataset, which is used regardless of the number
    # of slaves.
    respy.simulate(respy_obj)
    respy_obj.write_out()

    # Iterate over the grid of requested slaves.
    for num_slaves in grid_slaves:
        dirname = '{:}'.format(num_slaves)

        os.mkdir(dirname)
        os.chdir(dirname)

        # One additional process is reserved for the master.
        respy_obj.unlock()
        respy_obj.set_attr('num_procs', num_slaves + 1)
        if num_slaves > 1:
            respy_obj.set_attr('is_parallel', True)
        else:
            respy_obj.set_attr('is_parallel', False)
        respy_obj.lock()
        respy_obj.write_out()

        # Time the estimation run itself.
        start_time = datetime.now()
        respy.estimate(respy_obj)
        finish_time = datetime.now()

        # The smallest grid point serves as the timing baseline that all
        # later runs are compared against.
        if num_slaves == min_slave:
            duration_baseline = finish_time - start_time
            num_evals = get_actual_evaluations()

        os.chdir('../')

        record_information(start_time, finish_time, num_slaves, maxfun,
                           duration_baseline, num_evals, min_slave)

    os.chdir('../')
示例#4
0
    def test_5(self):
        """ This test ensures that the logging looks exactly the same for the
        different versions.
        """

        def slurp(fname):
            # Read a whole file, closing the handle deterministically.
            # BUGFIX: the original left every handle open for the garbage
            # collector to clean up.
            with open(fname, 'r') as infile:
                return infile.read()

        max_draws = np.random.randint(10, 300)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Iterate over alternative implementations
        base_sol_log, base_est_info_log, base_est_log = None, None, None
        base_sim_log = None

        # Align the random draws across implementations.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            simulate(respy_obj)

            estimate(respy_obj)

            # Check for identical logging
            if base_sol_log is None:
                base_sol_log = slurp('sol.respy.log')
            assert slurp('sol.respy.log') == base_sol_log

            # Check for identical logging
            if base_sim_log is None:
                base_sim_log = slurp('sim.respy.log')
            assert slurp('sim.respy.log') == base_sim_log

            if base_est_info_log is None:
                base_est_info_log = slurp('est.respy.info')
            assert slurp('est.respy.info') == base_est_info_log

            if base_est_log is None:
                with open('est.respy.log', 'r') as infile:
                    base_est_log = infile.readlines()
            compare_est_log(base_est_log)
示例#5
0
    def test_6(self):
        """ This test ensures that the logging looks exactly the same for the
        different versions.
        """

        def slurp(fname):
            # Read a whole file, closing the handle deterministically.
            # BUGFIX: the original left every handle open for the garbage
            # collector to clean up.
            with open(fname, 'r') as infile:
                return infile.read()

        max_draws = np.random.randint(10, 300)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Iterate over alternative implementations
        base_sol_log, base_est_info_log, base_est_log = None, None, None
        base_sim_log = None

        # Align the random draws across implementations.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            simulate(respy_obj)

            estimate(respy_obj)

            # Check for identical logging
            if base_sol_log is None:
                base_sol_log = slurp('sol.respy.log')
            assert slurp('sol.respy.log') == base_sol_log

            # Check for identical logging
            if base_sim_log is None:
                base_sim_log = slurp('sim.respy.log')
            assert slurp('sim.respy.log') == base_sim_log

            if base_est_info_log is None:
                base_est_info_log = slurp('est.respy.info')
            assert slurp('est.respy.info') == base_est_info_log

            if base_est_log is None:
                with open('est.respy.log', 'r') as infile:
                    base_est_log = infile.readlines()
            compare_est_log(base_est_log)
示例#6
0
    def test_6(self):
        """ Run a short estimation task from start to finish.
        """
        # The constraint forces the maximum number of iterations and the
        # number of function evaluations down to their minimum value of one.
        constr = {'is_estimation': True}
        generate_init(constr)

        # Simulate a dataset, then estimate on it.
        respy_obj = RespyCls('test.respy.ini')
        simulate(respy_obj)
        estimate(respy_obj)
示例#7
0
    def test_6(self):
        """ Run a brief estimation task end to end.
        """
        # This constraint pins both the iteration limit and the number of
        # function evaluations to their minimum of one.
        constr = {'is_estimation': True}
        generate_init(constr)

        # Create a dataset by simulation and run the estimation on it.
        respy_obj = RespyCls('test.respy.ini')
        simulate(respy_obj)
        estimate(respy_obj)
示例#8
0
    def test_4(self):
        """ Deterministic model with ambiguity and interpolation. The result
        matches the case without random variation in payoffs, so it does not
        matter whether the environment is ambiguous or not.
        """
        for version in ['FORTRAN', 'PYTHON']:
            # Solve the fixed specification with the requested version.
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # The expected future value in the first period is known exactly.
            emax = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(emax, 88750)

            # As is the value of the criterion function.
            _, crit = estimate(respy_obj)
            np.testing.assert_allclose(crit, -1.0)
示例#9
0
    def test_3(self):
        """ Deterministic model with ambiguity and interpolation. The result
        matches the case without random variation in payoffs, so it does not
        matter whether the environment is ambiguous or not.
        """
        for version in ['FORTRAN', 'PYTHON']:
            # Solve the fixed specification with the requested version.
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # The expected future value in the first period is known exactly.
            emax = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(emax, 88750)

            # As is the value of the criterion function.
            _, crit = estimate(respy_obj)
            np.testing.assert_allclose(crit, -1.0)
示例#10
0
    def test_3(self):
        """ Lock in the evaluation of the criterion function for the
        original Keane & Wolpin data.
        """
        # Draw one of the three original specifications.
        resources = ['kw_data_one.ini', 'kw_data_two.ini', 'kw_data_three.ini']
        fname = np.random.choice(resources)

        # Hard-coded reference values, keyed by the specification tag
        # embedded in the file name.
        reference = {
            'one': 0.261487735867433,
            'two': 1.126138097174159,
            'three': 1.895699121131644,
        }
        rslt = None
        for tag, value in reference.items():
            if tag in fname:
                rslt = value
                break

        # Evaluate the criterion function at the true parameter values.
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/' + fname)

        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

        simulate(respy_obj)
        _, val = estimate(respy_obj)
        np.testing.assert_allclose(val, rslt)
示例#11
0
def run_estimation():
    """ Estimate the model with the release under test and persist the
    resulting criterion value for later comparison.
    """
    import numpy as np

    from respy import estimate
    from respy import RespyCls

    # Run the estimation and keep only the criterion value.
    _, crit_val = estimate(RespyCls('test.respy.ini'))
    np.savetxt('.crit_val', np.array(crit_val, ndmin=2))
示例#12
0
    def test_2(self):
        """ This test ensures that the record files are identical.
        """

        def slurp(fname):
            # Read a whole file, closing the handle deterministically.
            # BUGFIX: the original left every handle open for the garbage
            # collector to clean up.
            with open(fname, 'r') as infile:
                return infile.read()

        # Generate random initialization file. The number of periods is
        # higher than usual as only FORTRAN implementations are used to
        # solve the random request. This ensures that also some cases of
        # interpolation are explored.
        constr = dict()
        constr['version'] = 'FORTRAN'
        constr['periods'] = np.random.randint(3, 10)
        constr['maxfun'] = 0

        init_dict = generate_random_dict(constr)

        base_sol_log, base_est_info_log, base_est_log = None, None, None
        for is_parallel in [False, True]:

            init_dict['PARALLELISM']['flag'] = is_parallel
            print_init_dict(init_dict)

            respy_obj = RespyCls('test.respy.ini')

            simulate(respy_obj)

            estimate(respy_obj)

            # Check for identical records
            if base_sol_log is None:
                base_sol_log = slurp('sol.respy.log')
            assert slurp('sol.respy.log') == base_sol_log

            if base_est_info_log is None:
                base_est_info_log = slurp('est.respy.info')
            assert slurp('est.respy.info') == base_est_info_log

            if base_est_log is None:
                with open('est.respy.log', 'r') as infile:
                    base_est_log = infile.readlines()
            compare_est_log(base_est_log)
示例#13
0
    def test_4(self):
        """ Evaluate the criterion function for random requests, not just
        at the true parameter values.
        """
        # These constraints guarantee that two alternative initialization
        # files remain compatible with the same simulated dataset.
        constr = {
            'periods': np.random.randint(1, 4),
            'agents': np.random.randint(1, 100),
            'edu': (7, 15),
            'maxfun': 0,
        }

        # Simulate a dataset from a first random specification.
        generate_init(constr)
        respy_obj = RespyCls('test.respy.ini')
        simulate(respy_obj)

        # Draw a second specification under the same constraints and
        # evaluate the criterion function on the existing dataset.
        generate_init(constr)

        respy_obj = RespyCls('test.respy.ini')
        estimate(respy_obj)
示例#14
0
def scripts_estimate(resume, single, init_file, gradient):
    """ Command-line wrapper around the estimation routine.
    """
    # Load the baseline model specification.
    respy_obj = RespyCls(init_file)

    # Resuming: start from the step parameters of the previous run.
    if resume:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # A single criterion evaluation is requested by disabling all
    # optimizer iterations.
    if single:
        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

    # Run the actual optimization of the criterion function.
    estimate(respy_obj)

    # Optionally augment the output files with gradient information.
    if gradient:
        add_gradient_information(respy_obj)
示例#15
0
    def test_4(self):
        """ Check the criterion function for random requests rather than
        only at the true values.
        """
        # The constraints ensure two alternative initialization files can
        # be used with one and the same simulated dataset.
        constr = {
            'periods': np.random.randint(1, 4),
            'agents': np.random.randint(1, 100),
            'edu': (7, 15),
            'maxfun': 0,
        }

        # First specification: simulate the dataset.
        generate_init(constr)
        respy_obj = RespyCls('test.respy.ini')
        simulate(respy_obj)

        # Second specification under identical constraints: evaluate the
        # criterion at a different point while the dataset still fits.
        generate_init(constr)

        respy_obj = RespyCls('test.respy.ini')
        estimate(respy_obj)
示例#16
0
    def test_3(self):
        """ Solve a model with ambiguity and compare against hard-coded
        reference values.
        """
        # Solve and simulate the fixed specification.
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_third.respy.ini')
        respy_obj = simulate(respy_obj)

        # The expected future value in the first period is pinned down.
        emax = respy_obj.get_attr('periods_emax')[0, :1]
        np.testing.assert_allclose(emax, 86121.335057)

        # So is the value of the criterion function.
        _, crit = estimate(respy_obj)
        np.testing.assert_allclose(crit, 1.9162587639887239)
示例#17
0
    def test_4(self):
        """ Solve a model with ambiguity and compare against hard-coded
        reference values.
        """
        # Solve and simulate the fixed specification.
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fourth.respy.ini')
        respy_obj = simulate(respy_obj)

        # The expected future value in the first period is pinned down.
        emax = respy_obj.get_attr('periods_emax')[0, :1]
        np.testing.assert_allclose(emax, 75.719528)

        # So is the value of the criterion function.
        _, crit = estimate(respy_obj)
        np.testing.assert_allclose(crit, 2.802285449312437)
示例#18
0
def scripts_estimate(resume, single, init_file, gradient):
    """ Script-level wrapper that drives an estimation run.
    """
    # Read the baseline model specification from disk.
    respy_obj = RespyCls(init_file)

    # When resuming, seed the parametrization with the step parameters of
    # the previous estimation run.
    if resume:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # For a one-shot evaluation of the criterion function, simply turn
    # off the optimizer iterations.
    if single:
        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

    # Optimize the criterion function.
    estimate(respy_obj)

    # If requested, add gradient information to the output files.
    if gradient:
        add_gradient_information(respy_obj)
示例#19
0
    def test_1(self):
        """ Check the solution of a simple model against hard-coded
        reference results.
        """
        # Solve and simulate the fixed specification.
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_first.respy.ini')
        respy_obj = simulate(respy_obj)

        # The expected future value in the first period is pinned down.
        emax = respy_obj.get_attr('periods_emax')[0, :1]
        np.testing.assert_allclose(emax, 103320.40501)

        # So is the value of the criterion function.
        _, crit = estimate(respy_obj)
        np.testing.assert_allclose(crit, 1.9775860444869962)
示例#20
0
    def test_2(self):
        """ The evaluation of the criterion function at the starting values
        must be identical between the different versions.
        """
        max_draws = np.random.randint(10, 100)

        # Random request with parallelism and interpolation disabled and a
        # bounded number of draws; maxfun of zero means a single evaluation.
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        init_dict = generate_init(constr)

        respy_obj = RespyCls('test.respy.ini')

        # Simulate the dataset once; both implementations evaluate on it.
        simulate(respy_obj)

        # Align the random draws across implementations.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        base_x, base_val = None, None
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            x, val = estimate(respy_obj)

            # The returned parameters must match the first implementation.
            if base_x is None:
                base_x = x
            np.testing.assert_allclose(base_x, x)

            # And so must the value of the criterion function.
            if base_val is None:
                base_val = val
            np.testing.assert_allclose(base_val, val)
示例#21
0
    def test_2(self):
        """ Evaluating the criterion function at the starting values must
        give identical results across implementations.
        """
        max_draws = np.random.randint(10, 100)

        # A random request with parallelism and interpolation switched off;
        # maxfun of zero restricts the run to a single evaluation.
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        init_dict = generate_init(constr)

        respy_obj = RespyCls('test.respy.ini')

        # One simulated dataset is shared by both implementations.
        simulate(respy_obj)

        # Make sure both implementations use identical random draws.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        base_x, base_val = None, None
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            x, val = estimate(respy_obj)

            # Parameters must agree with the first implementation run.
            if base_x is None:
                base_x = x
            np.testing.assert_allclose(base_x, x)

            # The criterion value must agree as well.
            if base_val is None:
                base_val = val
            np.testing.assert_allclose(base_val, val)
示例#22
0
    def test_3(self):
        """ A tiny discount factor and a fully myopic agent must yield the
        same simulated dataset and the same criterion-function value.
        """
        # Random request restricted to a single criterion evaluation.
        constr = dict()
        constr['maxfun'] = 0
        generate_init(constr)

        base_data, base_val = None, None

        # Compare a truly myopic agent with an almost-myopic one.
        for delta in [0.00, 0.000001]:
            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('delta', delta)
            respy_obj.lock()

            simulate(respy_obj)

            # The simulated datasets must coincide across the two runs.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)
            if base_data is None:
                base_data = data_frame.copy()
            assert_frame_equal(base_data, data_frame)

            # The criterion evaluations must coincide as well, up to a
            # small numerical tolerance.
            _, crit_val = estimate(respy_obj)
            if base_val is None:
                base_val = crit_val
            np.testing.assert_allclose(base_val,
                                       crit_val,
                                       rtol=1e-03,
                                       atol=1e-03)
示例#23
0
    def test_3(self):
        """ Simulated data and the criterion evaluation must agree between
        a tiny delta and a fully myopic agent.
        """
        # Limit the random request to a single criterion evaluation.
        constr = dict()
        constr['maxfun'] = 0
        generate_init(constr)

        base_data, base_val = None, None

        # An exactly myopic agent versus an almost-myopic one.
        for delta in [0.00, 0.000001]:
            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('delta', delta)
            respy_obj.lock()

            simulate(respy_obj)

            # Both runs must produce the same simulated dataset.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)
            if base_data is None:
                base_data = data_frame.copy()
            assert_frame_equal(base_data, data_frame)

            # Both runs must produce the same criterion value, up to a
            # small numerical tolerance.
            _, crit_val = estimate(respy_obj)
            if base_val is None:
                base_val = crit_val
            np.testing.assert_allclose(base_val, crit_val, rtol=1e-03, atol=1e-03)
示例#24
0
    def test_5(self):
        """ This test reproduces the results from evaluations of the
        criterion function for previously analyzed scenarios.
        """
        # Locate the vault of stored regression cases for this major
        # Python version.
        version = str(sys.version_info[0])
        fname = 'test_vault_' + version + '.respy.pkl'

        # BUGFIX: close the file handle deterministically instead of
        # leaving it to the garbage collector.
        with open(TEST_RESOURCES_DIR + '/' + fname, 'rb') as infile:
            tests = pkl.load(infile)

        # We want this test to run even when no FORTRAN version is
        # available, so keep drawing until the selected case is runnable.
        while True:
            idx = np.random.randint(0, len(tests))
            init_dict, crit_val = tests[idx]

            version = init_dict['PROGRAM']['version']

            if IS_FORTRAN or version != 'FORTRAN':
                break

        # In the case where no parallelism is available, we need to ensure
        # that the request remains valid. This is fine as the disturbances
        # are aligned across parallel and scalar implementation.
        if not IS_PARALLEL:
            init_dict['PARALLELISM']['flag'] = False
        if not IS_FORTRAN:
            init_dict['PROGRAM']['version'] = 'PYTHON'

        print_init_dict(init_dict)

        respy_obj = RespyCls('test.respy.ini')

        simulate(respy_obj)

        # The stored criterion value must be reproduced exactly.
        _, val = estimate(respy_obj)
        np.testing.assert_almost_equal(val, crit_val)
示例#25
0
    def test_1(self):
        """ Evaluating the criterion function in parallel or scalar mode
        must make no difference.
        """
        # Random FORTRAN request with a bounded optimization budget.
        constr = dict()
        constr['version'] = 'FORTRAN'
        constr['maxfun'] = np.random.randint(0, 50)
        init_dict = generate_random_dict(constr)

        base = None
        for is_parallel in [True, False]:
            # Toggle parallelism in the written initialization file.
            init_dict['PARALLELISM']['flag'] = is_parallel
            print_init_dict(init_dict)

            respy_obj = RespyCls('test.respy.ini')
            respy_obj = simulate(respy_obj)
            _, crit_val = estimate(respy_obj)

            # Both modes must yield the exact same criterion value.
            if base is None:
                base = crit_val
            np.testing.assert_equal(base, crit_val)
示例#26
0
    def test_4(self):
        """ Deterministic model without ambiguity but with interpolation;
        since the model is deterministic, every implementation must deliver
        the identical result without extra effort.
        """
        for version in ['FORTRAN', 'PYTHON']:
            # Solve the fixed specification with the requested version.
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # The first-period expected future value is known exactly.
            emax = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(emax, 88750)

            # As is the criterion-function value.
            _, crit = estimate(respy_obj)
            np.testing.assert_allclose(crit, -1.0)
示例#27
0
    def test_5(self):
        """ Deterministic model without ambiguity but with interpolation;
        as the model is deterministic, all implementations must agree on
        the result without any additional effort.
        """
        for version in ['FORTRAN', 'PYTHON']:
            # Solve the fixed specification with the requested version.
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # The first-period expected future value is known exactly.
            emax = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(emax, 88750)

            # As is the criterion-function value.
            _, crit = estimate(respy_obj)
            np.testing.assert_allclose(crit, -1.0)
示例#28
0
import shutil
import glob
import os

import respy

# Iterate over the three model specifications outlined in Table 1 of the
# paper.
for spec in ['kw_data_one.ini', 'kw_data_two.ini', 'kw_data_three.ini']:

    # Set up the model from its initialization file.
    respy_obj = respy.RespyCls(spec)

    # Simulate the datasets discussed on page 658.
    respy.simulate(respy_obj)

    # Kick off the Monte Carlo estimation. For now we only evaluate the
    # model at the starting values, i.e. maxfun is forced to zero.
    respy_obj.unlock()
    respy_obj.set_attr('maxfun', 0)
    respy_obj.lock()

    respy.estimate(respy_obj)

    # Archive all result files in a per-specification directory for later
    # inspection.
    target = spec.replace('.ini', '')
    os.mkdir(target)
    for fname in glob.glob('*.respy.*'):
        shutil.move(fname, target)
def run(spec_dict, fname):
    """ Run a version of the Monte Carlo exercise.

    The exercise estimates the model three times inside a dedicated working
    directory: at the truth, then a misspecified myopic (static) model, and
    finally a dynamic model started from the static estimates.
    """
    dirname = fname.replace('.ini', '')
    os.mkdir(dirname)
    os.chdir(dirname)

    # We first read in the first specification from the initial paper for our
    # baseline and process the deviations.
    respy_obj = respy.RespyCls(SPEC_DIR + fname)

    respy_obj.unlock()

    respy_obj.set_attr('file_est', '../truth/start/data.respy.dat')

    for key_ in spec_dict.keys():
        respy_obj.set_attr(key_, spec_dict[key_])

    # Parallelism is implied by the number of requested processes.
    if respy_obj.attr['num_procs'] > 1:
        respy_obj.set_attr('is_parallel', True)
    else:
        respy_obj.set_attr('is_parallel', False)

    respy_obj.lock()

    maxfun = respy_obj.get_attr('maxfun')

    # Let us first simulate a baseline sample, store the results for future
    # reference, and start an estimation from the true values.
    x = _run_stage(respy_obj, 'truth', 'Truth', maxfun)

    # Now we will estimate a misspecified model on this dataset assuming that
    # agents are myopic. This will serve as a form of well behaved starting
    # values for the real estimation to follow.
    respy_obj.unlock()
    respy_obj.set_attr('delta', 0.00)
    respy_obj.lock()

    x = _run_stage(respy_obj, 'static', 'Static', maxfun)

    # Using the results from the misspecified model as starting values, we see
    # whether we can obtain the initial values.
    respy_obj.update_model_paras(x)

    respy_obj.unlock()
    respy_obj.set_attr('delta', 0.95)
    respy_obj.lock()

    _run_stage(respy_obj, 'dynamic', 'Dynamic', maxfun)

    os.chdir('../')


def _run_stage(respy_obj, dirname, label, maxfun):
    """ Estimate one stage of the exercise in its own subdirectory, record
    the results, and return the estimated parameter vector.
    """
    os.mkdir(dirname)
    os.chdir(dirname)

    respy_obj.write_out()

    # Simulate at the starting values, estimate, then simulate again at the
    # stopping values.
    simulate_specification(respy_obj, 'start', False)
    x, _ = respy.estimate(respy_obj)
    simulate_specification(respy_obj, 'stop', True, x)

    rmse_start, rmse_stop = get_rmse()
    num_evals, num_steps = get_est_log_info()

    os.chdir('../')

    record_results(label, rmse_start, rmse_stop, num_evals, num_steps, maxfun)

    return x
示例#30
0
#!/usr/bin/env python
"""Run the tutorial example from the online documentation end to end."""

import shutil
import glob
import os

import respy

# Set up the model exactly as specified in the example initialization file.
respy_obj = respy.RespyCls('example.ini')

# Draw a synthetic sample from the model at the initial parameterization.
respy.simulate(respy_obj)

# Evaluate the criterion function once at the starting values. No actual
# optimization takes place as the initialization file sets maxfun to zero.
x, crit_val = respy.estimate(respy_obj)

# Feed the returned parameter vector back into the model and simulate again.
respy_obj.update_model_paras(x)
respy.simulate(respy_obj)

# Collect all RESPY output files in a dedicated directory for inspection.
os.mkdir('example')
for output_file in glob.glob('*.respy.*'):
    shutil.move(output_file, 'example')
示例#31
0


respy_obj = RespyCls('model.respy.ini')
simulate(respy_obj)


# The criterion value must be invariant to the number of processes used in
# the evaluation: run once serially, once in parallel, and compare.
base_crit = None
for num_procs in [1, 2]:

    respy_obj.unlock()
    respy_obj.set_attr('num_procs', num_procs)
    respy_obj.set_attr('is_parallel', (num_procs > 1))
    respy_obj.lock()

    x, crit_val = estimate(respy_obj)

    # The first run fixes the reference value.
    if base_crit is None:
        base_crit = crit_val

    np.testing.assert_equal(crit_val, base_crit)
    print(num_procs, crit_val)
示例#32
0
def add_gradient_information(respy_obj):
    """ This function adds information about the gradient to the information
    files. It is not part of the estimation _modules as it breaks the design
    and requires to carry additional attributes. This results in considerable
    overhead, which appears justified at this point.
    """
    model_paras, is_debug, paras_fixed, derivatives = \
        dist_class_attributes(respy_obj, 'model_paras', 'is_debug',
            'paras_fixed', 'derivatives')

    # Auxiliary objects
    coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
        dist_model_paras(model_paras, is_debug)

    # Construct starting values. The free subset is only needed to determine
    # the number of free parameters.
    x_all_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
            shocks_cholesky, 'all', paras_fixed, is_debug)

    x_free_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
            shocks_cholesky, 'free', paras_fixed, is_debug)

    # Construct auxiliary information
    num_free = len(x_free_start)

    # The information about the gradient is simply added to the original
    # information later. Note that the original file is read before the
    # gradient evaluation. This is required as the information otherwise
    # accounts for the multiple function evaluations during the gradient
    # approximation scheme.
    original_lines = open('est.respy.info', 'r').readlines()
    fmt_ = '{0:<25}{1:>15}\n'
    original_lines[-5] = fmt_.format(*[' Number of Steps', 0])
    original_lines[-3] = fmt_.format(*[' Number of Evaluations', num_free])

    # Approximate gradient by forward finite differences.
    grad, ei = np.zeros((num_free,), float), np.zeros((26,), float)
    dfunc_eps = derivatives[1]

    # Making sure that the criterion is only evaluated at the relevant
    # starting values.
    respy_obj.unlock()
    respy_obj.set_attr('maxfun', 0)
    respy_obj.lock()

    _, f0 = estimate(respy_obj)

    for k, i in enumerate(np.where(np.logical_not(paras_fixed))[0].tolist()):
        ei[i] = 1.0
        d = dfunc_eps * ei
        respy_obj.update_model_paras(x_all_start + d)

        _, f1 = estimate(respy_obj)

        # BUGFIX: the step size sits at position i of the full parameter
        # vector, not at position k of the free subset. Dividing by d[k]
        # divides by zero as soon as an earlier parameter is fixed.
        grad[k] = (f1 - f0) / d[i]
        ei[i] = 0.0

    # BUGFIX: restore the baseline parameterization. The loop above would
    # otherwise leave the last perturbed vector attached to respy_obj.
    respy_obj.update_model_paras(x_all_start)

    # BUGFIX: the computed finite-difference gradient was previously thrown
    # away and replaced by random uniform draws (leftover debugging code).
    grad = grad.tolist()
    norm = np.amax(np.abs(grad))

    # Write out extended information. BUGFIX: the corrected original_lines
    # were never written back before; appending to the stale file retained
    # the step/evaluation counts inflated by the gradient's own estimate()
    # calls, which is exactly what the re-read above was meant to avoid.
    with open('est.respy.info', 'w') as out_file:
        out_file.writelines(original_lines)

        # Insert information about gradient
        out_file.write('\n\n\n\n Gradient\n\n')
        fmt_ = '{0:>15}    {1:>15}\n\n'
        out_file.write(fmt_.format(*['Identifier', 'Start']))
        fmt_ = '{0:>15}    {1:15.4f}\n'

        # Iterate over all candidate values, but only write the free
        # ones to file. This ensures that the identifiers line up.
        for j in range(26):
            if not paras_fixed[j]:
                out_file.write(fmt_.format(*[j, grad.pop(0)]))

        out_file.write('\n')

        # Add value of infinity norm
        out_file.write(fmt_.format(*['Norm', norm]))
        out_file.write('\n\n')
示例#33
0
    def test_1(self):
        """ Testing the equality of an evaluation of the criterion function
        for a random request.
        """
        # Draw the request characteristics. The draws are taken in the same
        # order as before so a seeded random stream is unaffected.
        is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
        is_interpolated = np.random.choice([True, False], p=[0.10, 0.9])
        is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
        max_draws = np.random.randint(10, 100)

        # Generate a random initialization file subject to the constraints.
        constr = {
            'is_deterministic': is_deterministic,
            'flag_parallelism': False,
            'is_myopic': is_myopic,
            'max_draws': max_draws,
            'maxfun': 0,
        }

        init_dict = generate_random_dict(constr)

        # Exercising the interpolation routines is a special case: the number
        # of interpolation points must fall short of the actual number of
        # states per period, which requires constructing the full state
        # space first.
        if is_interpolated:
            # At least three periods are needed to provide enough state
            # points for the interpolation.
            num_periods = np.random.randint(3, 6)
            edu_start = init_dict['EDUCATION']['start']
            edu_max = init_dict['EDUCATION']['max']
            min_idx = min(num_periods, (edu_max - edu_start + 1))

            max_states_period = pyth_create_state_space(num_periods, edu_start,
                edu_max, min_idx)[3]

            # These updates trigger the use of the interpolation code.
            init_dict['BASICS']['periods'] = num_periods
            init_dict['INTERPOLATION']['flag'] = True
            init_dict['INTERPOLATION']['points'] = \
                np.random.randint(10, max_states_period)

        # Write the initialization file to disk.
        print_init_dict(init_dict)

        # Align the implementations by fixing the random components and the
        # interpolation grid.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)
        write_interpolation_grid('test.respy.ini')

        reference_val, reference_data = None, None

        for version in ['PYTHON', 'FORTRAN']:
            respy_obj = RespyCls('test.respy.ini')

            # Switch the implementation while keeping everything else fixed.
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            # Solve the model
            respy_obj = simulate(respy_obj)

            # The simulated datasets must agree across versions.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if reference_data is None:
                reference_data = data_frame.copy()

            assert_frame_equal(reference_data, data_frame)

            # The criterion function evaluations must agree as well.
            _, crit_val = estimate(respy_obj)

            if reference_val is None:
                reference_val = crit_val

            np.testing.assert_allclose(reference_val, crit_val, rtol=1e-05,
                atol=1e-06)

            # The deterministic case pins down the admissible values exactly.
            if constr['is_deterministic']:
                assert crit_val in [-1.0, 0.0]
示例#34
0
import os

# Optionally recompile the package in place before timing the estimation.
if True:
    cwd = os.getcwd()
    os.chdir('../../respy')
    assert os.system(
        './waf distclean; ./waf configure build > allout.txt 2>&1 ') == 0
    os.chdir(cwd)
else:
    print('not compiling')

import sys
import time
from respy import RespyCls, estimate

# Make the package test suite importable as well.
sys.path.insert(
    0,
    '/home/peisenha/Dropbox/business/office/workspace/software/repositories/organizations/restudToolbox/package/respy/tests'
)

print('working PYTHON')
respy_obj = RespyCls('model.respy.ini')

# Time a single estimation run and report the wall clock together with the
# criterion value.
start = time.time()
x, crit_val = estimate(respy_obj)
end = time.time()
print(end - start, crit_val)
示例#35
0
    def test_1(self):
        """ Testing the equality of an evaluation of the criterion function
        for a random request.
        """
        # Draw the request characteristics, keeping the order of random
        # calls unchanged so a seeded stream yields the same request.
        is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
        is_interpolated = np.random.choice([True, False], p=[0.10, 0.9])
        is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
        max_draws = np.random.randint(10, 100)

        # Generate random initialization file
        constr = dict(
            is_deterministic=is_deterministic,
            flag_parallelism=False,
            is_myopic=is_myopic,
            max_draws=max_draws,
            maxfun=0,
        )

        init_dict = generate_random_dict(constr)

        # Triggering the interpolation routines is a special case, as the
        # number of interpolation points needs to be lower than the actual
        # number of states per period. Determining that bound requires
        # constructing the whole state space.
        if is_interpolated:
            # A minimum of three periods is required to provide enough
            # state points.
            num_periods = np.random.randint(3, 6)
            edu_start = init_dict['EDUCATION']['start']
            edu_max = init_dict['EDUCATION']['max']
            min_idx = min(num_periods, (edu_max - edu_start + 1))

            max_states_period = pyth_create_state_space(
                num_periods, edu_start, edu_max, min_idx)[3]

            # These updates force the interpolation code path.
            init_dict['BASICS']['periods'] = num_periods
            init_dict['INTERPOLATION']['flag'] = True
            init_dict['INTERPOLATION']['points'] = \
                np.random.randint(10, max_states_period)

        # Print out the relevant initialization file.
        print_init_dict(init_dict)

        # Fix the random components and the interpolation grid so that all
        # implementations line up.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)
        write_interpolation_grid('test.respy.ini')

        base_val, base_data = None, None

        for version in ['PYTHON', 'FORTRAN']:
            respy_obj = RespyCls('test.respy.ini')

            # Swap the implementation under test.
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            # Solve the model
            respy_obj = simulate(respy_obj)

            # The simulated dataset must be identical across versions.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # So must a single evaluation of the criterion function.
            _, crit_val = estimate(respy_obj)

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val, crit_val, rtol=1e-05,
                atol=1e-06)

            # We know even more for the deterministic case.
            if constr['is_deterministic']:
                assert crit_val in [-1.0, 0.0]
示例#36
0
    def test_2(self):
        """ Compare the solution of simple model against hard-coded results.
        """
        # Solve specified economy
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_second.respy.ini')
        respy_obj = simulate(respy_obj)

        # Distribute class attributes
        systematic = respy_obj.get_attr('periods_payoffs_systematic')
        emax = respy_obj.get_attr('periods_emax')

        # PERIOD 3: Check the systematic payoffs against hand calculations.
        # Each row holds the four alternative-specific payoffs of one state
        # (presumably occupation A, occupation B, education, home — confirm
        # against the payoff construction code).
        vals = [[2.7456010000000000, 07.5383250000000000, -3999.60, 1.140]]
        vals += [[3.0343583944356758, 09.2073308658822519, -3999.60, 1.140]]
        vals += [[3.0343583944356758, 09.2073308658822519, 0000.90, 1.140]]
        vals += [[3.3534846500000000, 11.2458593100000000, 0000.40, 1.140]]
        vals += [[3.5966397255692826, 12.0612761204447200, -3999.60, 1.140]]
        vals += [[3.9749016274947495, 14.7316759204425760, -3999.60, 1.140]]
        vals += [[3.9749016274947495, 14.7316759204425760, 0000.90, 1.140]]
        vals += [[6.2338866585247175, 31.1869581683094590, -3999.60, 1.140]]
        vals += [[3.4556134647626764, 11.5883467192233920, -3999.60, 1.140]]
        vals += [[3.8190435053663370, 14.1540386453758080, -3999.60, 1.140]]
        vals += [[3.8190435053663370, 14.1540386453758080, 0000.90, 1.140]]
        vals += [[4.5267307943142532, 18.5412874597468690, -3999.60, 1.140]]
        vals += [[5.5289614776240041, 27.6603505585167470, -3999.60, 1.140]]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(systematic[2, i, :], val))

        # PERIOD 3: Check expected future values. As there are no
        # random draws, this corresponds to the maximum
        # value in the last period.
        vals = [7.53832493366, 9.20733086588, 9.20733086588, 11.2458593149]
        vals += [12.06127612040, 14.7316759204, 14.7316759204, 31.1869581683]
        vals += [11.58834671922, 14.1540386453, 14.1540386453, 18.5412874597]
        vals += [27.660350558516747]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(emax[2, i], [val]))

        # PERIOD 2: Check the systematic payoffs against hand calculations.
        vals = [[2.7456010150169163, 07.5383249336619222, -3999.60, 1.140]]
        vals += [[3.0343583944356758, 09.2073308658822519, 0000.90, 1.140]]
        vals += [[3.5966397255692826, 12.0612761204447200, -3999.60, 1.140]]
        vals += [[3.4556134647626764, 11.5883467192233920, -3999.60, 1.140]]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(systematic[1, i, :], val))

        # PERIOD 2: Check expected future values.
        vals = [18.9965372481, 23.2024229903, 41.6888863803, 29.7329464954]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(emax[1, i], [val]))

        # PERIOD 1: Check the systematic payoffs against hand calculations.
        vals = [[2.7456010150169163, 7.5383249336619222, 0.90, 1.140]]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(systematic[0, i, :], val))

        # PERIOD 1 Check expected future values.
        vals = [47.142766995]
        for i, val in enumerate(vals):
            (np.testing.assert_allclose(emax[0, 0], [val]))

        # Assess evaluation: the criterion value at the true parameters of
        # this deterministic specification must be exactly zero.
        _, val = estimate(respy_obj)
        np.testing.assert_allclose(val, 0.00)
示例#37
0
# for the execution of the script.
cwd = os.getcwd()
os.chdir(PACKAGE_DIR + '/respy')
subprocess.check_call(python_exec + ' waf distclean', shell=True)
subprocess.check_call(python_exec + ' waf configure build --debug', shell=True)
os.chdir(cwd)

# Import package. The late import is required as the compilation needs to
# take place first.
from respy.python.shared.shared_constants import TEST_RESOURCES_DIR
from respy.python.shared.shared_auxiliary import print_init_dict

from respy import RespyCls
from respy import simulate
from respy import estimate

############################################################################
# RUN
############################################################################
fname = 'test_vault_' + str(PYTHON_VERSION) + '.respy.pkl'

# BUGFIX: the file handle was previously leaked by pkl.load(open(...)).
with open(TEST_RESOURCES_DIR + '/' + fname, 'rb') as vault_file:
    tests = pkl.load(vault_file)

# Each stored test pins the criterion value for one initialization file.
for idx, (init_dict, crit_val) in enumerate(tests[:num_tests]):
    print('\n Checking Test ', idx, 'with version ', PYTHON_VERSION)

    print_init_dict(init_dict)
    respy_obj = RespyCls('test.respy.ini')
    simulate(respy_obj)
    np.testing.assert_almost_equal(estimate(respy_obj)[1], crit_val)
示例#38
0
def run(spec_dict, fname):
    """ Run a version of the Monte Carlo exercise.
    """
    dirname = fname.replace('.ini', '')
    os.mkdir(dirname)
    os.chdir(dirname)

    # The first specification from the initial paper serves as the baseline;
    # the requested deviations are applied on top of it.
    respy_obj = respy.RespyCls(SPEC_DIR + fname)

    respy_obj.unlock()

    respy_obj.set_attr('file_est', '../truth/start/data.respy.dat')

    for key_, value in spec_dict.items():
        respy_obj.set_attr(key_, value)

    # Parallel execution is requested exactly when more than one process
    # is available.
    respy_obj.set_attr('is_parallel', respy_obj.attr['num_procs'] > 1)

    respy_obj.lock()

    maxfun = respy_obj.get_attr('maxfun')

    def _run_stage(label, stage_dir):
        """ Simulate, estimate and record results inside *stage_dir*;
        returns the estimated parameter vector. """
        os.mkdir(stage_dir)
        os.chdir(stage_dir)
        respy_obj.write_out()

        simulate_specification(respy_obj, 'start', False)
        x, _ = respy.estimate(respy_obj)
        simulate_specification(respy_obj, 'stop', True, x)

        rmse_start, rmse_stop = get_rmse()
        num_evals, num_steps = get_est_log_info()

        os.chdir('../')

        record_results(label, rmse_start, rmse_stop, num_evals, num_steps,
                       maxfun)

        return x

    # Simulate a baseline sample, store the results for future reference,
    # and start an estimation from the true values.
    _run_stage('Truth', 'truth')

    # Estimate a misspecified model on this dataset assuming that agents
    # are myopic. This serves as a form of well behaved starting values
    # for the real estimation to follow.
    respy_obj.unlock()
    respy_obj.set_attr('delta', 0.00)
    respy_obj.lock()

    x = _run_stage('Static', 'static')

    # Using the results from the misspecified model as starting values, we
    # see whether we can obtain the initial values.
    respy_obj.update_model_paras(x)

    respy_obj.unlock()
    respy_obj.set_attr('delta', 0.95)
    respy_obj.lock()

    _run_stage('Dynamic', 'dynamic')

    os.chdir('../')
示例#39
0
def add_gradient_information(respy_obj):
    """ This function adds information about the gradient to the information
    files. It is not part of the estimation _modules as it breaks the design
    and requires to carry additional attributes. This results in considerable
    overhead, which appears justified at this point.
    """
    model_paras, is_debug, paras_fixed, derivatives = \
        dist_class_attributes(respy_obj, 'model_paras', 'is_debug',
            'paras_fixed', 'derivatives')

    # Auxiliary objects
    coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
        dist_model_paras(model_paras, is_debug)

    # Construct starting values. The free subset is only needed to determine
    # the number of free parameters.
    x_all_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
                                  shocks_cholesky, 'all', paras_fixed,
                                  is_debug)

    x_free_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
                                   shocks_cholesky, 'free', paras_fixed,
                                   is_debug)

    # Construct auxiliary information
    num_free = len(x_free_start)

    # The information about the gradient is simply added to the original
    # information later. Note that the original file is read before the
    # gradient evaluation. This is required as the information otherwise
    # accounts for the multiple function evaluations during the gradient
    # approximation scheme.
    original_lines = open('est.respy.info', 'r').readlines()
    fmt_ = '{0:<25}{1:>15}\n'
    original_lines[-5] = fmt_.format(*[' Number of Steps', 0])
    original_lines[-3] = fmt_.format(*[' Number of Evaluations', num_free])

    # Approximate gradient by forward finite differences.
    grad, ei = np.zeros((num_free, ), float), np.zeros((26, ), float)
    dfunc_eps = derivatives[1]

    # Making sure that the criterion is only evaluated at the relevant
    # starting values.
    respy_obj.unlock()
    respy_obj.set_attr('maxfun', 0)
    respy_obj.lock()

    _, f0 = estimate(respy_obj)

    for k, i in enumerate(np.where(np.logical_not(paras_fixed))[0].tolist()):
        ei[i] = 1.0
        d = dfunc_eps * ei
        respy_obj.update_model_paras(x_all_start + d)

        _, f1 = estimate(respy_obj)

        # BUGFIX: the step size sits at position i of the full parameter
        # vector, not at position k of the free subset. Dividing by d[k]
        # divides by zero as soon as an earlier parameter is fixed.
        grad[k] = (f1 - f0) / d[i]
        ei[i] = 0.0

    # BUGFIX: restore the baseline parameterization. The loop above would
    # otherwise leave the last perturbed vector attached to respy_obj.
    respy_obj.update_model_paras(x_all_start)

    # BUGFIX: the computed finite-difference gradient was previously thrown
    # away and replaced by random uniform draws (leftover debugging code).
    grad = grad.tolist()
    norm = np.amax(np.abs(grad))

    # Write out extended information. BUGFIX: the corrected original_lines
    # were never written back before; appending to the stale file retained
    # the step/evaluation counts inflated by the gradient's own estimate()
    # calls, which is exactly what the re-read above was meant to avoid.
    with open('est.respy.info', 'w') as out_file:
        out_file.writelines(original_lines)

        # Insert information about gradient
        out_file.write('\n\n\n\n Gradient\n\n')
        fmt_ = '{0:>15}    {1:>15}\n\n'
        out_file.write(fmt_.format(*['Identifier', 'Start']))
        fmt_ = '{0:>15}    {1:15.4f}\n'

        # Iterate over all candidate values, but only write the free
        # ones to file. This ensures that the identifiers line up.
        for j in range(26):
            if not paras_fixed[j]:
                out_file.write(fmt_.format(*[j, grad.pop(0)]))

        out_file.write('\n')

        # Add value of infinity norm
        out_file.write(fmt_.format(*['Norm', norm]))
        out_file.write('\n\n')