Example #1
    def test_3(self):
        """ Test the solution of deterministic model with ambiguity and
        interpolation. This test has the same result as in the absence of
        random variation in payoffs, it does not matter whether the
        environment is ambiguous or not.
        """
        # Solve specified economy
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Assess expected future value
            val = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(val, 88750)

            # Assess evaluation
            _, val = estimate(respy_obj)
            np.testing.assert_allclose(val, -1.0)
    def test_9(self):
        """ This test just locks in the evaluation of the criterion function for the
        original Keane & Wolpin data. We also create additional initialization
        files that include numerous types and initial conditions.

        """
        # This ensures that the experience effect is taken care of properly.
        open(".restud.respy.scratch", "w").close()

        kw_spec, result = random.choice([
            ("kw_data_one", 10.45950941513551),
            ("kw_data_two", 45.04552402391903),
            ("kw_data_three", 74.28253652773714),
            ("kw_data_one_types", 9.098738585839529),
            ("kw_data_one_initial", 7.965979149372883),
        ])

        base_path = TEST_RESOURCES_DIR / kw_spec

        # Evaluate criterion function at true values.
        respy_obj = RespyCls(base_path.with_suffix(".csv"),
                             base_path.with_suffix(".json"))

        respy_obj.unlock()
        respy_obj.set_attr("maxfun", 0)
        respy_obj.lock()

        simulate_observed(respy_obj, is_missings=False)

        _, val = respy_obj.fit()
        np.testing.assert_allclose(val, result)
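
# A minimal sketch of the pattern used in the test above: setting "maxfun" to
# zero turns respy_obj.fit() into a single evaluation of the criterion function
# at the current parameter vector instead of a full optimization. The file
# names below are placeholders, not part of the original test resources.
respy_obj = RespyCls("model.respy.csv", "model.respy.json")

respy_obj.unlock()
respy_obj.set_attr("maxfun", 0)
respy_obj.lock()

simulate_observed(respy_obj)

# With maxfun set to zero, the returned value is simply the criterion
# evaluated at the starting values.
_, crit_val = respy_obj.fit()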
Example #3
    def test_3(self):
        """ This test just locks in the evaluation of the criterion function
        for the original Keane & Wolpin data.
        """
        # Sample one task
        resources = ['kw_data_one.ini', 'kw_data_two.ini', 'kw_data_three.ini']
        fname = np.random.choice(resources)

        # Select expected result
        rslt = None
        if 'one' in fname:
            rslt = 0.261487735867433
        elif 'two' in fname:
            rslt = 1.126138097174159
        elif 'three' in fname:
            rslt = 1.895699121131644

        # Evaluate criterion function at true values.
        respy_obj = RespyCls(TEST_RESOURCES_DIR + '/' + fname)

        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

        simulate(respy_obj)
        _, val = estimate(respy_obj)
        np.testing.assert_allclose(val, rslt)
    def test_2(self):
        """ If there is no random variation in rewards then the number of draws to simulate the
        expected future value should have no effect.
        """
        params_spec, options_spec = generate_random_model(deterministic=True)

        # Initialize auxiliary objects
        base = None

        for _ in range(2):
            num_draws_emax = np.random.randint(1, 100)
            respy_obj = RespyCls(params_spec, options_spec)
            respy_obj.unlock()
            respy_obj.set_attr("num_draws_emax", num_draws_emax)
            respy_obj.lock()
            respy_obj = simulate_observed(respy_obj)
            periods_emax = respy_obj.get_attr("periods_emax")

            if base is None:
                base = periods_emax.copy()

            diff = np.max(
                abs(
                    np.ma.masked_invalid(base) -
                    np.ma.masked_invalid(periods_emax)))
            np.testing.assert_almost_equal(diff, 0.0)
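
# A small, self-contained illustration of the masked-array comparison used
# above: np.ma.masked_invalid masks NaN and inf entries, so states that were
# never computed (and are stored as invalid values) drop out of the maximum
# absolute difference.
import numpy as np

base = np.array([1.0, np.nan, 3.0])
other = np.array([1.0, np.nan, 3.0])

diff = np.max(abs(np.ma.masked_invalid(base) - np.ma.masked_invalid(other)))
np.testing.assert_almost_equal(diff, 0.0)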
Example #5
    def test_4(self):
        """ Test the solution of deterministic model with ambiguity and
        interpolation. This test has the same result as in the absence of
        random variation in payoffs, it does not matter whether the
        environment is ambiguous or not.
        """
        # Solve specified economy
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Assess expected future value
            val = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(val, 88750)

            # Assess evaluation
            _, val = estimate(respy_obj)
            np.testing.assert_allclose(val, -1.0)
Example #6
    def test_5(self):
        """ Test the scripts.
        """
        # Constraints that ensure that two alternative initialization files
        # can be used for the same simulated data.
        for _ in range(10):
            constr = dict()
            constr['periods'] = np.random.randint(1, 4)
            constr['agents'] = np.random.randint(5, 100)
            constr['is_estimation'] = True
            constr['edu'] = (7, 15)

            # Simulate a dataset
            generate_init(constr)
            respy_obj = RespyCls('test.respy.ini')
            simulate(respy_obj)

            # Create output to process a baseline.
            respy_obj.unlock()
            respy_obj.set_attr('maxfun', 0)
            respy_obj.lock()

            estimate(respy_obj)

            # Potentially evaluate at different points.
            generate_init(constr)

            init_file = 'test.respy.ini'
            file_sim = 'sim.respy.dat'

            gradient = np.random.choice([True, False])
            single = np.random.choice([True, False])
            resume = np.random.choice([True, False])
            update = np.random.choice([True, False])

            action = np.random.choice(['fix', 'free', 'value'])
            num_draws = np.random.randint(1, 20)

            # The set of identifiers is a little complicated as we only allow
            # sampling of the diagonal terms of the covariance matrix. Otherwise,
            # we sometimes run into the problem of very ill-conditioned matrices,
            # resulting in a failed Cholesky decomposition.
            set_ = list(range(16)) + [16, 18, 21, 25]

            identifiers = np.random.choice(set_, num_draws, replace=False)
            values = np.random.uniform(size=num_draws)

            scripts_estimate(resume, single, init_file, gradient)
            scripts_update(init_file)

            # The error can occur as the RESPY package is actually running an
            # estimation step that can result in very ill-conditioned covariance
            # matrices.
            try:
                scripts_simulate(update, init_file, file_sim, None)
                scripts_modify(identifiers, values, action, init_file)
            except np.linalg.linalg.LinAlgError:
                pass
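
# A short illustration of the failure mode mentioned in the comments above,
# assuming nothing beyond NumPy: the Cholesky decomposition raises
# np.linalg.LinAlgError whenever the (covariance) matrix is not positive
# definite, which is why the test only samples diagonal terms and catches the
# error around the simulation step.
import numpy as np

cov = np.array([[1.0, 2.0],
                [2.0, 1.0]])  # not positive definite

try:
    np.linalg.cholesky(cov)
except np.linalg.LinAlgError:
    print('Cholesky decomposition failed')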
Example #7
    def test_5(self):
        """ Test the scripts.
        """
        # Constraints that ensure that two alternative initialization files
        # can be used for the same simulated data.
        for _ in range(10):
            constr = dict()
            constr['periods'] = np.random.randint(1, 4)
            constr['agents'] = np.random.randint(5, 100)
            constr['is_estimation'] = True
            constr['edu'] = (7, 15)

            # Simulate a dataset
            generate_init(constr)
            respy_obj = RespyCls('test.respy.ini')
            simulate(respy_obj)

            # Create output to process a baseline.
            respy_obj.unlock()
            respy_obj.set_attr('maxfun', 0)
            respy_obj.lock()

            estimate(respy_obj)

            # Potentially evaluate at different points.
            generate_init(constr)

            init_file = 'test.respy.ini'
            file_sim = 'sim.respy.dat'

            gradient = np.random.choice([True, False])
            single = np.random.choice([True, False])
            resume = np.random.choice([True, False])
            update = np.random.choice([True, False])

            action = np.random.choice(['fix', 'free', 'value'])
            num_draws = np.random.randint(1, 20)

            # The set of identifiers is a little complicated as we only allow
            # sampling of the diagonal terms of the covariance matrix. Otherwise,
            # we sometimes run into the problem of very ill-conditioned matrices,
            # resulting in a failed Cholesky decomposition.
            set_ = list(range(16)) + [16, 18, 21, 25]

            identifiers = np.random.choice(set_, num_draws, replace=False)
            values = np.random.uniform(size=num_draws)

            scripts_estimate(resume, single, init_file, gradient)
            scripts_update(init_file)

            # The error can occur as the RESPY package is actually running an
            # estimation step that can result in very ill-conditioned covariance
            # matrices.
            try:
                scripts_simulate(update, init_file, file_sim, None)
                scripts_modify(identifiers, values, action, init_file)
            except np.linalg.linalg.LinAlgError:
                pass
Example #8
    def test_6(self):
        """ This test ensures that the logging looks exactly the same for the
        different versions.
        """

        max_draws = np.random.randint(10, 300)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Iterate over alternative implementations
        base_sol_log, base_est_info_log, base_est_log = None, None, None
        base_sim_log = None

        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            simulate(respy_obj)

            estimate(respy_obj)

            # Check for identical logging
            if base_sol_log is None:
                base_sol_log = open('sol.respy.log', 'r').read()
            assert open('sol.respy.log', 'r').read() == base_sol_log

            # Check for identical logging
            if base_sim_log is None:
                base_sim_log = open('sim.respy.log', 'r').read()
            assert open('sim.respy.log', 'r').read() == base_sim_log

            if base_est_info_log is None:
                base_est_info_log = open('est.respy.info', 'r').read()
            assert open('est.respy.info', 'r').read() == base_est_info_log

            if base_est_log is None:
                base_est_log = open('est.respy.log', 'r').readlines()
            compare_est_log(base_est_log)
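
# The log comparison above follows a simple baseline pattern, sketched here in
# isolation with a plain string standing in for the respy log files: the first
# implementation establishes the baseline and every later one must reproduce
# it exactly.
base = None
for version in ['FORTRAN', 'PYTHON']:
    content = 'identical log output'  # stands in for open('sol.respy.log').read()
    if base is None:
        base = content
    assert content == base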
Example #9
    def test_2(self):
        """Ensure that the evaluation of the criterion is equal across versions."""
        max_draws = np.random.randint(10, 100)

        # It seems to be important that max_draws and max_agents are the same
        # number because otherwise some functions that read draws from a file
        # to ensure compatibility of the Fortran and Python versions won't work.
        bound_constr = {"max_draws": max_draws, "max_agents": max_draws}

        point_constr = {
            "interpolation": {"flag": False},
            "program": {"procs": 1, "threads": 1, "version": "python"},
            "estimation": {"maxfun": 0},
        }

        params_spec, options_spec = generate_random_model(
            point_constr=point_constr, bound_constr=bound_constr
        )
        respy_obj = RespyCls(params_spec, options_spec)

        num_agents_sim, optim_paras = dist_class_attributes(
            respy_obj, "num_agents_sim", "optim_paras"
        )

        type_shares = optim_paras["type_shares"]

        # Simulate a dataset
        simulate_observed(respy_obj)

        # Iterate over alternative implementations
        base_x, base_val = None, None

        num_periods = options_spec["num_periods"]

        write_draws(num_periods, max_draws)
        write_types(type_shares, num_agents_sim)

        for version in ["python", "fortran"]:

            respy_obj.unlock()

            respy_obj.set_attr("version", version)

            respy_obj.lock()

            x, val = respy_obj.fit()

            # Check for the returned parameters.
            if base_x is None:
                base_x = x
            np.testing.assert_allclose(base_x, x)

            # Check for the value of the criterion function.
            if base_val is None:
                base_val = val
            np.testing.assert_allclose(base_val, val)
Example #10
    def test_5(self):
        """ This test ensures that the logging looks exactly the same for the
        different versions.
        """

        max_draws = np.random.randint(10, 300)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Iterate over alternative implementations
        base_sol_log, base_est_info_log, base_est_log = None, None, None
        base_sim_log = None

        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            simulate(respy_obj)

            estimate(respy_obj)

            # Check for identical logging
            if base_sol_log is None:
                base_sol_log = open('sol.respy.log', 'r').read()
            assert open('sol.respy.log', 'r').read() == base_sol_log

            # Check for identical logging
            if base_sim_log is None:
                base_sim_log = open('sim.respy.log', 'r').read()
            assert open('sim.respy.log', 'r').read() == base_sim_log

            if base_est_info_log is None:
                base_est_info_log = open('est.respy.info', 'r').read()
            assert open('est.respy.info', 'r').read() == base_est_info_log

            if base_est_log is None:
                base_est_log = open('est.respy.log', 'r').readlines()
            compare_est_log(base_est_log)
def scripts_simulate(init_file, file_sim):
    """ Wrapper for the simulation.
    """
    respy_obj = RespyCls(init_file)

    # Update file for output.
    if file_sim is not None:
        respy_obj.unlock()
        respy_obj.set_attr("file_sim", file_sim)
        respy_obj.lock()

    # Simulate a dataset.
    respy_obj.simulate()
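
# A hedged usage sketch for the wrapper above; the file names are placeholders
# and not part of the original package resources.
scripts_simulate("model.respy.ini", "sim.respy.dat")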
    def test_3(self):
        """ Testing whether the a simulated dataset and the evaluation of the criterion function
        are the same for a tiny delta and a myopic agent.
        """
        constr = {"estimation": {"maxfun": 0}}
        params_spec, options_spec = generate_random_model(point_constr=constr,
                                                          myopic=True)
        respy_obj = RespyCls(params_spec, options_spec)

        optim_paras, num_agents_sim, edu_spec = dist_class_attributes(
            respy_obj, "optim_paras", "num_agents_sim", "edu_spec")

        write_types(optim_paras["type_shares"], num_agents_sim)
        write_edu_start(edu_spec, num_agents_sim)
        write_lagged_start(num_agents_sim)

        # Iterate over alternative discount rates.
        base_data, base_val = None, None

        for delta in [0.00, 0.000001]:

            respy_obj = RespyCls(params_spec, options_spec)

            respy_obj.unlock()

            respy_obj.attr["optim_paras"]["delta"] = np.array([delta])

            respy_obj.lock()

            simulate_observed(respy_obj)

            # This part checks the equality of the simulated dataset across the
            # alternative discount rates.
            data_frame = pd.read_csv("data.respy.dat", delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the criterion function.
            _, crit_val = respy_obj.fit()

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val,
                                       crit_val,
                                       rtol=1e-03,
                                       atol=1e-03)
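
# A back-of-the-envelope illustration of the point of the test above: the
# continuation value is scaled by delta, so the value of a choice is (almost)
# unchanged between delta = 0.0 and delta = 1e-6 for a myopic agent.
immediate_reward, continuation_value = 10.0, 500.0
for delta in [0.00, 0.000001]:
    print(delta, immediate_reward + delta * continuation_value)
# 0.0   -> 10.0
# 1e-06 -> 10.0005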
Example #13
    def test_2(self):
        """ This test ensures that the evaluation of the criterion function
        at the starting value is identical between the different versions.
        """

        max_draws = np.random.randint(10, 100)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Simulate a dataset
        simulate(respy_obj)

        # Iterate over alternative implementations
        base_x, base_val = None, None

        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            x, val = estimate(respy_obj)

            # Check for the returned parameters.
            if base_x is None:
                base_x = x
            np.testing.assert_allclose(base_x, x)

            # Check for the value of the criterion function.
            if base_val is None:
                base_val = val
            np.testing.assert_allclose(base_val, val)
Example #14
    def test_2(self):
        """ This test ensures that the evaluation of the criterion function
        at the starting value is identical between the different versions.
        """

        max_draws = np.random.randint(10, 100)

        # Generate random initialization file
        constr = dict()
        constr['flag_parallelism'] = False
        constr['max_draws'] = max_draws
        constr['flag_interpolation'] = False
        constr['maxfun'] = 0

        # Generate random initialization file
        init_dict = generate_init(constr)

        # Perform toolbox actions
        respy_obj = RespyCls('test.respy.ini')

        # Simulate a dataset
        simulate(respy_obj)

        # Iterate over alternative implementations
        base_x, base_val = None, None

        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)

        for version in ['FORTRAN', 'PYTHON']:

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            x, val = estimate(respy_obj)

            # Check for the returned parameters.
            if base_x is None:
                base_x = x
            np.testing.assert_allclose(base_x, x)

            # Check for the value of the criterion function.
            if base_val is None:
                base_val = val
            np.testing.assert_allclose(base_val, val)
Example #15
    def test_3(self):
        """ Testing whether the a simulated dataset and the evaluation of the
        criterion function are the same for a tiny delta and a myopic agent.
        """

        # Generate random initialization dictionary
        constr = dict()
        constr['maxfun'] = 0

        generate_init(constr)

        # Iterate over alternative discount rates.
        base_data, base_val = None, None

        for delta in [0.00, 0.000001]:

            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('delta', delta)

            respy_obj.lock()

            simulate(respy_obj)

            # This part checks the equality of the simulated dataset across the
            # alternative discount rates.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the
            # criterion function.
            _, crit_val = estimate(respy_obj)

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val,
                                       crit_val,
                                       rtol=1e-03,
                                       atol=1e-03)
Example #16
    def test_2(self):
        """ If there is no random variation in payoffs then the number of
        draws to simulate the expected future value should have no effect.
        """
        # Generate constraints
        constr = dict()
        constr['is_deterministic'] = True

        # Generate random initialization file
        generate_init(constr)

        # Initialize auxiliary objects
        base = None

        for _ in range(2):

            # Draw a random number of draws for
            # expected future value calculations.
            num_draws_emax = np.random.randint(1, 100)

            # Perform toolbox actions
            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('num_draws_emax', num_draws_emax)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Distribute class attributes
            periods_emax = respy_obj.get_attr('periods_emax')

            if base is None:
                base = periods_emax.copy()

            # Statistic
            diff = np.max(
                abs(
                    np.ma.masked_invalid(base) -
                    np.ma.masked_invalid(periods_emax)))

            # Checks
            assert (np.isfinite(diff))
            assert (diff < 10e-10)
Example #17
    def test_2(self):
        """ If there is no random variation in payoffs then the number of
        draws to simulate the expected future value should have no effect.
        """
        # Generate constraints
        constr = dict()
        constr['is_deterministic'] = True

        # Generate random initialization file
        generate_init(constr)

        # Initialize auxiliary objects
        base = None

        for _ in range(2):

            # Draw a random number of draws for
            # expected future value calculations.
            num_draws_emax = np.random.randint(1, 100)

            # Perform toolbox actions
            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('num_draws_emax', num_draws_emax)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Distribute class attributes
            periods_emax = respy_obj.get_attr('periods_emax')

            if base is None:
                base = periods_emax.copy()

            # Statistic
            diff = np.max(
                abs(
                    np.ma.masked_invalid(base) -
                    np.ma.masked_invalid(periods_emax)))

            # Checks
            assert (np.isfinite(diff))
            assert (diff < 10e-10)
    def test_5(self):
        """ Test the scripts.
        """
        # Constraints that ensure that two alternative initialization files can be used
        # for the same simulated data.
        for _ in range(10):
            num_agents = np.random.randint(5, 100)
            constr = {
                "simulation": {
                    "agents": num_agents
                },
                "num_periods": np.random.randint(1, 4),
                "edu_spec": {
                    "start": [7],
                    "max": 15,
                    "share": [1.0]
                },
                "estimation": {
                    "maxfun": 0,
                    "agents": num_agents
                },
            }
            # Simulate a dataset
            params_spec, options_spec = generate_random_model(
                point_constr=constr)
            respy_obj = RespyCls(params_spec, options_spec)
            simulate_observed(respy_obj)

            # Create output to process a baseline.
            respy_obj.unlock()
            respy_obj.set_attr("maxfun", 0)
            respy_obj.lock()

            respy_obj.fit()

            # Potentially evaluate at different points.
            params_spec, options_spec = generate_random_model(
                point_constr=constr)
            respy_obj = RespyCls(params_spec, options_spec)

            single = np.random.choice([True, False])

            scripts_check("estimate", respy_obj)
            scripts_estimate(single, respy_obj)
Example #19
    def test_3(self):
        """ Testing whether the a simulated dataset and the evaluation of the
        criterion function are the same for a tiny delta and a myopic agent.
        """

        # Generate random initialization dictionary
        constr = dict()
        constr['maxfun'] = 0

        generate_init(constr)

        # Iterate over alternative discount rates.
        base_data, base_val = None, None

        for delta in [0.00, 0.000001]:

            respy_obj = RespyCls('test.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('delta', delta)

            respy_obj.lock()

            simulate(respy_obj)

            # This part checks the equality of the simulated dataset across the
            # alternative discount rates.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the
            # criterion function.
            _, crit_val = estimate(respy_obj)

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val, crit_val, rtol=1e-03, atol=1e-03)
Example #20
def scripts_simulate(update, init_file, file_sim, solved):
    """ Wrapper for the estimation.
    """
    # Read in baseline model specification.
    if solved is not None:
        respy_obj = pkl.load(open(solved, 'rb'))
    else:
        respy_obj = RespyCls(init_file)

    # Update parametrization of the model if resuming from a previous
    # estimation run.
    if update:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # Update file for output.
    if file_sim is not None:
        respy_obj.unlock()
        respy_obj.set_attr('file_sim', file_sim)
        respy_obj.lock()

    # Simulate a dataset.
    simulate(respy_obj)
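
# A hedged usage sketch for the wrapper above; the file names are placeholders.
# Passing solved=None reads the initialization file, while update=True would
# replace the model parameters with the step values of a previous estimation
# run via get_est_info(), as shown in the function body.
scripts_simulate(False, 'model.respy.ini', 'sim.respy.dat', None)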
Example #21
def scripts_simulate(update, init_file, file_sim, solved):
    """ Wrapper for the estimation.
    """
    # Read in baseline model specification.
    if solved is not None:
        respy_obj = pkl.load(open(solved, 'rb'))
    else:
        respy_obj = RespyCls(init_file)

    # Update parametrization of the model if resuming from a previous
    # estimation run.
    if update:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # Update file for output.
    if file_sim is not None:
        respy_obj.unlock()
        respy_obj.set_attr('file_sim', file_sim)
        respy_obj.lock()

    # Simulate a dataset.
    simulate(respy_obj)
Example #22
def scripts_estimate(resume, single, init_file, gradient):
    """ Wrapper for the estimation.
    """
    # Read in baseline model specification.
    respy_obj = RespyCls(init_file)

    # Update parametrization of the model if resuming from a previous
    # estimation run.
    if resume:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # Set maximum iteration count when only an evaluation of the criterion
    # function is requested.
    if single:
        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

    # Optimize the criterion function.
    estimate(respy_obj)

    if gradient:
        add_gradient_information(respy_obj)
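
# A hedged usage sketch for the wrapper above; the initialization file name is
# a placeholder. With single=True, maxfun is capped at zero so only a single
# evaluation of the criterion function is performed.
scripts_estimate(resume=False, single=True, init_file='model.respy.ini',
                 gradient=False)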
Example #23
def scripts_estimate(resume, single, init_file, gradient):
    """ Wrapper for the estimation.
    """
    # Read in baseline model specification.
    respy_obj = RespyCls(init_file)

    # Update parametrization of the model if resuming from a previous
    # estimation run.
    if resume:
        respy_obj.update_model_paras(get_est_info()['paras_step'])

    # Set maximum iteration count when only an evaluation of the criterion
    # function is requested.
    if single:
        respy_obj.unlock()
        respy_obj.set_attr('maxfun', 0)
        respy_obj.lock()

    # Optimize the criterion function.
    estimate(respy_obj)

    if gradient:
        add_gradient_information(respy_obj)
Example #24
    def test_4(self):
        """ Test the solution of deterministic model without ambiguity,
        but with interpolation. As a deterministic model is requested,
        all versions should yield the same result without any additional effort.
        """
        # Solve specified economy
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Assess expected future value
            val = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(val, 88750)

            # Assess evaluation
            _, val = estimate(respy_obj)
            np.testing.assert_allclose(val, -1.0)
Example #25
    def test_5(self):
        """ Test the solution of deterministic model without ambiguity,
        but with interpolation. As a deterministic model is requested,
        all versions should yield the same result without any additional effort.
        """
        # Solve specified economy
        for version in ['FORTRAN', 'PYTHON']:
            respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')

            respy_obj.unlock()

            respy_obj.set_attr('version', version)

            respy_obj.lock()

            respy_obj = simulate(respy_obj)

            # Assess expected future value
            val = respy_obj.get_attr('periods_emax')[0, :1]
            np.testing.assert_allclose(val, 88750)

            # Assess evaluation
            _, val = estimate(respy_obj)
            np.testing.assert_allclose(val, -1.0)
Example #26
    def test_3(self):
        """Ensure that the log looks exactly the same for different versions."""
        max_draws = np.random.randint(10, 100)

        bound_constr = {"max_draws": max_draws, "max_agents": max_draws}

        point_constr = {
            "interpolation": {"flag": False},
            "program": {"procs": 1, "threads": 1, "version": "python"},
            "estimation": {"maxfun": 0},
        }

        params_spec, options_spec = generate_random_model(
            point_constr=point_constr, bound_constr=bound_constr
        )
        respy_obj = RespyCls(params_spec, options_spec)

        num_agents_sim, optim_paras, file_sim = dist_class_attributes(
            respy_obj, "num_agents_sim", "optim_paras", "file_sim"
        )

        # Iterate over alternative implementations
        base_sol_log, base_est_info, base_est_log = None, None, None
        base_sim_log = None

        type_shares = respy_obj.attr["optim_paras"]["type_shares"]
        num_periods = options_spec["num_periods"]

        edu_spec = options_spec["edu_spec"]

        write_draws(num_periods, max_draws)
        write_types(type_shares, num_agents_sim)
        write_edu_start(edu_spec, num_agents_sim)
        write_lagged_start(num_agents_sim)

        for version in ["fortran", "python"]:

            respy_obj.unlock()

            respy_obj.set_attr("version", version)

            respy_obj.lock()

            simulate_observed(respy_obj)

            # Check for identical logging
            fname = file_sim + ".respy.sol"
            if base_sol_log is None:
                base_sol_log = open(fname, "r").read()
            assert open(fname, "r").read() == base_sol_log

            # Check for identical logging
            fname = file_sim + ".respy.sim"
            if base_sim_log is None:
                base_sim_log = open(fname, "r").read()
            assert open(fname, "r").read() == base_sim_log

            respy_obj.fit()

            if base_est_info is None:
                base_est_info = open("est.respy.info", "r").read()
            assert open("est.respy.info", "r").read() == base_est_info

            if base_est_log is None:
                base_est_log = open("est.respy.log", "r").readlines()
            compare_est_log(base_est_log)
Example #27
    def test_1(self):
        """ Testing the equality of an evaluation of the criterion function for a random
        request.
        """
        # Run evaluation for multiple random requests.
        is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
        is_interpolated = bool(np.random.choice([True, False], p=[0.10, 0.9]))
        is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
        max_draws = np.random.randint(11, 100)
        num_agents = np.random.randint(10, max_draws)

        bound_constr = {"max_draws": max_draws}
        point_constr = {
            "interpolation": {"flag": is_interpolated},
            "program": {"procs": 1, "threads": 1, "version": "python"},
            "estimation": {"maxfun": 0, "agents": num_agents},
            "simulation": {"agents": num_agents},
            "num_periods": np.random.randint(1, 5),
        }

        num_types = np.random.randint(2, 5)

        if is_interpolated:
            point_constr["num_periods"] = np.random.randint(3, 5)

        params_spec, options_spec = generate_random_model(
            bound_constr=bound_constr,
            point_constr=point_constr,
            deterministic=is_deterministic,
            myopic=is_myopic,
            num_types=num_types,
        )

        edu_spec = options_spec["edu_spec"]
        num_periods = point_constr["num_periods"]

        # The use of the interpolation routines is another special case.
        # Constructing a request that actually involves the use of the
        # interpolation routine is a little involved as the number of
        # interpolation points needs to be lower than the actual number of
        # states. And to know the number of states each period, I need to
        # construct the whole state space.
        if is_interpolated:
            state_space = StateSpace(
                num_periods, num_types, edu_spec["start"], edu_spec["max"]
            )

            max_states_period = state_space.states_per_period.max()

            options_spec["interpolation"]["points"] = np.random.randint(
                10, max_states_period
            )

        # Write out random components and interpolation grid to align the three
        # implementations.
        write_draws(num_periods, max_draws)
        respy_obj = RespyCls(params_spec, options_spec)
        write_interpolation_grid(respy_obj)

        type_shares = respy_obj.attr["optim_paras"]["type_shares"]

        write_types(type_shares, num_agents)
        write_edu_start(edu_spec, num_agents)
        write_lagged_start(num_agents)

        # Clean evaluations based on interpolation grid,
        base_val, base_data = None, None

        for version in ["python", "fortran"]:
            respy_obj = RespyCls(params_spec, options_spec)

            # Modify the version of the program for the different requests.
            respy_obj.unlock()
            respy_obj.set_attr("version", version)
            respy_obj.lock()

            # Solve the model
            respy_obj = simulate_observed(respy_obj)

            # This part checks the equality of the simulated dataset for the
            # different versions of the code.
            data_frame = pd.read_csv("data.respy.dat", delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the criterion function.
            _, crit_val = respy_obj.fit()

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val, crit_val, rtol=1e-05, atol=1e-06)

            # We know even more for the deterministic case.
            if is_deterministic:
                assert crit_val in [-1.0, 0.0]
Example #28
from respy import simulate, RespyCls, estimate
import numpy as np

import pickle as pkl

respy_obj = RespyCls('model.respy.ini')
simulate(respy_obj)


base = None
for num_procs in [1, 2]:

    respy_obj.unlock()
    respy_obj.set_attr('num_procs', num_procs)
    respy_obj.set_attr('is_parallel', (num_procs > 1))
    respy_obj.lock()

    x, crit_val = estimate(respy_obj)
    if base is None:
        base = crit_val

    np.testing.assert_equal(crit_val, base)
    print(num_procs, crit_val)
# print('working PYTHON')
# respy_obj = RespyCls('model.respy.ini')
# #respy_obj.attr['version'] = 'PYTHON'
# #respy_obj.attr['optimizer_used'] = 'SCIPY-POWELL'
# import time
# start = time.time()
#
# x, crit_val = estimate(respy_obj)
Example #29
    def test_1(self):
        """ Testing the equality of an evaluation of the criterion function for
        a random request.
        """
        # Run evaluation for multiple random requests.
        is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
        is_interpolated = np.random.choice([True, False], p=[0.10, 0.9])
        is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
        max_draws = np.random.randint(10, 100)

        # Generate random initialization file
        constr = dict()
        constr['is_deterministic'] = is_deterministic
        constr['flag_parallelism'] = False
        constr['is_myopic'] = is_myopic
        constr['max_draws'] = max_draws
        constr['maxfun'] = 0

        init_dict = generate_random_dict(constr)

        # The use of the interpolation routines is another special case.
        # Constructing a request that actually involves the use of the
        # interpolation routine is a little involved as the number of
        # interpolation points needs to be lower than the actual number of
        # states. And to know the number of states each period, I need to
        # construct the whole state space.
        if is_interpolated:
            # Extract from future initialization file the information
            # required to construct the state space. The number of periods
            # needs to be at least three in order to provide enough state
            # points.
            num_periods = np.random.randint(3, 6)
            edu_start = init_dict['EDUCATION']['start']
            edu_max = init_dict['EDUCATION']['max']
            min_idx = min(num_periods, (edu_max - edu_start + 1))

            max_states_period = pyth_create_state_space(
                num_periods, edu_start, edu_max, min_idx)[3]

            # Updates to initialization dictionary that trigger a use of the
            # interpolation code.
            init_dict['BASICS']['periods'] = num_periods
            init_dict['INTERPOLATION']['flag'] = True
            init_dict['INTERPOLATION']['points'] = \
                np.random.randint(10, max_states_period)

        # Print out the relevant initialization file.
        print_init_dict(init_dict)

        # Write out random components and interpolation grid to align the
        # three implementations.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)
        write_interpolation_grid('test.respy.ini')

        # Clean evaluations based on interpolation grid,
        base_val, base_data = None, None

        for version in ['PYTHON', 'FORTRAN']:
            respy_obj = RespyCls('test.respy.ini')

            # Modify the version of the program for the different requests.
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            # Solve the model
            respy_obj = simulate(respy_obj)

            # This part checks the equality of the simulated dataset for the
            # different versions of the code.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the
            # criterion function.
            _, crit_val = estimate(respy_obj)

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val,
                                       crit_val,
                                       rtol=1e-05,
                                       atol=1e-06)

            # We know even more for the deterministic case.
            if constr['is_deterministic']:
                assert (crit_val in [-1.0, 0.0])
Example #30
    def test_1(self):
        """ Testing the equality of an evaluation of the criterion function for
        a random request.
        """
        # Run evaluation for multiple random requests.
        is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
        is_interpolated = np.random.choice([True, False], p=[0.10, 0.9])
        is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
        max_draws = np.random.randint(10, 100)

        # Generate random initialization file
        constr = dict()
        constr['is_deterministic'] = is_deterministic
        constr['flag_parallelism'] = False
        constr['is_myopic'] = is_myopic
        constr['max_draws'] = max_draws
        constr['maxfun'] = 0

        init_dict = generate_random_dict(constr)

        # The use of the interpolation routines is another special case.
        # Constructing a request that actually involves the use of the
        # interpolation routine is a little involved as the number of
        # interpolation points needs to be lower than the actual number of
        # states. And to know the number of states each period, I need to
        # construct the whole state space.
        if is_interpolated:
            # Extract from future initialization file the information
            # required to construct the state space. The number of periods
            # needs to be at least three in order to provide enough state
            # points.
            num_periods = np.random.randint(3, 6)
            edu_start = init_dict['EDUCATION']['start']
            edu_max = init_dict['EDUCATION']['max']
            min_idx = min(num_periods, (edu_max - edu_start + 1))

            max_states_period = pyth_create_state_space(num_periods, edu_start,
                edu_max, min_idx)[3]

            # Updates to initialization dictionary that trigger a use of the
            # interpolation code.
            init_dict['BASICS']['periods'] = num_periods
            init_dict['INTERPOLATION']['flag'] = True
            init_dict['INTERPOLATION']['points'] = \
                np.random.randint(10, max_states_period)

        # Print out the relevant initialization file.
        print_init_dict(init_dict)

        # Write out random components and interpolation grid to align the
        # three implementations.
        num_periods = init_dict['BASICS']['periods']
        write_draws(num_periods, max_draws)
        write_interpolation_grid('test.respy.ini')

        # Clean evaluations based on interpolation grid,
        base_val, base_data = None, None

        for version in ['PYTHON', 'FORTRAN']:
            respy_obj = RespyCls('test.respy.ini')

            # Modify the version of the program for the different requests.
            respy_obj.unlock()
            respy_obj.set_attr('version', version)
            respy_obj.lock()

            # Solve the model
            respy_obj = simulate(respy_obj)

            # This part checks the equality of the simulated dataset for the
            # different versions of the code.
            data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)

            if base_data is None:
                base_data = data_frame.copy()

            assert_frame_equal(base_data, data_frame)

            # This part checks the equality of an evaluation of the
            # criterion function.
            _, crit_val = estimate(respy_obj)

            if base_val is None:
                base_val = crit_val

            np.testing.assert_allclose(base_val, crit_val, rtol=1e-05,
                                       atol=1e-06)

            # We know even more for the deterministic case.
            if constr['is_deterministic']:
                assert (crit_val in [-1.0, 0.0])
    def test_10(self):
        """ This test ensures that the order of the initial schooling level specified in
        the initialization files does not matter for the simulation of a dataset and
        subsequent evaluation of the criterion function.

        Warning
        -------
        This test fails if types have identical intercepts, as no unique ordering can
        then be determined.

        """
        point_constr = {
            "estimation": {
                "maxfun": 0
            },
            # We cannot allow for interpolation as the order of states within each
            # period changes and thus the prediction model is altered even if the same
            # state identifier is used.
            "interpolation": {
                "flag": False
            },
        }

        params_spec, options_spec = generate_random_model(
            point_constr=point_constr)

        respy_obj = RespyCls(params_spec, options_spec)

        edu_baseline_spec, num_types, num_paras, optim_paras = dist_class_attributes(
            respy_obj, "edu_spec", "num_types", "num_paras", "optim_paras")

        # We want to randomly shuffle the list of initial schooling levels but need to
        # keep the shares (and lagged probabilities) aligned with their levels.
        edu_shuffled_start = np.random.permutation(
            edu_baseline_spec["start"]).tolist()

        edu_shuffled_share, edu_shuffled_lagged = [], []
        for start in edu_shuffled_start:
            idx = edu_baseline_spec["start"].index(start)
            edu_shuffled_lagged += [edu_baseline_spec["lagged"][idx]]
            edu_shuffled_share += [edu_baseline_spec["share"][idx]]

        edu_shuffled_spec = copy.deepcopy(edu_baseline_spec)
        edu_shuffled_spec["lagged"] = edu_shuffled_lagged
        edu_shuffled_spec["start"] = edu_shuffled_start
        edu_shuffled_spec["share"] = edu_shuffled_share

        # We are only looking at a single evaluation as otherwise the reordering affects
        # the optimizer that is trying better parameter values one-by-one. The
        # reordering might also violate the bounds.
        for i in range(53, num_paras):
            optim_paras["paras_bounds"][i] = [None, None]
            optim_paras["paras_fixed"][i] = False

        # We need to ensure that the baseline type is still in the first position.
        types_order = [0] + np.random.permutation(range(1, num_types)).tolist()

        type_shares = []
        for i in range(num_types):
            lower, upper = i * 2, (i + 1) * 2
            type_shares += [optim_paras["type_shares"][lower:upper].tolist()]

        optim_paras_baseline = copy.deepcopy(optim_paras)
        optim_paras_shuffled = copy.deepcopy(optim_paras)

        list_ = [
            optim_paras["type_shifts"][i, :].tolist() for i in types_order
        ]
        optim_paras_shuffled["type_shifts"] = np.array(list_)

        list_ = [type_shares[i] for i in types_order]
        optim_paras_shuffled["type_shares"] = np.array(list_).flatten()

        base_data, base_val = None, None

        k = 0

        for optim_paras in [optim_paras_baseline, optim_paras_shuffled]:
            for edu_spec in [edu_baseline_spec, edu_shuffled_spec]:

                respy_obj.unlock()
                respy_obj.set_attr("edu_spec", edu_spec)
                respy_obj.lock()

                # There is some more work to do to update the coefficients as we
                # distinguish between the economic and optimization version of the
                # parameters.
                x = get_optim_paras(optim_paras, num_paras, "all", True)
                shocks_cholesky, _ = extract_cholesky(x)
                shocks_coeffs = cholesky_to_coeffs(shocks_cholesky)
                x[43:53] = shocks_coeffs
                respy_obj.update_optim_paras(x)

                respy_obj.reset()

                simulate_observed(respy_obj)

                # This part checks the equality of simulated dataset.
                data_frame = pd.read_csv("data.respy.dat",
                                         delim_whitespace=True)

                if base_data is None:
                    base_data = data_frame.copy()

                assert_frame_equal(base_data, data_frame)

                # This part checks the equality of a single function evaluation.
                _, val = respy_obj.fit()
                if base_val is None:
                    base_val = val
                np.testing.assert_almost_equal(base_val, val)

                respy_obj.reset()
                k += 1
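
# A minimal sketch of the alignment logic in the test above, using plain
# Python lists with illustrative values: the start levels are shuffled while
# the shares stay attached to their original level.
import numpy as np

start = [7, 10, 12]
share = [0.5, 0.3, 0.2]

shuffled_start = np.random.permutation(start).tolist()
shuffled_share = [share[start.index(s)] for s in shuffled_start]

assert sorted(shuffled_share) == sorted(share)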
Example #32
from respy import simulate, RespyCls, estimate
import numpy as np

import pickle as pkl

respy_obj = RespyCls('model.respy.ini')
simulate(respy_obj)

base = None
for num_procs in [1, 2]:

    respy_obj.unlock()
    respy_obj.set_attr('num_procs', num_procs)
    respy_obj.set_attr('is_parallel', (num_procs > 1))
    respy_obj.lock()

    x, crit_val = estimate(respy_obj)
    if base is None:
        base = crit_val

    np.testing.assert_equal(crit_val, base)
    print(num_procs, crit_val)
# print('working PYTHON')
# respy_obj = RespyCls('model.respy.ini')
# #respy_obj.attr['version'] = 'PYTHON'
# #respy_obj.attr['optimizer_used'] = 'SCIPY-POWELL'
# import time
# start = time.time()
#
# x, crit_val = estimate(respy_obj)