    def test_normal_unscented_predict_focus_on_columns(self, mock_transform):
        mock_transform.return_value = self.sps1
        kf.normal_unscented_predict(
            self.stage, self.sps1, self.flat_sps1, self.sws_m, self.sws_c,
            self.Q, self.transform_sps_args, self.out_states, self.out_covs)

        aaae(self.out_states, self.expected_states1)
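
Throughout these examples, aaae is an alias for numpy.testing.assert_array_almost_equal. A minimal, self-contained sketch of the alias and its tolerance semantics:

import numpy as np
from numpy.testing import assert_array_almost_equal as aaae

# decimal=k passes when abs(actual - desired) < 1.5 * 10**(-k), elementwise.
aaae(np.array([1.00004, 2.0]), np.array([1.0, 2.0]), decimal=4)
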
def test_three_independent_constraints():
    params = np.arange(10)
    params[0] = 2

    constraints = [
        {"loc": [0, 1, 2], "type": "covariance"},
        {"loc": [4, 5], "type": "fixed"},
        {"loc": [7, 8], "type": "linear", "value": 15, "weights": 1},
    ]

    res = minimize(
        criterion=lambda x: x @ x,
        params=params,
        algorithm="scipy_lbfgsb",
        constraints=constraints,
        algo_options={"convergence.relative_criterion_tolerance": 1e-12},
    )
    expected = np.array([0] * 4 + [4, 5] + [0] + [7.5] * 2 + [0])

    aaae(res.params, expected, decimal=4)
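
The linear-constraint part of the expected values can be cross-checked with SciPy alone: minimizing a sum of squares subject to two parameters summing to 15 splits the total evenly, giving 7.5 each. A small sketch (using scipy.optimize directly, not estimagic):

import numpy as np
from scipy.optimize import minimize as scipy_minimize

# Minimize x @ x subject to x[0] + x[1] == 15; the optimum is [7.5, 7.5].
res = scipy_minimize(
    fun=lambda x: x @ x,
    x0=np.array([7.0, 8.0]),
    constraints={"type": "eq", "fun": lambda x: x.sum() - 15},
)
print(res.x)  # ~[7.5, 7.5]
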
    def test_sqrt_cov_update_with_nans(self):
        kf.sqrt_linear_update(
            self.states, self.mcovs, self.like_vec, self.y, self.c,
            self.delta, self.h, self.sqrt_r, self.positions, self.weights)
        cholcovs = self.mcovs[:, :, 1:, 1:]
        make_unique(cholcovs.reshape(12, 3, 3))
        aaae(cholcovs, self.exp_cholcovs)
    def test_iv_math(self):
        expected_beta = LinearIVGMM(
            endog=self.y, exog=self.x, instrument=self.z).fitgmm(
                start=None, weights=self.w)
        calc_beta = wf._iv_math(self.y, self.x, self.z, self.w)

        aaae(calc_beta.flatten(), expected_beta)
def assert_output_equal(o1, o2):
    nt.assert_equal(len(o1), len(o2))
    for i in range(len(o1)):
        aaae(o1[i], o2[i])
def test_history_collection_with_parallelization(algorithm, tmp_path):
    lb = np.zeros(5) if algorithm in BOUNDED else None
    ub = np.full(5, 10) if algorithm in BOUNDED else None

    logging = tmp_path / "log.db"

    collected_hist = minimize(
        criterion=lambda x: {"root_contributions": x, "value": x @ x},
        params=np.arange(5),
        algorithm=algorithm,
        lower_bounds=lb,
        upper_bounds=ub,
        algo_options={"n_cores": 2, "stopping.max_iterations": 3},
        logging=logging,
        log_options={"if_database_exists": "replace", "fast_logging": True},
    ).history

    reader = OptimizeLogReader(logging)

    log_hist = reader.read_history()

    # We cannot expect the order to be the same
    aaae(sorted(collected_hist["criterion"]), sorted(log_hist["criterion"]))
def test_criterion_and_derivative_template(
    base_inputs, direction, crit, deriv, crit_and_deriv
):
    inputs = base_inputs.copy()
    inputs["first_criterion_evaluation"]["output"] = crit(inputs["params"])
    if (deriv, crit_and_deriv) != (None, None):
        crit = no_second_call(crit)

    inputs["criterion"] = crit
    inputs["derivative"] = no_second_call(deriv)
    inputs["criterion_and_derivative"] = no_second_call(crit_and_deriv)
    inputs["direction"] = direction

    calc_criterion, calc_derivative = internal_criterion_and_derivative_template(
        task="criterion_and_derivative", **inputs)

    calc_criterion2 = internal_criterion_and_derivative_template(
        task="criterion", **inputs)

    calc_derivative2 = internal_criterion_and_derivative_template(
        task="derivative", **inputs)

    if direction == "minimize":
        for c in calc_criterion, calc_criterion2:
            assert c == 30

        for d in calc_derivative, calc_derivative2:
            aaae(d, 2 * np.arange(5))
    else:
        for c in calc_criterion, calc_criterion2:
            assert c == -30

        for d in calc_derivative, calc_derivative2:
            aaae(d, -2 * np.arange(5))
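
The expected values follow directly from the fixture: with params = np.arange(5), the sum of squares is 30 and its gradient is 2 * x; maximization flips both signs.

import numpy as np

x = np.arange(5)
print(x @ x)  # 30
print(2 * x)  # [0 2 4 6 8]
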
def test_valid_derivative_versions(direction, algorithm, derivative,
                                   criterion_and_derivative):
    start_params = pd.DataFrame()
    start_params["value"] = [1, 2, 3]

    if direction == "minimize":
        res = minimize(
            criterion=sos_dict_criterion,
            params=start_params,
            algorithm=algorithm,
            derivative=derivative,
            criterion_and_derivative=criterion_and_derivative,
            error_handling="raise",
        )
    else:
        deriv = derivative if derivative is None else switch_sign(derivative)
        crit_and_deriv = (criterion_and_derivative
                          if criterion_and_derivative is None else
                          switch_sign(criterion_and_derivative))
        res = maximize(
            criterion=switch_sign(sos_dict_criterion),
            params=start_params,
            algorithm=algorithm,
            derivative=deriv,
            criterion_and_derivative=crit_and_deriv,
            error_handling="raise",
        )

    aaae(res.params["value"].to_numpy(), np.zeros(3), decimal=4)
    def test_sqrt_unscented_predict_focus_on_weighting(self, mock_transform):
        mock_transform.return_value = self.sps2
        kf.sqrt_unscented_predict(
            self.stage, self.sps2, self.flat_sps2, self.sws_m, self.sws_c,
            self.Q, self.transform_sps_args, self.out_states, self.out_sqrt_covs)

        aaae(self.out_states, self.expected_states2)
Example #10
def test_sdcorr_params_to_sds_and_corr():
    sdcorr_params = pd.Series([1, 2, 3, 0.1, 0.2, 0.3])
    exp_corr = np.array([[1, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 1]])
    exp_sds = np.array([1, 2, 3])
    calc_sds, calc_corr = sdcorr_params_to_sds_and_corr(sdcorr_params)
    aaae(calc_sds, exp_sds)
    aaae(calc_corr, exp_corr)
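
The sdcorr layout tested here puts the n standard deviations first, followed by the strict lower triangle of the correlation matrix filled row by row. A hypothetical re-implementation of the unpacking (a sketch, not the library's code):

import numpy as np

def unpack_sdcorr(sdcorr_params, n):
    p = np.asarray(sdcorr_params, dtype=float)
    sds = p[:n]
    # Fill the strict lower triangle row by row, then mirror it.
    corr = np.eye(n)
    corr[np.tril_indices(n, k=-1)] = p[n:]
    corr = corr + np.tril(corr, k=-1).T
    return sds, corr
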
Example #12
def test_scaling_with_gradient(inputs):
    calc_factor, calc_offset = calculate_scaling_factor_and_offset(
        *inputs, method="gradient", clipping_value=0.2
    )

    aaae(calc_factor, np.array([0.2, 2, 4]))
    assert calc_offset is None
Example #13
    def test_predict_ff_intermediate_false_mocked(self, mock_tsp, mock_pp):
        mock_tsp.side_effect = fake_tsp
        self.likelihood_arguments_dict = Mock(return_value=self.lh_args)
        exp = np.ones((10, 2)) * 4
        exp[:, 0] = 12
        calc = smo._predict_final_factors(self, self.change)
        aaae(calc, exp)
Example #14
def test_cov_matrix_to_sdcorr_params():
    sds = np.sqrt([1, 2, 3])
    corrs = [0.07071068, 0.11547005, 0.08981462]
    expected = np.hstack([sds, corrs])
    cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
    calculated = cov_matrix_to_sdcorr_params(cov)
    aaae(calculated, expected)
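
The fixture correlations can be recomputed from the covariance via corr_ij = cov_ij / (sd_i * sd_j):

import numpy as np

cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
sds = np.sqrt(np.diag(cov))
corr = cov / np.outer(sds, sds)
# Strict lower triangle, row by row: ~[0.0707, 0.1155, 0.0898]
print(corr[np.tril_indices(3, k=-1)])
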
Example #15
def test_scaling_with_bounds(inputs):
    calc_factor, calc_offset = calculate_scaling_factor_and_offset(
        *inputs, method="bounds"
    )

    aaae(calc_factor, np.array([11, 10, 10]))
    aaae(calc_offset, np.array([-1, 0, 0]))
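
The inputs fixture is not shown here; a hypothetical set of bounds consistent with the expected output, assuming the bounds method uses factor = upper - lower and offset = lower:

import numpy as np

# Hypothetical bounds (assumption; the real fixture is defined elsewhere).
lower = np.array([-1.0, 0.0, 0.0])
upper = np.array([10.0, 10.0, 10.0])
print(upper - lower)  # [11. 10. 10.] -> expected scaling factor
print(lower)          # [-1.  0.  0.] -> expected offset
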
Example #16
def test_maximize(algorithm):
    np.random.seed(1234)
    params = pd.Series([1, -1, -1.5, 1.5], name="value").to_frame()
    params["lower"] = -2
    params["upper"] = 2

    origin, algo_name = algorithm.split("_", 1)
    if origin == "pygmo":
        if algo_name == "simulated_annealing":
            algo_options = {}
        elif algo_name in ["ihs"]:
            algo_options = {"popsize": 1, "gen": 1000}
        elif algo_name in ["sga"]:
            algo_options = {"popsize": 50, "gen": 500}
        elif algo_name in ["sea"]:
            algo_options = {"popsize": 5, "gen": 7000}
        elif algo_name == "simulated_annealing":
            np.random.seed(5471)
            algo_options = {"n_T_adj": 20, "Tf": 0.0001, "n_range_adj": 20}
        else:
            algo_options = {"popsize": 30, "gen": 150}
    else:
        algo_options = {}
    res_dict, final_params = maximize(
        f, params, algorithm, algo_options=algo_options, logging=False,
    )
    aaae(final_params["value"].to_numpy(), np.zeros(len(final_params)), decimal=2)
Example #17
    def test_merwe_sigma_point_construction(self):
        expected_sps = np.array(self.fixtures['merwe_points']).reshape(
            self.nemf * self.nind, self.nsigma, self.nfac)
        calculate_sigma_points(states=self.states, flat_covs=self.lcovs_t,
                               scaling_factor=0.234520787991, out=self.out,
                               square_root_filters=True)
        aaae(self.out, expected_sps)
Example #18
def test_multistart_minimize_with_sum_of_squares_at_defaults(
        criterion, direction, params):

    if direction == "minimize":
        res = minimize(
            criterion=criterion,
            params=params,
            algorithm="scipy_lbfgsb",
            multistart=True,
        )
    else:
        res = maximize(
            criterion=switch_sign(sos_dict_criterion),
            params=params,
            algorithm="scipy_lbfgsb",
            multistart=True,
        )

    assert "multistart_info" in res
    ms_info = res["multistart_info"]
    assert len(ms_info["exploration_sample"]) == 40
    assert len(ms_info["exploration_results"]) == 40
    assert all(
        isinstance(entry, dict) for entry in ms_info["exploration_results"])
    assert all(isinstance(entry, dict) for entry in ms_info["local_optima"])
    assert all(
        isinstance(entry, pd.DataFrame)
        for entry in ms_info["start_parameters"])
    assert np.allclose(res["solution_criterion"], 0)
    aaae(res["solution_params"]["value"], np.zeros(4))
Example #19
    def test_marginal_effect_outcome_anch_outcome(self):
        self.anchoring = True
        self.me_anchor_on = True
        self.me_on = 'anch_outcome'
        exp = np.ones((10)) * 4
        calc = smo._marginal_effect_outcome(self, self.change)
        aaae(calc, exp)
Example #20
def test_sds_and_corr_to_cov():
    sds = [1, 2, 3]
    corr = np.ones((3, 3)) * 0.2
    corr[np.diag_indices(3)] = 1
    calculated = sds_and_corr_to_cov(sds, corr)
    expected = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])
    aaae(calculated, expected)
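
The expected covariance follows from cov = D @ corr @ D with D = diag(sds):

import numpy as np

sds = np.array([1.0, 2.0, 3.0])
corr = np.full((3, 3), 0.2)
np.fill_diagonal(corr, 1.0)
# Scale rows and columns by the standard deviations.
print(np.diag(sds) @ corr @ np.diag(sds))
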
Example #21
def test_first_derivative_scalar(method):
    def f(x):
        return x**2

    calculated = first_derivative(f, 3.0, method=method)
    expected = 6.0
    aaae(calculated, expected)
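
The expected value is just f'(3) = 2 * 3 = 6. For intuition, a minimal central-difference sketch of what a numerical first derivative computes (an illustration, not estimagic's implementation):

def central_diff(f, x, h=1e-6):
    # Symmetric difference quotient; the error is O(h**2) for smooth f.
    return (f(x + h) - f(x - h)) / (2 * h)

print(central_diff(lambda x: x**2, 3.0))  # ~6.0
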
def test_constrained_minimization(criterion_name, algorithm, derivative,
                                  constraint_name, params_type):

    constraints = CONSTR_INFO[constraint_name]
    criterion = FUNC_INFO[criterion_name]["criterion"]
    if params_type == "pandas":
        params = pd.Series(START_INFO[constraint_name],
                           name="value").to_frame()
    else:
        params = np.array(START_INFO[constraint_name])

    res = minimize(
        criterion=criterion,
        params=params,
        algorithm=algorithm,
        derivative=derivative,
        constraints=constraints,
        algo_options={"convergence.relative_criterion_tolerance": 1e-12},
    )

    if params_type == "pandas":
        calculated = res.params["value"].to_numpy()
    else:
        calculated = res.params

    expected = FUNC_INFO[criterion_name].get(
        f"{constraint_name}_result",
        FUNC_INFO[criterion_name]["default_result"])

    aaae(calculated, expected, decimal=4)
def test_calculate_or_validate_base_steps_hessian():
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.finfo(float).eps ** (1 / 3)
    calculated = _calculate_or_validate_base_steps(
        None, x, "second_derivative", 0, scaling_factor=1.0
    )
    aaae(calculated, expected, decimal=12)
def test_likelihood_value():
    df = pd.read_stata("skillmodels/tests/estimation/chs_test_ex2.dta")
    df.set_index(["id", "period"], inplace=True)
    with open("skillmodels/tests/estimation/test_model2.json") as j:
        model_dict = json.load(j)

    mod = SkillModel(
        model_dict=model_dict, dataset=df, estimator="chs", model_name="test_model"
    )

    args = mod.likelihood_arguments_dict()

    params_df = pd.read_csv("skillmodels/tests/estimation/like_reg_params_new.csv")
    params_df["name2"].fillna("", inplace=True)
    params_df["name1"].replace("0", 0, inplace=True)
    params_df.set_index(["category", "period", "name1", "name2"], inplace=True)
    mod.start_params = params_df

    full_params = mod.generate_full_start_params()["value"]

    log_like_contributions = log_likelihood_contributions(full_params, **args)
    like_contributions = np.exp(log_like_contributions)
    small = 1e-250
    like_vec = np.prod(like_contributions, axis=0)
    like_vec[like_vec < small] = small
    res = np.log(like_vec)

    in_path = "skillmodels/tests/estimation/regression_test_fixture.pickle"
    with open(in_path, "rb") as p:
        last_result = pickle.load(p)

    aaae(res, last_result)
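
The flooring of like_vec guards against taking the log of an underflowed product of many small likelihood contributions. A minimal illustration:

import numpy as np

like_vec = np.array([1e-300, 0.5])
small = 1e-250
# Floor near-zero likelihoods before taking logs to avoid -inf.
like_vec[like_vec < small] = small
print(np.log(like_vec))  # [-575.65  -0.69] (approximately)
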
Example #25
def test_sdcorr_params_to_matrix():
    sds = np.sqrt([1, 2, 3])
    corrs = [0.07071068, 0.11547005, 0.08981462]
    params = np.hstack([sds, corrs])
    expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
    calculated = sdcorr_params_to_matrix(params)
    aaae(calculated, expected)
def test_calculate_or_validate_base_steps_jacobian_with_scaling_factor():
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) * 2
    calculated = _calculate_or_validate_base_steps(
        None, x, "first_derivative", 0, scaling_factor=2.0
    )
    aaae(calculated, expected, decimal=12)
Example #27
def test_run_explorations():
    def _dummy(x, **kwargs):
        assert set(kwargs) == {
            "task",
            "algorithm_info",
            "error_handling",
            "error_penalty",
            "fixed_log_data",
        }
        if x.sum() == 5:
            out = {"value": np.nan}
        else:
            out = {"value": -x.sum()}
        return out

    calculated = run_explorations(
        func=_dummy,
        sample=np.arange(6).reshape(3, 2),
        batch_evaluator="joblib",
        n_cores=1,
        step_id=0,
        error_handling="raise",
    )

    exp_values = np.array([-9, -1])
    exp_sample = np.array([[4, 5], [0, 1]])

    aaae(calculated["sorted_values"], exp_values)
    aaae(calculated["sorted_sample"], exp_sample)
Example #28
def test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):
    fix = binary_choice_inputs
    func = partial(logit_loglikeobs, y=fix["y"], x=fix["x"])
    calculated = first_derivative(func=func,
                                  params=fix["params_np"],
                                  n_cores=1)
    expected = logit_loglikeobs_jacobian(fix["params_np"], fix["y"], fix["x"])
    aaae(calculated["derivative"], expected, decimal=6)
Example #29
    def test_generate_start_factors_cov_cholesky(self):
        self.nobs = 200000
        self.me_params = np.array([5, 10, 1, 0.1, 1.99749844])
        self.cholesky_of_P_zero = True
        calc_factors = smo._generate_start_factors(self)
        df = pd.DataFrame(calc_factors)
        calc_cov = df.cov().values
        aaae(calc_cov, self.exp_cov, decimal=2)
Example #30
def test_latin_hypercube_property():
    """Check that for each single dimension the points are uniformly distributed."""
    rng = get_rng(seed=1234)
    n_dim, n_points = rng.integers(2, 100, size=2)
    sample = _create_upscaled_lhs_sample(n_dim, n_points, n_designs=1, rng=rng)
    index = np.arange(n_points)
    for j in range(n_dim):
        aaae(index, np.sort(sample[0][:, j]))
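
The property under test: in an upscaled Latin hypercube, every column is a permutation of the bin indices, so sorting any column recovers arange(n_points). A toy sampler with exactly that property:

import numpy as np

rng = np.random.default_rng(1234)
n_points, n_dim = 5, 3
# Each column is an independent permutation of 0..n_points-1.
sample = np.column_stack([rng.permutation(n_points) for _ in range(n_dim)])
for j in range(n_dim):
    assert (np.sort(sample[:, j]) == np.arange(n_points)).all()
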
    def test_linear_transition_equation(self):

        expected_result = np.ones((self.nemf, self.nind, self.nsigma)) * 3
        expected_result[1, :, :] *= 2
        expected_result[:, :, 0] = 9
        expected_result = expected_result.flatten()
        aaae(tf.linear(self.sp, self.coeffs, self.incl_pos),
             expected_result)
Example #32
    def test_predict_ff_mocked_same_result_in_second(self, mock_tsp, mock_pp):
        # this test makes sure that we copy arrays where necessary
        mock_tsp.side_effect = fake_tsp
        self.likelihood_arguments_dict = Mock(return_value=self.lh_args)

        calc1 = smo._predict_final_factors(self, self.change)
        calc2 = smo._predict_final_factors(self, self.change)
        aaae(calc1, calc2)
def test_calculate_or_validate_base_steps_binding_min_step():
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps)
    expected[0] = 1e-8
    calculated = _calculate_or_validate_base_steps(
        None, x, "first_derivative", 1e-8, scaling_factor=1.0
    )
    aaae(calculated, expected, decimal=12)
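
Taken together, the three _calculate_or_validate_base_steps tests pin down the rule they expect: eps ** (1/2) for first derivatives and eps ** (1/3) for second derivatives, times max(|x|, 0.1), times the scaling factor, floored at min_steps. A sketch under that reading (inferred from the fixtures, not the library's actual code):

import numpy as np

def base_steps_sketch(x, kind, min_steps=0.0, scaling_factor=1.0):
    # Relative steps with a floor of 0.1 on |x| and an absolute floor of min_steps.
    exponent = 0.5 if kind == "first_derivative" else 1 / 3
    steps = np.finfo(float).eps ** exponent * np.maximum(np.abs(x), 0.1)
    return np.maximum(steps * scaling_factor, min_steps)
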
    def test_sqrt_unscented_predict_focus_on_covs(self, mock_transform):
        mock_transform.return_value = self.sps3
        # self.q = np.eye(3) * 0.25 + np.ones((3, 3)) * 0.5
        kf.sqrt_unscented_predict(
            self.stage, self.sps3, self.flat_sps3, self.sws_m, self.sws_c,
            self.Q, self.transform_sps_args, self.out_states, self.out_sqrt_covs)
        make_unique(self.out_covs)
        aaae(self.out_covs, self.exp_cholcovs)
def test_logit_loglike(logit_inputs, logit_object):
    x = logit_inputs["params"]["value"].to_numpy()
    expected_value = logit_object.loglike(x)
    expected_contribs = logit_object.loglikeobs(x)
    calculated = logit_loglike(**logit_inputs)

    assert np.allclose(calculated["value"], expected_value)
    aaae(calculated["contributions"], expected_contribs)
def test_probability_to_internal_jacobian(dim, seed):
    external = get_external_probability(dim)

    func = partial(kt.probability_to_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, external)
    deriv = kt.probability_to_internal_jacobian(external, None)

    aaae(deriv, numerical_deriv["derivative"], decimal=3)
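
These jacobian tests share one pattern: compare an analytic Jacobian against a numerical one. A self-contained central-difference sketch of the numerical side (illustrative; estimagic's first_derivative is more elaborate):

import numpy as np

def numerical_jacobian(func, x, h=1e-7):
    # Central differences, one coordinate at a time; column i holds d func / d x_i.
    x = np.asarray(x, dtype=float)
    cols = []
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = h
        cols.append((func(x + e) - func(x - e)) / (2 * h))
    return np.column_stack(cols)
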
    def test_translog(self):
        expected_result = np.zeros((self.nemf, self.nind, self.nsigma))
        expected_result[:] = np.array(
            [0.76, 0.61, 1.32, 0.04, 0.77, 0.01, -0.07, 0.56, 70.92])
        expected_result = expected_result.reshape(
            self.nemf * self.nind * self.nsigma)
        calculated_result = tf.translog(self.sp, self.coeffs, self.incl_pos)
        aaae(calculated_result, expected_result)
Example #38
def test_cov_to_sds_and_corr():
    cov = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])
    calc_sds, calc_corr = cov_to_sds_and_corr(cov)
    exp_sds = [1, 2, 3]
    exp_corr = np.ones((3, 3)) * 0.2
    exp_corr[np.diag_indices(3)] = 1
    aaae(calc_sds, exp_sds)
    aaae(calc_corr, exp_corr)
def test_update_and_evaluate_likelihood(i, kalman_results):
    inp = kalman_results["mean"][i]["input"]
    calculated_mean, calculated_like = update_mean_and_evaluate_likelihood(
        *inp)
    expected_mean = kalman_results["mean"][i]["output_mean"]
    expected_like = kalman_results["mean"][i]["output_loglike"]
    aaae(calculated_mean, expected_mean)
    aaae(calculated_like, expected_like)
Example #42
    def test_iv_reg_array_dict_y(self, mock_tf):
        mock_tf.iv_formula_some_func.return_value = self.formula_tuple

        expected_y = np.array([0, 5])
        calculated_y = wf.iv_reg_array_dict(
            self.depvar_name, self.indepvar_names, self.instrument_names,
            self.transition_name, self.data)['depvar_arr']
        aaae(expected_y, calculated_y)
Example #43
def test_covariance_to_internal_jacobian(dim, seed):
    external = get_external_covariance(dim)

    func = partial(kt.covariance_to_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, external)
    deriv = kt.covariance_to_internal_jacobian(external, None)

    aaae(deriv, numerical_deriv["derivative"], decimal=3)
def test_sdcorr_from_internal_jacobian(dim, seed):
    internal = get_internal_cholesky(dim)

    func = partial(kt.sdcorr_from_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, internal)
    deriv = kt.sdcorr_from_internal_jacobian(internal, None)

    aaae(deriv, numerical_deriv["derivative"], decimal=3)
Example #45
def test_legendre_gauss_lobatto_nodes_weights():
    from polynomials import legendre_gauss_lobatto_nodes_weights as gll

    # n = 6
    x_ref = numpy.float128([-1, -0.830223896278567, -0.468848793470714, 0])
    w_ref = numpy.float128(
        [0.04761904761904762, 0.276826047361566, 0.431745381209863, 0.487619047619048]
    )
    x, w = gll(6)
    aaae(x_ref, x[:4], 15, 'n=6, x')
    aaae(w_ref, w[:4], 15, 'n=6, w')
Example #46
    def test_predict_ff_intermediate_true_mocked(self, mock_tsp, mock_pp):
        mock_tsp.side_effect = fake_tsp
        self.likelihood_arguments_dict = Mock(return_value=self.lh_args)
        exp1 = np.ones((10, 2))
        exp2 = np.ones((10, 2)) * 2
        exp2[:, 0] = 4
        exp = [exp1, exp2]
        calc = smo._predict_final_factors(self, self.change, True)
        for c, e in zip(calc, exp):
            aaae(c, e)
    def test_transform_coeffs_log_ces_long_to_short(self):
        big_out = np.zeros((2, 2))
        small_out = big_out[0, :]
        coeffs = np.array([0.5, 0.5, 3])

        tf.transform_coeffs_log_ces(
            coeffs, self.incl_fac, 'long_to_short', small_out)

        expected = np.zeros((2, 2))
        expected[0, :] = np.array([1, 3])
        aaae(big_out, expected)
Example #48
def test_legendre_gauss_nodes_weights():
    from polynomials import legendre_gauss_nodes_weights as gl

    # n = 6
    x_ref = numpy.float128(
        [-0.949107912342759, -0.741531185599395, -0.405845151377397, 0]
    )
    w_ref = numpy.float128(
        [0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469]
    )
    x, w = gl(6)
    aaae(x_ref, x[:4], 15, 'n=6, x')
    aaae(w_ref, w[:4], 15, 'n=6, w')
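
Assuming gl(6) returns the 7-point Gauss-Legendre rule, the reference values can be cross-checked against NumPy's built-in implementation:

import numpy as np

x, w = np.polynomial.legendre.leggauss(7)
print(x[:4])  # [-0.94910791 -0.74153119 -0.40584515  0.        ]
print(w[:4])  # [0.12948497 0.27970539 0.38183005 0.41795918]
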
Example #49
    def test_iv_reg_array_dict_z(self, mock_tf):
        mock_tf.iv_formula_some_func.return_value = self.formula_tuple

        expected_z = self.data['z'].copy()
        expected_z['np.square(m4)'] = np.square(expected_z['m4'])
        expected_z = expected_z.values[:2, :]

        calculated_z = wf.iv_reg_array_dict(
            self.depvar_name, self.indepvar_names, self.instrument_names,
            self.transition_name, self.data)['instruments_arr']
        aaae(expected_z, calculated_z)
Example #50
    def test_iv_reg_array_dict_x(self, mock_tf):
        mock_tf.iv_formula_some_func.return_value = self.formula_tuple

        expected_x = self.data['x'].copy()
        expected_x['m1_resid:m3_resid'] = \
            expected_x['m1_resid'] * expected_x['m3_resid']
        expected_x = expected_x.values[:2, :]

        calculated_x = wf.iv_reg_array_dict(
            self.depvar_name, self.indepvar_names, self.instrument_names,
            self.transition_name, self.data)['indepvars_arr']
        aaae(expected_x, calculated_x)
    def test_tsp_no_anchoring_no_endog(self, mock_trans):
        mock_trans.fake1.side_effect = fake1
        mock_trans.fake2.side_effect = fake2

        exp = np.zeros((10, 2))
        exp[:, 0] = np.arange(10) + 1
        exp[:, 1] = np.arange(start=10, stop=20) + np.arange(10) - 0.2

        transform_sigma_points(
            stage=self.stage, flat_sigma_points=self.flat_sigma_points,
            transition_argument_dicts=self.transition_argument_dicts,
            transition_function_names=self.transition_function_names)

        calc = self.flat_sigma_points.copy()
        aaae(calc, exp)
    def test_tsp_with_anchoring_no_endog_integration(self, mock_trans):
        mock_trans.fake1.side_effect = fake1
        mock_trans.fake2.side_effect = fake2

        exp = np.zeros((10, 2))
        exp[:, 0] = np.arange(10) + 1
        exp[:, 1] = np.arange(start=10, stop=20) + 0.5 * np.arange(10) - 0.1

        transform_sigma_points(
            stage=self.stage, flat_sigma_points=self.flat_sigma_points,
            transition_argument_dicts=self.transition_argument_dicts,
            transition_function_names=self.transition_function_names,
            anchoring_type='linear', anchoring_positions=[1],
            anch_params=np.array([0, 2.0]))

        calc = self.flat_sigma_points.copy()
        aaae(calc, exp)
    def test_loadings_intercepts_transparams_anchparams_and_xzeros(self):
        self.nobs = 5000
        self.base_meas_sd = 0.00001
        self.base_trans_sd = 0.00001
        self.anch_sd = 0.1

        self.true_meas_sd = self.true_loadings * self.base_meas_sd
        self.true_meas_var = self.true_meas_sd ** 2
        self.true_trans_sd = self.base_trans_sd * np.arange(
            start=0.2, step=0.1, stop=0.75).reshape(self.nperiods - 1, 2)
        self.true_trans_var = self.true_trans_sd ** 2
        self.true_cov_matrix = np.array([[1.44, 0.05, 0.1],
                                         [0.05, 2.25, 0.0],
                                         [0.1, 0.0, 4.0]])
        self.true_P_zero = self.true_cov_matrix[np.triu_indices(self.nfac)]

        self.y_data = generate_test_data(
            nobs=self.nobs, factors=self.factor_names, periods=self.periods,
            included_positions=self.included_positions,
            meas_names=self.meas_names,
            initial_mean=self.true_X_zero, initial_cov=self.true_cov_matrix,
            intercepts=self.true_intercepts, loadings=self.true_loadings,
            meas_sd=self.true_meas_sd, gammas=self.true_gammas,
            trans_sd=self.true_trans_sd,
            anch_intercept=self.anch_intercept,
            anch_loadings=self.anch_loadings, anch_sd=self.anch_sd)

        wa_model = SkillModel(
            model_name='no_squares_translog', dataset_name='test_data',
            model_dict=model_dict, dataset=self.y_data, estimator='wa')

        (calc_storage_df, calc_X_zero, calc_P_zero, calc_gammas, trans_vars,
         anch_intercept, anch_loadings, anch_variance) = (
            wa_model._calculate_wa_quantities()
        )

        calc_loadings = calc_storage_df['loadings']
        calc_intercepts = calc_storage_df['intercepts']

        aaae(calc_loadings.values, self.true_loadings, decimal=3)
        aaae(calc_intercepts.values, self.true_intercepts, decimal=3)
        aaae(calc_X_zero, self.true_X_zero, decimal=1)
        for arr1, arr2 in zip(calc_gammas, self.true_gammas):
            aaae(arr1, arr2, decimal=3)
        assert_almost_equal(anch_intercept, 3.0, places=1)
        aaae(anch_loadings, self.anch_loadings, decimal=2)
def test_likelihood_value():
    df = pd.read_stata('skillmodels/tests/estimation/chs_test_ex2.dta')
    with open('skillmodels/tests/estimation/test_model2.json') as j:
        model_dict = json.load(j)

    mod = SkillModel(model_dict=model_dict, dataset=df, estimator='chs',
                     model_name='test_model')

    args = mod.likelihood_arguments_dict(params_type='short')

    params = [1,
              1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.1,
              1.095, 1.085, 1.075, 1.065, 1.055, 1.045, 1.035, 1.025, 1.015,
              1.005, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99,
              0.995, 0.985, 0.975, 0.965, 0.955, 0.945, 0.935, 0.925, 0.915,
              0.905, 1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.1,
              1.095, 1.085, 1.075, 1.065, 1.055, 1.045, 1.035, 1.025, 1.015,
              1.005, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99,
              0.995, 0.985, 0.975, 0.965, 0.955, 0.945, 0.935, 0.925, 0.915,
              0.905, 1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.1,
              1.095, 1.085, 1.075, 1.065, 1.055, 1.045, 1.035, 1.025, 1.015,
              1.005, 1, 1, 1, 1.2, 1.4, 0.8, 0.6, 1.2, 0.8, 1.2, 1.4, 0.8, 0.6,
              1.2, 1.4, 0.8, 0.6, 1.2, 1.4, 0.8, 0.6, 1.2, 1.4, 0.8, 0.6, 1.2,
              1.4, 0.8, 0.6, 1.2, 1.4, 0.8, 0.6, 1.2, 1.4, 0.8, 0.6, 1, 0.5,
              0.51, 0.52, 0.53, 0.54, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.58,
              0.57, 0.56, 0.55, 0.54, 0.53, 0.52, 0.51, 0.5, 0.51, 0.52, 0.53,
              0.54, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.58, 0.57, 0.56, 0.55,
              0.54, 0.53, 0.53, 0.52, 0.52, 0.51, 0.51, 0.5, 0.5, 0.5, 0.5,
              0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.447, 0, 0, 0.447,
              0, 0.447, 3, 3, -0.5, 0.6]

    res = log_likelihood_per_individual(params, **args)

    in_path = 'skillmodels/tests/estimation/regression_test_fixture.pickle'
    with open(in_path, 'rb') as p:
        last_result = pickle.load(p)
    aaae(res, last_result)
Example #55
def test_legendre_polynomial():
    from polynomials import legendre_polynomial

    # scalar argument
    x = numpy.random.uniform(-1, 1)
    for k in range(7):
        leg_ref, dleg_ref = legendre_ref(k, x)
        leg, dleg = legendre_polynomial(k, x)
        aaae(leg_ref, leg, 15, 'degree %d, pol' % k)
        aaae(dleg_ref, dleg, 15, 'degree %d, deriv' % k)

    # array argument
    x = numpy.linspace(-1, 1, 100)
    for k in range(7):
        leg_ref, dleg_ref = legendre_ref(k, x)
        leg, dleg = legendre_polynomial(k, x)
        aaae(leg_ref, leg, 15, 'degree %d, pol' % k)
        aaae(dleg_ref, dleg, 15, 'degree %d, deriv' % k)
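
legendre_ref is defined elsewhere in the test module; one way such a reference could be written with NumPy's polynomial module (a sketch, assuming it returns P_k(x) and its derivative):

import numpy as np

def legendre_ref(k, x):
    # Coefficient vector selecting the k-th Legendre polynomial.
    basis = np.zeros(k + 1)
    basis[k] = 1.0
    leg = np.polynomial.legendre.Legendre(basis)
    return leg(x), leg.deriv()(x)
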
Example #56
    def test_intercepts_from_means_without_normalization(self):
        expected_intercepts = self.true_intercepts_series
        calc_intercepts, calc_mean = wf.intercepts_from_means(
            self.data, [], self.true_loadings_series)
        aaae(calc_intercepts.values, expected_intercepts.values, decimal=3)
        assert_equal(calc_mean, None)
Example #57
    def test_iv_gmm_weights_optimal_small_case_calculated_manually(self):
        expected_w = np.array([[11.27417695, -10.54526749, -0.56018519],
                               [-10.54526749, 10.51028807, 0.31481481],
                               [-0.56018519, 0.31481481, 0.20833333]])
        calculated_w = wf._iv_gmm_weights(self.z_small, self.u)
        aaae(calculated_w, expected_w)
Example #58
    def test_iv_gmm_weights_2sls_comparison_with_statsmodels(self):
        mod = LinearIVGMM(endog=self.fake_y, exog=self.fake_x,
                          instrument=self.z_large)
        expected_w = mod.start_weights(inv=False)
        calculated_w = wf._iv_gmm_weights(self.z_large)
        aaae(calculated_w, expected_w)
Example #59
    def test_epsilon_variances(self):
        expected_epsilon_variances = self.true_epsilon_variances
        calc_epsilon_variances = wf.factor_covs_and_measurement_error_variances(
            self.meas_cov, self.loading_series, self.meas_per_factor)[1]
        aaae(calc_epsilon_variances, expected_epsilon_variances)
Example #60
    def test_factor_cov_matrix(self):
        expected_cov_matrix = self.true_factor_cov_elements
        calc_cov_matrix = wf.factor_covs_and_measurement_error_variances(
            self.meas_cov, self.loading_series, self.meas_per_factor)[0]
        aaae(calc_cov_matrix, expected_cov_matrix)