import copy
import json
import os
import shutil
import sys


def prepare_release_tests_5(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    init_dict = generate_init(constr)

    json.dump(init_dict, open("old/init_dict.respy.json", "w"))

    # We added an additional coefficient indicating whether there is any experience in a
    # particular job.
    init_dict["OCCUPATION A"]["coeffs"].append(0.00)
    init_dict["OCCUPATION A"]["bounds"].append([None, None])
    init_dict["OCCUPATION A"]["fixed"].append(True)

    init_dict["OCCUPATION B"]["coeffs"].append(0.00)
    init_dict["OCCUPATION B"]["bounds"].append([None, None])
    init_dict["OCCUPATION B"]["fixed"].append(True)

    # This release rescaled the squared term in the experience variable by 100. The presence of
    # the scratch file ensures that this is undone.
    open("new/.restud.respy.scratch", "w").close()

    json.dump(init_dict, open("new/init_dict.respy.json", "w"))
def prepare_release_tests_2(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    constr["level"] = 0.00
    constr["fixed_ambiguity"] = True
    constr["file_est"] = "../data.respy.dat"

    init_dict = generate_init(constr)

    json.dump(init_dict, open("new/init_dict.respy.json", "w"))

    # In the old version, we did not allow for variability in the standard deviations.
    del init_dict["AMBIGUITY"]["mean"]

    json.dump(init_dict, open("old/init_dict.respy.json", "w"))
def prepare_release_tests_10(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """

    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    constr["flag_estimation"] = True
    init_dict = generate_init(constr)

    # We aligned the indicator functions with the KW1997 setup and also added a constant term for
    # the general rewards. Finally, we added the common rewards.
    for label in ["OCCUPATION A", "OCCUPATION B"]:
        for j in range(8, 15):
            init_dict[label]["coeffs"][j] = 0
            init_dict[label]["bounds"][j] = (None, None)
            init_dict[label]["fixed"][j] = True

    for j in [0, 1]:
        init_dict["COMMON"]["coeffs"][j] = 0
        init_dict["COMMON"]["bounds"][j] = (None, None)
        init_dict["COMMON"]["fixed"][j] = True

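    # A deep copy is required here because the swaps below modify the nested coefficient lists
    # in place and must not leak into the dictionary used for the old release.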
    new_dict = copy.deepcopy(init_dict)

    # We swapped the order to align it with the KW1997 setup.
    for label in ["OCCUPATION A", "OCCUPATION B"]:
        new_dict[label]["coeffs"][8], new_dict[label]["coeffs"][11] = (
            new_dict[label]["coeffs"][11],
            new_dict[label]["coeffs"][8],
        )
        new_dict[label]["coeffs"][9], new_dict[label]["coeffs"][12] = (
            new_dict[label]["coeffs"][12],
            new_dict[label]["coeffs"][9],
        )

    json.dump(new_dict, open("new/init_dict.respy.json", "w"))

    old_dict = copy.deepcopy(init_dict)
    for label in ["OCCUPATION A", "OCCUPATION B"]:
        for name in ["coeffs", "bounds", "fixed"]:
            old_dict[label][name].pop(10)

    del old_dict["COMMON"]

    json.dump(old_dict, open("old/init_dict.respy.json", "w"))
def prepare_release_tests_6(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    # Unfortunately, we needed to make edits to the likelihood function that break comparability
    # in all cases except for a model with a single period. We also changed the treatment of
    # inadmissible states, so we need to ensure that these are not relevant.
    constr["periods"] = 1
    edu_start = np.random.randint(1, 5)
    constr["edu"] = (edu_start, edu_start + 100)

    init_dict = generate_init(constr)

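    # NOTE: .copy() is shallow, so the EDUCATION entries changed below are shared with
    # init_dict; they are reassigned in the new format further down.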
    old_dict = init_dict.copy()
    old_dict["EDUCATION"]["start"] = old_dict["EDUCATION"]["start"][0]
    del old_dict["TYPE_SHARES"], old_dict["TYPE_SHIFTS"], old_dict[
        "EDUCATION"]["share"]
    json.dump(old_dict, open("old/init_dict.respy.json", "w"))

    # We need to specify a sample with only the baseline type and a single initial condition.
    init_dict["TYPE_SHIFTS"] = {}
    init_dict["TYPE_SHIFTS"]["coeffs"] = [0.0, 0.0, 0.0, 0.0]
    init_dict["TYPE_SHIFTS"]["bounds"] = [
        (None, None),
        (None, None),
        (None, None),
        (None, None),
    ]
    init_dict["TYPE_SHIFTS"]["fixed"] = [True, True, True, True]

    init_dict["TYPE_SHARES"] = {}
    init_dict["TYPE_SHARES"]["coeffs"] = [1.0]
    init_dict["TYPE_SHARES"]["bounds"] = [(0.0, None)]
    init_dict["TYPE_SHARES"]["fixed"] = [True]

    init_dict["EDUCATION"]["start"] = [init_dict["EDUCATION"]["start"]]
    init_dict["EDUCATION"]["share"] = [1.0]
    init_dict["EDUCATION"]["max"] = init_dict["EDUCATION"]["max"]

    json.dump(init_dict, open("new/init_dict.respy.json", "w"))
def prepare_release_tests_7(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    # This dictionary is generated by the current code, so we first store the new initialization
    # dictionary.
    init_dict = generate_init(constr)

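    # NOTE: .copy() is shallow, so the slice assignments below also modify init_dict. This is
    # harmless here because old_dict later drops exactly the entries that are changed.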
    new_dict = init_dict.copy()
    for name in ["OCCUPATION A", "OCCUPATION B", "HOME"]:
        new_dict[name]["coeffs"][-2:] = [0.0, 0.0]
        new_dict[name]["bounds"][-2:] = [[None, None], [None, None]]
        new_dict[name]["fixed"][-2:] = [True, True]

    new_dict["EDUCATION"]["coeffs"][2] = 0.0
    new_dict["EDUCATION"]["bounds"][2] = [None, None]
    new_dict["EDUCATION"]["fixed"][2] = True

    new_dict["EDUCATION"]["coeffs"][-2:] = [0.0, 0.0]
    new_dict["EDUCATION"]["bounds"][-2:] = [[None, None], [None, None]]
    new_dict["EDUCATION"]["fixed"][-2:] = [True, True]
    json.dump(new_dict, open("new/init_dict.respy.json", "w"))

    old_dict = init_dict.copy()
    for name in ["OCCUPATION A", "OCCUPATION B", "HOME"]:
        for label in ["coeffs", "bounds", "fixed"]:
            old_dict[name][label] = old_dict[name][label][:-2]
    for label in ["coeffs", "bounds", "fixed"]:
        del old_dict["EDUCATION"][label][2]
        del old_dict["EDUCATION"][label][-2:]
    json.dump(old_dict, open("old/init_dict.respy.json", "w"))
def no_preparations_required(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    init_dict = generate_init(constr)

    json.dump(init_dict, open("new/init_dict.respy.json", "w"))
    json.dump(init_dict, open("old/init_dict.respy.json", "w"))
def prepare_release_tests_4(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    init_dict = generate_init(constr)

    # We need to make sure that there are no effects on the reentry costs, as these are
    # estimated separately in the new release. They are fixed during an estimation there as well.
    init_dict["EDUCATION"]["fixed"][-1] = True
    json.dump(init_dict, open("old/init_dict.respy.json", "w"))

    # We added sheepskin effects to the wage equations.
    init_dict["OCCUPATION A"]["coeffs"] += [0.0, 0.0]
    init_dict["OCCUPATION A"]["fixed"] += [True, True]
    init_dict["OCCUPATION A"]["bounds"] += [[None, None], [None, None]]

    init_dict["OCCUPATION B"]["coeffs"] += [0.0, 0.0]
    init_dict["OCCUPATION B"]["fixed"] += [True, True]
    init_dict["OCCUPATION B"]["bounds"] += [[None, None], [None, None]]

    # We are also splitting up the re-entry costs between high school and college graduation.
    init_dict["EDUCATION"]["coeffs"].append(
        init_dict["EDUCATION"]["coeffs"][-1])
    init_dict["EDUCATION"]["fixed"].append(True)
    init_dict["EDUCATION"]["bounds"].append(
        init_dict["EDUCATION"]["bounds"][-1])

    json.dump(init_dict, open("new/init_dict.respy.json", "w"))
def prepare_release_tests_8(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    # As we added more information about lagged activities, an estimation with interpolation
    # does not yield the same results.
    constr["flag_interpolation"] = False
    init_dict = generate_init(constr)

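    # NOTE: .copy() is shallow, so setting index 8 below also modifies init_dict. This is
    # harmless here because old_dict pops exactly that entry afterwards.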
    new_dict = init_dict.copy()
    for name in ["OCCUPATION A", "OCCUPATION B"]:
        new_dict[name]["coeffs"][8] = 0.00
        new_dict[name]["bounds"][8] = [None, None]
        new_dict[name]["fixed"][8] = True
    json.dump(new_dict, open("new/init_dict.respy.json", "w"))

    # We need to remove the coefficient denoting whether an individual worked in the same
    # occupation last period.
    old_dict = init_dict.copy()
    for name in ["OCCUPATION A", "OCCUPATION B"]:
        for label in ["coeffs", "bounds", "fixed"]:
            old_dict[name][label].pop(8)
    json.dump(old_dict, open("old/init_dict.respy.json", "w"))
def prepare_release_tests_1(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    constr["level"] = 0.00
    constr["fixed_ambiguity"] = True
    constr["fixed_delta"] = True
    constr["file_est"] = "../data.respy.dat"

    init_dict = generate_init(constr)

    # In the old release, there was just one location to define the step size for all derivative
    # approximations.
    eps = np.round(init_dict["SCIPY-BFGS"]["eps"], decimals=15)
    init_dict["PRECONDITIONING"]["eps"] = eps
    init_dict["FORT-BFGS"]["eps"] = eps

    # We also endogenized the discount rate, so we need to restrict the analysis to estimations
    # where the discount rate is fixed.
    init_dict["BASICS"]["delta"] = init_dict["BASICS"]["coeffs"][0]

    # We did not have any preconditioning implemented in the PYTHON version initially. We had to
    # switch the preconditioning scheme in the new release and now use the absolute value and
    # thus preserve the sign of the derivative.
    init_dict["PRECONDITIONING"]["type"] = "identity"

    # Some of the optimization algorithms were not available in the old release.
    opt_pyth = np.random.choice(["SCIPY-BFGS", "SCIPY-POWELL"])
    opt_fort = np.random.choice(["FORT-BFGS", "FORT-NEWUOA"])

    if init_dict["PROGRAM"]["version"] == "PYTHON":
        init_dict["ESTIMATION"]["optimizer"] = opt_pyth
    else:
        init_dict["ESTIMATION"]["optimizer"] = opt_fort

    del init_dict["FORT-BOBYQA"]
    del init_dict["SCIPY-LBFGSB"]

    # The concept of bounds for parameters was not available, and the coefficients in the
    # initialization file were only printed with four decimal places.
    for label in [
            "HOME", "OCCUPATION A", "OCCUPATION B", "EDUCATION", "SHOCKS"
    ]:
        num = len(init_dict[label]["fixed"])
        coeffs = np.round(init_dict[label]["coeffs"], decimals=4).tolist()
        init_dict[label]["bounds"] = [(None, None)] * num
        init_dict[label]["coeffs"] = coeffs

    # In the original release, we accidentally treated TAU as an integer when printing to file.
    init_dict["ESTIMATION"]["tau"] = int(init_dict["ESTIMATION"]["tau"])
    json.dump(init_dict, open("new/init_dict.respy.json", "w"))

    # The new release added more fine-grained scaling. Old and new need to be aligned first,
    # either via the identity scheme or with the flag set to False; later we want to allow for a
    # more nuanced check.
    init_dict["SCALING"] = {}
    init_dict["SCALING"]["flag"] = init_dict["PRECONDITIONING"][
        "type"] == "gradient"
    init_dict["SCALING"]["minimum"] = init_dict["PRECONDITIONING"]["minimum"]

    # More flexible parallelism is now available, so we removed the extra section on parallelism.
    init_dict["PARALLELISM"] = {}
    init_dict["PARALLELISM"]["flag"] = init_dict["PROGRAM"]["procs"] > 1
    init_dict["PARALLELISM"]["procs"] = init_dict["PROGRAM"]["procs"]

    # The old release had a single section that enforced the same step size for the derivative
    # calculation everywhere.
    init_dict["DERIVATIVES"] = {}
    init_dict["DERIVATIVES"]["version"] = "FORWARD-DIFFERENCES"
    init_dict["DERIVATIVES"]["eps"] = eps

    # Cleanup
    del init_dict["PROGRAM"]["procs"]
    del init_dict["SCIPY-BFGS"]["eps"]
    del init_dict["FORT-BFGS"]["eps"]
    del init_dict["PRECONDITIONING"]

    # Ambiguity was not yet available in the old release.
    del init_dict["AMBIGUITY"]

    json.dump(init_dict, open("old/init_dict.respy.json", "w"))
def prepare_release_tests_9(constr):
    """ This function prepares the initialization files so that they can be processed by both
    releases under investigation. The idea is to have all hand-crafted modifications grouped in
    this function only.
    """
    def transform_to_logit(shares):
        """ This function transform
        """
        num_types = len(shares)
        denominator = 1.0 / shares[0]

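        # coeffs[i] = log(shares[i] / shares[0]), i.e. the baseline type's coefficient is
        # normalized to zero.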
        coeffs = []
        for i in range(num_types):
            coeffs += [np.log(shares[i] * denominator)]

        return coeffs

    # This script is also imported (but not used) for the creation of the virtual environments.
    # Thus, the imports might not be valid when starting with a clean slate.
    import numpy as np

    sys.path.insert(0, "../../../respy/tests")
    from respy.tests.codes.auxiliary import get_valid_shares
    from respy.tests.codes.random_model import generate_init

    # Prepare fresh subdirectories
    for which in ["old", "new"]:
        if os.path.exists(which):
            shutil.rmtree(which)
        os.mkdir(which)

    constr["flag_estimation"] = True
    init_dict = generate_init(constr)

    # We now need to determine the number of types and sample the fixed type probabilities.
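    # In the new format, each non-baseline type has two share coefficients (see the loop below),
    # which is why the count is halved and the baseline type is added back.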
    num_types = int((len(init_dict["TYPE SHARES"]["coeffs"]) / 2) + 1)
    shares = get_valid_shares(num_types)

    new_dict = init_dict.copy()
    coeffs = transform_to_logit(shares)

    new_dict["TYPE SHARES"]["coeffs"] = []
    new_dict["TYPE SHARES"]["fixed"] = []
    new_dict["TYPE SHARES"]["bounds"] = []

    for coeff in coeffs[1:]:
        new_dict["TYPE SHARES"]["coeffs"] += [coeff]
        new_dict["TYPE SHARES"]["coeffs"] += [0.0]
        new_dict["TYPE SHARES"]["fixed"] += [True, True]
        new_dict["TYPE SHARES"]["bounds"] += [[None, None], [None, None]]

    json.dump(new_dict, open("new/init_dict.respy.json", "w"))

    old_dict = init_dict.copy()

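    # The old format uses underscores in these section names; TYPE_SHARES is rebuilt from the
    # raw shares rather than the logit coefficients.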
    old_dict["TYPE_SHIFTS"] = old_dict["TYPE SHIFTS"]
    del old_dict["TYPE SHIFTS"]

    old_dict["TYPE_SHARES"] = {}
    old_dict["TYPE_SHARES"]["coeffs"] = shares
    old_dict["TYPE_SHARES"]["fixed"] = [True] * num_types
    old_dict["TYPE_SHARES"]["bounds"] = [[0.00, None]] * num_types

    json.dump(old_dict, open("old/init_dict.respy.json", "w"))