Example #1
    def calcAgeDistribution(self):
        '''
        Calculates the long run distribution of t_cycle in the population.
        '''
        AgeMarkov = np.zeros((self.T_cycle + 1, self.T_cycle + 1))
        for t in range(self.T_cycle):
            p = self.LivPrb[t][0]
            AgeMarkov[t, t + 1] = p
            AgeMarkov[t, 0] = 1. - p
        AgeMarkov[-1, 0] = 1.

        AgeMarkovT = np.transpose(AgeMarkov)
        vals, vecs = np.linalg.eig(AgeMarkovT)
        dist = np.abs(np.abs(vals) - 1.)
        idx = np.argmin(dist)

        with warnings.catch_warnings():
            warnings.simplefilter(
                "ignore"
            )  # Ignore warning about casting complex eigenvector to float
            LRagePrbs = vecs[:, idx].astype(float)
        LRagePrbs /= np.sum(LRagePrbs)
        age_vec = np.arange(self.T_cycle + 1).astype(int)
        self.LRageDstn = DiscreteDistribution(LRagePrbs,
                                              age_vec,
                                              seed=self.RNG.randint(
                                                  0, 2**31 - 1))
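
        # Illustrative check (an addition, not in the original): the long-run
        # age probabilities are a stationary distribution of the age Markov
        # chain, so one more transition should leave them unchanged.
        assert np.allclose(np.dot(LRagePrbs, AgeMarkov), LRagePrbs)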
Example #2
    def setUp(self):

        # Define the Markov transition matrix for serially correlated unemployment
        unemp_length = 5  # Average length of unemployment spell
        urate_good = 0.05  # Unemployment rate when economy is in good state
        urate_bad = 0.12  # Unemployment rate when economy is in bad state
        bust_prob = 0.01  # Probability of economy switching from good to bad
        recession_length = 20  # Average length of bad state
        p_reemploy = 1.0 / unemp_length
        p_unemploy_good = p_reemploy * urate_good / (1 - urate_good)
        p_unemploy_bad = p_reemploy * urate_bad / (1 - urate_bad)
        boom_prob = 1.0 / recession_length
        MrkvArray = np.array([
            [
                (1 - p_unemploy_good) * (1 - bust_prob),
                p_unemploy_good * (1 - bust_prob),
                (1 - p_unemploy_good) * bust_prob,
                p_unemploy_good * bust_prob,
            ],
            [
                p_reemploy * (1 - bust_prob),
                (1 - p_reemploy) * (1 - bust_prob),
                p_reemploy * bust_prob,
                (1 - p_reemploy) * bust_prob,
            ],
            [
                (1 - p_unemploy_bad) * boom_prob,
                p_unemploy_bad * boom_prob,
                (1 - p_unemploy_bad) * (1 - boom_prob),
                p_unemploy_bad * (1 - boom_prob),
            ],
            [
                p_reemploy * boom_prob,
                (1 - p_reemploy) * boom_prob,
                p_reemploy * (1 - boom_prob),
                (1 - p_reemploy) * (1 - boom_prob),
            ],
        ])

        init_serial_unemployment = copy(init_idiosyncratic_shocks)
        init_serial_unemployment["MrkvArray"] = [MrkvArray]
        init_serial_unemployment[
            "UnempPrb"] = 0.0  # unemployment is handled by the Markov state, not the income process
        init_serial_unemployment["global_markov"] = False
        self.model = MarkovConsumerType(**init_serial_unemployment)
        self.model.cycles = 0
        self.model.vFuncBool = False  # for easy toggling here

        # Replace the default (lognormal) income distribution with a custom one
        employed_income_dist = DiscreteDistribution(
            np.ones(1), np.array([[1.0], [1.0]]))  # Definitely get income
        unemployed_income_dist = DiscreteDistribution(
            np.ones(1), np.array([[1.0], [0.0]]))  # Definitely don't
        self.model.IncShkDstn = [[
            employed_income_dist,
            unemployed_income_dist,
            employed_income_dist,
            unemployed_income_dist,
        ]]
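
        # Illustrative sanity checks (an addition, not in the original test):
        # each row of a Markov transition matrix sums to one, and the job-loss
        # and re-employment rates reproduce the target unemployment rate as a
        # steady state within each aggregate regime.
        assert np.allclose(MrkvArray.sum(axis=1), 1.0)
        assert np.isclose(urate_good * p_reemploy,
                          (1 - urate_good) * p_unemploy_good)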
Example #3
    def setUp(self):

        self.draw_1 = np.array([
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
            [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
        ])

        self.draw_2 = -1 * self.draw_1

        X = np.stack([self.draw_1, self.draw_2], axis=-1)
        pmf = np.array([0.5, 0.5])

        self.mat_distr = DiscreteDistribution(pmf, X, seed=0)
Example #4
    def main_test(self):

        Markov_vFuncBool_example = MarkovConsumerType(**Markov_Dict)

        TranShkDstn_e = MeanOneLogNormal(
            Markov_vFuncBool_example.TranShkStd[0],
            123).approx(Markov_vFuncBool_example.TranShkCount)
        TranShkDstn_u = DiscreteDistribution(np.ones(1), np.ones(1) * .2)
        PermShkDstn = MeanOneLogNormal(
            Markov_vFuncBool_example.PermShkStd[0],
            123).approx(Markov_vFuncBool_example.PermShkCount)

        #employed Income shock distribution
        employed_IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn_e)

        #unemployed Income shock distribution
        unemployed_IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn_u)
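
        # Note (an explanatory addition): combine_indep_dstns builds the joint
        # distribution of independent marginals, so its pmf is the outer
        # product of the marginal pmfs and its X holds one array of atoms per
        # component (permanent, transitory).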

        # Specify list of IncShkDstns for each state
        Markov_vFuncBool_example.IncShkDstn = [[
            employed_IncShkDstn, unemployed_IncShkDstn
        ]]

        #solve the consumer's problem
        Markov_vFuncBool_example.solve()

        self.assertAlmostEqual(
            Markov_vFuncBool_example.solution[0].vFunc[1](0.4),
            -4.127935542867632)
Example #5
class MatrixDiscreteDistributionTests(unittest.TestCase):
    """
    Tests matrix-valued discrete distribution.
    """
    def setUp(self):

        self.draw_1 = np.array([
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
            [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
        ])

        self.draw_2 = -1 * self.draw_1

        X = np.stack([self.draw_1, self.draw_2], axis=-1)
        pmf = np.array([0.5, 0.5])

        self.mat_distr = DiscreteDistribution(pmf, X, seed=0)

    def test_draw(self):
        """
        Check that the draws are the matrices we
        want them to be
        """

        draw = self.mat_distr.draw(1)
        self.assertTrue(np.allclose(draw[..., 0], self.draw_2))

    def test_expected(self):

        # Expectation without transformation
        exp = calc_expectation(self.mat_distr)

        # Check the expectation is of the shape we want
        self.assertTrue(exp.shape[0] == self.draw_1.shape[0])
        self.assertTrue(exp.shape[1] == self.draw_1.shape[1])

        # Check that its value is what we expect
        self.assertTrue(np.allclose(exp, 0.0))

        # Expectation of the sum
        exp = calc_expectation(self.mat_distr, func=np.sum)
        self.assertTrue(float(exp) == 0.0)

    def test_distr_of_fun(self):

        # A function that receives a (2,n,m) matrix
        # and sums across n, getting a (2,1,m) matrix
        def myfunc(mat):

            return np.sum(mat, axis=1, keepdims=True)

        mydistr = distr_of_function(self.mat_distr, myfunc)

        # Check the dimensions
        self.assertTrue(mydistr.dim() == (2, 1, 3))
Example #6
    def calcAgeDistribution(self):
        '''
        Calculates the long run distribution of t_cycle in the population.
        '''
        AgeMarkov = np.zeros((self.T_cycle + 1, self.T_cycle + 1))
        for t in range(self.T_cycle):
            p = self.LivPrb[t][0]
            AgeMarkov[t, t + 1] = p
            AgeMarkov[t, 0] = 1. - p
        AgeMarkov[-1, 0] = 1.

        AgeMarkovT = np.transpose(AgeMarkov)
        vals, vecs = np.linalg.eig(AgeMarkovT)
        dist = np.abs(np.abs(vals) - 1.)
        idx = np.argmin(dist)

        LRagePrbs = vecs[:, idx].astype(float)
        LRagePrbs /= np.sum(LRagePrbs)
        age_vec = np.arange(self.T_cycle + 1).astype(int)
        self.LRageDstn = DiscreteDistribution(LRagePrbs, age_vec)
Example #7
 def reset(self):
     self.initialize_sim()
     self.t_age = DiscreteDistribution(
         self.AgeDstn,
         np.arange(self.AgeDstn.size),
         seed=self.RNG.randint(0, 2**31 - 1)).draw(
             self.AgentCount, exact_match=False).astype(int)
     self.t_cycle = copy(self.t_age)
     if hasattr(self, 'kGrid'):
         self.aLvlNow = self.kInit * np.ones(
             self.AgentCount)  # Start simulation near SS
         self.aNrmNow = self.aLvlNow / self.pLvlNow
Example #8
    def setUp(self):
        # Set up and solve TBS
        base_primitives = {'UnempPrb': .015,
                           'DiscFac': 0.9,
                           'Rfree': 1.1,
                           'PermGroFac': 1.05,
                           'CRRA': .95}
        TBSType = TractableConsumerType(**base_primitives)
        TBSType.solve()

        # Set up and solve Markov
        MrkvArray = [np.array([[1.0-base_primitives['UnempPrb'], base_primitives['UnempPrb']],[0.0, 1.0]])]
        Markov_primitives = {"CRRA": base_primitives['CRRA'],
                             "Rfree": np.array(2*[base_primitives['Rfree']]),
                             "PermGroFac": [np.array(2*[base_primitives['PermGroFac'] /
                                            (1.0-base_primitives['UnempPrb'])])],
                             "BoroCnstArt": None,
                             "PermShkStd": [0.0],
                             "PermShkCount": 1,
                             "TranShkStd": [0.0],
                             "TranShkCount": 1,
                             "T_total": 1,
                             "UnempPrb": 0.0,
                             "UnempPrbRet": 0.0,
                             "T_retire": 0,
                             "IncUnemp": 0.0,
                             "IncUnempRet": 0.0,
                             "aXtraMin": 0.001,
                             "aXtraMax": TBSType.mUpperBnd,
                             "aXtraCount": 48,
                             "aXtraExtra": [None],
                             "aXtraNestFac": 3,
                             "LivPrb":[np.array([1.0,1.0]),],
                             "DiscFac": base_primitives['DiscFac'],
                             'Nagents': 1,
                             'psi_seed': 0,
                             'xi_seed': 0,
                             'unemp_seed': 0,
                             'tax_rate': 0.0,
                             'vFuncBool': False,
                             'CubicBool': True,
                             'MrkvArray': MrkvArray,
                             'T_cycle':1
                             }

        MarkovType = MarkovConsumerType(**Markov_primitives)
        MarkovType.cycles = 0
        employed_income_dist = DiscreteDistribution(np.ones(1),
                                                    [np.ones(1),
                                                     np.ones(1)])
        unemployed_income_dist = DiscreteDistribution(np.ones(1),
                                                      [np.ones(1),
                                                       np.zeros(1)])
        MarkovType.IncomeDstn = [[employed_income_dist,
                                  unemployed_income_dist]]

        MarkovType.solve()
        MarkovType.unpackcFunc()

        self.TBSType = TBSType
        self.MarkovType = MarkovType
Example #9
        self.t_age[
            which_agents] = 0  # How many periods since each agent was born
        self.t_cycle[
            which_agents] = 0  # Which period of the cycle each agent is currently in
        return None


# Make a lifecycle consumer to be used for estimation, including simulated shocks (plus an initial distribution of wealth)
EstimationAgent = TempConsumerType(
    **Params.init_consumer_objects)  # Make a TempConsumerType for estimation
EstimationAgent(T_sim=EstimationAgent.T_cycle +
                1)  # Set the number of periods to simulate
EstimationAgent.track_vars = ['bNrmNow'
                              ]  # Choose to track bank balances as wealth
EstimationAgent.aNrmInit = DiscreteDistribution(
    Params.initial_wealth_income_ratio_probs,
    Params.initial_wealth_income_ratio_vals,
    seed=Params.seed).drawDiscrete(
        N=Params.num_agents)  # Draw initial assets for each consumer
EstimationAgent.makeShockHistory()


# Define the objective function for the simulated method of moments estimation
def smmObjectiveFxn(DiscFacAdj,
                    CRRA,
                    agent=EstimationAgent,
                    DiscFacAdj_bound=Params.DiscFacAdj_bound,
                    CRRA_bound=Params.CRRA_bound,
                    empirical_data=Data.w_to_y_data,
                    empirical_weights=Data.empirical_weights,
                    empirical_groups=Data.empirical_groups,
                    map_simulated_to_empirical_cohorts=Data.
Example #10
#
# Let's create a consumer similar to the one in the "idiosyncratic shocks" model, but who faces serially correlated unemployment over boom and bust cycles of the economy.

# %%
# Make a consumer with serially correlated unemployment, subject to boom and bust cycles
init_serial_unemployment = copy(init_idiosyncratic_shocks)
init_serial_unemployment["MrkvArray"] = [MrkvArray]
init_serial_unemployment["UnempPrb"] = 0  # to make income distribution when employed
init_serial_unemployment["global_markov"] = False
SerialUnemploymentExample = MarkovConsumerType(**init_serial_unemployment)
SerialUnemploymentExample.cycles = 0
SerialUnemploymentExample.vFuncBool = False  # for easy toggling here

# %%
# Replace the default (lognormal) income distribution with a custom one
employed_income_dist = DiscreteDistribution(np.ones(1), [np.ones(1), np.ones(1)])  # Definitely get income
unemployed_income_dist = DiscreteDistribution(np.ones(1), [np.ones(1), np.zeros(1)]) # Definitely don't
SerialUnemploymentExample.IncomeDstn = [
    [
        employed_income_dist,
        unemployed_income_dist,
        employed_income_dist,
        unemployed_income_dist,
    ]
]

# %% [markdown]
# Note that $\texttt{MarkovConsumerType}$ currently has no method to automatically construct a valid $\texttt{IncomeDstn}$; it must be constructed manually in each case, as in the sketch below. Writing a method to supersede $\texttt{IndShockConsumerType.updateIncomeProcess}$ for the "Markov model" would be a welcome contribution!
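
# %%
# A minimal sketch (hypothetical, not a HARK API) of what such a method might
# look like: a helper that builds the state-by-state income distribution from
# per-state unemployment indicators. The name makeMarkovIncomeDstn is an
# assumption for illustration only.
def makeMarkovIncomeDstn(employed_dstn, unemployed_dstn, state_is_unemployed):
    # One sublist per period of the cycle; one distribution per Markov state
    return [[unemployed_dstn if u else employed_dstn
             for u in state_is_unemployed]]

# Equivalent to the manual construction above:
# SerialUnemploymentExample.IncomeDstn = makeMarkovIncomeDstn(
#     employed_income_dist, unemployed_income_dist, [False, True, False, True])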

# %%
# Interest factor, permanent growth rates, and survival probabilities are constant arrays
Example #11
def run_web(spec_name):
    # Keep parameters in check as they are changed by reloading them
    import Parameters
    reload(Parameters)
    from Parameters import T_sim, init_dropout, init_highschool, init_college, EducShares, DiscFacDstns,\
     AgentCountTotal, base_dict, stimulus_changes, pandemic_changes, AggregationFactor
    from GiveItAwayModel import GiveItAwayNowType
    from GiveItAwayTools import runExperiment, makePandemicShockProbsFigure

    t0 = time()
    data = dict()
    mystr = lambda x: '{:.2f}'.format(x)
    figs_dir = '../../Figures/' + spec_name + '/'

    # debug
    #print(pandemic_changes['DeepA1'], base_dict['Lspell_real'], init_dropout['uPfac_big'])

    # Make baseline types
    DropoutType = GiveItAwayNowType(**init_dropout)
    HighschoolType = GiveItAwayNowType(**init_highschool)
    CollegeType = GiveItAwayNowType(**init_college)
    BaseTypeList = [DropoutType, HighschoolType, CollegeType]

    # Fill in the Markov income distribution for each base type
    IncomeDstn_unemp = DiscreteDistribution(
        np.array([1.0]),
        [np.array([1.0]), np.array([DropoutType.IncUnemp])])
    IncomeDstn_big = []
    for ThisType in BaseTypeList:
        for t in range(ThisType.T_cycle):
            if t < ThisType.T_retire:
                IncomeDstn_big.append([
                    ThisType.IncomeDstn[t], IncomeDstn_unemp, IncomeDstn_unemp,
                    ThisType.IncomeDstn[t], IncomeDstn_unemp, IncomeDstn_unemp
                ])
                ThisType.IncomeDstn[t] = [
                    ThisType.IncomeDstn[t], IncomeDstn_unemp
                ]
            else:
                IncomeDstn_big.append(6 * [ThisType.IncomeDstn[t]])
                ThisType.IncomeDstn[t] = 2 * [ThisType.IncomeDstn[t]]
        ThisType.IncomeDstn_big = IncomeDstn_big

    # Make the overall list of types
    TypeList = []
    n = 0
    for b in range(DiscFacDstns[0].X.size):
        for e in range(3):
            DiscFac = DiscFacDstns[e].X[b]
            AgentCount = int(
                np.floor(AgentCountTotal * EducShares[e] *
                         DiscFacDstns[e].pmf[b]))
            ThisType = deepcopy(BaseTypeList[e])
            ThisType.AgentCount = AgentCount
            ThisType.DiscFac = DiscFac
            ThisType.seed = n
            TypeList.append(ThisType)
            n += 1
    base_dict['Agents'] = TypeList

    # Make a figure to show unemployment probabilities by demographics
    pandemic_changes['show_fig'] = False
    data['fig6'] = makePandemicShockProbsFigure(BaseTypeList, 'TRASH',
                                                **pandemic_changes)
    del pandemic_changes['show_fig']

    # Solve and simulate each type to get to the initial distribution of states
    # and then prepare for new counterfactual simulations
    baseline_commands = [
        'solve()', 'initializeSim()', 'simulate()', 'saveState()',
        'switchToCounterfactualMode()', 'makeAlternateShockHistories()'
    ]
    multiThreadCommands(TypeList, baseline_commands)

    # Define dictionaries to be used in counterfactual scenarios
    stim_dict = base_dict.copy()
    stim_dict.update(**stimulus_changes)
    pan_dict = base_dict.copy()
    pan_dict.update(**pandemic_changes)
    both_dict = pan_dict.copy()
    both_dict.update(**stimulus_changes)

    # Run the baseline consumption level
    C_base, X_base, Z_base, cAll_base, Weight_base, Mrkv_base, U_base, ltAll_base, LT_by_inc_base = runExperiment(
        **base_dict)

    # Get consumption when the pandemic hits (no stim)
    C_pan, X_pan, Z_pan, cAll_pan, Weight_pan, Mrkv_pan, U_pan, ltAll_pan, LT_by_inc_pan = runExperiment(
        **pan_dict)

    # Get consumption when the pandemic hits and there's a stimulus
    C_both, X_both, Z_both, cAll_both, Weight_both, Mrkv_both, U_both, ltAll_both, LT_by_inc_both = runExperiment(
        **both_dict)

    # Calculate baseline consumption for those who *would* be in each Markov state in the pandemic
    X_alt = np.zeros((4, T_sim))
    X_alt[0, :] = np.sum(cAll_base * Weight_base * Mrkv_pan[0, :],
                         axis=1) / np.sum(Weight_base * Mrkv_pan[0, :], axis=1)
    X_alt[1, :] = np.sum(cAll_base * Weight_base * Mrkv_pan[1, :],
                         axis=1) / np.sum(Weight_base * Mrkv_pan[1, :], axis=1)
    X_alt[2, :] = np.sum(cAll_base * Weight_base * Mrkv_pan[2, :],
                         axis=1) / np.sum(Weight_base * Mrkv_pan[2, :], axis=1)
    X_alt[3, :] = np.sum(cAll_base * Weight_base * Mrkv_pan[3, :],
                         axis=1) / np.sum(Weight_base * Mrkv_pan[3, :], axis=1)

    # Calculate [baseline] labor and transfer income for those who *would* be in each Markov state in the pandemic
    LT_base = np.zeros((4, T_sim))
    LT_base[0, :] = np.sum(ltAll_base * Weight_base * Mrkv_pan[0, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[0, :],
                                            axis=1)
    LT_base[1, :] = np.sum(ltAll_base * Weight_base * Mrkv_pan[1, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[1, :],
                                            axis=1)
    LT_base[2, :] = np.sum(ltAll_base * Weight_base * Mrkv_pan[2, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[2, :],
                                            axis=1)
    LT_base[3, :] = np.sum(ltAll_base * Weight_base * Mrkv_pan[3, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[3, :],
                                            axis=1)
    LT_base_all = np.sum(ltAll_base * Weight_base, axis=1) / np.sum(
        Weight_base, axis=1)

    # Calculate [pandemic] labor and transfer income for those who *would* be in each Markov state in the pandemic
    LT_pan = np.zeros((4, T_sim))
    LT_pan[0, :] = np.sum(ltAll_pan * Weight_base * Mrkv_pan[0, :],
                          axis=1) / np.sum(Weight_base * Mrkv_pan[0, :],
                                           axis=1)
    LT_pan[1, :] = np.sum(ltAll_pan * Weight_base * Mrkv_pan[1, :],
                          axis=1) / np.sum(Weight_base * Mrkv_pan[1, :],
                                           axis=1)
    LT_pan[2, :] = np.sum(ltAll_pan * Weight_base * Mrkv_pan[2, :],
                          axis=1) / np.sum(Weight_base * Mrkv_pan[2, :],
                                           axis=1)
    LT_pan[3, :] = np.sum(ltAll_pan * Weight_base * Mrkv_pan[3, :],
                          axis=1) / np.sum(Weight_base * Mrkv_pan[3, :],
                                           axis=1)
    LT_pan_all = np.sum(ltAll_pan * Weight_base, axis=1) / np.sum(Weight_base,
                                                                  axis=1)

    # Calculate [pandemic with stimulus] labor and transfer income for those who *would* be in each Markov state in the pandemic
    LT_both = np.zeros((4, T_sim))
    LT_both[0, :] = np.sum(ltAll_both * Weight_base * Mrkv_pan[0, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[0, :],
                                            axis=1)
    LT_both[1, :] = np.sum(ltAll_both * Weight_base * Mrkv_pan[1, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[1, :],
                                            axis=1)
    LT_both[2, :] = np.sum(ltAll_both * Weight_base * Mrkv_pan[2, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[2, :],
                                            axis=1)
    LT_both[3, :] = np.sum(ltAll_both * Weight_base * Mrkv_pan[3, :],
                           axis=1) / np.sum(Weight_base * Mrkv_pan[3, :],
                                            axis=1)
    LT_both_all = np.sum(ltAll_both * Weight_base, axis=1) / np.sum(
        Weight_base, axis=1)

    # Plot the unemployment rate over time in baseline and pandemic
    data["fig1"] = [U_base * 100, U_pan * 100]

    # Plot aggregate consumption under baseline, pandemic, and pandemic + CARES Act
    data["fig2"] = [
        C_base * AggregationFactor,
        C_pan * AggregationFactor,
        C_both * AggregationFactor,
    ]

    # Plot aggregate income under baseline, pandemic, and pandemic + CARES Act
    data["fig3"] = [
        LT_base_all * AggregationFactor,
        LT_pan_all * AggregationFactor,
        LT_both_all * AggregationFactor,
    ]

    # Plot average consumption by initial employment state after pandemic
    data["fig4"] = [
        X_both[0, :] * 1000,
        X_both[1, :] * 1000,
        X_both[2, :] * 1000,
        X_pan[0, :] * 1000,
        X_pan[1, :] * 1000,
        X_pan[2, :] * 1000,
        X_alt[0, :] * 1000,
        X_alt[1, :] * 1000,
        X_alt[2, :] * 1000,
    ]

    # Plot average labor income plus transfers by initial employment state after pandemic
    data["fig5"] = [
        LT_both[0, :] * 1000,
        LT_both[1, :] * 1000,
        LT_both[2, :] * 1000,
        LT_pan[0, :] * 1000,
        LT_pan[1, :] * 1000,
        LT_pan[2, :] * 1000,
        LT_base[0, :] * 1000,
        LT_base[1, :] * 1000,
        LT_base[2, :] * 1000,
    ]

    t1 = time()
    print('Running the specification called ' + spec_name + ' took ' +
          mystr(t1 - t0) + ' seconds.')
    with open(f"../../Data/Dashboard/data_{spec_name}.pickle", "wb") as f:
        pickle.dump(data, f)
Example #12
    "aXtraMax": ExampleType.mUpperBnd,  # Maximum value of assets above minimum in grid
    "aXtraCount": 48,  # Number of points in assets grid
    "aXtraExtra": [None],  # Additional points to include in assets grid
    "aXtraNestFac": 3,  # Degree of exponential nesting when constructing assets grid
    "LivPrb": [np.array([1.0, 1.0])],  # Survival probability
    "DiscFac": base_primitives["DiscFac"],  # Intertemporal discount factor
    "AgentCount": 1,  # Number of agents in a simulation (irrelevant)
    "tax_rate": 0.0,  # Tax rate on labor income (irrelevant)
    "vFuncBool": False,  # Whether to calculate the value function
    "CubicBool": True,  # Whether to use cubic splines (False --> linear splines)
    "MrkvArray": [MrkvArray],  # State transition probabilities
}
MarkovType = MarkovConsumerType(**init_consumer_objects)  # Make a basic consumer type

employed_income_dist = DiscreteDistribution(np.ones(1),
    [np.ones(1), np.ones(1)]
    )  # Income distribution when employed
unemployed_income_dist = DiscreteDistribution(np.ones(1),
    [np.ones(1), np.zeros(1)]
    )  # Income distribution when permanently unemployed

MarkovType.IncomeDstn = [
    [employed_income_dist, unemployed_income_dist]
]  # set the income distribution in each state
MarkovType.cycles = 0

# Solve the "Markov TBS" model
t_start = process_time()
MarkovType.solve()
t_end = process_time()
MarkovType.unpack('cFunc')
Example #13
    for i in range(2):
        for M in AggShockMrkvExample.Mgrid.tolist():
            mMin = AggShockMrkvExample.solution[0].mNrmMin[i](M)
            c_at_this_M = AggShockMrkvExample.cFunc[0][i](m_grid + mMin, M *
                                                          np.ones_like(m_grid))
            plt.plot(m_grid + mMin, c_at_this_M)
        plt.ylim(0.0, None)
        plt.show()

if solve_krusell_smith:
    # Make a Krusell-Smith agent type
    # NOTE: These agents aren't exactly like KS, as they don't have serially correlated unemployment
    KSexampleType = deepcopy(AggShockMrkvExample)
    KSexampleType.IncomeDstn[0] = [
        DiscreteDistribution(np.array(
            [0.96, 0.04]), [np.array([1.0, 1.0]),
                            np.array([1.0 / 0.96, 0.0])]),
        DiscreteDistribution(
            np.array([0.90, 0.10]),
            [np.array([1.0, 1.0]),
             np.array([1.0 / 0.90, 0.0])],
        )
    ]

    # Make a KS economy
    KSeconomy = deepcopy(MrkvEconomyExample)
    KSeconomy.agents = [KSexampleType]
    KSeconomy.AggShkDstn = [
        DiscreteDistribution(
            np.array([1.0]),
            [np.array([1.0]), np.array([1.05])],
Example #14
 def test_draw(self):
     self.assertEqual(
         DiscreteDistribution(np.ones(1), np.zeros(1)).draw(1)[0],
         0,
     )
Example #15
    def update_income_process(self):

        self.wage = 1 / (self.SSPmu)  #calculate SS wage
        self.N = (self.mu_u * (self.IncUnemp * self.UnempPrb) + self.G) / (
            self.wage * self.tax_rate
        )  #calculate SS labor supply from Budget Constraint

        PermShkDstn_U = Lognormal(
            np.log(self.mu_u) - (self.L * (self.PermShkStd[0])**2) / 2,
            self.L * self.PermShkStd[0],
            123).approx(self.PermShkCount
                        )  #Permanent Shock Distribution faced when unemployed
        PermShkDstn_E = MeanOneLogNormal(self.PermShkStd[0], 123).approx(
            self.PermShkCount
        )  #Permanent Shock Distribution faced when employed

        pmf_P = np.concatenate(((1 - self.UnempPrb) * PermShkDstn_E.pmf,
                                self.UnempPrb * PermShkDstn_U.pmf))
        X_P = np.concatenate((PermShkDstn_E.X, PermShkDstn_U.X))
        PermShkDstn = [DiscreteDistribution(pmf_P, X_P)]
        self.PermShkDstn = PermShkDstn

        TranShkDstn_E = MeanOneLogNormal(self.TranShkStd[0], 123).approx(
            self.TranShkCount
        )  #Transitory Shock Distribution faced when employed
        TranShkDstn_E.X = (
            TranShkDstn_E.X * (1 - self.tax_rate) * self.wage * self.N
        ) / (
            1 - self.UnempPrb
        )**2  #NEED TO FIX THIS SQUARE TERM #add wage, tax rate and labor supply

        lng = len(TranShkDstn_E.X)
        TranShkDstn_U = DiscreteDistribution(
            np.ones(lng) / lng, self.IncUnemp *
            np.ones(lng))  #Transitory Shock Distribution faced when unemployed

        IncShkDstn_E = combine_indep_dstns(
            PermShkDstn_E,
            TranShkDstn_E)  # Income Distribution faced when Employed
        IncShkDstn_U = combine_indep_dstns(
            PermShkDstn_U,
            TranShkDstn_U)  # Income Distribution faced when Unemployed

        #Combine Outcomes of both distributions
        X_0 = np.concatenate((IncShkDstn_E.X[0], IncShkDstn_U.X[0]))
        X_1 = np.concatenate((IncShkDstn_E.X[1], IncShkDstn_U.X[1]))
        X_I = [X_0, X_1]  #discrete distribution takes in a list of arrays

        #Combine pmf Arrays
        pmf_I = np.concatenate(((1 - self.UnempPrb) * IncShkDstn_E.pmf,
                                self.UnempPrb * IncShkDstn_U.pmf))

        IncShkDstn = [DiscreteDistribution(pmf_I, X_I)]
        self.IncShkDstnN = IncShkDstn

        self.IncShkDstn = IncShkDstn
        self.add_to_time_vary('IncShkDstn')

        PermShkDstn_Uw = Lognormal(
            np.log(self.mu_u) - (self.L * (self.PermShkStd[0])**2) / 2,
            self.L * self.PermShkStd[0],
            123).approx(self.PermShkCount
                        )  #Permanent Shock Distribution faced when unemployed
        PermShkDstn_Ew = MeanOneLogNormal(self.PermShkStd[0], 123).approx(
            self.PermShkCount
        )  #Permanent Shock Distribution faced when employed

        TranShkDstn_Ew = MeanOneLogNormal(self.TranShkStd[0], 123).approx(
            self.TranShkCount
        )  #Transitory Shock Distribution faced when employed
        TranShkDstn_Ew.X = (
            TranShkDstn_Ew.X * (1 - self.tax_rate) *
            (self.wage + self.dx) * self.N) / (
                1 - self.UnempPrb)**2  #add wage, tax rate and labor supply

        lng = len(TranShkDstn_Ew.X)
        TranShkDstn_Uw = DiscreteDistribution(
            np.ones(lng) / lng, self.IncUnemp *
            np.ones(lng))  #Transitory Shock Distribution faced when unemployed

        IncShkDstn_Ew = combine_indep_dstns(
            PermShkDstn_Ew,
            TranShkDstn_Ew)  # Income Distribution faced when Employed
        IncShkDstn_Uw = combine_indep_dstns(
            PermShkDstn_Uw,
            TranShkDstn_Uw)  # Income Distribution faced when Unemployed

        #Combine Outcomes of both distributions
        X_0 = np.concatenate((IncShkDstn_Ew.X[0], IncShkDstn_Uw.X[0]))
        X_1 = np.concatenate((IncShkDstn_Ew.X[1], IncShkDstn_Uw.X[1]))
        X_I = [X_0, X_1]  #discrete distribution takes in a list of arrays

        #Combine pmf Arrays
        pmf_I = np.concatenate(((1 - self.UnempPrb) * IncShkDstn_Ew.pmf,
                                self.UnempPrb * IncShkDstn_Uw.pmf))

        IncShkDstnW = [DiscreteDistribution(pmf_I, X_I)]

        self.IncShkDstnW = IncShkDstnW
        self.add_to_time_vary('IncShkDstnW')
Example #16
class GiveItAwayNowType(MarkovConsumerType):
    time_inv_ = MarkovConsumerType.time_inv_ + ['uPfac']
    
    def __init__(self,cycles=1,time_flow=True,**kwds):
        MarkovConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,**kwds)
        self.solveOnePeriod = solveConsMarkovALT
        
        
    def preSolve(self):
        self.MrkvArray = self.MrkvArray_pcvd
        MarkovConsumerType.preSolve(self)
        self.updateSolutionTerminal()
        
        
    def initializeSim(self):
        MarkovConsumerType.initializeSim(self)
        if hasattr(self,'T_advance'):
            self.restoreState()
            self.MrkvArray = self.MrkvArray_sim
        elif not hasattr(self,'mortality_off'):
            self.calcAgeDistribution()
            self.initializeAges()
        if (hasattr(self,'Mrkv_univ') and self.Mrkv_univ is not None):
            self.MrkvNow[:] = self.Mrkv_univ
        
        
    def getMortality(self):
        '''
        A modified version of getMortality that reads mortality history if the
        attribute read_mortality exists.  This is a workaround to make sure the
        history of death events is identical across simulations.
        '''
        if (self.read_shocks or hasattr(self,'read_mortality')):
            who_dies = self.who_dies_backup[self.t_sim,:]
        else:
            who_dies = self.simDeath()
        self.simBirth(who_dies)
        self.who_dies = who_dies
        return None
    
    
    def simDeath(self):
        if hasattr(self,'mortality_off'):
            return np.zeros(self.AgentCount, dtype=bool)
        else:
            return MarkovConsumerType.simDeath(self)
        
        
    def getStates(self):
        MarkovConsumerType.getStates(self)
        if hasattr(self,'T_advance'): # This means we're in the policy experiment
            self.noticeStimulus()
            self.makeWeights()
        if hasattr(self,'ContUnempBenefits'):
            if self.ContUnempBenefits==True:
                self.continueUnemploymentBenefits()
        
        # Store indicators of whether this agent is a worker and unemployed
        w = self.t_cycle <= self.T_retire
        u = np.logical_and(np.mod(self.MrkvNow,3) > 0, w)
        lLvl = self.pLvlNow*self.TranShkNow
        lLvl[u] = 0.
        lLvl[self.t_cycle > self.T_retire] = 0.
        self.lLvlNow = lLvl
        self.uNow = u
        self.wNow = w
        
        
    def updateMrkvArray(self):
        '''
        Constructs an updated MrkvArray_pcvd attribute to be used in solution (perceived),
        as well as MrkvArray_sim attribute to be used in simulation (actual).
        Uses the primitive attributes Uspell, Urate, Dspell_pcvd, Lspell_pcvd,
        Dspell_real, Lspell_real.
        '''
        self.MrkvArray_pcvd = makeMrkvArray(self.Urate, self.Uspell, self.Dspell_pcvd, self.Lspell_pcvd)
        self.MrkvArray_sim = makeMrkvArray(self.Urate, self.Uspell, self.Dspell_real, self.Lspell_real)

        
    def calcAgeDistribution(self):
        '''
        Calculates the long run distribution of t_cycle in the population.
        '''
        AgeMarkov = np.zeros((self.T_cycle+1,self.T_cycle+1))
        for t in range(self.T_cycle):
            p = self.LivPrb[t][0]
            AgeMarkov[t,t+1] = p
            AgeMarkov[t,0] = 1. - p
        AgeMarkov[-1,0] = 1.
        
        AgeMarkovT = np.transpose(AgeMarkov)
        vals, vecs = np.linalg.eig(AgeMarkovT)
        dist = np.abs(np.abs(vals) - 1.)
        idx = np.argmin(dist)
        
        with warnings.catch_warnings():
            warnings.simplefilter("ignore") # Ignore warning about casting complex eigenvector to float
            LRagePrbs = vecs[:,idx].astype(float)
        LRagePrbs /= np.sum(LRagePrbs)
        age_vec = np.arange(self.T_cycle+1).astype(int)
        self.LRageDstn = DiscreteDistribution(LRagePrbs, age_vec)
        
        
    def initializeAges(self):
        '''
        Assign initial values of t_cycle to simulated agents, using the attribute
        LRageDstn as the distribution of discrete ages.
        '''
        age = self.LRageDstn.drawDiscrete(self.AgentCount,
                           seed=self.RNG.randint(0,2**31-1))
        age = age.astype(int)
        self.t_cycle = age
        self.t_age = age
        
    
    def switchToCounterfactualMode(self):
        '''
        Very small method that swaps in the "big" six-Markov-state versions of some
        solution attributes, replacing the "small" two-state versions that are used
        only to generate the pre-pandemic initial distribution of state variables.
        It then prepares this type to create alternate shock histories so it can
        run counterfactual experiments.
        '''
        del self.solution
        self.delFromTimeVary('solution')
        
        # Swap in "big" versions of the Markov-state-varying attributes
        self.LivPrb = self.LivPrb_big
        self.PermGroFac = self.PermGroFac_big
        self.MrkvArray = self.MrkvArray_big
        self.Rfree = self.Rfree_big
        self.uPfac = self.uPfac_big
        self.IncomeDstn = self.IncomeDstn_big
        
        # Adjust simulation parameters for the counterfactual experiments
        self.T_sim = T_sim
        self.track_vars = ['cNrmNow','pLvlNow','Weight','lLvlNow','uNow','wNow','TranShkNow','t_cycle']
        self.T_advance = None
        self.MrkvArray_pcvd = self.MrkvArray
        #print('Finished type ' + str(self.seed) + '!')
        
        
    def makeAlternateShockHistories(self):
        '''
        Make a history of Markov states and income shocks starting from each Markov state.
        '''
        self.MrkvArray = self.MrkvArray_sim
        J = self.MrkvArray[0].shape[0]
        DeathHist = np.zeros((J,self.T_sim,self.AgentCount), dtype=bool)
        MrkvHist = np.zeros((J,self.T_sim,self.AgentCount), dtype=int)
        TranShkHist = np.zeros((J,self.T_sim,self.AgentCount))
        PermShkHist = np.zeros((J,self.T_sim,self.AgentCount))
        for j in range(J):
            self.Mrkv_univ = j
            self.read_shocks = False
            self.makeShockHistory()
            DeathHist[j,:,:] = self.history['who_dies']
            MrkvHist[j,:,:] = self.history['MrkvNow']
            PermShkHist[j,:,:] = self.history['PermShkNow']
            TranShkHist[j,:,:] = self.history['TranShkNow']
            self.read_mortality = True # Make sure that every death history is the same
            self.who_dies_backup = self.history['who_dies'].copy()
        self.DeathHistAll = DeathHist
        self.MrkvHistAll = MrkvHist
        self.PermShkHistAll = PermShkHist
        self.TranShkHistAll = TranShkHist
        self.Mrkv_univ = None
        self.MrkvArray_sim_prev = self.MrkvArray_sim
        self.L_shared_prev = self.L_shared
        del self.read_mortality
        
        
    def solveIfChanged(self):
        '''
        Re-solve the lifecycle model only if the attributes MrkvArray_pcvd and uPfac
        do not match those in MrkvArray_pcvd_prev and uPfac_prev.
        '''
        # Check whether MrkvArray_pcvd and uPfac have changed (and whether they exist at all!)
        try:
            same_MrkvArray = distanceMetric(self.MrkvArray_pcvd, self.MrkvArray_pcvd_prev) == 0.
            same_uPfac = distanceMetric(self.uPfac, self.uPfac_prev) == 0.
            if (same_MrkvArray and same_uPfac):
                return
        except:
            pass
        
        # Re-solve the model, then note the values in MrkvArray_pcvd and uPfac
        self.solve()
        self.MrkvArray_pcvd_prev = self.MrkvArray_pcvd
        self.uPfac_prev = self.uPfac
        
        
    def makeShocksIfChanged(self):
        '''
        Re-draw the histories of Markov states and income shocks only if the attributes
        MrkvArray_sim and L_shared do not match those in MrkvArray_sim_prev and L_shared_prev.
        '''
        # Check whether MrkvArray_sim and L_shared have changed (and whether they exist at all!)
        try:
            same_MrkvArray = distanceMetric(self.MrkvArray_sim, self.MrkvArray_sim_prev) == 0.
            same_shared = self.L_shared == self.L_shared_prev
            if (same_MrkvArray and same_shared):
                return
        except:
            pass
        
        # Re-draw the shock histories, then note the values in MrkvArray_sim and L_shared
        self.makeAlternateShockHistories()


    def makeWeights(self):
        '''
        Create the attribute Weight, weighting each simulated agent by the
        relative size of its birth cohort under population growth PopGroFac.
        '''
        self.Weight = self.PopGroFac**((self.t_sim+self.t_sim_base)-self.t_cycle)
    
    
    def saveState(self):
        '''
        Record the current state of simulation variables for later use.
        '''
        self.aNrm_base = self.aNrmNow.copy()
        self.pLvl_base = self.pLvlNow.copy()
        self.Mrkv_base = self.MrkvNow.copy()
        self.age_base  = self.t_cycle.copy()
        self.t_sim_base = self.t_sim
        self.PlvlAgg_base = self.PlvlAggNow


    def restoreState(self):
        '''
        Restore the state of the simulation to some baseline values.
        '''
        self.aNrmNow = self.aNrm_base.copy()
        self.pLvlNow = self.pLvl_base.copy()
        self.MrkvNow = self.Mrkv_base.copy()
        self.t_cycle = self.age_base.copy()
        self.t_age   = self.age_base.copy()
        self.PlvlAggNow = self.PlvlAgg_base
        
        
    def hitWithPandemicShock(self):
        '''
        Alter the Markov state of each simulated agent, jumping some people into
        an otherwise inaccessible "deep unemployment" state, and others into
        normal unemployment.
        '''
        # Calculate (cumulative) probabilities of each agent being shocked into each state
        age = (self.t_age/4) + 24
        DeepX = self.DeepParam0 + self.DeepParam1*np.log(self.pLvlNow) + self.DeepParam2*age + self.DeepParam3*age**2
        UnempX = self.UnempParam0 + self.UnempParam1*np.log(self.pLvlNow) + self.UnempParam2*age + self.UnempParam3*age**2
        expDeepX = np.exp(DeepX)
        expUnempX = np.exp(UnempX)
        denom = 1. + expDeepX + expUnempX
        EmpPrb = 1./denom
        UnempPrb = expUnempX/denom
        DeepPrb = expDeepX/denom
        PrbArray = np.vstack([EmpPrb,UnempPrb,DeepPrb])
        CumPrbArray = np.cumsum(PrbArray, axis=0)
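        # (Explanatory note: EmpPrb, UnempPrb, and DeepPrb come from a
        # multinomial logit in log permanent income and age, with employment
        # as the base category, so they sum to one for every agent.)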
        
        # Draw new Markov states for each agent
        draws = Uniform().draw(self.AgentCount, seed=self.RNG.randint(0,2**31-1))
        draws = self.RNG.permutation(draws)
        MrkvNew = np.zeros(self.AgentCount, dtype=int)
        MrkvNew[draws > CumPrbArray[0]] = 1
        MrkvNew[draws > CumPrbArray[1]] = 2
        if (self.PanShock and not self.L_shared): # If the pandemic actually occurs,
            MrkvNew += 3 # then put everyone into the low marginal utility world.
            # This is (momentarily) skipped over if the lockdown state is shared
            # rather than idiosyncratic.  See a few lines below.
        
        # Move agents to those Markov states 
        self.MrkvNow = MrkvNew
        
        # Take the appropriate shock history for each agent, depending on their state
        J = self.MrkvArray[0].shape[0]
        for j in range(J):
            these = self.MrkvNow == j
            self.history['who_dies'][:,these] = self.DeathHistAll[j,:,:][:,these]
            self.history['MrkvNow'][:,these] = self.MrkvHistAll[j,:,:][:,these]
            self.history['PermShkNow'][:,these] = self.PermShkHistAll[j,:,:][:,these]
            self.history['TranShkNow'][:,these] = self.TranShkHistAll[j,:,:][:,these]
        
        # If the lockdown is a common/shared event, rather than idiosyncratic, bump
        # everyone into the lockdown state for *exactly* T_lockdown periods
        if (self.PanShock and self.L_shared):
            T = self.T_lockdown
            self.history['MrkvNow'][0:T,:] += 3
            
        # Edit the first period of the shock history to give all unemployed
        # people a bonus payment in just that quarter
        one_off_benefits = True   # If agents get continued unemployment benefits, the first period benefits are counted later
        if hasattr(self,'ContUnempBenefits'):
            if self.ContUnempBenefits==True:
                one_off_benefits = False
        if one_off_benefits:
            young = self.age_base < self.T_retire
            unemp = np.logical_and(np.mod(self.MrkvNow,3) == 1, young)
            deep  = np.logical_and(np.mod(self.MrkvNow,3) == 2, young)
            self.history['TranShkNow'][0,unemp] += self.BonusUnemp/(self.pLvlNow[unemp]*self.history['PermShkNow'][0,unemp])
            self.history['TranShkNow'][0,deep]  += self.BonusDeep/(self.pLvlNow[deep]*self.history['PermShkNow'][0,deep])
            
        
    def announceStimulus(self):
        '''
        Announce a stimulus payment T periods in advance of when it will actually occur.
        '''
        self.T_til_check = self.T_advance
        self.Stim_unnoticed = np.ones(self.AgentCount, dtype=bool)
        
        # Determine stimulus check size for each simulated agent
        StimLvl = np.ones(self.AgentCount)*self.StimMax
        if self.StimCut1 is not None:
            these = self.pLvl_base > self.StimCut1
            StimLvl[these] = 0. # Eliminate stimulus check for those above top threshold
        if self.StimCut0 is not None:
            these = np.logical_and(self.pLvl_base > self.StimCut0, self.pLvl_base <= self.StimCut1)
            alpha = (self.pLvl_base[these] - self.StimCut0) / (self.StimCut1 - self.StimCut0)
            StimLvl[these] *= 1.-alpha # Phase out stimulus check for those above bottom threshold
        self.StimLvl = StimLvl
        
    
    def noticeStimulus(self):
        '''
        Give each agent the opportunity to notice the future stimulus payment and
        mentally account for it in their market resources.
        '''
        if self.T_til_check > 0:
            self.T_til_check -= 1
        
        updaters = Bernoulli(p=self.UpdatePrb).draw(self.AgentCount, seed=self.RNG.randint(0,2**31-1))
        if self.T_til_check == 0:
            updaters = np.ones(self.AgentCount, dtype=bool)
        
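        # Updaters who have not yet noticed add the discounted present value
        # of the (normalized) stimulus check to their market resources:
        # StimLvl/pLvlNow, discounted back T_til_check periods at Rfree[0].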
        self.mNrmNow[updaters] += self.Stim_unnoticed[updaters]*self.StimLvl[updaters]/self.pLvlNow[updaters]*self.Rfree[0]**(-self.T_til_check)
        self.Stim_unnoticed[updaters] = False
        
    def continueUnemploymentBenefits(self):
        '''
        Continue to give unemployment benefits if utility of consumption remains depressed
        '''
        young = self.t_cycle < self.T_retire
        unemp = np.logical_and(self.MrkvNow == 4, young)
        deep  = np.logical_and(self.MrkvNow == 5, young)
        self.mNrmNow[unemp] += self.BonusUnemp/(self.pLvlNow[unemp]*self.history['PermShkNow'][0,unemp])
        self.mNrmNow[deep] += self.BonusDeep/(self.pLvlNow[deep]*self.history['PermShkNow'][0,deep])
Example #17
import matplotlib.pyplot as plt
from copy import deepcopy

if __name__ == '__main__':

    mystr = lambda x: '{:.2f}'.format(x)

    # Make baseline types
    DropoutType = GiveItAwayNowType(**init_dropout)
    HighschoolType = GiveItAwayNowType(**init_highschool)
    CollegeType = GiveItAwayNowType(**init_college)
    BaseTypeList = [DropoutType, HighschoolType, CollegeType]

    # Fill in the Markov income distribution for each base type
    IncomeDstn_unemp = DiscreteDistribution(
        np.array([1.0]),
        [np.array([1.0]), np.array([DropoutType.IncUnemp])])
    IncomeDstn_big = []
    for ThisType in BaseTypeList:
        for t in range(ThisType.T_cycle):
            if t < ThisType.T_retire:
                IncomeDstn_big.append([
                    ThisType.IncomeDstn[t], IncomeDstn_unemp, IncomeDstn_unemp,
                    ThisType.IncomeDstn[t], IncomeDstn_unemp, IncomeDstn_unemp
                ])
                ThisType.IncomeDstn[t] = [
                    ThisType.IncomeDstn[t], IncomeDstn_unemp
                ]
            else:
                IncomeDstn_big.append(6 * [ThisType.IncomeDstn[t]])
                ThisType.IncomeDstn[t] = 2 * [ThisType.IncomeDstn[t]]
Example #18
prb_ub = 1-prb_eb     # Probability of unemployment in the bad state
p_ind  = 1            # Persistent component of income is always 1
ell_ug = ell_ub = 0   # Labor supply is zero for unemployed consumers in either agg state
ell_eg = 1.0/prb_eg   # Labor supply for employed consumer in good state
ell_eb = 1.0/prb_eb   # Normalized so prb_eg*ell_eg + prb_ug*ell_ug = prb_eb*ell_eb + prb_ub*ell_ub = 1

# IncomeDstn is a list of lists, one for each aggregate Markov state
# Each contains three arrays of floats, representing a discrete approximation to the income process. 
# Order: 
#   state probabilities 
#   idiosyncratic persistent income level by state (KS have no persistent shocks, so p_ind is always 1.0)
#   idiosyncratic transitory income level by state

KSAgent.IncomeDstn[0] = [
     DiscreteDistribution(np.array([prb_eg,prb_ug]), 
                          [np.array([p_ind,p_ind]),
                           np.array([ell_eg,ell_ug])]), # Agg state good
     DiscreteDistribution(np.array([prb_eb,prb_ub]),
                          [np.array([p_ind,p_ind]),
                           np.array([ell_eb,ell_ub])])  # Agg state bad
]
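
# Illustrative check (an addition, not in the original): labor supply was
# scaled above so that mean labor supply equals one in each aggregate state.
assert np.isclose(prb_eg * ell_eg + prb_ug * ell_ug, 1.0)
assert np.isclose(prb_eb * ell_eb + prb_ub * ell_ub, 1.0)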

# %% [markdown]
# Up to this point, individual agents do not have enough information to solve their decision problem yet. What is missing are beliefs about the endogenous macro variables $r$ and $w$, both of which are functions of $\bar{k}$. 
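
# %% [markdown]
# For reference (an illustrative aside, not in the original notebook): with the Cobb-Douglas technology used below, the factor prices implied by the capital-to-labor ratio $\bar{k}$ take the familiar form $r = \alpha \bar{k}^{\alpha - 1}$ and $w = (1-\alpha)\bar{k}^{\alpha}$, up to depreciation and the aggregate productivity state. These are the objects agents must forecast.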

# %% [markdown]
# #### The Aggregate Economy

# %% code_folding=[]
from HARK.ConsumptionSaving.ConsAggShockModel import CobbDouglasMarkovEconomy
Example #19
    def setUp(self):
        # Set up and solve TBS
        base_primitives = {
            "UnempPrb": 0.015,
            "DiscFac": 0.9,
            "Rfree": 1.1,
            "PermGroFac": 1.05,
            "CRRA": 0.95,
        }
        TBSType = TractableConsumerType(**base_primitives)
        TBSType.solve()

        # Set up and solve Markov
        MrkvArray = [
            np.array([
                [
                    1.0 - base_primitives["UnempPrb"],
                    base_primitives["UnempPrb"]
                ],
                [0.0, 1.0],
            ])
        ]
        Markov_primitives = {
            "CRRA": base_primitives["CRRA"],
            "Rfree": np.array(2 * [base_primitives["Rfree"]]),
            "PermGroFac": [
                np.array(2 * [base_primitives["PermGroFac"] /
                              (1.0 - base_primitives["UnempPrb"])])
            ],
            "BoroCnstArt": None,
            "PermShkStd": [0.0],
            "PermShkCount": 1,
            "TranShkStd": [0.0],
            "TranShkCount": 1,
            "T_total": 1,
            "UnempPrb": 0.0,
            "UnempPrbRet": 0.0,
            "T_retire": 0,
            "IncUnemp": 0.0,
            "IncUnempRet": 0.0,
            "aXtraMin": 0.001,
            "aXtraMax": TBSType.mUpperBnd,
            "aXtraCount": 48,
            "aXtraExtra": [None],
            "aXtraNestFac": 3,
            "LivPrb": [np.array([1.0, 1.0])],
            "DiscFac": base_primitives["DiscFac"],
            "Nagents": 1,
            "psi_seed": 0,
            "xi_seed": 0,
            "unemp_seed": 0,
            "tax_rate": 0.0,
            "vFuncBool": False,
            "CubicBool": True,
            "MrkvArray": MrkvArray,
            "T_cycle": 1,
        }

        MarkovType = MarkovConsumerType(**Markov_primitives)
        MarkovType.cycles = 0
        employed_income_dist = DiscreteDistribution(np.ones(1),
                                                    np.array([[1.0], [1.0]]))
        unemployed_income_dist = DiscreteDistribution(np.ones(1),
                                                      np.array([[1.0], [0.0]]))
        MarkovType.IncShkDstn = [[
            employed_income_dist, unemployed_income_dist
        ]]

        MarkovType.solve()
        MarkovType.unpack("cFunc")

        self.TBSType = TBSType
        self.MarkovType = MarkovType
Example #20
def create_income_dstn(epsilon):

    # No permanent income shocks and 1+eps, 1-eps with half chance each for transitory.
    IncomeDstn = DiscreteDistribution(
        np.array([0.5, 0.5]),
        [np.array([1.0, 1.0]),
         np.array([1 - epsilon, 1 + epsilon])])
    return IncomeDstn
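
# Usage sketch (illustrative, not in the original snippet): a mean-one
# transitory shock taking values 1-eps and 1+eps with equal probability.
# IncShkDstn = create_income_dstn(0.1)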