Example #1
def describeMPCdstn(SomeTypes,percentiles):
    MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
    MPCpercentiles_quarterly = getPercentiles(MPC_sim, percentiles=percentiles)
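    # Annualize the quarterly MPCs: kappa_Y = 1 - (1 - kappa_Q)**4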
    MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
    
    for j in range(len(percentiles)):
        print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
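# A hedged usage sketch: assuming MyTypes is a list of already-simulated
# consumer types (as in the notebook cells further below), one could call:
#     describeMPCdstn(MyTypes, np.linspace(0.1, 0.9, 9))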
Example #2
def makeMPCfig(kappa, weights):
    '''
    Plot the CDF of the marginal propensity to consume. A sub-function of makeCSTWresults().

    Parameters
    ----------
    kappa : np.array
        Array of (annualized) marginal propensities to consume for the economy.
    weights : np.array
        Age-conditional weight array for the data in kappa.

    Returns
    -------
    these_percents : np.array
        Array of percentiles of the marginal propensity to consume.
    kappa_percentiles : np.array
        Array of MPCs corresponding to the percentiles in these_percents.
    '''
    these_percents = np.linspace(0.0001, 0.9999, 201)
    kappa_percentiles = getPercentiles(kappa,
                                       weights,
                                       percentiles=these_percents)
    plt.plot(kappa_percentiles, these_percents, '-k', linewidth=1.5)
    plt.xlabel('Marginal propensity to consume', fontsize=14)
    plt.ylabel('Cumulative probability', fontsize=14)
    plt.title('CDF of the MPC', fontsize=16)
    plt.show()
    return (these_percents, kappa_percentiles)
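# A hedged usage note: in Example #8 below, makeCSTWstats calls this function as
# makeMPCfig(sim_kappa, sim_weight_all), where sim_kappa stacks each type's
# kappa_history and sim_weight_all holds the matching age-conditional weights.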
Example #3
def FagerengObjFunc(center, spread, verbose=False):
    '''
    Objective function for the quick and dirty structural estimation to fit
    Fagereng, Holm, and Natvik's Table 9 results with a basic infinite horizon
    consumption-saving model (with permanent and transitory income shocks).

    Parameters
    ----------
    center : float
        Center of the uniform distribution of discount factors.
    spread : float
        Width of the uniform distribution of discount factors.
    verbose : bool
        When True, print the simulated MPC table for these parameters to screen.
        When False, print only (center, spread, distance).

    Returns
    -------
    distance : float
        Euclidean distance between simulated MPCs and (adjusted) Table 9 MPCs.
    '''
    # Give our consumer types the requested discount factor distribution
    beta_set = approxUniform(N=TypeCount,
                             bot=center - spread,
                             top=center + spread)[1]
    for j in range(TypeCount):
        EstTypeList[j](DiscFac=beta_set[j])

    # Solve and simulate all consumer types, then gather their wealth levels
    multiThreadCommands(
        EstTypeList,
        ['solve()', 'initializeSim()', 'simulate()', 'unpackcFunc()'])
    WealthNow = np.concatenate([ThisType.aLvlNow for ThisType in EstTypeList])

    # Get wealth quartile cutoffs and distribute them to each consumer type
    quartile_cuts = getPercentiles(WealthNow, percentiles=[0.25, 0.50, 0.75])
    for ThisType in EstTypeList:
        WealthQ = np.zeros(ThisType.AgentCount, dtype=int)
        for n in range(3):
            WealthQ[ThisType.aLvlNow > quartile_cuts[n]] += 1
        ThisType(WealthQ=WealthQ)

    # Keep track of MPC sets in a 4x4 nested list of lists (lottery size x wealth quartile)
    MPC_set_list = [[[] for q in range(4)] for k in range(4)]

    # Calculate the MPC for each of the four lottery sizes for all agents
    for ThisType in EstTypeList:
        ThisType.simulate(1)
        c_base = ThisType.cNrmNow
        MPC_this_type = np.zeros((ThisType.AgentCount, 4))
        for k in range(4):  # Get MPC for all agents of this type
            Llvl = lottery_size[k]
            Lnrm = Llvl / ThisType.pLvlNow
            if do_secant:
                SplurgeNrm = Splurge / ThisType.pLvlNow
                mAdj = ThisType.mNrmNow + Lnrm - SplurgeNrm
                cAdj = ThisType.cFunc[0](mAdj) + SplurgeNrm
                MPC_this_type[:, k] = (cAdj - c_base) / Lnrm
            else:
                mAdj = ThisType.mNrmNow + Lnrm
                MPC_this_type[:, k] = ThisType.cFunc[0].derivative(mAdj)

        # Sort the MPCs into the proper MPC sets
        for q in range(4):
            these = ThisType.WealthQ == q
            for k in range(4):
                MPC_set_list[k][q].append(MPC_this_type[these, k])

    # Calculate average within each MPC set
    simulated_MPC_means = np.zeros((4, 4))
    for k in range(4):
        for q in range(4):
            MPC_array = np.concatenate(MPC_set_list[k][q])
            simulated_MPC_means[k, q] = np.mean(MPC_array)

    # Calculate Euclidean distance between simulated MPC averages and Table 9 targets
    diff = simulated_MPC_means - MPC_target
    if drop_corner:
        diff[0, 0] = 0.0
    distance = np.sqrt(np.sum(diff**2))
    if verbose:
        print(simulated_MPC_means)
    else:
        print(center, spread, distance)
    return distance
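# A minimal estimation sketch (an illustrative choice, not necessarily the
# authors' procedure): the two parameters could be fit by handing the objective
# to scipy's Nelder-Mead optimizer, with a purely hypothetical initial guess:
#
#     from scipy.optimize import minimize
#     opt = minimize(lambda x: FagerengObjFunc(x[0], x[1]),
#                    x0=[0.92, 0.03], method='Nelder-Mead')
#     center_hat, spread_hat = opt.x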
Example #4
def makePandemicShockProbsFigure(Agents,
                                 spec_name,
                                 PanShock,
                                 UnempD,
                                 UnempH,
                                 UnempC,
                                 UnempP,
                                 UnempA1,
                                 UnempA2,
                                 DeepD,
                                 DeepH,
                                 DeepC,
                                 DeepP,
                                 DeepA1,
                                 DeepA2,
                                 show_fig=False,
                                 for_mini=False):
    '''
    Make figures showing the probability of becoming unemployed and deeply
    unemployed when the pandemic hits, by age, income, and education.
    
    Parameters
    ----------
    Agents : [AgentType]
        List of types of agents in the economy.  Only the first three types
        are actually used by this function, and they are not changed at all:
        they are deepcopied and the copies manipulated.
    spec_name : str
        Filename suffix for the saved figure, or (if for_mini is True) the
        subdirectory of ./Figures/ where the figure is saved.
    PanShock : bool
        This isn't used; it's here for technical reasons.
    UnempD : float
        Constant for highschool dropouts in the Markov-shock logit for unemployment.
    UnempH : float
        Constant for highschool grads in the Markov-shock logit for unemployment.
    UnempC : float
        Constant for college goers in the Markov-shock logit for unemployment.
    UnempP : float
        Coefficient on log permanent income in the Markov-shock logit for unemployment.
    UnempA1 : float
        Coefficient on age in the Markov-shock logit for unemployment.
    UnempA2 : float
        Coefficient on age squared in the Markov-shock logit for unemployment.
    DeepD : float
        Constant for highschool dropouts in the Markov-shock logit for deep unemployment.
    DeepH : float
        Constant for highschool grads in the Markov-shock logit for deep unemployment.
    DeepC : float
        Constant for college goers in the Markov-shock logit for deep unemployment.
    DeepP : float
        Coefficient on log permanent income in the Markov-shock logit for deep unemployment.
    DeepA1 : float
        Coefficient on age in the Markov-shock logit for deep unemployment.
    DeepA2 : float
        Coefficient on age squared in the Markov-shock logit for deep unemployment.
    show_fig : bool
        Indicator for whether the figure should be displayed to screen; default False.
    for_mini : bool
        Indicator for whether this call was from the MINI script, which changes the
        target directory where the figure is saved.
        
    Returns
    -------
    data : Dict
        A dictionary with data to plot pandemic shock unemployment probabilities.
    '''
    if for_mini:
        fig_name_base = figs_dir + spec_name + '/UbyDemog'
    else:
        fig_name_base = figs_dir + 'UnempProbByDemog' + spec_name

    BigPop = 100000
    T = Agents[0].T_retire + 1
    PlvlAgg_adjuster = Agents[0].PermGroFacAgg**(-np.arange(T))
    Unemp0 = [UnempD, UnempH, UnempC]
    Deep0 = [DeepD, DeepH, DeepC]

    # Initialize an array to hold permanent income percentile data
    pctiles = [0.05, 0.25, 0.5, 0.75, 0.95]
    pLvlPercentiles = np.zeros((3, 5, T)) + np.nan

    # Get distribution of permanent income at each age for each education level,
    # as well as the probability of unemployment and deep unemployment for all
    TempTypes = deepcopy(Agents)
    for n in range(3):
        ThisType = TempTypes[n]
        e = ThisType.EducType
        ThisType.AgentCount = int(EducShares[e] * BigPop)
        ThisType.mortality_off = True
        ThisType.T_sim = T
        ThisType.initializeSim()
        pLvlInit = ThisType.pLvlNow.copy()
        ThisType.makeShockHistory()
        ThisType.history['pLvlNow'] = np.cumprod(
            ThisType.history['PermShkNow'], axis=0)
        ThisType.history['pLvlNow'] *= np.tile(
            np.reshape(pLvlInit, (1, ThisType.AgentCount)), (T, 1))
        ThisType.history['pLvlNow'] *= np.tile(
            np.reshape(PlvlAgg_adjuster, (T, 1)), (1, ThisType.AgentCount))
        for t in range(T):
            pLvlPercentiles[n, :, t] = getPercentiles(
                ThisType.history['pLvlNow'][t, :], percentiles=pctiles)
        AgeArray = np.tile(np.reshape(np.arange(T) / 4 + 24, (T, 1)),
                           (1, ThisType.AgentCount))
        AgeSqArray = AgeArray**2
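        # Multinomial logit: with employment as the omitted base state,
        # Prob(state) = exp(X_state) / (1 + exp(X_unemp) + exp(X_deep))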
        UnempX = np.exp(Unemp0[e] +
                        UnempP * np.log(ThisType.history['pLvlNow']) +
                        UnempA1 * AgeArray + UnempA2 * AgeSqArray)
        DeepX = np.exp(Deep0[e] + DeepP * np.log(ThisType.history['pLvlNow']) +
                       DeepA1 * AgeArray + DeepA2 * AgeSqArray)
        denom = (1. + UnempX + DeepX)
        UnempPrb = UnempX / denom
        DeepPrb = DeepX / denom
        ThisType.history['UnempPrb'] = UnempPrb
        ThisType.history['DeepPrb'] = DeepPrb

    UnempPrbAll = np.concatenate(
        [ThisType.history['UnempPrb'] for ThisType in TempTypes], axis=1)
    DeepPrbAll = np.concatenate(
        [ThisType.history['DeepPrb'] for ThisType in TempTypes], axis=1)

    # Get overall unemployment and deep unemployment probabilities at each age
    UnempPrbMean = np.mean(UnempPrbAll, axis=1)
    DeepPrbMean = np.mean(DeepPrbAll, axis=1)

    data = dict()
    # Plot overall unemployment probabilities in top left
    plt.subplot(2, 2, 1)
    dashes2 = [10, 2]
    AgeVec = np.arange(T) / 4 + 24
    plt.plot(AgeVec, UnempPrbMean, '-b')
    plt.plot(AgeVec, DeepPrbMean, 'r', dashes=dashes2)
    data['overall'] = [AgeVec, UnempPrbMean, DeepPrbMean]
    plt.legend(['Unemployed', 'Deep unemp'], loc=1)
    plt.ylim(0, 0.20)
    plt.xticks([])
    plt.ylabel('Probability')
    plt.title('All education (mean)')

    # Plot unemployment probabilities by education and permanent income.  For
    # each education group, the 5th and 95th percentiles of permanent income
    # are drawn as dotted lines and the median as solid/dashed lines.
    def calcPanelPrbs(e, p):
        '''Unemployment and deep unemployment probabilities for education
        group e at the p-th stored percentile of permanent income.'''
        UnempX = np.exp(Unemp0[e] + UnempP * np.log(pLvlPercentiles[e, p, :]) +
                        UnempA1 * AgeVec + UnempA2 * AgeVec**2)
        DeepX = np.exp(Deep0[e] + DeepP * np.log(pLvlPercentiles[e, p, :]) +
                       DeepA1 * AgeVec + DeepA2 * AgeVec**2)
        denom = 1. + UnempX + DeepX
        return UnempX / denom, DeepX / denom

    panels = [(0, 'dropout', 2, 'Dropout'),
              (1, 'highschool', 3, 'High school'),
              (2, 'college', 4, 'College')]
    for e, key, panel, title in panels:
        data[key] = dict()
        plt.subplot(2, 2, panel)
        for p in [0, 4, 2]:  # 5th percentile, 95th percentile, then median
            UnempPrb, DeepPrb = calcPanelPrbs(e, p)
            if p == 2:  # Median drawn as solid blue / dashed red
                plt.plot(AgeVec, UnempPrb, '-b')
                plt.plot(AgeVec, DeepPrb, 'r', dashes=dashes2)
            else:  # Extreme percentiles drawn as dotted lines
                plt.plot(AgeVec, UnempPrb, ':b')
                plt.plot(AgeVec, DeepPrb, ':r')
            data[key][p] = [AgeVec, UnempPrb, DeepPrb]
        plt.ylim(0, 0.20)
        plt.title(title)
        if panel == 2:  # Top right panel: no axis labels or ticks
            plt.xticks([])
            plt.yticks([])
        elif panel == 3:  # Bottom left panel: both axis labels
            plt.xlabel('Age')
            plt.ylabel('Probability')
        else:  # Bottom right panel: age axis only
            plt.xlabel('Age')
            plt.yticks([])

    # Save the figure and display it to screen
    plt.suptitle('Unemployment probability after pandemic shock')
    plt.savefig(fig_name_base + '.pdf', bbox_inches='tight')
    if not for_mini:
        plt.savefig(fig_name_base + '.png', bbox_inches='tight')
        plt.savefig(fig_name_base + '.svg', bbox_inches='tight')
    if show_fig:
        plt.show()
    else:
        plt.clf()
    plt.close()
    return data
Example #5
def runExperiment(Agents, PanShock, StimMax, StimCut0, StimCut1, BonusUnemp,
                  BonusDeep, T_ahead, UnempD, UnempH, UnempC, UnempP, UnempA1,
                  UnempA2, DeepD, DeepH, DeepC, DeepP, DeepA1, DeepA2,
                  Dspell_pcvd, Dspell_real, Lspell_pcvd, Lspell_real,
                  L_shared):
    '''
    Conduct a fiscal policy experiment by announcing a fiscal stimulus T periods
    ahead of when checks will actually arrive.  The stimulus is in response to a
    global health crisis that shocks many consumers into unemployment, and others
    into "deep" unemployment that they think will last several quarters.
    
    Parameters
    ----------
    Agents : [AgentType]
        List of agent types in the economy.
    PanShock : bool
        Indicator for whether the pandemic actually hits.
    StimMax : float
        Maximum stimulus check a household can receive, denominated in $1000.
    StimCut0 : float or None
        Permanent income threshold where stimulus check begins to phase out.
        Can only be None if StimCut1 is also None.
    StimCut1 : float or None
        Permanent income threshold where stimulus check is completely phased out.
        None means that the same stimulus check is given to everyone.
    BonusUnemp : float
        One time "bonus benefit" given to regular unemployed people at t=0.
    BonusDeep : float
        One time "bonus benefit" given to deeply unemployed people at t=0.
    T_ahead : int
        Number of quarters after announcement that the stimulus checks will arrive.
    UnempD : float
        Constant for highschool dropouts in the Markov-shock logit for unemployment.
    UnempH : float
        Constant for highschool grads in the Markov-shock logit for unemployment.
    UnempC : float
        Constant for college goers in the Markov-shock logit for unemployment.
    UnempP : float
        Coefficient on log permanent income in the Markov-shock logit for unemployment.
    UnempA1 : float
        Coefficient on age in the Markov-shock logit for unemployment.
    UnempA2 : float
        Coefficient on age squared in the Markov-shock logit for unemployment.
    DeepD : float
        Constant for highschool dropouts in the Markov-shock logit for deep unemployment.
    DeepH : float
        Constant for highschool grads in the Markov-shock logit for deep unemployment.
    DeepC : float
        Constant for college goers in the Markov-shock logit for deep unemployment.
    DeepP : float
        Coefficient on log permanent income in the Markov-shock logit for deep unemployment.
    DeepA1 : float
        Coefficient on age in the Markov-shock logit for deep unemployment.
    DeepA2 : float
        Coefficient on age squared in the Markov-shock logit for deep unemployment.
    Dspell_pcvd : float
        Perceived average duration of a "deep unemployment" spell.
    Dspell_real : float
        Actual average duration of a "deep unemployment" spell.
    Lspell_pcvd : float
        Perceived average duration of the marginal utility-reducing lockdown.
    Lspell_real : float
        Actual average duration of the marginal utility-reducing lockdown.  If
        L_shared is True, this is the *exact* duration of the lockdown, which
        should be an integer.
    L_shared : bool
        Indicator for whether the "lockdown" being lifted is a common/shared event
        across all agents (True) versus whether it's an idiosyncratic shock (False).
        
    Returns
    -------
    C : np.array
        Time series of mean aggregate consumption.
    C_by_mrkv : np.array
        Mean working-age consumption by initial Markov state.
    C_by_inc : np.array
        Mean working-age consumption by initial income quintile.
    cLvl_all : np.array
        Simulated consumption levels for all agents and periods.
    Weight_all : np.array
        Simulation weights for all agents and periods.
    MrkvTags : np.array
        Boolean arrays tagging agents as employed, unemployed, deeply
        unemployed, and working age at t=0.
    U : np.array
        Time series of the unemployment rate.
    laborandtransferLvl_all : np.array
        Labor and transfer income for all agents and periods.
    LT_by_inc : np.array
        Mean working-age labor and transfer income by initial income quintile.
    '''
    PlvlAgg_adjuster = Agents[0].PlvlAgg_base
    T = Agents[0].T_sim

    # Adjust fiscal stimulus parameters by the level of aggregate productivity,
    # which is 96 years more advanced than you would expect because reasons.
    # Multiply unemployment benefits by 0.8 to reflect the fact that the labor force participation rate is 0.8.
    StimMax *= PlvlAgg_adjuster
    BonusUnemp *= PlvlAgg_adjuster * 0.8
    BonusDeep *= PlvlAgg_adjuster * 0.8
    if StimCut0 is not None:
        StimCut0 *= PlvlAgg_adjuster
    if StimCut1 is not None:
        StimCut1 *= PlvlAgg_adjuster

    # Make dictionaries of parameters to give to the agents
    experiment_dict_D = {
        'PanShock': PanShock,
        'T_advance': T_ahead + 1,
        'StimMax': StimMax,
        'StimCut0': StimCut0,
        'StimCut1': StimCut1,
        'BonusUnemp': BonusUnemp,
        'BonusDeep': BonusDeep,
        'UnempParam0': UnempD,
        'UnempParam1': UnempP,
        'UnempParam2': UnempA1,
        'UnempParam3': UnempA2,
        'DeepParam0': DeepD,
        'DeepParam1': DeepP,
        'DeepParam2': DeepA1,
        'DeepParam3': DeepA2,
        'Dspell_pcvd': Dspell_pcvd,
        'Dspell_real': Dspell_real,
        'Lspell_pcvd': Lspell_pcvd,
        'Lspell_real': Lspell_real,
        'L_shared': L_shared
    }
    experiment_dict_H = experiment_dict_D.copy()
    experiment_dict_H['UnempParam0'] = UnempH
    experiment_dict_H['DeepParam0'] = DeepH
    experiment_dict_C = experiment_dict_D.copy()
    experiment_dict_C['UnempParam0'] = UnempC
    experiment_dict_C['DeepParam0'] = DeepC
    experiment_dicts = [
        experiment_dict_D, experiment_dict_H, experiment_dict_C
    ]

    # Begin the experiment by resetting each type's state to the baseline values
    PopCount = 0
    for ThisType in Agents:
        ThisType.read_shocks = True
        e = ThisType.EducType
        ThisType(**experiment_dicts[e])
        PopCount += ThisType.AgentCount

    # Update the perceived and actual Markov arrays, solve and re-draw shocks if
    # warranted, then impose the pandemic shock and the stimulus, and finally
    # simulate the model for three years.
    experiment_commands = [
        'updateMrkvArray()', 'solveIfChanged()', 'makeShocksIfChanged()',
        'initializeSim()', 'hitWithPandemicShock()', 'announceStimulus()',
        'simulate()'
    ]
    multiThreadCommandsFake(Agents, experiment_commands)

    # Extract simulated consumption, labor income, and weight data
    cNrm_all = np.concatenate(
        [ThisType.history['cNrmNow'] for ThisType in Agents], axis=1)
    lLvl_all = np.concatenate(
        [ThisType.history['lLvlNow'] for ThisType in Agents], axis=1)

    Mrkv_hist = np.concatenate(
        [ThisType.history['MrkvNow'] for ThisType in Agents], axis=1)
    t_cycle_hist = np.concatenate(
        [ThisType.history['t_cycle'] for ThisType in Agents], axis=1)
    u_all = np.concatenate([ThisType.history['uNow'] for ThisType in Agents],
                           axis=1)
    w_all = np.concatenate([ThisType.history['wNow'] for ThisType in Agents],
                           axis=1)
    pLvl_all = np.concatenate(
        [ThisType.history['pLvlNow'] for ThisType in Agents], axis=1)
    Weight_all = np.concatenate(
        [ThisType.history['Weight'] for ThisType in Agents], axis=1)
    pLvl_all /= PlvlAgg_adjuster
    lLvl_all /= PlvlAgg_adjuster
    cLvl_all = cNrm_all * pLvl_all

    # Get initial Markov states
    Mrkv_init = np.concatenate(
        [ThisType.history['MrkvNow'][0, :] for ThisType in Agents])
    Age_init = np.concatenate([ThisType.age_base for ThisType in Agents])
    WorkingAge = Age_init <= 163
    Employed = np.logical_and(np.logical_or(Mrkv_init == 0, Mrkv_init == 3),
                              WorkingAge)
    Unemployed = np.logical_and(np.logical_or(Mrkv_init == 1, Mrkv_init == 4),
                                WorkingAge)
    DeepUnemp = np.logical_and(np.logical_or(Mrkv_init == 2, Mrkv_init == 5),
                               WorkingAge)
    MrkvTags = (np.vstack([Employed, Unemployed, DeepUnemp, WorkingAge]))

    # Calculate an alternate version of labor and transfer income that removes all
    # transitory shocks except unemployment.
    yAlt_all = pLvl_all.copy()
    Checks = np.zeros_like(yAlt_all)
    Checks[T_ahead, :] = np.concatenate(
        [ThisType.StimLvl for ThisType in Agents], axis=0)
    yAlt_all[u_all.astype(bool)] *= Agents[0].IncUnemp
    if (hasattr(Agents[0], 'ContUnempBenefits')
            and Agents[0].ContUnempBenefits):
        yAlt_all[np.logical_and(
            Mrkv_hist == 4,
            t_cycle_hist <= 163)] += BonusUnemp / PlvlAgg_adjuster
        yAlt_all[np.logical_and(
            Mrkv_hist == 5,
            t_cycle_hist <= 163)] += BonusDeep / PlvlAgg_adjuster
    else:
        yAlt_all[0, Unemployed] += BonusUnemp / PlvlAgg_adjuster
        yAlt_all[0, DeepUnemp] += BonusDeep / PlvlAgg_adjuster
    yAlt_all += Checks / PlvlAgg_adjuster
    laborandtransferLvl_all = yAlt_all

    # Partition the working age agents by the initial permanent income
    pLvl_init = np.concatenate(
        [ThisType.history['pLvlNow'][0, :] for ThisType in Agents])
    Weight_init = np.concatenate(
        [ThisType.history['Weight'][0, :] for ThisType in Agents]) * WorkingAge
    quintile_cuts = getPercentiles(pLvl_init, Weight_init,
                                   [0.2, 0.4, 0.6, 0.8])
    inc_quint = np.zeros(PopCount)
    for q in range(4):
        inc_quint += pLvl_init >= quintile_cuts[q]
    which_inc_quint = np.zeros((5, PopCount), dtype=bool)
    for q in range(5):
        which_inc_quint[q, :] = inc_quint == q
    which_inc_quint[:, np.logical_not(WorkingAge)] = False

    # Calculate the time series of mean consumption in each quarter
    C = np.sum(cLvl_all * Weight_all, axis=1) / np.sum(Weight_all, axis=1)

    # Calculate unemployment rate each quarter
    U = np.sum(u_all * Weight_all, axis=1) / np.sum(w_all * Weight_all, axis=1)

    # Calculate mean consumption *among the working age* by initial Markov state
    with warnings.catch_warnings():
        warnings.simplefilter(
            "ignore"
        )  # Ignore divide by zero warning when no one is deeply unemployed
        C_by_mrkv = np.zeros((4, T))
        C_by_mrkv[0, :] = np.sum(cLvl_all * Weight_all * Employed,
                                 axis=1) / np.sum(Weight_all * Employed,
                                                  axis=1)
        C_by_mrkv[1, :] = np.sum(cLvl_all * Weight_all * Unemployed,
                                 axis=1) / np.sum(Weight_all * Unemployed,
                                                  axis=1)
        C_by_mrkv[2, :] = np.sum(cLvl_all * Weight_all * DeepUnemp,
                                 axis=1) / np.sum(Weight_all * DeepUnemp,
                                                  axis=1)
        C_by_mrkv[3, :] = np.sum(cLvl_all * Weight_all * WorkingAge,
                                 axis=1) / np.sum(Weight_all * WorkingAge,
                                                  axis=1)

    # Calculate mean consumption *among the working age* by income quintile
    C_by_inc = np.zeros((5, T))
    LT_by_inc = np.zeros((5, T))
    for q in range(5):
        C_by_inc[q, :] = np.sum(cLvl_all * Weight_all * which_inc_quint[q, :],
                                axis=1) / np.sum(
                                    Weight_all * which_inc_quint[q, :], axis=1)
        LT_by_inc[q, :] = np.sum(
            laborandtransferLvl_all * Weight_all * which_inc_quint[q, :],
            axis=1) / np.sum(Weight_all * which_inc_quint[q, :], axis=1)
    return C, C_by_mrkv, C_by_inc, cLvl_all, Weight_all, MrkvTags, U, laborandtransferLvl_all, LT_by_inc
Example #6
# ## Calculating the Lorenz Distance at Targets
#
# Now we want to construct a function that calculates the Euclidean distance between simulated and actual Lorenz curves at the four percentiles of interest: 20, 40, 60, and 80.
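
# %% [markdown]
# A minimal sketch of such a function, assuming `getLorenzShares` (used in the
# examples above) is imported and that the empirical target shares are supplied
# by the caller:

# %%
def calcLorenzDistance(SomeTypes, lorenz_target):
    '''
    Euclidean distance between simulated and target Lorenz shares at the
    20th, 40th, 60th, and 80th percentiles of the wealth distribution.
    '''
    # Pool end-of-period assets across all simulated consumer types
    aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in SomeTypes])
    # Simulated Lorenz curve values at the four target percentiles
    lorenz_sim = getLorenzShares(aLvl_all, percentiles=[0.2, 0.4, 0.6, 0.8])
    return np.sqrt(np.sum((np.asarray(lorenz_sim) - np.asarray(lorenz_target))**2))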

# %% [markdown]
# ## The Distribution Of the Marginal Propensity to Consume
#
# For many macroeconomic purposes, the distribution of the MPC $\kappa$ is more important than the distribution of wealth.  Ours is a quarterly model, and MPCs are typically reported on an annual basis; we can compute an approximate annual MPC from the quarterly one as $\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$.
#
# In the cell below, we retrieve the MPCs from our simulated consumers and show that the 10th percentile of the MPC distribution is only about 0.06, while at the 90th percentile it is almost 0.5.

# %%
# Retrieve the MPCs
percentiles = np.linspace(0.1, 0.9, 9)
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in MyTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim, percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4

print('The MPC at the 10th percentile of the distribution is ' +
      str(decfmt2(MPCpercentiles_annual[0])))
print('The MPC at the 50th percentile of the distribution is ' +
      str(decfmt2(MPCpercentiles_annual[4])))
print('The MPC at the 90th percentile of the distribution is ' +
      str(decfmt2(MPCpercentiles_annual[-1])))

# %% [markdown]
# ## Adding Very Impatient Households
#
# Now that we have some tools for examining both microeconomic (the MPC across the population) and macroeconomic (the distribution and overall level of wealth) outcomes from our model, we are all set to conduct our experiment.
#
# In this exercise, we are going to add very impatient households to the economy in a very direct way: by replacing the *most impatient consumer type* with an *even more impatient type*.  Specifically, we will give these agents a discount factor of $\beta = 0.80$ at a quarterly frequency, which corresponds to $\beta \approx 0.41$ at an annual frequency.
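
# %% [markdown]
# A minimal sketch of that replacement, assuming `MyTypes` is ordered from most
# to least impatient so that index 0 holds the most impatient type (a
# hypothetical ordering used only for illustration):

# %%
from copy import deepcopy

NewImpatientType = deepcopy(MyTypes[0])  # Copy the most impatient type
NewImpatientType.DiscFac = 0.80          # Quarterly discount factor
MyTypes[0] = NewImpatientType            # Swap the new type into the economy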
Example #7
    def calcStats(self, aLvlNow, pLvlNow, MPCnow, lIncomeLvl, EmpNow, t_age,
                  LorenzBool, ManyStatsBool):
        '''
        Calculate various statistics about the current population in the economy.
        
        Parameters
        ----------
        aLvlNow : [np.array]
            Arrays with end-of-period assets, listed by each ConsumerType in self.agents.
        pLvlNow : [np.array]
            Arrays with permanent income levels, listed by each ConsumerType in self.agents.
        MPCnow : [np.array]
            Arrays with marginal propensity to consume, listed by each ConsumerType in self.agents.
        lIncomeLvl : [np.array]
            Arrays with labor income levels, listed by each ConsumerType in self.agents.
        EmpNow : [np.array]
            Arrays with employment states: True if employed, False otherwise.
        t_age : [np.array]
            Arrays with periods elapsed since model entry, listed by each ConsumerType in self.agents.
        LorenzBool : bool
            Indicator for whether the Lorenz target points should be calculated.  Usually False,
            only True when DiscFac has been identified for a particular nabla.
        ManyStatsBool : bool
            Indicator for whether a lot of statistics for tables should be calculated.  Usually False,
            only True when parameters have been estimated and we want values for tables.
            
        Returns
        -------
        None
        '''
        # Combine inputs into single arrays
        aLvl = np.hstack(aLvlNow)
        pLvl = np.hstack(pLvlNow)
        age = np.hstack(t_age)
        IncLvl = np.hstack(lIncomeLvl)
        Emp = np.hstack(EmpNow)

        # Calculate the capital to income ratio in the economy
        CohortWeight = self.PopGroFac**(-age)
        CapAgg = np.sum(aLvl * CohortWeight)
        IncAgg = np.sum(IncLvl * CohortWeight)
        KtoYnow = CapAgg / IncAgg
        self.KtoYnow = KtoYnow

        # Store Lorenz data if requested
        self.LorenzLong = np.nan
        if LorenzBool:
            order = np.argsort(aLvl)
            aLvl = aLvl[order]
            CohortWeight = CohortWeight[order]
            wealth_shares = getLorenzShares(aLvl,
                                            weights=CohortWeight,
                                            percentiles=self.LorenzPercentiles,
                                            presorted=True)
            self.Lorenz = wealth_shares
            if ManyStatsBool:
                self.LorenzLong = getLorenzShares(aLvl,
                                                  weights=CohortWeight,
                                                  percentiles=np.arange(
                                                      0.01, 1.0, 0.01),
                                                  presorted=True)
        else:
            self.Lorenz = np.nan  # Store nothing if we don't want Lorenz data

        # Calculate a whole bunch of statistics if requested
        if ManyStatsBool:
            # Reshape other inputs
            MPC = np.hstack(MPCnow)

            # Sort other data items if aLvl and CohortWeight were sorted
            if LorenzBool:
                pLvl = pLvl[order]
                MPC = MPC[order]
                IncLvl = IncLvl[order]
                age = age[order]
                Emp = Emp[order]
            aNrm = aLvl / pLvl  # Normalized assets (wealth ratio)

            # Calculate overall population MPC and by subpopulations
            # MPC_cf_BPP is the MPC that is comparable with the empirical estimation method
            MPC_cf_BPP = 1.0 - 0.25 * ((1.0 - MPC) + (1.0 - MPC)**2 +
                                       (1.0 - MPC)**3 + (1.0 - MPC)**4)
            self.MPCall = np.sum(
                MPC_cf_BPP * CohortWeight) / np.sum(CohortWeight)
            employed = Emp
            unemployed = np.logical_not(employed)
            self.MPCbyWealthRatio = calcSubpopAvg(MPC_cf_BPP, aNrm,
                                                  self.cutoffs, CohortWeight)
            self.MPCbyIncome = calcSubpopAvg(MPC_cf_BPP, IncLvl, self.cutoffs,
                                             CohortWeight)

            # Calculate the wealth quintile distribution of "hand to mouth" consumers
            quintile_cuts = getPercentiles(aLvl,
                                           weights=CohortWeight,
                                           percentiles=[0.2, 0.4, 0.6, 0.8])
            wealth_quintiles = np.ones(aLvl.size, dtype=int)
            wealth_quintiles[aLvl > quintile_cuts[0]] = 2
            wealth_quintiles[aLvl > quintile_cuts[1]] = 3
            wealth_quintiles[aLvl > quintile_cuts[2]] = 4
            wealth_quintiles[aLvl > quintile_cuts[3]] = 5
            MPC_cutoff = getPercentiles(
                MPC_cf_BPP, weights=CohortWeight,
                percentiles=[2.0 / 3.0])  # Consumers with MPCs in the top 1/3
            these = MPC_cf_BPP > MPC_cutoff
            in_top_third_MPC = wealth_quintiles[these]
            temp_weights = CohortWeight[these]
            hand_to_mouth_total = np.sum(temp_weights)
            hand_to_mouth_pct = []
            for q in range(1, 6):
                hand_to_mouth_pct.append(
                    np.sum(temp_weights[in_top_third_MPC == q]) /
                    hand_to_mouth_total)
            self.HandToMouthPct = np.array(hand_to_mouth_pct)

        else:  # If we don't want these stats, just put empty values in history
            self.MPCall = np.nan
            self.MPCunemployed = np.nan
            self.MPCemployed = np.nan
            self.MPCretired = np.nan
            self.MPCbyWealthRatio = np.nan
            self.MPCbyIncome = np.nan
            self.HandToMouthPct = np.nan
Example #8
def makeCSTWstats(DiscFac,
                  nabla,
                  this_type_list,
                  age_weight,
                  lorenz_distance=0.0,
                  save_name=None):
    '''
    Displays (and saves) a bunch of statistics.  Separate from makeCSTWresults()
    for compatibility with the aggregate shock model.

    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.
    this_type_list : [cstwMPCagent]
        List of agent types in the economy.
    age_weight : np.array
        Age-conditional array of weights for the wealth data.
    lorenz_distance : float
        Distance between simulated and actual Lorenz curves, for display.
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.

    Returns
    -------
    None
    '''
    sim_length = this_type_list[0].sim_periods
    sim_wealth = np.vstack(
        [this_type.W_history for this_type in this_type_list]).flatten()
    sim_wealth_short = np.vstack(
        [this_type.W_history[0:sim_length, :]
         for this_type in this_type_list]).flatten()
    sim_kappa = np.vstack(
        [this_type.kappa_history for this_type in this_type_list]).flatten()
    sim_income = np.vstack(
        [this_type.pHist[0:sim_length, :] *
         np.asarray(this_type.TranShkHist[0:sim_length, :])
         for this_type in this_type_list]).flatten()
    sim_ratio = np.vstack(
        [this_type.W_history[0:sim_length, :] /
         this_type.pHist[0:sim_length, :]
         for this_type in this_type_list]).flatten()
    if Params.do_lifecycle:
        sim_unemp = np.vstack(
            [np.vstack((this_type.IncUnemp ==
                        this_type.TranShkHist[0:Params.working_T, :],
                        np.zeros((Params.retired_T + 1,
                                  this_type_list[0].Nagents), dtype=bool)))
             for this_type in this_type_list]).flatten()
        sim_emp = np.vstack(
            [np.vstack((this_type.IncUnemp !=
                        this_type.TranShkHist[0:Params.working_T, :],
                        np.zeros((Params.retired_T + 1,
                                  this_type_list[0].Nagents), dtype=bool)))
             for this_type in this_type_list]).flatten()
        sim_ret = np.vstack(
            [np.vstack((np.zeros((Params.working_T,
                                  this_type_list[0].Nagents), dtype=bool),
                        np.ones((Params.retired_T + 1,
                                 this_type_list[0].Nagents), dtype=bool)))
             for this_type in this_type_list]).flatten()
    else:
        sim_unemp = np.vstack(
            [this_type.IncUnemp == this_type.TranShkHist[0:sim_length, :]
             for this_type in this_type_list]).flatten()
        sim_emp = np.vstack(
            [this_type.IncUnemp != this_type.TranShkHist[0:sim_length, :]
             for this_type in this_type_list]).flatten()
        sim_ret = np.zeros(sim_emp.size, dtype=bool)
    sim_weight_all = np.tile(np.repeat(age_weight, this_type_list[0].Nagents),
                             Params.pref_type_count)

    if Params.do_beta_dist and Params.do_lifecycle:
        kappa_mean_by_age_type = np.mean(
            np.vstack([this_type.kappa_history for this_type in this_type_list]),
            axis=1).reshape((Params.pref_type_count * 3, DropoutType.T_total + 1))
        kappa_mean_by_age_pref = np.zeros(
            (Params.pref_type_count, DropoutType.T_total + 1)) + np.nan
        for j in range(Params.pref_type_count):
            kappa_mean_by_age_pref[j, :] = (
                Params.d_pct * kappa_mean_by_age_type[3 * j + 0, :] +
                Params.h_pct * kappa_mean_by_age_type[3 * j + 1, :] +
                Params.c_pct * kappa_mean_by_age_type[3 * j + 2, :])
        kappa_mean_by_age = np.mean(kappa_mean_by_age_pref, axis=0)
        kappa_lo_beta_by_age = kappa_mean_by_age_pref[0, :]
        kappa_hi_beta_by_age = kappa_mean_by_age_pref[Params.pref_type_count - 1, :]

    lorenz_fig_data = makeLorenzFig(Params.SCF_wealth, Params.SCF_weights,
                                    sim_wealth, sim_weight_all)
    mpc_fig_data = makeMPCfig(sim_kappa, sim_weight_all)

    kappa_all = calcWeightedAvg(
        np.vstack([this_type.kappa_history for this_type in this_type_list]),
        np.tile(age_weight / float(Params.pref_type_count),
                Params.pref_type_count))
    kappa_unemp = np.sum(
        sim_kappa[sim_unemp] * sim_weight_all[sim_unemp]) / np.sum(
            sim_weight_all[sim_unemp])
    kappa_emp = np.sum(sim_kappa[sim_emp] * sim_weight_all[sim_emp]) / np.sum(
        sim_weight_all[sim_emp])
    kappa_ret = np.sum(sim_kappa[sim_ret] * sim_weight_all[sim_ret]) / np.sum(
        sim_weight_all[sim_ret])

    my_cutoffs = [(0.99, 1), (0.9, 1), (0.8, 1), (0.6, 0.8), (0.4, 0.6),
                  (0.2, 0.4), (0.0, 0.2)]
    kappa_by_ratio_groups = calcSubpopAvg(sim_kappa, sim_ratio, my_cutoffs,
                                          sim_weight_all)
    kappa_by_income_groups = calcSubpopAvg(sim_kappa, sim_income, my_cutoffs,
                                           sim_weight_all)

    quintile_points = getPercentiles(sim_wealth_short,
                                     weights=sim_weight_all,
                                     percentiles=[0.2, 0.4, 0.6, 0.8])
    wealth_quintiles = np.ones(sim_wealth_short.size, dtype=int)
    wealth_quintiles[sim_wealth_short > quintile_points[0]] = 2
    wealth_quintiles[sim_wealth_short > quintile_points[1]] = 3
    wealth_quintiles[sim_wealth_short > quintile_points[2]] = 4
    wealth_quintiles[sim_wealth_short > quintile_points[3]] = 5
    MPC_cutoff = getPercentiles(sim_kappa,
                                weights=sim_weight_all,
                                percentiles=[2.0 / 3.0])
    these_quintiles = wealth_quintiles[sim_kappa > MPC_cutoff]
    these_weights = sim_weight_all[sim_kappa > MPC_cutoff]
    hand_to_mouth_total = np.sum(these_weights)
    hand_to_mouth_pct = []
    for q in range(5):
        hand_to_mouth_pct.append(
            np.sum(these_weights[these_quintiles == (q + 1)]) /
            hand_to_mouth_total)

    results_string = 'Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(
        nabla) + '\n'
    results_string += 'Lorenz distance is ' + str(lorenz_distance) + '\n'
    results_string += 'Average MPC for all consumers is ' + mystr(
        kappa_all) + '\n'
    results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(
        kappa_by_ratio_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of W/Y is ' + mystr(
        kappa_by_ratio_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[6]) + '\n'
    results_string += 'Average MPC in the top percentile of y is ' + mystr(
        kappa_by_income_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of y is ' + mystr(
        kappa_by_income_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of y is ' + mystr(
        kappa_by_income_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of y is ' + mystr(
        kappa_by_income_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of y is ' + mystr(
        kappa_by_income_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of y is ' + mystr(
        kappa_by_income_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of y is ' + mystr(
        kappa_by_income_groups[6]) + '\n'
    results_string += 'Average MPC for the employed is ' + mystr(
        kappa_emp) + '\n'
    results_string += 'Average MPC for the unemployed is ' + mystr(
        kappa_unemp) + '\n'
    results_string += 'Average MPC for the retired is ' + mystr(
        kappa_ret) + '\n'
    results_string += 'Of the population with the 1/3 highest MPCs...' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[0] *
        100) + '% are in the bottom wealth quintile,' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[1] *
        100) + '% are in the second wealth quintile,' + '\n'
    results_string += mystr(hand_to_mouth_pct[2] *
                            100) + '% are in the third wealth quintile,' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[3] *
        100) + '% are in the fourth wealth quintile,' + '\n'
    results_string += 'and ' + mystr(
        hand_to_mouth_pct[4] *
        100) + '% are in the top wealth quintile.' + '\n'
    print(results_string)

    if save_name is not None:
        with open('./Results/' + save_name + 'LorenzFig.txt', 'w') as f:
            my_writer = csv.writer(f, delimiter='\t')
            for j in range(len(lorenz_fig_data[0])):
                my_writer.writerow([
                    lorenz_fig_data[0][j], lorenz_fig_data[1][j],
                    lorenz_fig_data[2][j]
                ])
        with open('./Results/' + save_name + 'MPCfig.txt', 'w') as f:
            my_writer = csv.writer(f, delimiter='\t')
            for j in range(len(mpc_fig_data[0])):
                my_writer.writerow([mpc_fig_data[0][j], mpc_fig_data[1][j]])
        if Params.do_beta_dist and Params.do_lifecycle:
            with open('./Results/' + save_name + 'KappaByAge.txt', 'w') as f:
                my_writer = csv.writer(f, delimiter='\t')
                for j in range(len(kappa_mean_by_age)):
                    my_writer.writerow([
                        kappa_mean_by_age[j], kappa_lo_beta_by_age[j],
                        kappa_hi_beta_by_age[j]
                    ])
        with open('./Results/' + save_name + 'Results.txt', 'w') as f:
            f.write(results_string)
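# A hedged usage note: a call along the lines of
#     makeCSTWstats(DiscFac=center_estimate, nabla=spread_estimate,
#                   this_type_list=MyTypeList, age_weight=age_weight,
#                   save_name='Example')
# (with center_estimate, spread_estimate, MyTypeList, and age_weight as
# hypothetical, already-computed inputs) prints the statistics block and writes
# the tab-delimited figure data under ./Results/.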