Example #1
def calcKappaMean(DiscFac, nabla):
    '''
    Calculates the average MPC for the given parameters.  This is a very small
    sub-function of sensitivityAnalysis.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
        
    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,
                                 bot=DiscFac - nabla,
                                 top=DiscFac + nabla)[1]
    assignBetaDistribution(est_type_list, DiscFac_list)
    multiThreadCommandsFake(est_type_list, beta_point_commands)

    kappa_all = calcWeightedAvg(
        np.vstack([this_type.kappa_history for this_type in est_type_list]),
        np.tile(Params.age_weight_all / float(Params.pref_type_count),
                Params.pref_type_count))
    return kappa_all
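As a usage sketch (assuming the module-level Params, est_type_list, and beta_point_commands objects referenced above are already set up), one can trace how the average MPC responds to the width of the discount factor distribution; the center and width values below are placeholders:

# Hypothetical check of how the average MPC varies with the distribution width
for nabla_test in [0.0, 0.005, 0.01]:
    kappa = calcKappaMean(DiscFac=0.99, nabla=nabla_test)
    print('nabla = {:.3f}: average MPC = {:.4f}'.format(nabla_test, kappa))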
Example #2
 def updateEvolution(self):
     '''
     Updates the "population punk proportion" evolution array.  Fashion victims
     believe that the proportion of punks in the subsequent period is a linear
     function of the proportion of punks this period, subject to a uniform
     shock.  Given attributes of self pNextIntercept, pNextSlope, pNextCount,
     pNextWidth, and pGrid, this method generates a new array for the attribute
     pEvolution, representing a discrete approximation of next-period states
     for each current-period state in pGrid.
     
     Parameters
     ----------
     none
     
     Returns
     -------
     none
     '''
     self.pEvolution = np.zeros((self.pCount, self.pNextCount))
     for j in range(self.pCount):
         pNow = self.pGrid[j]
         pNextMean = self.pNextIntercept + self.pNextSlope * pNow
         dist = approxUniform(N=self.pNextCount,
                              bot=pNextMean - self.pNextWidth,
                              top=pNextMean + self.pNextWidth)[1]
         self.pEvolution[j, :] = dist
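As a standalone illustration of the discretization step used above, the sketch below calls HARKutilities.approxUniform directly with made-up intercept, slope, and width values to show what one row of pEvolution would contain:

from HARKutilities import approxUniform

# Hypothetical evolution parameters and a single current punk proportion
pNextIntercept, pNextSlope, pNextWidth, pNextCount = 0.1, 0.8, 0.05, 5
pNow = 0.5
pNextMean = pNextIntercept + pNextSlope * pNow
# approxUniform returns (probabilities, values); [1] keeps the equiprobable points
next_states = approxUniform(N=pNextCount,
                            bot=pNextMean - pNextWidth,
                            top=pNextMean + pNextWidth)[1]
print(next_states)  # five equally weighted approximations of next period's punk proportion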
Example #3
def calcKappaMean(DiscFac,nabla):
    '''
    Calculates the average MPC for the given parameters.  This is a very small
    sub-function of sensitivityAnalysis.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
        
    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
    assignBetaDistribution(est_type_list,DiscFac_list)
    multiThreadCommandsFake(est_type_list,beta_point_commands)
    
    kappa_all = calcWeightedAvg(np.vstack([this_type.kappa_history for this_type in est_type_list]),
                                np.tile(Params.age_weight_all/float(Params.pref_type_count),
                                        Params.pref_type_count))
    return kappa_all
Example #4
def makeCSTWresults(DiscFac, nabla, save_name=None):
    '''
    Produces a variety of results for the cstwMPC paper (usually after estimating).
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.
        
    Returns
    -------
    none
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,
                                 bot=DiscFac - nabla,
                                 top=DiscFac + nabla)[1]
    assignBetaDistribution(est_type_list, DiscFac_list)
    multiThreadCommandsFake(est_type_list, beta_point_commands)

    lorenz_distance = np.sqrt(betaDistObjective(nabla))

    makeCSTWstats(DiscFac, nabla, est_type_list, Params.age_weight_all,
                  lorenz_distance, save_name)
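A usage sketch, reusing the beta_point_estimate and nabla_estimate names that appear in the aggregate-shocks example below; the save name is a placeholder:

# Hypothetical call after estimation
makeCSTWresults(DiscFac=beta_point_estimate,
                nabla=nabla_estimate,
                save_name='beta_dist_results')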
Example #5
def makeCSTWresults(DiscFac,nabla,save_name=None):
    '''
    Produces a variety of results for the cstwMPC paper (usually after estimating).
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.
        
    Returns
    -------
    none
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
    assignBetaDistribution(est_type_list,DiscFac_list)
    multiThreadCommandsFake(est_type_list,beta_point_commands)
    
    lorenz_distance = np.sqrt(betaDistObjective(nabla))
    
    makeCSTWstats(DiscFac,nabla,est_type_list,Params.age_weight_all,lorenz_distance,save_name)   
Example #6
    def distributeParams(self, param_name, param_count, center, spread,
                         dist_type):
        '''
        Distributes heterogeneous values of one parameter to the AgentTypes in self.agents.
        
        Parameters
        ----------
        param_name : string
            Name of the parameter to be assigned.
        param_count : int
            Number of different values the parameter will take on.
        center : float
            A measure of centrality for the distribution of the parameter.
        spread : float
            A measure of spread or diffusion for the distribution of the parameter.
        dist_type : string
            The type of distribution to be used.  Can be "lognormal" or "uniform" (can expand).
            
        Returns
        -------
        None
        '''
        # Get a list of discrete values for the parameter
        if dist_type == 'uniform':
            # If uniform, center is middle of distribution, spread is distance to either edge
            param_dist = approxUniform(N=param_count,
                                       bot=center - spread,
                                       top=center + spread)
        elif dist_type == 'lognormal':
            # If lognormal, center is the mean and spread is the standard deviation (in log)
            tail_N = 3
            param_dist = approxLognormal(N=param_count - tail_N,
                                         mu=np.log(center) - 0.5 * spread**2,
                                         sigma=spread,
                                         tail_N=tail_N,
                                         tail_bound=[0.0, 0.9],
                                         tail_order=np.e)

        # Distribute the parameters to the various types, assigning consecutive types the same
        # value if there are more types than values
        replication_factor = len(self.agents) // param_count
        j = 0
        b = 0
        while j < len(self.agents):
            for n in range(replication_factor):
                self.agents[j](AgentCount=int(self.Population *
                                              param_dist[0][b] *
                                              self.TypeWeight[n]))
                exec('self.agents[j](' + param_name + '= param_dist[1][b])')
                j += 1
            b += 1
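A usage sketch for this method; the economy object and the particular center and spread values are assumptions for illustration:

# Hypothetical call: spread a uniform distribution of discount factors across 7 types
economy.distributeParams(param_name='DiscFac',
                         param_count=7,
                         center=0.9867,
                         spread=0.0067,
                         dist_type='uniform')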
Example #7
 def distributeParams(self,param_name,param_count,center,spread,dist_type):
     '''
     Distributes heterogeneous values of one parameter to the AgentTypes in self.agents.
     
     Parameters
     ----------
     param_name : string
         Name of the parameter to be assigned.
     param_count : int
         Number of different values the parameter will take on.
     center : float
         A measure of centrality for the distribution of the parameter.
     spread : float
         A measure of spread or diffusion for the distribution of the parameter.
     dist_type : string
         The type of distribution to be used.  Can be "lognormal" or "uniform" (can expand).
         
     Returns
     -------
     None
     '''
     # Get a list of discrete values for the parameter
     if dist_type == 'uniform':
         # If uniform, center is middle of distribution, spread is distance to either edge
         param_dist = approxUniform(N=param_count,bot=center-spread,top=center+spread)
     elif dist_type == 'lognormal':
         # If lognormal, center is the mean and spread is the standard deviation (in log)
         tail_N = 3
         param_dist = approxLognormal(N=param_count-tail_N,mu=np.log(center)-0.5*spread**2,sigma=spread,tail_N=tail_N,tail_bound=[0.0,0.9], tail_order=np.e)
         
     # Distribute the parameters to the various types, assigning consecutive types the same
     # value if there are more types than values
     replication_factor = len(self.agents)//param_count
     j = 0
     b = 0
     while j < len(self.agents):
         for n in range(replication_factor):
             self.agents[j](AgentCount = int(self.Population*param_dist[0][b]*self.TypeWeight[n]))
             exec('self.agents[j](' + param_name + '= param_dist[1][b])')
             j += 1
         b += 1
Example #8
def simulateKYratioDifference(DiscFac, nabla, N, type_list, weights,
                              total_output, target):
    '''
    Assigns a uniform distribution over DiscFac with width 2*nabla and N points, then
    solves and simulates all agent types in type_list and compares the simulated
    K/Y ratio to the target K/Y ratio.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.
    N : int
        Number of discrete consumer types.
    type_list : [cstwMPCagent]
        List of agent types to solve and simulate after assigning discount factors.
    weights : np.array
        Age-conditional array of population weights.
    total_output : float
        Total output of the economy, denominator for the K/Y calculation.
    target : float
        Target level of capital-to-output ratio.
        
    Returns
    -------
    my_diff : float
        Difference between simulated and target capital-to-output ratios.
    '''
    if isinstance(DiscFac, (list, np.ndarray)):
        DiscFac = DiscFac[0]
    DiscFac_list = approxUniform(N, DiscFac - nabla, DiscFac +
                                 nabla)[1]  # only take values, not probs
    assignBetaDistribution(type_list, DiscFac_list)
    multiThreadCommandsFake(type_list, beta_point_commands)
    my_diff = calculateKYratioDifference(
        np.vstack([this_type.W_history for this_type in type_list]),
        np.tile(weights / float(N), N), total_output, target)
    return my_diff
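Because the function returns a signed difference from the target K/Y ratio, it can serve as the objective in a one-dimensional root search over the center of the discount factor distribution; a sketch with scipy's brentq, where the bracket, total_output, and KY_target values are placeholders and est_type_list / Params reuse names from the other examples:

from scipy.optimize import brentq

# Hypothetical root search for the DiscFac center that matches the target K/Y ratio
DiscFac_star = brentq(
    lambda beta: simulateKYratioDifference(beta, nabla=0.0, N=1,
                                           type_list=est_type_list,
                                           weights=Params.age_weight_all,
                                           total_output=total_output,
                                           target=KY_target),
    0.90, 0.999)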
Example #9
 def updateEvolution(self):
     '''
     Updates the "population punk proportion" evolution array.  Fashion victims
     believe that the proportion of punks in the subsequent period is a linear
     function of the proportion of punks this period, subject to a uniform
     shock.  Given attributes of self pNextIntercept, pNextSlope, pNextCount,
     pNextWidth, and pGrid, this method generates a new array for the attribute
     pEvolution, representing a discrete approximation of next-period states
     for each current-period state in pGrid.
     
     Parameters
     ----------
     none
     
     Returns
     -------
     none
     '''
     self.pEvolution = np.zeros((self.pCount,self.pNextCount))
     for j in range(self.pCount):
         pNow = self.pGrid[j]
         pNextMean = self.pNextIntercept + self.pNextSlope*pNow
         dist = approxUniform(N=self.pNextCount,bot=pNextMean-self.pNextWidth,top=pNextMean+self.pNextWidth)[1]
         self.pEvolution[j,:] = dist
Example #10
def simulateKYratioDifference(DiscFac,nabla,N,type_list,weights,total_output,target):
    '''
    Assigns a uniform distribution over DiscFac with width 2*nabla and N points, then
    solves and simulates all agent types in type_list and compares the simulated
    K/Y ratio to the target K/Y ratio.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors.
    nabla : float
        Width of the uniform distribution of discount factors.
    N : int
        Number of discrete consumer types.
    type_list : [cstwMPCagent]
        List of agent types to solve and simulate after assigning discount factors.
    weights : np.array
        Age-conditional array of population weights.
    total_output : float
        Total output of the economy, denominator for the K/Y calculation.
    target : float
        Target level of capital-to-output ratio.
        
    Returns
    -------
    my_diff : float
        Difference between simulated and target capital-to-output ratios.
    '''
    if isinstance(DiscFac, (list, np.ndarray)):
        DiscFac = DiscFac[0]
    DiscFac_list = approxUniform(N,DiscFac-nabla,DiscFac+nabla)[1] # only take values, not probs
    assignBetaDistribution(type_list,DiscFac_list)
    multiThreadCommandsFake(type_list,beta_point_commands)
    my_diff = calculateKYratioDifference(np.vstack([this_type.W_history for this_type in type_list]),
                                         np.tile(weights/float(N),N),total_output,target)
    return my_diff
Example #11
for nn in range(num_consumer_types):
    # Now create the types, and append them to the list ChineseConsumerTypes
    newType = deepcopy(ChinaExample)
    ChineseConsumerTypes.append(newType)

## Now, generate the desired ex-ante heterogeneity by giving each of the consumer types
## its own discount factor

# First, decide the discount factors to assign
from HARKutilities import approxUniform

bottomDiscFac = 0.9800
topDiscFac = 0.9934
DiscFac_list = approxUniform(N=num_consumer_types,
                             bot=bottomDiscFac,
                             top=topDiscFac)[1]

# Now, assign the discount factors we want to the ChineseConsumerTypes
for j in range(num_consumer_types):
    ChineseConsumerTypes[j].DiscFac = DiscFac_list[j]

####################################################################################################
####################################################################################################
"""
Now, write the function to perform the experiment.

Recall that all parameters have been assigned appropriately, except for the income process.  
This is because we want to see how much uncertainty needs to accompany the high-growth state
to generate the desired high savings rate.
Example #12
 # Make a set of consumer types for the FBS aggregate shocks model
 BaseAggShksType = AggShockConsumerType(**Params.init_agg_shocks)
 agg_shocks_type_list = []
 for j in range(Params.pref_type_count):
     new_type = deepcopy(BaseAggShksType)
     new_type.seed = j
     new_type.resetRNG()
     new_type.makeIncShkHist()
     agg_shocks_type_list.append(new_type)
 if Params.do_beta_dist:
     beta_agg = beta_dist_estimate
     nabla_agg = nabla_estimate
 else:
     beta_agg = beta_point_estimate
     nabla_agg = 0.0
 DiscFac_list_agg = approxUniform(N=Params.pref_type_count,bot=beta_agg-nabla_agg,top=beta_agg+nabla_agg)[1]
 assignBetaDistribution(agg_shocks_type_list,DiscFac_list_agg)
 
 # Make a market for solving the FBS aggregate shocks model
 agg_shocks_market = CobbDouglasEconomy(agents = agg_shocks_type_list,
                 act_T         = Params.sim_periods_agg_shocks,
                 tolerance     = 0.0001,
                 **Params.aggregate_params)
 agg_shocks_market.makeAggShkHist()
 
 # Edit the consumer types so they have the right data
 for this_type in agg_shocks_market.agents:
     this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents,sigma=0.9,seed=0)
     this_type.getEconomyData(agg_shocks_market)
 
 # Solve the aggregate shocks version of the model
Example #13
def ImpatienceCondition(CRRA, DiscFac, N, nabla, Rfree, PermShkStd, PermGroFac,
                        Unemp, PermShkCount, PermShkDstn):
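    '''
    Constructs a uniform distribution of N discount factors centered at DiscFac
    with half-width nabla, then evaluates the growth (GIC), return (RIC), weak
    return (WRIC), and absolute (AIC) impatience conditions and the finite
    human wealth condition (FHWC) for each type, printing how many types fail
    each condition.
    '''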

    DiscFac_list = approxUniform(N, DiscFac - nabla, DiscFac +
                                 nabla)[1]  # Construct the beta distribution
    Beta_count = len(DiscFac_list)  # Number of preference types
    GIC = np.zeros(shape=(len(DiscFac_list), 1))  # Some initial conditions
    RIC = np.zeros(shape=(len(DiscFac_list), 1))
    AIC = np.zeros(shape=(len(DiscFac_list), 1))
    WRIC = np.zeros(shape=(len(DiscFac_list), 1))
    FHWC = np.zeros(shape=(len(DiscFac_list), 1))
    exp_psi_inv = 0

    for i in range(len(PermShkDstn[1])):
        exp_psi_inv = exp_psi_inv + (1.0 / PermShkCount) * (
            PermShkDstn[1][i])**(-1)  #Get expected psi inverse
    print(exp_psi_inv)
    j = 0
    while j < Beta_count:  #Calculate the LHS of each condition
        GIC[j] = (0.99375 * exp_psi_inv *
                  (Rfree * DiscFac_list[j])**(1 / CRRA)) / (PermGroFac)
        RIC[j] = ((Rfree * DiscFac_list[j])**(1 / CRRA)) / Rfree
        WRIC[j] = ((Unemp**(1 / CRRA)) *
                   (Rfree * DiscFac_list[j])**(1 / CRRA)) / Rfree
        AIC[j] = (Rfree * DiscFac_list[j])**(1 / CRRA)
        FHWC[j] = PermGroFac / Rfree
        j += 1

    print(GIC)
    print(RIC)
    print(WRIC)
    print(AIC)
    print(FHWC)

    #Check whether the inequality implied by the impatience condition holds
    #################################################################################
    count_GIC = 0
    for i in range(len(DiscFac_list)):
        if GIC[i] < 1:
            count_GIC += 1
    if count_GIC != len(DiscFac_list):
        fail_GIC = len(DiscFac_list) - count_GIC
        print(str(fail_GIC) + ' Type(s) fail to satisfy growth impatience condition')
    #################################################################################
    count_RIC = 0
    for i in range(len(DiscFac_list)):
        if RIC[i] < 1:
            count_RIC += 1
    if count_RIC != len(DiscFac_list):
        fail_RIC = len(DiscFac_list) - count_RIC
        print(str(fail_RIC) + ' Type(s) fail to satisfy return impatience condition')
    #################################################################################
    count_WRIC = 0
    for i in range(len(DiscFac_list)):
        if WRIC[i] < 1:
            count_WRIC += 1
    if count_WRIC != len(DiscFac_list):
        fail_WRIC = len(DiscFac_list) - count_WRIC
        print(str(fail_WRIC) + ' Type(s) fail to satisfy weak return impatience condition')
    #################################################################################
    count_AIC = 0
    for i in range(len(DiscFac_list)):
        if AIC[i] < 1:
            count_AIC += 1
    if count_AIC != len(DiscFac_list):
        fail_AIC = len(DiscFac_list) - count_AIC
        print(str(fail_AIC) + ' Type(s) fail to satisfy absolute impatience condition')
    #################################################################################
    count_FHWC = 0
    for i in range(len(DiscFac_list)):
        if FHWC[i] < 1:
            count_FHWC += 1
    if count_FHWC != len(DiscFac_list):
        fail_FHWC = len(DiscFac_list) - count_FHWC
        print(str(fail_FHWC) + ' Type(s) fail to satisfy finite human wealth condition')
    #################################################################################
    count_all = count_GIC + count_RIC + count_WRIC + count_AIC + count_FHWC
    if count_all == 5 * Beta_count:
        print('All types satisfy all conditions')
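A usage sketch (the shock distribution and parameter values below are placeholders, chosen only to make the call self-contained):

# Hypothetical call checking the five conditions for 7 discount factor types
PermShkDstn_test = approxLognormal(N=7, mu=0.0, sigma=0.1)
ImpatienceCondition(CRRA=2.0, DiscFac=0.97, N=7, nabla=0.02, Rfree=1.03,
                    PermShkStd=0.1, PermGroFac=1.01, Unemp=0.07,
                    PermShkCount=7, PermShkDstn=PermShkDstn_test)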
Example #14
def ImpatienceCondition(CRRA, DiscFac, N, nabla, Rsave_list, PermShkStd,
                        PermGroFac, Unemp, PermShkCount, PermShkDstn):
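    '''
    Same impatience condition checks as above, but evaluated over the cross
    of a uniform distribution of N discount factors (centered at DiscFac with
    half-width nabla) and a list of saving interest factors Rsave_list,
    printing how many (DiscFac, Rsave) combinations fail each condition.
    '''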
    DiscFac_list = approxUniform(N, DiscFac - nabla, DiscFac + nabla)[1]
    Beta_count = len(DiscFac_list)
    Rsave_count = len(Rsave_list)
    GIC = np.zeros(shape=(len(DiscFac_list), len(Rsave_list)))
    RIC = np.zeros(shape=(len(DiscFac_list), len(Rsave_list)))
    AIC = np.zeros(shape=(len(DiscFac_list), len(Rsave_list)))
    WRIC = np.zeros(shape=(len(DiscFac_list), len(Rsave_list)))
    FHWC = np.zeros(shape=(len(DiscFac_list), len(Rsave_list)))
    exp_psi_inv = 0

    for i in range(len(PermShkDstn[1])):
        exp_psi_inv = exp_psi_inv + (1.0 /
                                     PermShkCount) * (PermShkDstn[1][i])**(-1)
    j = 0
    b = 0
    while j < Beta_count:
        b = 0
        while b < Rsave_count:
            GIC[j, b] = (0.99375 * exp_psi_inv *
                         (Rsave_list[b] * DiscFac_list[j])**
                         (1 / CRRA)) / (PermGroFac)
            RIC[j, b] = ((Rsave_list[b] * DiscFac_list[j])
                         **(1 / CRRA)) / (Rsave_list[b])
            WRIC[j, b] = ((Unemp**(1 / CRRA)) *
                          (Rsave_list[b] * DiscFac_list[j])**
                          (1 / CRRA)) / (Rsave_list[b])
            AIC[j, b] = (Rsave_list[b] * DiscFac_list[j])**(1 / CRRA)
            FHWC[j, b] = PermGroFac / Rsave_list[b]
            b += 1
        j += 1

    print(GIC)
    print(RIC)
    print(WRIC)
    print(AIC)
    print(FHWC)
    #################################################################################
    count = 0
    for i in range(len(DiscFac_list)):
        for j in range(len(Rsave_list)):
            if GIC[i][j] > 1:
                count += 1
    if count > 0:
        print(str(count) + ' Type(s) fail to satisfy growth impatience condition')
    #################################################################################
    count = 0
    for i in range(len(DiscFac_list)):
        for j in range(len(Rsave_list)):
            if RIC[i][j] > 1:
                count += 1
    if count > 0:
        print(str(count) + ' Type(s) fail to satisfy return impatience condition')
    #################################################################################
    count = 0
    for i in range(len(DiscFac_list)):
        for j in range(len(Rsave_list)):
            if WRIC[i][j] > 1:
                count += 1
    if count > 0:
        print(str(count) + ' Type(s) fail to satisfy weak return impatience condition')
    #################################################################################
    count = 0
    for i in range(len(DiscFac_list)):
        for j in range(len(Rsave_list)):
            if AIC[i][j] > 1:
                count += 1
    if count > 0:
        print(str(count) + ' Type(s) fail to satisfy absolute impatience condition')
    #################################################################################
    count = 0
    for i in range(len(DiscFac_list)):
        for j in range(len(Rsave_list)):
            if FHWC[i][j] > 1:
                count += 1
    if count > 0:
        print(str(count) + ' Type(s) fail to satisfy finite human wealth condition')
Example #15
ChineseConsumerTypes = [] # initialize an empty list

for nn in range(num_consumer_types):
    # Now create the types, and append them to the list ChineseConsumerTypes
    newType = deepcopy(ChinaExample)    
    ChineseConsumerTypes.append(newType)

## Now, generate the desired ex-ante heterogeneity by giving each of the consumer types
## its own discount factor

# First, decide the discount factors to assign
from HARKutilities import approxUniform

bottomDiscFac = 0.9800
topDiscFac    = 0.9934 
DiscFac_list  = approxUniform(N=num_consumer_types,bot=bottomDiscFac,top=topDiscFac)[1]

# Now, assign the discount factors we want to the ChineseConsumerTypes
cstwMPC.assignBetaDistribution(ChineseConsumerTypes,DiscFac_list)

####################################################################################################
####################################################################################################
"""
Now, write the function to perform the experiment.

Recall that all parameters have been assigned appropriately, except for the income process.  
This is because we want to see how much uncertainty needs to accompany the high-growth state
to generate the desired high savings rate.

Therefore, among other things, this function will have to initialize and assign
the appropriate income process.
Example #16
        if i == j:
            PolyMrkvArray[i, j] = Persistence
        elif (i == (j - 1)) or (i == (j + 1)):
            PolyMrkvArray[i, j] = 0.5 * (1.0 - Persistence)
PolyMrkvArray[0, 0] += 0.5 * (1.0 - Persistence)
PolyMrkvArray[StateCount - 1, StateCount - 1] += 0.5 * (1.0 - Persistence)
PolyMrkvArray *= 1.0 - RegimeChangePrb
PolyMrkvArray += RegimeChangePrb / StateCount

# Define the set of aggregate permanent growth factors that can occur (Markov specifications only)
PermGroFacSet = np.exp(
    np.linspace(np.log(PermGroFacMin), np.log(PermGroFacMax), num=StateCount))

# Define the set of discount factors that agents have (for SOE and DSGE models)
DiscFacSetSOE = approxUniform(N=TypeCount,
                              bot=DiscFacMeanSOE - DiscFacSpread,
                              top=DiscFacMeanSOE + DiscFacSpread)[1]
DiscFacSetDSGE = approxUniform(N=TypeCount,
                               bot=DiscFacMeanDSGE - DiscFacSpread,
                               top=DiscFacMeanDSGE + DiscFacSpread)[1]

###############################################################################

# Define parameters for the small open economy version of the model
init_SOE_consumer = {
    'CRRA': CRRA,
    'DiscFac': DiscFacMeanSOE,
    'LivPrb': [LivPrb],
    'PermGroFac': [1.0],
    'AgentCount': AgentCount // TypeCount,  # Spread agents evenly among types
    'aXtraMin': 0.00001,
Example #17
        BaseAggShksType = AggShockConsumerType(**Params.init_agg_shocks)
        agg_shocks_type_list = []
        for j in range(Params.pref_type_count):
            new_type = deepcopy(BaseAggShksType)
            new_type.seed = j
            new_type.resetRNG()
            new_type.makeIncShkHist()
            agg_shocks_type_list.append(new_type)
        if Params.do_beta_dist:
            beta_agg = beta_dist_estimate
            nabla_agg = nabla_estimate
        else:
            beta_agg = beta_point_estimate
            nabla_agg = 0.0
        DiscFac_list_agg = approxUniform(N=Params.pref_type_count,
                                         bot=beta_agg - nabla_agg,
                                         top=beta_agg + nabla_agg)[1]
        assignBetaDistribution(agg_shocks_type_list, DiscFac_list_agg)

        # Make a market for solving the FBS aggregate shocks model
        agg_shocks_market = CobbDouglasEconomy(
            agents=agg_shocks_type_list,
            act_T=Params.sim_periods_agg_shocks,
            tolerance=0.0001,
            **Params.aggregate_params)
        agg_shocks_market.makeAggShkHist()

        # Edit the consumer types so they have the right data
        for this_type in agg_shocks_market.agents:
            this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents,
                                                    sigma=0.9,
Example #18
def FagerengObjFunc(center, spread, verbose=False):
    '''
    Objective function for the quick and dirty structural estimation to fit
    Fagereng, Holm, and Natvik's Table 9 results with a basic infinite horizon
    consumption-saving model (with permanent and transitory income shocks).
    
    Parameters
    ----------
    center : float
        Center of the uniform distribution of discount factors.
    spread : float
        Width of the uniform distribution of discount factors.
    verbose : bool
        When True, print to screen MPC table for these parameters.  When False,
        print (center, spread, distance).
        
    Returns
    -------
    distance : float
        Euclidean distance between simulated MPCs and (adjusted) Table 9 MPCs.
    '''
    # Give our consumer types the requested discount factor distribution
    beta_set = approxUniform(N=TypeCount,
                             bot=center - spread,
                             top=center + spread)[1]
    for j in range(TypeCount):
        EstTypeList[j](DiscFac=beta_set[j])

    # Solve and simulate all consumer types, then gather their wealth levels
    multiThreadCommands(
        EstTypeList,
        ['solve()', 'initializeSim()', 'simulate()', 'unpackcFunc()'])
    WealthNow = np.concatenate([ThisType.aLvlNow for ThisType in EstTypeList])

    # Get wealth quartile cutoffs and distribute them to each consumer type
    quartile_cuts = getPercentiles(WealthNow, percentiles=[0.25, 0.50, 0.75])
    for ThisType in EstTypeList:
        WealthQ = np.zeros(ThisType.AgentCount, dtype=int)
        for n in range(3):
            WealthQ[ThisType.aLvlNow > quartile_cuts[n]] += 1
        ThisType(WealthQ=WealthQ)

    # Keep track of MPC sets in lists of lists of arrays
    MPC_set_list = [[[], [], [], []], [[], [], [], []], [[], [], [], []],
                    [[], [], [], []]]

    # Calculate the MPC for each of the four lottery sizes for all agents
    for ThisType in EstTypeList:
        ThisType.simulate(1)
        c_base = ThisType.cNrmNow
        MPC_this_type = np.zeros((ThisType.AgentCount, 4))
        for k in range(4):  # Get MPC for all agents of this type
            Llvl = lottery_size[k]
            Lnrm = Llvl / ThisType.pLvlNow
            if do_secant:
                SplurgeNrm = Splurge / ThisType.pLvlNow
                mAdj = ThisType.mNrmNow + Lnrm - SplurgeNrm
                cAdj = ThisType.cFunc[0](mAdj) + SplurgeNrm
                MPC_this_type[:, k] = (cAdj - c_base) / Lnrm
            else:
                mAdj = ThisType.mNrmNow + Lnrm
                MPC_this_type[:, k] = ThisType.cFunc[0].derivative(mAdj)

        # Sort the MPCs into the proper MPC sets
        for q in range(4):
            these = ThisType.WealthQ == q
            for k in range(4):
                MPC_set_list[k][q].append(MPC_this_type[these, k])

    # Calculate average within each MPC set
    simulated_MPC_means = np.zeros((4, 4))
    for k in range(4):
        for q in range(4):
            MPC_array = np.concatenate(MPC_set_list[k][q])
            simulated_MPC_means[k, q] = np.mean(MPC_array)

    # Calculate Euclidean distance between simulated MPC averages and Table 9 targets
    diff = simulated_MPC_means - MPC_target
    if drop_corner:
        diff[0, 0] = 0.0
    distance = np.sqrt(np.sum((diff)**2))
    if verbose:
        print(simulated_MPC_means)
    else:
        print(center, spread, distance)
    return distance
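In estimation, this objective would typically be minimized over (center, spread); a minimal sketch using scipy's Nelder-Mead, where the initial guess is a placeholder rather than the paper's estimate:

from scipy.optimize import minimize

# Hypothetical starting guess for the center and spread of the DiscFac distribution
guess = [0.96, 0.02]
result = minimize(lambda params: FagerengObjFunc(params[0], params[1]),
                  guess, method='Nelder-Mead')
print('Estimated center and spread:', result.x)
FagerengObjFunc(result.x[0], result.x[1], verbose=True)  # show the fitted MPC table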
Example #19
    weights : np.array
        Age-conditional array of population weights.
    total_output : float
        Total output of the economy, denominator for the K/Y calculation.
    target : float
        Target level of capital-to-output ratio.
        
    Returns
    -------
    my_diff : float
        Difference between simulated and target capital-to-output ratios.
    '''
    if isinstance(DiscFac, (list, np.ndarray)):
        DiscFac = DiscFac[0]
    DiscFac_list = approxUniform(N,DiscFac-nabla,DiscFac+nabla)[1] # only take values, not probs
    assignBetaDistribution(type_list,DiscFac_list)
    multiThreadCommandsFake(type_list,beta_point_commands)
    my_diff = calculateKYratioDifference(np.vstack([this_type.W_history for this_type in type_list]),
                                         np.tile(weights/float(N),N),total_output,target)
    return my_diff


mystr = lambda number : "{:.3f}".format(number)
'''
Formats a float with exactly three decimal places (rounding) when displaying it as a string.
'''
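For example (the input value is arbitrary):

print(mystr(3.14159))  # prints 3.142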