def calcConsChangeAfterUncertaintyChange(OriginalTypes,NewVals,ParamToChange):
    '''
    Calculate the percentage change in aggregate consumption when an income
    distribution parameter takes on each of a list of counterfactual values.

    Parameters
    ----------
    OriginalTypes : [IndShockConsumerType]
        Consumer types whose "pre-shock" problem has already been solved and
        simulated.
    NewVals : np.array
        List or array of new values that the parameter of interest will take on.
    ParamToChange : str
        Name of the income distribution parameter that will be changed.

    Returns
    -------
    ChangesInConsumption : [float]
        Change in aggregate consumption for each value in NewVals, expressed
        as a percentage of the original aggregate consumption level.
    '''
    # Step 1: baseline aggregate consumption before any parameter change
    OldAvgC = calcAvgC(OriginalTypes)

    ChangesInConsumption = []
    # Step 2: loop over the counterfactual parameter values
    for NewVal in log_progress(NewVals, every=1):
        # Shock standard deviations are time-varying parameters, so they must
        # be wrapped in a one-element list; other parameters are used as-is.
        if ParamToChange in ["PermShkStd","TranShkStd"]:
            CounterfactualVal = [NewVal]
        else:
            CounterfactualVal = NewVal

        # Work on copies so the baseline population is left untouched
        CounterfactualTypes = deepcopy(OriginalTypes)
        for position, Agent in enumerate(CounterfactualTypes):
            setattr(Agent, ParamToChange, CounterfactualVal)
            # Step 2A: rebuild the income process and re-solve the model
            Agent.updateIncomeProcess()
            Agent.solve()
            # Step 2B: reset the simulator ...
            Agent.initializeSim()
            # Step 2C: ... but restart from the baseline distribution of states
            Agent.aNrmNow = OriginalTypes[position].aNrmNow
            Agent.pLvlNow = OriginalTypes[position].pLvlNow
            Agent.simOnePeriod()

        # Step 2D/2E: aggregate consumption under the counterfactual value
        NewAvgC = calcAvgC(CounterfactualTypes)
        # Step 2F: percentage change relative to the baseline
        ChangesInConsumption.append(100. * (NewAvgC - OldAvgC) / OldAvgC)

    return ChangesInConsumption  # Step 3: return the output
def run(self):
    '''
    Drain self.response into self.buffer in 1 KiB chunks until EOF,
    reporting progress after each chunk received.
    '''
    CHUNK_SIZE = 1024
    progress = None
    self.reading = True

    chunk = self.response.read(CHUNK_SIZE)
    while chunk:
        self.buffer.extend(chunk)
        # Progress is reported only for non-empty chunks
        progress = log_progress(self, 'Downloading', len(self.buffer), progress)
        chunk = self.response.read(CHUNK_SIZE)

    self.reading = False
    log.debug('[%s] Download Complete', self.uri)
def stream(tr, r_start, r_end):
    # Generator: yield the byte range [r_start, r_end) of stream source `tr`,
    # ramping the read size up as the position advances through the stream.
    # NOTE(review): `tr` appears to expose .read(offset, size), .stream_length
    # and .uri — confirm against the transcoder/track interface.
    position = r_start
    chunk_size_min = 6 * 1024   # starting read size (6 KiB)
    chunk_size_max = 10 * 1024  # ramp coefficient (see NOTE below)
    chunk_scale = 0
    chunk_size = chunk_size_min
    last_progress = None
    while True:
        # Adjust chunk_size: grow with progress through the stream until the
        # scale reaches 1, after which the size stays fixed.
        # NOTE(review): at chunk_scale == 1 the read size is
        # chunk_size_min + chunk_size_max (~16 KiB), so chunk_size_max acts as
        # a ramp coefficient rather than a hard upper bound — confirm intent.
        if chunk_scale < 1:
            chunk_scale = 2 * (float(position) / tr.stream_length)
            chunk_size = int(chunk_size_min + (chunk_size_max * chunk_scale))
            if chunk_scale > 1:
                chunk_scale = 1

        # Never read past the end of the requested range
        if position + chunk_size > r_end:
            chunk_size = r_end - position

        # Read chunk; an empty read means the source is exhausted
        chunk = tr.read(position, chunk_size)

        if not chunk:
            log.debug('[%s] Finished at %s bytes (content-length: %s)' % (tr.uri, position, tr.stream_length))
            break

        last_progress = log_progress(tr, ' Streaming', position, last_progress)

        position = position + len(chunk)

        # Write chunk
        yield chunk

    log.debug('[%s] Stream Complete', tr.uri)
DiscFac_list = approxUniform(N=num_consumer_types,bot=bottomDiscFac,top=topDiscFac)[1] # Now, assign the discount factors for j in range(num_consumer_types): ConsumerTypes[j].DiscFac = DiscFac_list[j] # %% [markdown] {"hidden": true} # Our agents now exist and have a concept of the problem they face, but we still need them to solve that problem. # # Once we have solved each type of consumer's individual problem, we need to know the distribution of wealth (and permanent income) that the population would achieve in the long run. # # The cell below does both of those tasks, looping through the consumer types. For each one, it solves that type's infinite horizon model, then simulates 1000 periods to generate an approximation to the long run distribution of wealth. # %% {"code_folding": [], "hidden": true} # log_progress presents a pretty bar that interactively shows how far the calculations have gotten for ConsumerType in log_progress(ConsumerTypes, every=1): ## We configured their discount factor above. Now solve ConsumerType.solve() # Now simulate many periods to get to the stationary distribution ConsumerType.T_sim = 1000 ConsumerType.initializeSim() ConsumerType.simulate() # %% [markdown] {"hidden": true} # With all of that setup taken care of, let's write some functions to run our counterfactual exercise and extract the information we want. # # First, let's define a simple function that merely calculates the average consumption level across the entire population in the most recent simulated period. # %% {"hidden": true} # We just merge the cNrm and pNrm lists already constructed for each ConsumerType
# Now create the types, and append them to the list MyTypes NewType = deepcopy(BaselineType) NewType.DiscFac = DiscFac_dstn[nn] NewType.seed = nn # give each consumer type a different RNG seed MyTypes.append(NewType) # %% [markdown] # ## Solving and Simulating the Baseline Agents # # Now let's solve and simulate each of our types of agents. If you look in the parameter dictionary (or at any of the agent objects themselves), you will see that each one has an $\texttt{AgentCount}$ attribute of 10000. That is, these seven ex ante heterogeneous types each represent ten thousand individual agents that will experience ex post heterogeneity when they draw different income (and mortality) shocks over time. # # In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population. # %% # Progress bar keeps track interactively of how many have been made for ThisType in log_progress(MyTypes, every=1): ThisType.solve() ThisType.initializeSim() ThisType.simulate() # %% [markdown] # To verify that you wrote that code correctly, let's check that the aggregate level of capital (total assets held by all households) to income ratio equals what we expected it would be. To do that, let's combine the asset holdings of all types, take the mean, and see if we get the desired capital to income ratio of 10.26. # # NB: Because there is no permanent income growth in this model, all shocks are mean one and idiosyncratic, and we have many agents, aggregate or average income is 1.0. # %% aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes]) print('The ratio of aggregate capital to permanent income is ' + decfmt2(np.mean(aLvl_all))) # %% [markdown]
quarters_before_reform_to_plot = 5 # Declare the quarters we want to plot results for quarters_to_plot = np.arange(-quarters_before_reform_to_plot ,160,1) # Create a list to hold the paths of the national saving rate NatlSavingsRates = [] # Create a list of floats to multiply the variance of the permanent shock to income by PermShkVarMultipliers = (1.,2.,4.,8.,11.) # Loop through the desired multipliers, then get the path of the national saving rate # following economic reforms, assuming that the variance of the permanent income shock # was multiplied by the given multiplier index = 0 for PermShkVarMultiplier in log_progress(PermShkVarMultipliers, every=1): NatlSavingsRates.append(calcNatlSavingRate(PermShkVarMultiplier,RNG_seed = index)[-160 - quarters_before_reform_to_plot :]) index +=1 # %% [markdown] # We've calculated the path of the national saving rate as we wanted. All that's left is to graph the results! # %% plt.ylabel('Natl Saving Rate') plt.xlabel('Quarters Since Economic Reforms') plt.plot(quarters_to_plot,NatlSavingsRates[0],label=str(PermShkVarMultipliers[0]) + ' x variance') plt.plot(quarters_to_plot,NatlSavingsRates[1],label=str(PermShkVarMultipliers[1]) + ' x variance') plt.plot(quarters_to_plot,NatlSavingsRates[2],label=str(PermShkVarMultipliers[2]) + ' x variance') plt.plot(quarters_to_plot,NatlSavingsRates[3],label=str(PermShkVarMultipliers[3]) + ' x variance') plt.plot(quarters_to_plot,NatlSavingsRates[4],label=str(PermShkVarMultipliers[4]) + ' x variance') plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
LivPrb = [np.array(StateCount*[init_China_parameters['LivPrb']][0])], #needs to be a list, with 0th element of shape of shape (StateCount,) cycles = 0) ChinaExample.track_vars = ['aNrmNow','cNrmNow','pLvlNow'] # Names of variables to be tracked # %% [markdown] # Now, add in ex-ante heterogeneity in consumers' discount factors # # The cstwMPC parameters do not define a discount factor, since there is ex-ante heterogeneity in the discount factor. To prepare to create this ex-ante heterogeneity, first create the desired number of consumer types: # # %% num_consumer_types = 7 # declare the number of types we want ChineseConsumerTypes = [] # initialize an empty list for nn in log_progress(range(num_consumer_types), every=1): # Now create the types, and append them to the list ChineseConsumerTypes newType = deepcopy(ChinaExample) ChineseConsumerTypes.append(newType) # %% [markdown] # # Now, generate the desired ex-ante heterogeneity, by giving the different consumer types each with their own discount factor. # # First, decide the discount factors to assign: # %% from HARK.utilities import approxUniform bottomDiscFac = 0.9800 topDiscFac = 0.9934