# --- Summary statistics for the sticky small-open-economy simulation ---
# Aggregate consumption: mean across agents of cNrm*pLvl each period, in logs,
# discarding the burn-in periods before taking growth rates.
log_agg_cons = np.log(np.mean(StickySOEconsumers.cNrmNow_hist * StickySOEconsumers.pLvlNow_hist, axis=1))[ignore_periods:]
agg_cons_growth = np.diff(log_agg_cons)
print('Standard deviation of change in log aggregate consumption = ' + str(np.std(agg_cons_growth)))

# Cross-sectional dispersion, averaged over the post-burn-in periods.
aLvl_trimmed = StickySOEconsumers.aLvlNow_hist[ignore_periods:, :]
pLvl_trimmed = StickySOEconsumers.pLvlNow_hist[ignore_periods:, :]
cLvl_trimmed = StickySOEconsumers.cNrmNow_hist[ignore_periods:, :] * pLvl_trimmed
print('Standard deviation of log individual assets = ' + str(np.mean(np.std(np.log(aLvl_trimmed), axis=1))))
print('Standard deviation of log individual consumption = ' + str(np.mean(np.std(np.log(cLvl_trimmed), axis=1))))
print('Standard deviation of log individual productivity = ' + str(np.mean(np.std(np.log(pLvl_trimmed), axis=1))))

# Individual consumption growth: difference logs over time, then take the
# cross-sectional std dev each period and average it.
log_ind_cons = np.log(StickySOEconsumers.cNrmNow_hist * StickySOEconsumers.pLvlNow_hist)[ignore_periods:, :]
ind_cons_growth = np.diff(log_ind_cons, axis=0)
print('Standard deviation of change in log individual consumption = ' + str(np.mean(np.std(ind_cons_growth, axis=1))))

# --- Make a Cobb-Douglas economy and the representative agent who lives in it ---
StickyDSGEconsumer = StickyEconsumerDSGEType(**init_DSGE_consumer)
StickyDSGEeconomy = CobbDouglasEconomy(**init_DSGE_market)
StickyDSGEeconomy.agents = [StickyDSGEconsumer]
StickyDSGEeconomy.makeAggShkHist()
StickyDSGEconsumer.getEconomyData(StickyDSGEeconomy)
StickyDSGEconsumer.track_vars = ['aLvlNow','mNrmNow','cNrmNow','pLvlNow','pLvlErrNow']

# Test the solution: plot the consumption function at each aggregate-market-
# resources gridpoint in the agent's Mgrid.
StickyDSGEeconomy.solve()
mGrid = np.linspace(0, 10, 200)
for Mnow in StickyDSGEconsumer.Mgrid.tolist():
    cNow = StickyDSGEconsumer.solution[0].cFunc(mGrid, Mnow * np.ones_like(mGrid))
    plt.plot(mGrid, cNow)
plt.show()

# Post-burn-in moments for the representative agent.
print('Average aggregate assets = ' + str(np.mean(StickyDSGEconsumer.aLvlNow_hist[ignore_periods:, :])))
print('Average aggregate consumption = ' + str(np.mean(StickyDSGEconsumer.cNrmNow_hist[ignore_periods:, :] * StickyDSGEconsumer.pLvlNow_hist[ignore_periods:, :])))
print('Standard deviation of log aggregate assets = ' +
      str(np.std(np.log(StickyDSGEconsumer.aLvlNow_hist[ignore_periods:, :]))))
# NOTE(review): this chunk is cut at both ends — it opens with the tail of an
# `if Params.do_beta_dist:` statement (the `if` header and the beta-dist branch
# lie before this view) and closes with a bare `for this_type in
# agg_shocks_type_list:` whose body is missing.  The physical line has also
# been collapsed, so the inline `#` comments were originally separate lines.
# Logic (to be confirmed against the full file): pick the (beta, nabla) pair
# for the chosen estimation mode, spread discount factors uniformly across
# Params.pref_type_count types, build/solve a CobbDouglasEconomy with
# aggregate shocks, seeding each agent's initial permanent income from a
# mean-one lognormal, and time the solve.
nabla_agg = nabla_estimate else: beta_agg = beta_point_estimate nabla_agg = 0.0 DiscFac_list_agg = approxUniform(N=Params.pref_type_count, bot=beta_agg - nabla_agg, top=beta_agg + nabla_agg)[1] assignBetaDistribution(agg_shocks_type_list, DiscFac_list_agg) # Make a market for solving the FBS aggregate shocks model agg_shocks_market = CobbDouglasEconomy( agents=agg_shocks_type_list, act_T=Params.sim_periods_agg_shocks, tolerance=0.0001, **Params.aggregate_params) agg_shocks_market.makeAggShkHist() # Edit the consumer types so they have the right data for this_type in agg_shocks_market.agents: this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents, sigma=0.9, seed=0) this_type.getEconomyData(agg_shocks_market) # Solve the aggregate shocks version of the model t_start = time() agg_shocks_market.solve() t_end = time() print('Solving the aggregate shocks model took ' + str(t_end - t_start) + ' seconds.') for this_type in agg_shocks_type_list:
plt.show()

##########################################

# Now do aggregate shocks of a market

# FIX(review): time.clock() was deprecated in Python 3.3 and removed in 3.8,
# so the timing calls below would raise on a modern interpreter.  Use time()
# (wall clock) instead, matching the timing style used elsewhere in this
# script family.
from time import time

# Make an aggregate shocks consumer: an infinite-horizon agent facing both
# idiosyncratic and aggregate productivity shocks.
AggShockExample = AggShockConsumerType(**Params.init_agg_shocks)
AggShockExample.cycles = 0  # infinite horizon
AggShockExample.sim_periods = 3000
AggShockExample.makeIncShkHist()  # Simulate a history of idiosyncratic shocks

# Make a Cobb-Douglas economy for the agents
EconomyExample = CobbDouglasEconomy(agents=[AggShockExample],
                                    act_T=AggShockExample.sim_periods,
                                    **Params.init_cobb_douglas)
EconomyExample.makeAggShkHist()  # Simulate a history of aggregate shocks

# Have the consumers inherit relevant objects from the economy
AggShockExample.getEconomyData(EconomyExample)

# Solve the microeconomic model for the aggregate shocks example type (and display results)
t_start = time()
AggShockExample.solve()
t_end = time()
print('Solving an aggregate shocks consumer took ' + mystr(t_end - t_start) + ' seconds.')

# # Solve the "macroeconomic" model by searching for a "fixed point dynamic rule"
# t_start = time()
# EconomyExample.solve()
# t_end = time()
# NOTE(review): this chunk begins mid-loop — `agg_shocks_type_list.append(new_type)`
# is the tail of a `for` loop whose header (constructing `new_type`) lies
# before this view — so it cannot be safely reformatted from here.  The
# physical line has been collapsed; the inline `#` comments were originally
# separate lines.  Contents: choose (beta, nabla) by estimation mode, spread
# discount factors uniformly across preference types, build a
# CobbDouglasEconomy with aggregate shocks, seed each agent's initial
# permanent income from a mean-one lognormal (fixed seed=0 for
# reproducibility), time agg_shocks_market.solve(), then construct wealth
# (W_history = pHist*bHist) and annualized-MPC (kappa) histories per type and
# a weight vector that zeroes out the first 200 burn-in periods.
agg_shocks_type_list.append(new_type) if Params.do_beta_dist: beta_agg = beta_dist_estimate nabla_agg = nabla_estimate else: beta_agg = beta_point_estimate nabla_agg = 0.0 DiscFac_list_agg = approxUniform(N=Params.pref_type_count,bot=beta_agg-nabla_agg,top=beta_agg+nabla_agg)[1] assignBetaDistribution(agg_shocks_type_list,DiscFac_list_agg) # Make a market for solving the FBS aggregate shocks model agg_shocks_market = CobbDouglasEconomy(agents = agg_shocks_type_list, act_T = Params.sim_periods_agg_shocks, tolerance = 0.0001, **Params.aggregate_params) agg_shocks_market.makeAggShkHist() # Edit the consumer types so they have the right data for this_type in agg_shocks_market.agents: this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents,sigma=0.9,seed=0) this_type.getEconomyData(agg_shocks_market) # Solve the aggregate shocks version of the model t_start = time() agg_shocks_market.solve() t_end = time() print('Solving the aggregate shocks model took ' + str(t_end - t_start) + ' seconds.') for this_type in agg_shocks_type_list: this_type.W_history = this_type.pHist*this_type.bHist this_type.kappa_history = 1.0 - (1.0 - this_type.MPChist)**4 agg_shock_weights = np.concatenate((np.zeros(200),np.ones(Params.sim_periods_agg_shocks-200)))