def objectiveFuncMPC(center, spread):
    '''
    Beta-dist objective function in the style of cstwMPC: measures the distance
    between simulated and empirical mean semiannual MPCs by wealth quintile.

    Relies on module-level state: Agents, TypeCount, cutoffs, moments_data.

    Parameters
    ----------
    center : float
        Mean of the uniform distribution of discount factors.
    spread : float
        Half-width of the uniform distribution of discount factors.

    Returns
    -------
    distance : float
        Euclidean distance between simulated and empirical moments.
    '''
    # Assign each agent type its discount factor from the discretized uniform
    DiscFacSet = approxUniform(N=TypeCount, bot=center - spread, top=center + spread)[1]
    for j, DiscFac_j in enumerate(DiscFacSet):
        Agents[j](DiscFac=DiscFac_j)

    # Solve and simulate every type, then pool asset levels and MPCs across types
    multiThreadCommands(Agents, ['solve()', 'initializeSim()', 'simulate()'])
    aLvl_sim = np.concatenate([this_agent.aLvlNow for this_agent in Agents])
    MPC_quarterly = np.concatenate([this_agent.MPCnow for this_agent in Agents])

    # Compound the (quarterly) MPC over two periods to get a semiannual MPC
    MPC_semiannual = 1. - (1. - MPC_quarterly)**2

    # Average the semiannual MPC within each wealth group defined by cutoffs
    moments_sim = calcSubpopAvg(MPC_semiannual, aLvl_sim, cutoffs)
    moments_diff = moments_sim - moments_data
    moments_diff[1:] *= 1  # rescaling factor for later moments; currently 1 (no-op)
    distance = np.sqrt(np.dot(moments_diff, moments_diff))

    print('Tried center=' + str(center) + ', spread=' + str(spread) +
          ', got distance=' + str(distance))
    print(moments_sim)
    return distance
def objectiveFuncWealth(center, spread):
    '''
    Beta-dist objective function in the style of cstwMPC: measures the distance
    between simulated and empirical moments, where the moments are the mean
    wealth-to-income ratio plus the 20-40-60-80 Lorenz curve points.

    Relies on module-level state: Agents, TypeCount, percentile_targets,
    moments_data.

    Parameters
    ----------
    center : float
        Mean of the uniform distribution of discount factors.
    spread : float
        Half-width of the uniform distribution of discount factors.

    Returns
    -------
    distance : float
        Euclidean distance between simulated and empirical moments.
    '''
    # Assign each agent type its discount factor from the discretized uniform
    DiscFacSet = approxUniform(N=TypeCount, bot=center - spread, top=center + spread)[1]
    for j, DiscFac_j in enumerate(DiscFacSet):
        Agents[j](DiscFac=DiscFac_j)

    # Solve and simulate every type, then pool asset holdings across types
    multiThreadCommands(Agents, ['solve()', 'initializeSim()', 'simulate()'])
    aLvl_sim = np.concatenate([this_agent.aLvlNow for this_agent in Agents])
    aNrm_sim = np.concatenate([this_agent.aNrmNow for this_agent in Agents])

    # First moment is mean normalized assets; the rest are Lorenz shares of aLvl
    Lorenz_sim = list(getLorenzShares(aLvl_sim, percentiles=percentile_targets))
    moments_sim = np.array([np.mean(aNrm_sim)] + Lorenz_sim)

    moments_diff = moments_sim - moments_data
    moments_diff[1:] *= 1  # rescale Lorenz shares; factor is currently 1 (no-op)
    distance = np.sqrt(np.dot(moments_diff, moments_diff))

    print('Tried center=' + str(center) + ', spread=' + str(spread) +
          ', got distance=' + str(distance))
    print(moments_sim)
    return distance
def FagerengObjFunc(center, spread, verbose=False):
    '''
    Objective function for the quick and dirty structural estimation to fit
    Fagereng, Holm, and Natvik's Table 9 results with a basic infinite horizon
    consumption-saving model (with permanent and transitory income shocks).

    Relies on module-level state: EstTypeList, TypeCount, lottery_size,
    do_secant, Splurge, MPC_target, drop_corner.

    Parameters
    ----------
    center : float
        Center of the uniform distribution of discount factors.
    spread : float
        Width of the uniform distribution of discount factors.
    verbose : bool
        When True, print to screen MPC table for these parameters.  When False,
        print (center, spread, distance).

    Returns
    -------
    distance : float
        Euclidean distance between simulated MPCs and (adjusted) Table 9 MPCs.
    '''
    # Give our consumer types the requested discount factor distribution
    beta_set = approxUniform(N=TypeCount, bot=center - spread, top=center + spread)[1]
    for j in range(TypeCount):
        EstTypeList[j](DiscFac=beta_set[j])

    # Solve and simulate all consumer types, then gather their wealth levels
    multiThreadCommands(
        EstTypeList, ['solve()', 'initializeSim()', 'simulate()', 'unpackcFunc()'])
    WealthNow = np.concatenate([ThisType.aLvlNow for ThisType in EstTypeList])

    # Get wealth quartile cutoffs and distribute them to each consumer type;
    # WealthQ ends up in {0,1,2,3}, counting how many cutoffs each agent exceeds
    quartile_cuts = getPercentiles(WealthNow, percentiles=[0.25, 0.50, 0.75])
    for ThisType in EstTypeList:
        WealthQ = np.zeros(ThisType.AgentCount, dtype=int)
        for n in range(3):
            WealthQ[ThisType.aLvlNow > quartile_cuts[n]] += 1
        ThisType(WealthQ=WealthQ)

    # Keep track of MPC sets in lists of lists of arrays, indexed [lottery k][quartile q]
    MPC_set_list = [[[], [], [], []],
                    [[], [], [], []],
                    [[], [], [], []],
                    [[], [], [], []]]

    # Calculate the MPC for each of the four lottery sizes for all agents
    for ThisType in EstTypeList:
        ThisType.simulate(1)
        c_base = ThisType.cNrmNow  # baseline consumption with no lottery win
        MPC_this_type = np.zeros((ThisType.AgentCount, 4))
        for k in range(4):  # Get MPC for all agents of this type
            Llvl = lottery_size[k]
            Lnrm = Llvl / ThisType.pLvlNow  # lottery prize normalized by permanent income
            if do_secant:
                # Secant MPC: spend Splurge immediately, consume out of the rest
                SplurgeNrm = Splurge / ThisType.pLvlNow
                mAdj = ThisType.mNrmNow + Lnrm - SplurgeNrm
                cAdj = ThisType.cFunc[0](mAdj) + SplurgeNrm
                MPC_this_type[:, k] = (cAdj - c_base) / Lnrm
            else:
                # Marginal MPC from the consumption function's derivative.
                # NOTE(review): chained assignment also binds cAdj, which is
                # never read afterward on this path — likely a leftover.
                mAdj = ThisType.mNrmNow + Lnrm
                MPC_this_type[:, k] = cAdj = ThisType.cFunc[0].derivative(mAdj)

        # Sort the MPCs into the proper MPC sets by wealth quartile
        for q in range(4):
            these = ThisType.WealthQ == q
            for k in range(4):
                MPC_set_list[k][q].append(MPC_this_type[these, k])

    # Calculate average within each MPC set
    simulated_MPC_means = np.zeros((4, 4))
    for k in range(4):
        for q in range(4):
            MPC_array = np.concatenate(MPC_set_list[k][q])
            simulated_MPC_means[k, q] = np.mean(MPC_array)

    # Calculate Euclidean distance between simulated MPC averages and Table 9 targets
    diff = simulated_MPC_means - MPC_target
    if drop_corner:
        # Ignore the smallest-lottery/lowest-wealth cell when fitting
        diff[0, 0] = 0.0
    distance = np.sqrt(np.sum((diff)**2))
    if verbose:
        print(simulated_MPC_means)
    else:
        print(center, spread, distance)
    return distance
def test_multiThreadCommands(self):
    """multiThreadCommands returns None on success; unknown commands raise AttributeError."""
    # A successful run has no return value
    result = multiThreadCommands(self.agents, ["solve()"])
    self.assertIsNone(result)
    # Asking agents to run a method they don't define must raise
    with self.assertRaises(AttributeError):
        multiThreadCommandsFake(self.agents, ["foobar"])
do_this_stuff = ['updateSolutionTerminal()', 'solve()', 'unpackcFunc()']

# Solve the model for each type with a plain serial loop (no multithreading).
# NOTE(review): clock() here presumably comes from an earlier import; time.clock
# was removed in Python 3.8 — confirm which timer this module actually binds.
start_time = clock()
multiThreadCommandsFake(my_agent_list, do_this_stuff)  # same interface, but just loops
end_time = clock()
print('Solving ' + str(type_count) + ' types without multithreading took ' +
      mystr(end_time - start_time) + ' seconds.')

# Plot every type's consumption function on a single figure
plotFuncs([agent.cFunc[0] for agent in my_agent_list], 0, 5)

# Wipe each type's solution so the multithreaded run can't just reuse it
for i in range(type_count):
    this_agent = my_agent_list[i]
    this_agent.solution = None
    this_agent.cFunc = None
    this_agent.time_vary.remove('solution')
    this_agent.time_vary.remove('cFunc')

# Now time HARK's actual (joblib-based) multithreaded solve
start_time = clock()
multiThreadCommands(my_agent_list, do_this_stuff)
end_time = clock()
print('Solving ' + str(type_count) + ' types with multithreading took ' +
      mystr(end_time - start_time) + ' seconds.')

# Plot again to confirm the solutions really were recomputed
plotFuncs([agent.cFunc[0] for agent in my_agent_list], 0, 5)
TestOpenCL = IndShockConsumerTypesOpenCL(TypeList) # Solve all of the types using OpenCL and time it. t_start = clock() TestOpenCL.prepareToSolve() TestOpenCL.solve() TestOpenCL.finish( ) # Wait for the OpenCL queue to clear, so that timing is reported correctly t_end = clock() print('Solving ' + str(TestOpenCL.TypeCount) + ' types took ' + str(t_end - t_start) + ' seconds with OpenCL.') # Solve all of the types using Python and time it. You can choose whether to use a basic form # of multithreading (joblib) by using multiThreadCommands or multiThreadCommandsFake. t_start = clock() multiThreadCommands(TypeList, ['solve()']) t_end = clock() print('Solving ' + str(len(TypeList)) + ' types took ' + str(t_end - t_start) + ' seconds with Python.') # Simulate all of the types using OpenCL and time it. t_start = clock() TestOpenCL.loadSimulationKernels() TestOpenCL.writeSimVar( 'aNrmNow') # Writes current values in aNrmNow to a buffer TestOpenCL.writeSimVar( 'pLvlNow') # Writes current values in pLvlNow to a buffer TestOpenCL.simNperiods(T_sim) TestOpenCL.readSimVar( 'cNrmNow' ) # Reads current values in cNrmNow buffer to attribute of self