def post_solve(self): self.solution_fast = deepcopy(self.solution) if self.cycles == 0: terminal = 1 else: terminal = self.cycles self.solution[terminal] = self.solution_terminal_cs for i in range(terminal): solution = self.solution[i] # Construct the consumption function as a linear interpolation. cFunc = LinearInterp(solution.mNrm, solution.cNrm) """ Defines the value and marginal value functions for this period. Uses the fact that for a perfect foresight CRRA utility problem, if the MPC in period t is :math:`\kappa_{t}`, and relative risk aversion :math:`\rho`, then the inverse value vFuncNvrs has a constant slope of :math:`\kappa_{t}^{-\rho/(1-\rho)}` and vFuncNvrs has value of zero at the lower bound of market resources mNrmMin. See PerfForesightConsumerType.ipynb documentation notebook for a brief explanation and the links below for a fuller treatment. https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/#vFuncAnalytical https://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#vFuncPF """ vFuncNvrs = LinearInterp( np.array([solution.mNrmMin, solution.mNrmMin + 1.0]), np.array([0.0, solution.vFuncNvrsSlope]), ) vFunc = ValueFuncCRRA(vFuncNvrs, self.CRRA) vPfunc = MargValueFuncCRRA(cFunc, self.CRRA) consumer_solution = ConsumerSolution( cFunc=cFunc, vFunc=vFunc, vPfunc=vPfunc, mNrmMin=solution.mNrmMin, hNrm=solution.hNrm, MPCmin=solution.MPCmin, MPCmax=solution.MPCmax, ) Ex_IncNext = 1.0 # Perfect foresight income of 1 # Add mNrmStE to the solution and return it consumer_solution.mNrmStE = _add_mNrmStENumba( self.Rfree, self.PermGroFac[i], solution.mNrm, solution.cNrm, solution.mNrmMin, Ex_IncNext, _find_mNrmStE, ) self.solution[i] = consumer_solution
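# --- Illustrative sketch (not part of the solver above) ---
# The block comment above relies on the analytical result that, for an infinite-horizon
# perfect foresight CRRA problem, the pseudo-inverse value function
# vNvrs(m) = ((1-rho)*v(m))**(1/(1-rho)) is linear in m with slope MPC**(-rho/(1-rho))
# and equals zero at mNrmMin = -hNrm.  The snippet below checks that claim numerically
# with plain numpy; all parameter values are made up for the illustration and nothing
# here is HARK API.
import numpy as np

rho, Rfree, beta = 2.0, 1.03, 0.96
kappa = 1.0 - (Rfree * beta) ** (1.0 / rho) / Rfree  # infinite-horizon MPC
hNrm = 1.0 / (Rfree - 1.0)  # PDV of future income of 1 per period, normalized

def value_pf(m, T=2000):
    # Present discounted utility of the perfect foresight consumption path from m
    v, disc = 0.0, 1.0
    for _ in range(T):
        c = kappa * (m + hNrm)        # perfect foresight consumption rule
        v += disc * c ** (1.0 - rho) / (1.0 - rho)
        m = Rfree * (m - c) + 1.0     # next period's market resources
        disc *= beta
    return v

m_grid = np.array([1.0, 2.0, 5.0])
vNvrs = ((1.0 - rho) * np.array([value_pf(m) for m in m_grid])) ** (1.0 / (1.0 - rho))
slopes = np.diff(vNvrs) / np.diff(m_grid)
print(slopes, kappa ** (-rho / (1.0 - rho)))  # the two should agree (approximately)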
def update_solution_terminal(self): """ Solves the terminal period of the portfolio choice problem. The solution is trivial, as usual: consume all market resources, and put nothing in the risky asset (because you have nothing anyway). Parameters ---------- None Returns ------- None """ # Consume all market resources: c_T = m_T cFuncAdj_terminal = IdentityFunction() cFuncFxd_terminal = IdentityFunction(i_dim=0, n_dims=2) # Risky share is irrelevant-- no end-of-period assets; set to zero ShareFuncAdj_terminal = ConstantFunction(0.0) ShareFuncFxd_terminal = IdentityFunction(i_dim=1, n_dims=2) # Value function is simply utility from consuming market resources vFuncAdj_terminal = ValueFuncCRRA(cFuncAdj_terminal, self.CRRA) vFuncFxd_terminal = ValueFuncCRRA(cFuncFxd_terminal, self.CRRA) # Marginal value of market resources is marg utility at the consumption function vPfuncAdj_terminal = MargValueFuncCRRA(cFuncAdj_terminal, self.CRRA) dvdmFuncFxd_terminal = MargValueFuncCRRA(cFuncFxd_terminal, self.CRRA) dvdsFuncFxd_terminal = ConstantFunction( 0.0) # No future, no marg value of Share # Construct the terminal period solution self.solution_terminal = PortfolioSolution( cFuncAdj=cFuncAdj_terminal, ShareFuncAdj=ShareFuncAdj_terminal, vFuncAdj=vFuncAdj_terminal, vPfuncAdj=vPfuncAdj_terminal, cFuncFxd=cFuncFxd_terminal, ShareFuncFxd=ShareFuncFxd_terminal, vFuncFxd=vFuncFxd_terminal, dvdmFuncFxd=dvdmFuncFxd_terminal, dvdsFuncFxd=dvdsFuncFxd_terminal, )
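# --- Illustrative sketch (not part of the method above) ---
# The terminal solution is built from an identity consumption function because c_T = m_T,
# so terminal value is just CRRA utility of market resources and its pseudo-inverse
# ("Nvrs") transform is linear (the identity) in m.  A plain-numpy check with a made-up
# CRRA value and no HARK classes:
import numpy as np

CRRA = 5.0
m = np.linspace(0.1, 10.0, 25)
c = m                                         # consume all market resources
v = c ** (1.0 - CRRA) / (1.0 - CRRA)          # terminal value v_T(m) = u(m)
vNvrs = ((1.0 - CRRA) * v) ** (1.0 / (1.0 - CRRA))
assert np.allclose(vNvrs, m)                  # the "Nvrs" transform undoes the utility curvature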
def use_points_for_interpolation(self, cNrm, mNrm, interpolator): """ Make a basic solution object with a consumption function and marginal value function (unconditional on the preference shock). Parameters ---------- cNrm : np.array Consumption points for interpolation. mNrm : np.array Corresponding market resource points for interpolation. interpolator : function A function that constructs and returns a consumption function. Returns ------- solution_now : ConsumerSolution The solution to this period's consumption-saving problem, with a consumption function, marginal value function, and minimum m. """ # Make the preference-shock specific consumption functions PrefShkCount = self.PrefShkVals.size cFunc_list = [] for j in range(PrefShkCount): MPCmin_j = self.MPCminNow * self.PrefShkVals[j]**(1.0 / self.CRRA) cFunc_this_shock = LowerEnvelope( LinearInterp( mNrm[j, :], cNrm[j, :], intercept_limit=self.hNrmNow * MPCmin_j, slope_limit=MPCmin_j, ), self.cFuncNowCnst, ) cFunc_list.append(cFunc_this_shock) # Combine the list of consumption functions into a single interpolation cFuncNow = LinearInterpOnInterp1D(cFunc_list, self.PrefShkVals) # Make the ex ante marginal value function (before the preference shock) m_grid = self.aXtraGrid + self.mNrmMinNow vP_vec = np.zeros_like(m_grid) for j in range( PrefShkCount): # numeric integration over the preference shock vP_vec += (self.uP(cFunc_list[j](m_grid)) * self.PrefShkPrbs[j] * self.PrefShkVals[j]) vPnvrs_vec = self.uPinv(vP_vec) vPfuncNow = MargValueFuncCRRA(LinearInterp(m_grid, vPnvrs_vec), self.CRRA) # Store the results in a solution object and return it solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow) return solution_now
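# --- Illustrative sketch (not part of the method above) ---
# With a multiplicative utility shock eta, period utility is eta*u(c), so the ex ante
# marginal value of market resources is the shock-weighted average E[eta * u'(c_eta(m))],
# which is what the loop above accumulates before it is "decurved" through the inverse
# marginal utility.  Toy numbers, plain numpy, and a made-up (hypothetical) linear
# consumption rule per shock value:
import numpy as np

CRRA = 2.0
PrefShkVals = np.array([0.8, 1.0, 1.2])     # discrete preference shock support
PrefShkPrbs = np.array([0.25, 0.5, 0.25])   # probabilities (sum to one)
m_grid = np.linspace(1.0, 10.0, 20)

def c_of_m(m, eta):
    # hypothetical shock-specific consumption rule: higher eta -> consume more
    return 0.1 * m * eta ** (1.0 / CRRA) + 0.05

uP = lambda c: c ** (-CRRA)
vP = sum(p * eta * uP(c_of_m(m_grid, eta))
         for eta, p in zip(PrefShkVals, PrefShkPrbs))
vPnvrs = vP ** (-1.0 / CRRA)                # pseudo-inverse marginal value, nearly linear in m
print(vPnvrs[:3])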
def update_solution_terminal(self): """ Update the terminal period solution. This method should be run when a new AgentType is created or when CRRA changes. """ self.solution_terminal_cs = ConsumerSolution( cFunc=self.cFunc_terminal_, vFunc=ValueFuncCRRA(self.cFunc_terminal_, self.CRRA), vPfunc=MargValueFuncCRRA(self.cFunc_terminal_, self.CRRA), vPPfunc=MargMargValueFuncCRRA(self.cFunc_terminal_, self.CRRA), mNrmMin=0.0, hNrm=0.0, MPCmin=1.0, MPCmax=1.0, )
def make_vPfunc(self, cFunc): """ Constructs the marginal value function for this period. Parameters ---------- cFunc : function Consumption function this period, defined over market resources and persistent income level. Returns ------- vPfunc : function Marginal value (of market resources) function for this period. """ vPfunc = MargValueFuncCRRA(cFunc, self.CRRA) return vPfunc
def make_EndOfPrdvPfuncCond(self): """ Construct the end-of-period marginal value function conditional on next period's state. Parameters ---------- None Returns ------- EndofPrdvPfunc_cond : MargValueFuncCRRA The end-of-period marginal value function conditional on a particular state occurring in the succeeding period. """ # Get data to construct the end-of-period marginal value function (conditional on next state) self.aNrm_cond = self.prepare_to_calc_EndOfPrdvP() self.EndOfPrdvP_cond = self.calc_EndOfPrdvPcond() EndOfPrdvPnvrs_cond = self.uPinv( self.EndOfPrdvP_cond ) # "decurved" marginal value if self.CubicBool: EndOfPrdvPP_cond = self.calc_EndOfPrdvPP() EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond * self.uPinvP( self.EndOfPrdvP_cond ) # "decurved" marginal marginal value # Construct the end-of-period marginal value function conditional on the next state. if self.CubicBool: EndOfPrdvPnvrsFunc_cond = CubicInterp( self.aNrm_cond, EndOfPrdvPnvrs_cond, EndOfPrdvPnvrsP_cond, lower_extrap=True, ) else: EndOfPrdvPnvrsFunc_cond = LinearInterp( self.aNrm_cond, EndOfPrdvPnvrs_cond, lower_extrap=True ) EndofPrdvPfunc_cond = MargValueFuncCRRA( EndOfPrdvPnvrsFunc_cond, self.CRRA ) # "recurve" the interpolated marginal value function return EndofPrdvPfunc_cond
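# --- Illustrative sketch (not part of the method above) ---
# The "decurve then recurve" step interpolates uPinv(vP), which is close to linear in
# assets, instead of the strongly convex vP itself; re-applying u' afterwards recovers
# marginal value with far less interpolation error.  A plain-numpy demonstration on a
# made-up marginal value function vP(a) = (a+1)**(-CRRA):
import numpy as np

CRRA = 3.0
uP = lambda c: c ** (-CRRA)
uPinv = lambda x: x ** (-1.0 / CRRA)

a_grid = np.linspace(0.0, 5.0, 6)            # coarse end-of-period asset grid
vP_true = lambda a: (a + 1.0) ** (-CRRA)     # hypothetical "true" marginal value

a_fine = np.linspace(0.0, 5.0, 201)
direct = np.interp(a_fine, a_grid, vP_true(a_grid))              # interpolate vP directly
recurved = uP(np.interp(a_fine, a_grid, uPinv(vP_true(a_grid)))) # decurve, interpolate, recurve

err_direct = np.max(np.abs(direct - vP_true(a_fine)))
err_recurved = np.max(np.abs(recurved - vP_true(a_fine)))
print(err_direct, err_recurved)  # recurved error is much smaller (here ~0: the decurved function is exactly linear)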
def update_solution_terminal(self): """ Update the terminal period solution. This method should be run when a new AgentType is created or when CRRA changes. Parameters ---------- None Returns ------- None """ self.solution_terminal.vFunc = ValueFuncCRRA(self.cFunc_terminal_, self.CRRA) self.solution_terminal.vPfunc = MargValueFuncCRRA( self.cFunc_terminal_, self.CRRA) self.solution_terminal.vPPfunc = MargMargValueFuncCRRA( self.cFunc_terminal_, self.CRRA) self.solution_terminal.hNrm = 0.0 # Don't track normalized human wealth self.solution_terminal.hLvl = lambda p: np.zeros_like(p) # But do track absolute human wealth by persistent income self.solution_terminal.mLvlMin = lambda p: np.zeros_like(p)
def solveConsPortfolio( solution_next, ShockDstn, IncShkDstn, RiskyDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt, aXtraGrid, ShareGrid, vFuncBool, AdjustPrb, DiscreteShareBool, ShareLimit, IndepDstnBool, ): """ Solve the one period problem for a portfolio-choice consumer. Parameters ---------- solution_next : PortfolioSolution Solution to next period's problem. ShockDstn : [np.array] List with four arrays: discrete probabilities, permanent income shocks, transitory income shocks, and risky returns. This is only used if the input IndepDstnBool is False, indicating that income and return distributions can't be assumed to be independent. IncShkDstn : distribution.Distribution Discrete distribution of permanent income shocks and transitory income shocks. This is only used if the input IndepDstnBool is True, indicating that income and return distributions are independent. RiskyDstn : [np.array] List with two arrays: discrete probabilities and risky asset returns. This is only used if the input IndepDstnBool is True, indicating that income and return distributions are independent. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rfree : float Risk free interest factor on end-of-period assets. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. In this model, it is *required* to be zero. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. ShareGrid : np.array Array of risky portfolio shares on which to define the interpolation of the consumption function when Share is fixed. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. AdjustPrb : float Probability that the agent will be able to update his portfolio share. DiscreteShareBool : bool Indicator for whether risky portfolio share should be optimized on the continuous [0,1] interval using the FOC (False), or instead only selected from the discrete set of values in ShareGrid (True). If True, then vFuncBool must also be True. ShareLimit : float Limiting lower bound of risky portfolio share as mNrm approaches infinity. IndepDstnBool : bool Indicator for whether the income and risky return distributions are independent of each other, which can speed up the expectations step. Returns ------- solution_now : PortfolioSolution The solution to the single period consumption-saving with portfolio choice problem. Includes two consumption and risky share functions: one for when the agent can adjust his portfolio share (Adj) and when he can't (Fxd). """ # Make sure the individual is liquidity constrained. Allowing a consumer to # borrow *and* invest in an asset with unbounded (negative) returns is a bad mix. if BoroCnstArt != 0.0: raise ValueError("PortfolioConsumerType must have BoroCnstArt=0.0!") # Make sure that if risky portfolio share is optimized only discretely, then # the value function is also constructed (else this task would be impossible). if DiscreteShareBool and (not vFuncBool): raise ValueError( "PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!"
) # Define temporary functions for utility and its derivative and inverse u = lambda x: utility(x, CRRA) uP = lambda x: utilityP(x, CRRA) uPinv = lambda x: utilityP_inv(x, CRRA) n = lambda x: utility_inv(x, CRRA) nP = lambda x: utility_invP(x, CRRA) # Unpack next period's solution vPfuncAdj_next = solution_next.vPfuncAdj dvdmFuncFxd_next = solution_next.dvdmFuncFxd dvdsFuncFxd_next = solution_next.dvdsFuncFxd vFuncAdj_next = solution_next.vFuncAdj vFuncFxd_next = solution_next.vFuncFxd # Major method fork: (in)dependent risky asset return and income distributions if IndepDstnBool: # If the distributions ARE independent... # Unpack the shock distribution TranShks_next = IncShkDstn.X[1] Risky_next = RiskyDstn.X # Flag for whether the natural borrowing constraint is zero zero_bound = np.min(TranShks_next) == 0.0 RiskyMax = np.max(Risky_next) # bNrm represents R*a, balances after asset return shocks but before income. # This just uses the highest risky return as a rough shifter for the aXtraGrid. if zero_bound: aNrmGrid = aXtraGrid bNrmGrid = np.insert(RiskyMax * aXtraGrid, 0, np.min(Risky_next) * aXtraGrid[0]) else: # Add an asset point at exactly zero aNrmGrid = np.insert(aXtraGrid, 0, 0.0) bNrmGrid = RiskyMax * np.insert(aXtraGrid, 0, 0.0) # Get grid and shock sizes, for easier indexing aNrm_N = aNrmGrid.size bNrm_N = bNrmGrid.size Share_N = ShareGrid.size # Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncShkDstn bNrm_tiled, Share_tiled = np.meshgrid(bNrmGrid, ShareGrid, indexing="ij") # Calculate future realizations of market resources def m_nrm_next(shocks, b_nrm): return b_nrm / (shocks[0] * PermGroFac) + shocks[1] # Evaluate realizations of marginal value of market resources next period def dvdb_dist(shocks, b_nrm, Share_next): mNrm_next = m_nrm_next(shocks, b_nrm) dvdmAdj_next = vPfuncAdj_next(mNrm_next) if AdjustPrb < 1.0: dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next) # Combine by adjustment probability dvdm_next = AdjustPrb * dvdmAdj_next + ( 1.0 - AdjustPrb) * dvdmFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed dvdm_next = dvdmAdj_next return (shocks[0] * PermGroFac)**(-CRRA) * dvdm_next # Evaluate realizations of marginal value of risky share next period def dvds_dist(shocks, b_nrm, Share_next): mNrm_next = m_nrm_next(shocks, b_nrm) # No marginal value of Share if it's a free choice! 
dvdsAdj_next = np.zeros_like(mNrm_next) if AdjustPrb < 1.0: dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next) # Combine by adjustment probability dvds_next = AdjustPrb * dvdsAdj_next + ( 1.0 - AdjustPrb) * dvdsFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed dvds_next = dvdsAdj_next return (shocks[0] * PermGroFac)**(1.0 - CRRA) * dvds_next # If the value function has been requested, evaluate realizations of value def v_intermed_dist(shocks, b_nrm, Share_next): mNrm_next = m_nrm_next(shocks, b_nrm) vAdj_next = vFuncAdj_next(mNrm_next) if AdjustPrb < 1.0: vFxd_next = vFuncFxd_next(mNrm_next, Share_next) # Combine by adjustment probability v_next = AdjustPrb * vAdj_next + (1.0 - AdjustPrb) * vFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed v_next = vAdj_next return (shocks[0] * PermGroFac)**(1.0 - CRRA) * v_next # Calculate intermediate marginal value of bank balances by taking expectations over income shocks dvdb_intermed = calc_expectation(IncShkDstn, dvdb_dist, bNrm_tiled, Share_tiled) # calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed dvdb_intermed = dvdb_intermed[:, :, 0] dvdbNvrs_intermed = uPinv(dvdb_intermed) dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid, ShareGrid) dvdbFunc_intermed = MargValueFuncCRRA(dvdbNvrsFunc_intermed, CRRA) # Calculate intermediate value by taking expectations over income shocks if vFuncBool: v_intermed = calc_expectation(IncShkDstn, v_intermed_dist, bNrm_tiled, Share_tiled) # calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed v_intermed = v_intermed[:, :, 0] vNvrs_intermed = n(v_intermed) vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid, ShareGrid) vFunc_intermed = ValueFuncCRRA(vNvrsFunc_intermed, CRRA) # Calculate intermediate marginal value of risky portfolio share by taking expectations dvds_intermed = calc_expectation(IncShkDstn, dvds_dist, bNrm_tiled, Share_tiled) # calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed dvds_intermed = dvds_intermed[:, :, 0] dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid) # Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn aNrm_tiled, Share_tiled = np.meshgrid(aNrmGrid, ShareGrid, indexing="ij") # Evaluate realizations of value and marginal value after asset returns are realized def EndOfPrddvda_dist(shock, a_nrm, Share_next): # Calculate future realizations of bank balances bNrm Rxs = shock - Rfree Rport = Rfree + Share_next * Rxs b_nrm_next = Rport * a_nrm return Rport * dvdbFunc_intermed(b_nrm_next, Share_next) def EndOfPrdv_dist(shock, a_nrm, Share_next): # Calculate future realizations of bank balances bNrm Rxs = shock - Rfree Rport = Rfree + Share_next * Rxs b_nrm_next = Rport * a_nrm return vFunc_intermed(b_nrm_next, Share_next) def EndOfPrddvds_dist(shock, a_nrm, Share_next): # Calculate future realizations of bank balances bNrm Rxs = shock - Rfree Rport = Rfree + Share_next * Rxs b_nrm_next = Rport * a_nrm return Rxs * a_nrm * dvdbFunc_intermed( b_nrm_next, Share_next) + dvdsFunc_intermed( b_nrm_next, Share_next) # Calculate end-of-period marginal value of assets by taking expectations EndOfPrddvda = (DiscFac * LivPrb * calc_expectation( RiskyDstn, EndOfPrddvda_dist, aNrm_tiled, Share_tiled)) # 
calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed EndOfPrddvda = EndOfPrddvda[:, :, 0] EndOfPrddvdaNvrs = uPinv(EndOfPrddvda) # Calculate end-of-period value by taking expectations if vFuncBool: EndOfPrdv = (DiscFac * LivPrb * calc_expectation( RiskyDstn, EndOfPrdv_dist, aNrm_tiled, Share_tiled)) # calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed EndOfPrdv = EndOfPrdv[:, :, 0] EndOfPrdvNvrs = n(EndOfPrdv) # Calculate end-of-period marginal value of risky portfolio share by taking expectations EndOfPrddvds = (DiscFac * LivPrb * calc_expectation( RiskyDstn, EndOfPrddvds_dist, aNrm_tiled, Share_tiled)) # calc_expectation returns one additional "empty" dimension, remove it # this line can be deleted when calc_expectation is fixed EndOfPrddvds = EndOfPrddvds[:, :, 0] else: # If the distributions are NOT independent... # Unpack the shock distribution ShockPrbs_next = ShockDstn[0] PermShks_next = ShockDstn[1] TranShks_next = ShockDstn[2] Risky_next = ShockDstn[3] # Flag for whether the natural borrowing constraint is zero zero_bound = np.min(TranShks_next) == 0.0 # Make tiled arrays to calculate future realizations of mNrm and Share; dimension order: mNrm, Share, shock if zero_bound: aNrmGrid = aXtraGrid else: # Add an asset point at exactly zero aNrmGrid = np.insert(aXtraGrid, 0, 0.0) aNrm_N = aNrmGrid.size Share_N = ShareGrid.size Shock_N = ShockPrbs_next.size aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)), (1, Share_N, Shock_N)) Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)), (aNrm_N, 1, Shock_N)) ShockPrbs_tiled = np.tile(np.reshape(ShockPrbs_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1)) PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1)) TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1)) Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1)) # Calculate future realizations of market resources Rport = (1.0 - Share_tiled) * Rfree + Share_tiled * Risky_tiled mNrm_next = Rport * aNrm_tiled / (PermShks_tiled * PermGroFac) + TranShks_tiled Share_next = Share_tiled # Evaluate realizations of marginal value of market resources next period dvdmAdj_next = vPfuncAdj_next(mNrm_next) if AdjustPrb < 1.0: dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next) # Combine by adjustment probability dvdm_next = AdjustPrb * dvdmAdj_next + (1.0 - AdjustPrb) * dvdmFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed dvdm_next = dvdmAdj_next # Evaluate realizations of marginal value of risky share next period # No marginal value of Share if it's a free choice! 
dvdsAdj_next = np.zeros_like(mNrm_next) if AdjustPrb < 1.0: dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next) # Combine by adjustment probability dvds_next = AdjustPrb * dvdsAdj_next + (1.0 - AdjustPrb) * dvdsFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed dvds_next = dvdsAdj_next # If the value function has been requested, evaluate realizations of value if vFuncBool: vAdj_next = vFuncAdj_next(mNrm_next) if AdjustPrb < 1.0: vFxd_next = vFuncFxd_next(mNrm_next, Share_next) v_next = AdjustPrb * vAdj_next + (1.0 - AdjustPrb) * vFxd_next else: # Don't bother evaluating if there's no chance that portfolio share is fixed v_next = vAdj_next else: v_next = np.zeros_like(dvdm_next) # Trivial array # Calculate end-of-period marginal value of assets by taking expectations temp_fac_A = uP(PermShks_tiled * PermGroFac) # Will use this in a couple places EndOfPrddvda = ( DiscFac * LivPrb * np.sum(ShockPrbs_tiled * Rport * temp_fac_A * dvdm_next, axis=2)) EndOfPrddvdaNvrs = uPinv(EndOfPrddvda) # Calculate end-of-period value by taking expectations # Will use this below temp_fac_B = (PermShks_tiled * PermGroFac)**(1.0 - CRRA) if vFuncBool: EndOfPrdv = (DiscFac * LivPrb * np.sum(ShockPrbs_tiled * temp_fac_B * v_next, axis=2)) EndOfPrdvNvrs = n(EndOfPrdv) # Calculate end-of-period marginal value of risky portfolio share by taking expectations Rxs = Risky_tiled - Rfree EndOfPrddvds = (DiscFac * LivPrb * np.sum( ShockPrbs_tiled * (Rxs * aNrm_tiled * temp_fac_A * dvdm_next + temp_fac_B * dvds_next), axis=2, )) # Major method fork: discrete vs continuous choice of risky portfolio share if DiscreteShareBool: # Optimization of Share on the discrete set ShareGrid opt_idx = np.argmax(EndOfPrdv, axis=1) Share_now = ShareGrid[ opt_idx] # Best portfolio share is one with highest value # Take cNrm at that index as well cNrmAdj_now = EndOfPrddvdaNvrs[np.arange(aNrm_N), opt_idx] if not zero_bound: Share_now[ 0] = 1.0 # aNrm=0, so there's no way to "optimize" the portfolio # Consumption when aNrm=0 does not depend on Share cNrmAdj_now[0] = EndOfPrddvdaNvrs[0, -1] else: # Optimization of Share on continuous interval [0,1] # For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them FOC_s = EndOfPrddvds # Initialize to putting everything in safe asset Share_now = np.zeros_like(aNrmGrid) cNrmAdj_now = np.zeros_like(aNrmGrid) # If agent wants to put more than 100% into risky asset, he is constrained constrained_top = FOC_s[:, -1] > 0.0 # Likewise if he wants to put less than 0% into risky asset constrained_bot = FOC_s[:, 0] < 0.0 Share_now[constrained_top] = 1.0 if not zero_bound: Share_now[ 0] = 1.0 # aNrm=0, so there's no way to "optimize" the portfolio # Consumption when aNrm=0 does not depend on Share cNrmAdj_now[0] = EndOfPrddvdaNvrs[0, -1] # Mark as constrained so that there is no attempt at optimization constrained_top[0] = True # Get consumption when share-constrained cNrmAdj_now[constrained_top] = EndOfPrddvdaNvrs[constrained_top, -1] cNrmAdj_now[constrained_bot] = EndOfPrddvdaNvrs[constrained_bot, 0] # For each value of aNrm, find the value of Share such that FOC-Share == 0. # This loop can probably be eliminated, but it's such a small step that it won't speed things up much. 
crossing = np.logical_and(FOC_s[:, 1:] <= 0.0, FOC_s[:, :-1] >= 0.0) for j in range(aNrm_N): if not (constrained_top[j] or constrained_bot[j]): idx = np.argwhere(crossing[j, :])[0][0] bot_s = ShareGrid[idx] top_s = ShareGrid[idx + 1] bot_f = FOC_s[j, idx] top_f = FOC_s[j, idx + 1] bot_c = EndOfPrddvdaNvrs[j, idx] top_c = EndOfPrddvdaNvrs[j, idx + 1] alpha = 1.0 - top_f / (top_f - bot_f) Share_now[j] = (1.0 - alpha) * bot_s + alpha * top_s cNrmAdj_now[j] = (1.0 - alpha) * bot_c + alpha * top_c # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio mNrmAdj_now = aNrmGrid + cNrmAdj_now # This is a point at which (a,c,share) have consistent length. Take the # snapshot for storing the grid and values in the solution. save_points = { "a": deepcopy(aNrmGrid), "eop_dvda_adj": uP(cNrmAdj_now), "share_adj": deepcopy(Share_now), "share_grid": deepcopy(ShareGrid), "eop_dvda_fxd": uP(EndOfPrddvda), } # Construct the risky share function when the agent can adjust if DiscreteShareBool: mNrmAdj_mid = (mNrmAdj_now[1:] + mNrmAdj_now[:-1]) / 2 mNrmAdj_plus = mNrmAdj_mid * (1.0 + 1e-12) mNrmAdj_comb = (np.transpose(np.vstack( (mNrmAdj_mid, mNrmAdj_plus)))).flatten() mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb, 0, 0.0), mNrmAdj_now[-1]) Share_comb = (np.transpose(np.vstack( (Share_now, Share_now)))).flatten() ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb) else: if zero_bound: Share_lower_bound = ShareLimit else: Share_lower_bound = 1.0 Share_now = np.insert(Share_now, 0, Share_lower_bound) ShareFuncAdj_now = LinearInterp( np.insert(mNrmAdj_now, 0, 0.0), Share_now, intercept_limit=ShareLimit, slope_limit=0.0, ) # Construct the consumption function when the agent can adjust cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0) cFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0), cNrmAdj_now) # Construct the marginal value (of mNrm) function when the agent can adjust vPfuncAdj_now = MargValueFuncCRRA(cFuncAdj_now, CRRA) # Construct the consumption function when the agent *can't* adjust the risky share, as well # as the marginal value of Share function cFuncFxd_by_Share = [] dvdsFuncFxd_by_Share = [] for j in range(Share_N): cNrmFxd_temp = EndOfPrddvdaNvrs[:, j] mNrmFxd_temp = aNrmGrid + cNrmFxd_temp cFuncFxd_by_Share.append( LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0), np.insert(cNrmFxd_temp, 0, 0.0))) dvdsFuncFxd_by_Share.append( LinearInterp( np.insert(mNrmFxd_temp, 0, 0.0), np.insert(EndOfPrddvds[:, j], 0, EndOfPrddvds[0, j]), )) cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid) dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid) # The share function when the agent can't adjust his portfolio is trivial ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2) # Construct the marginal value of mNrm function when the agent can't adjust his share dvdmFuncFxd_now = MargValueFuncCRRA(cFuncFxd_now, CRRA) # If the value function has been requested, construct it now if vFuncBool: # First, make an end-of-period value function over aNrm and Share EndOfPrdvNvrsFunc = BilinearInterp(EndOfPrdvNvrs, aNrmGrid, ShareGrid) EndOfPrdvFunc = ValueFuncCRRA(EndOfPrdvNvrsFunc, CRRA) # Construct the value function when the agent can adjust his portfolio mNrm_temp = aXtraGrid # Just use aXtraGrid as our grid of mNrm values cNrm_temp = cFuncAdj_now(mNrm_temp) aNrm_temp = mNrm_temp - cNrm_temp Share_temp = ShareFuncAdj_now(mNrm_temp) v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp) vNvrs_temp = n(v_temp) vNvrsP_temp = uP(cNrm_temp) * nP(v_temp) 
vNvrsFuncAdj = CubicInterp( np.insert(mNrm_temp, 0, 0.0), # x_list np.insert(vNvrs_temp, 0, 0.0), # f_list np.insert(vNvrsP_temp, 0, vNvrsP_temp[0]), # dfdx_list ) # Re-curve the pseudo-inverse value function vFuncAdj_now = ValueFuncCRRA(vNvrsFuncAdj, CRRA) # Construct the value function when the agent *can't* adjust his portfolio mNrm_temp = np.tile(np.reshape(aXtraGrid, (aXtraGrid.size, 1)), (1, Share_N)) Share_temp = np.tile(np.reshape(ShareGrid, (1, Share_N)), (aXtraGrid.size, 1)) cNrm_temp = cFuncFxd_now(mNrm_temp, Share_temp) aNrm_temp = mNrm_temp - cNrm_temp v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp) vNvrs_temp = n(v_temp) vNvrsP_temp = uP(cNrm_temp) * nP(v_temp) vNvrsFuncFxd_by_Share = [] for j in range(Share_N): vNvrsFuncFxd_by_Share.append( CubicInterp( np.insert(mNrm_temp[:, 0], 0, 0.0), # x_list np.insert(vNvrs_temp[:, j], 0, 0.0), # f_list np.insert(vNvrsP_temp[:, j], 0, vNvrsP_temp[j, 0]), # dfdx_list )) vNvrsFuncFxd = LinearInterpOnInterp1D(vNvrsFuncFxd_by_Share, ShareGrid) vFuncFxd_now = ValueFuncCRRA(vNvrsFuncFxd, CRRA) else: # If vFuncBool is False, fill in dummy values vFuncAdj_now = None vFuncFxd_now = None return PortfolioSolution( cFuncAdj=cFuncAdj_now, ShareFuncAdj=ShareFuncAdj_now, vPfuncAdj=vPfuncAdj_now, vFuncAdj=vFuncAdj_now, cFuncFxd=cFuncFxd_now, ShareFuncFxd=ShareFuncFxd_now, dvdmFuncFxd=dvdmFuncFxd_now, dvdsFuncFxd=dvdsFuncFxd_now, vFuncFxd=vFuncFxd_now, aGrid=save_points["a"], Share_adj=save_points["share_adj"], EndOfPrddvda_adj=save_points["eop_dvda_adj"], ShareGrid=save_points["share_grid"], EndOfPrddvda_fxd=save_points["eop_dvda_fxd"], AdjPrb=AdjustPrb, )
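# --- Illustrative sketch (not part of the solver above) ---
# In the continuous-share branch, the optimal share at each aNrm gridpoint is found by
# locating the sign change of the first order condition dEV/dShare on ShareGrid and
# linearly interpolating to its zero, exactly as in the small bracketing loop above.
# A self-contained numpy version of that step on a made-up FOC profile:
import numpy as np

ShareGrid = np.linspace(0.0, 1.0, 11)
FOC_s = 0.6 - ShareGrid ** 2          # hypothetical dEV/dShare along the grid (one aNrm row)

crossing = np.logical_and(FOC_s[1:] <= 0.0, FOC_s[:-1] >= 0.0)
idx = np.argwhere(crossing)[0][0]
bot_s, top_s = ShareGrid[idx], ShareGrid[idx + 1]
bot_f, top_f = FOC_s[idx], FOC_s[idx + 1]
alpha = 1.0 - top_f / (top_f - bot_f)  # weight on the upper bracket point
share_opt = (1.0 - alpha) * bot_s + alpha * top_s
print(share_opt, np.sqrt(0.6))         # linear-in-FOC root vs. the exact zero of this toy FOC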
def post_solve(self): self.solution_fast = deepcopy(self.solution) if self.cycles == 0: cycles = 1 else: cycles = self.cycles self.solution[-1] = self.solution_terminal_cs for i in range(cycles): for j in range(self.T_cycle): solution = self.solution[i * self.T_cycle + j] # Define the borrowing constraint (limiting consumption function) cFuncNowCnst = LinearInterp( np.array([solution.mNrmMin, solution.mNrmMin + 1]), np.array([0.0, 1.0]), ) """ Constructs a basic solution for this period, including the consumption function and marginal value function. """ if self.CubicBool: # Makes a cubic spline interpolation of the unconstrained consumption # function for this period. cFuncNowUnc = CubicInterp( solution.mNrm, solution.cNrm, solution.MPC, solution.cFuncLimitIntercept, solution.cFuncLimitSlope, ) else: # Makes a linear interpolation to represent the (unconstrained) consumption function. # Construct the unconstrained consumption function cFuncNowUnc = LinearInterp( solution.mNrm, solution.cNrm, solution.cFuncLimitIntercept, solution.cFuncLimitSlope, ) # Combine the constrained and unconstrained functions into the true consumption function cFuncNow = LowerEnvelope(cFuncNowUnc, cFuncNowCnst) # Make the marginal value function and the marginal marginal value function vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA) # Pack up the solution and return it consumer_solution = ConsumerSolution( cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=solution.mNrmMin, hNrm=solution.hNrm, MPCmin=solution.MPCmin, MPCmax=solution.MPCmax, ) if self.vFuncBool: vNvrsFuncNow = CubicInterp( solution.mNrmGrid, solution.vNvrs, solution.vNvrsP, solution.MPCminNvrs * solution.hNrm, solution.MPCminNvrs, ) vFuncNow = ValueFuncCRRA(vNvrsFuncNow, self.CRRA) consumer_solution.vFunc = vFuncNow if self.CubicBool or self.vFuncBool: _searchFunc = ( _find_mNrmStECubic if self.CubicBool else _find_mNrmStELinear ) # Add mNrmStE to the solution and return it consumer_solution.mNrmStE = _add_mNrmStEIndNumba( self.PermGroFac[j], self.Rfree, solution.Ex_IncNext, solution.mNrmMin, solution.mNrm, solution.cNrm, solution.MPC, solution.MPCmin, solution.hNrm, _searchFunc, ) self.solution[i * self.T_cycle + j] = consumer_solution
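# --- Illustrative sketch (not part of the method above) ---
# LowerEnvelope(cFuncNowUnc, cFuncNowCnst) takes the pointwise lower envelope of the
# unconstrained consumption rule and the constrained rule c = m - mNrmMin, so consumption
# never exceeds what the borrowing constraint allows.  A plain-numpy stand-in with made-up
# values (not HARK's LowerEnvelope class):
import numpy as np

mNrmMin = 0.0
m = np.linspace(0.0, 4.0, 9)
c_unconstrained = 0.5 * m + 0.4        # hypothetical unconstrained consumption rule
c_constrained = m - mNrmMin            # spend every available resource
c = np.minimum(c_unconstrained, c_constrained)
print(np.column_stack([m, c]))         # constraint binds (c = m) for low m, unconstrained above the kink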
def make_solution(self, cNrm, mNrm): """ Construct an object representing the solution to this period's problem. Parameters ---------- cNrm : np.array Array of normalized consumption values for interpolation. Each row corresponds to a Markov state for this period. mNrm : np.array Array of normalized market resource values for interpolation. Each row corresponds to a Markov state for this period. Returns ------- solution : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marg- inal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal marginal value function vPPfunc. All of these attributes are lists or arrays, with elements corresponding to the current Markov state. E.g. solution.cFunc[0] is the consumption function when in the i=0 Markov state this period. """ solution = ( ConsumerSolution() ) # An empty solution to which we'll add state-conditional solutions # Calculate the MPC at each market resource gridpoint in each state (if desired) if self.CubicBool: dcda = self.EndOfPrdvPP / self.uPP(np.array(self.cNrmNow)) MPC = dcda / (dcda + 1.0) self.MPC_temp = np.hstack( (np.reshape(self.MPCmaxNow, (self.StateCount, 1)), MPC) ) interpfunc = self.make_cubic_cFunc else: interpfunc = self.make_linear_cFunc # Loop through each current period state and add its solution to the overall solution for i in range(self.StateCount): # Set current-period-conditional human wealth and MPC bounds self.hNrmNow_j = self.hNrmNow[i] self.MPCminNow_j = self.MPCminNow[i] if self.CubicBool: self.MPC_temp_j = self.MPC_temp[i, :] # Construct the consumption function by combining the constrained and unconstrained portions self.cFuncNowCnst = LinearInterp( [self.mNrmMin_list[i], self.mNrmMin_list[i] + 1.0], [0.0, 1.0] ) cFuncNowUnc = interpfunc(mNrm[i, :], cNrm[i, :]) cFuncNow = LowerEnvelope(cFuncNowUnc, self.cFuncNowCnst) # Make the marginal value function and pack up the current-state-conditional solution vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA) solution_cond = ConsumerSolution( cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow ) if ( self.CubicBool ): # Add the state-conditional marginal marginal value function (if desired) solution_cond = self.add_vPPfunc(solution_cond) # Add the current-state-conditional solution to the overall period solution solution.append_solution(solution_cond) # Add the lower bounds of market resources, MPC limits, human resources, # and the value functions to the overall solution solution.mNrmMin = self.mNrmMin_list solution = self.add_MPC_and_human_wealth(solution) if self.vFuncBool: vFuncNow = self.make_vFunc(solution) solution.vFunc = vFuncNow # Return the overall solution to this period return solution
def solve_ConsLaborIntMarg( solution_next, PermShkDstn, TranShkDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt, aXtraGrid, TranShkGrid, vFuncBool, CubicBool, WageRte, LbrCost, ): """ Solves one period of the consumption-saving model with endogenous labor supply on the intensive margin by using the endogenous grid method to invert the first order conditions for the optimal composite good and the intratemporal tradeoff between consumption and leisure, obviating any search for optimal controls. Parameters ---------- solution_next : ConsumerLaborSolution The solution to the next period's problem; must have the attributes vPfunc and bNrmMinFunc representing marginal value of bank balances and minimum (normalized) bank balances as a function of the transitory shock. PermShkDstn: [np.array] Discrete distribution of permanent productivity shocks. TranShkDstn: [np.array] Discrete distribution of transitory productivity shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor. CRRA : float Coefficient of relative risk aversion over the composite good. Rfree : float Risk free interest rate on assets retained at the end of the period. PermGroFac : float Expected permanent income growth factor for next period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. Currently not handled, must be None. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. TranShkGrid: np.array Grid of transitory shock values to use as a state grid for interpolation. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. Not yet handled, must be False. CubicBool: boolean An indicator for whether the solver should use cubic or linear interpolation. Cubic interpolation is not yet handled, must be False. WageRte: float Wage rate per unit of labor supplied. LbrCost: float Cost parameter for supplying labor: u_t = U(x_t), x_t = c_t*z_t^LbrCost, where z_t is leisure = 1 - Lbr_t. Returns ------- solution_now : ConsumerLaborSolution The solution to this period's problem, including a consumption function cFunc, a labor supply function LbrFunc, and a marginal value function vPfunc; each is defined over normalized bank balances and transitory prod shock. Also includes bNrmMinNow, the minimum permissible bank balances as a function of the transitory productivity shock. """ # Make sure the inputs for this period are valid: CRRA > LbrCost/(1+LbrCost) # and CubicBool = False. CRRA condition is met automatically when CRRA >= 1. frac = 1.0 / (1.0 + LbrCost) if CRRA <= frac * LbrCost: print( "Error: make sure CRRA coefficient is strictly greater than alpha/(1+alpha)." ) sys.exit() if BoroCnstArt is not None: print("Error: Model cannot handle artificial borrowing constraint yet.") sys.exit() if vFuncBool or CubicBool: print("Error: Model cannot handle cubic interpolation yet.") sys.exit() # Unpack next period's solution and the productivity shock distribution, and define the inverse (marginal) utility function vPfunc_next = solution_next.vPfunc TranShkPrbs = TranShkDstn.pmf TranShkVals = TranShkDstn.X.flatten() PermShkPrbs = PermShkDstn.pmf PermShkVals = PermShkDstn.X.flatten() TranShkCount = TranShkPrbs.size PermShkCount = PermShkPrbs.size uPinv = lambda X: CRRAutilityP_inv(X, gam=CRRA) # Make tiled versions of the grid of a_t values and the components of the shock distribution aXtraCount = aXtraGrid.size bNrmGrid = aXtraGrid # Next period's bank balances before labor income # Replicated aXtraGrid of b_t values (bNowGrid) for each transitory (productivity) shock bNrmGrid_rep = np.tile(np.reshape(bNrmGrid, (aXtraCount, 1)), (1, TranShkCount)) # Replicated transitory shock values for each a_t state TranShkVals_rep = np.tile(np.reshape(TranShkVals, (1, TranShkCount)), (aXtraCount, 1)) # Replicated transitory shock probabilities for each a_t state TranShkPrbs_rep = np.tile(np.reshape(TranShkPrbs, (1, TranShkCount)), (aXtraCount, 1)) # Construct a function that gives marginal value of next period's bank balances *just before* the transitory shock arrives # Next period's marginal value at every transitory shock and every bank balances gridpoint vPNext = vPfunc_next(bNrmGrid_rep, TranShkVals_rep) # Integrate out the transitory shocks (in TranShkVals direction) to get expected vP just before the transitory shock vPbarNext = np.sum(vPNext * TranShkPrbs_rep, axis=1) # Transform the marginal value through the inverse marginal utility function to "decurve" it vPbarNvrsNext = uPinv(vPbarNext) # Linear interpolation over b_{t+1}, adding a point at minimal value of b = 0.
vPbarNvrsFuncNext = LinearInterp(np.insert(bNrmGrid, 0, 0.0), np.insert(vPbarNvrsNext, 0, 0.0)) # "Recurve" the intermediate marginal value function through the marginal utility function vPbarFuncNext = MargValueFuncCRRA(vPbarNvrsFuncNext, CRRA) # Get next period's bank balances at each permanent shock from each end-of-period asset values # Replicated grid of a_t values for each permanent (productivity) shock aNrmGrid_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, PermShkCount)) # Replicated permanent shock values for each a_t value PermShkVals_rep = np.tile(np.reshape(PermShkVals, (1, PermShkCount)), (aXtraCount, 1)) # Replicated permanent shock probabilities for each a_t value PermShkPrbs_rep = np.tile(np.reshape(PermShkPrbs, (1, PermShkCount)), (aXtraCount, 1)) bNrmNext = (Rfree / (PermGroFac * PermShkVals_rep)) * aNrmGrid_rep # Calculate marginal value of end-of-period assets at each a_t gridpoint # Get marginal value of bank balances next period at each shock vPbarNext = (PermGroFac * PermShkVals_rep)**(-CRRA) * vPbarFuncNext(bNrmNext) # Take expectation across permanent income shocks EndOfPrdvP = (DiscFac * Rfree * LivPrb * np.sum(vPbarNext * PermShkPrbs_rep, axis=1, keepdims=True)) # Compute scaling factor for each transitory shock TranShkScaleFac_temp = (frac * (WageRte * TranShkGrid)**(LbrCost * frac) * (LbrCost**(-LbrCost * frac) + LbrCost**frac)) # Flip it to be a row vector TranShkScaleFac = np.reshape(TranShkScaleFac_temp, (1, TranShkGrid.size)) # Use the first order condition to compute an array of "composite good" x_t values corresponding to (a_t,theta_t) values xNow = (np.dot(EndOfPrdvP, TranShkScaleFac))**(-1.0 / (CRRA - LbrCost * frac)) # Transform the composite good x_t values into consumption c_t and leisure z_t values TranShkGrid_rep = np.tile(np.reshape(TranShkGrid, (1, TranShkGrid.size)), (aXtraCount, 1)) xNowPow = xNow**frac # Will use this object multiple times in math below # Find optimal consumption from optimal composite good cNrmNow = (( (WageRte * TranShkGrid_rep) / LbrCost)**(LbrCost * frac)) * xNowPow # Find optimal leisure from optimal composite good LsrNow = (LbrCost / (WageRte * TranShkGrid_rep))**frac * xNowPow # The zero-th transitory shock is TranShk=0, and the solution is to not work: Lsr = 1, Lbr = 0. cNrmNow[:, 0] = uPinv(EndOfPrdvP.flatten()) LsrNow[:, 0] = 1.0 # Agent cannot choose to work a negative amount of time. When this occurs, set # leisure to one and recompute consumption using simplified first order condition. # Find where labor would be negative if unconstrained violates_labor_constraint = LsrNow > 1.0 EndOfPrdvP_temp = np.tile(np.reshape(EndOfPrdvP, (aXtraCount, 1)), (1, TranShkCount)) cNrmNow[violates_labor_constraint] = uPinv( EndOfPrdvP_temp[violates_labor_constraint]) LsrNow[violates_labor_constraint] = 1.0 # Set up z=1, upper limit # Calculate the endogenous bNrm states by inverting the within-period transition aNrmNow_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, TranShkGrid.size)) bNrmNow = (aNrmNow_rep - WageRte * TranShkGrid_rep + cNrmNow + WageRte * TranShkGrid_rep * LsrNow) # Add an extra gridpoint at the absolute minimal valid value for b_t for each TranShk; # this corresponds to working 100% of the time and consuming nothing. bNowArray = np.concatenate((np.reshape(-WageRte * TranShkGrid, (1, TranShkGrid.size)), bNrmNow), axis=0) # Consume nothing cNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), cNrmNow), axis=0) # And no leisure! 
LsrNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), LsrNow), axis=0) LsrNowArray[0, 0] = 1.0 # Don't work at all if TranShk=0, even if bNrm=0 LbrNowArray = 1.0 - LsrNowArray # Labor is the complement of leisure # Get (pseudo-inverse) marginal value of bank balances using end of period # marginal value of assets (envelope condition), adding a column of zeros # zeros on the left edge, representing the limit at the minimum value of b_t. vPnvrsNowArray = np.concatenate((np.zeros( (1, TranShkGrid.size)), uPinv(EndOfPrdvP_temp))) # Construct consumption and marginal value functions for this period bNrmMinNow = LinearInterp(TranShkGrid, bNowArray[0, :]) # Loop over each transitory shock and make a linear interpolation to get lists # of optimal consumption, labor and (pseudo-inverse) marginal value by TranShk cFuncNow_list = [] LbrFuncNow_list = [] vPnvrsFuncNow_list = [] for j in range(TranShkGrid.size): # Adjust bNrmNow for this transitory shock, so bNrmNow_temp[0] = 0 bNrmNow_temp = bNowArray[:, j] - bNowArray[0, j] # Make consumption function for this transitory shock cFuncNow_list.append(LinearInterp(bNrmNow_temp, cNowArray[:, j])) # Make labor function for this transitory shock LbrFuncNow_list.append(LinearInterp(bNrmNow_temp, LbrNowArray[:, j])) # Make pseudo-inverse marginal value function for this transitory shock vPnvrsFuncNow_list.append( LinearInterp(bNrmNow_temp, vPnvrsNowArray[:, j])) # Make linear interpolation by combining the lists of consumption, labor and marginal value functions cFuncNowBase = LinearInterpOnInterp1D(cFuncNow_list, TranShkGrid) LbrFuncNowBase = LinearInterpOnInterp1D(LbrFuncNow_list, TranShkGrid) vPnvrsFuncNowBase = LinearInterpOnInterp1D(vPnvrsFuncNow_list, TranShkGrid) # Construct consumption, labor, pseudo-inverse marginal value functions with # bNrmMinNow as the lower bound. This removes the adjustment in the loop above. cFuncNow = VariableLowerBoundFunc2D(cFuncNowBase, bNrmMinNow) LbrFuncNow = VariableLowerBoundFunc2D(LbrFuncNowBase, bNrmMinNow) vPnvrsFuncNow = VariableLowerBoundFunc2D(vPnvrsFuncNowBase, bNrmMinNow) # Construct the marginal value function by "recurving" its pseudo-inverse vPfuncNow = MargValueFuncCRRA(vPnvrsFuncNow, CRRA) # Make a solution object for this period and return it solution = ConsumerLaborSolution(cFunc=cFuncNow, LbrFunc=LbrFuncNow, vPfunc=vPfuncNow, bNrmMin=bNrmMinNow) return solution
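# --- Illustrative sketch (not part of the solver above) ---
# The solver works with the composite good x = c * z**LbrCost (z = leisure) and then
# splits an optimal x back into c and z with the closed forms used above:
# c = ((W*theta)/LbrCost)**(LbrCost*frac) * x**frac and
# z = (LbrCost/(W*theta))**frac * x**frac, where frac = 1/(1+LbrCost).  These satisfy the
# intratemporal condition c = (W*theta/LbrCost) * z and recombine to x.  A quick
# plain-numpy consistency check with made-up numbers:
import numpy as np

LbrCost, WageRte, theta = 0.35, 1.2, 1.0
frac = 1.0 / (1.0 + LbrCost)
x = np.array([0.5, 1.0, 2.0])                      # hypothetical composite-good values

c = ((WageRte * theta) / LbrCost) ** (LbrCost * frac) * x ** frac
z = (LbrCost / (WageRte * theta)) ** frac * x ** frac

assert np.allclose(c * z ** LbrCost, x)                  # recombines into the composite good
assert np.allclose(c, (WageRte * theta / LbrCost) * z)   # intratemporal first order condition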
def update_solution_terminal(self): """ Updates the terminal period solution and solves for optimal consumption and labor when there is no future. Parameters ---------- None Returns ------- None """ t = -1 TranShkGrid = self.TranShkGrid[t] LbrCost = self.LbrCost[t] WageRte = self.WageRte[t] bNrmGrid = np.insert( self.aXtraGrid, 0, 0.0 ) # Add a point at b_t = 0 to make sure that bNrmGrid goes down to 0 bNrmCount = bNrmGrid.size # Number of bank balance gridpoints TranShkCount = TranShkGrid.size # Number of transitory shock gridpoints bNrmGridTerm = np.tile( np.reshape(bNrmGrid, (bNrmCount, 1)), (1, TranShkCount )) # Replicated bNrmGrid for each transitory shock theta_t TranShkGridTerm = np.tile( TranShkGrid, (bNrmCount, 1) ) # Tile the grid of transitory shocks for the terminal solution. # Array of leisure values for the terminal solution LsrTerm = np.minimum( (LbrCost / (1.0 + LbrCost)) * (bNrmGridTerm / (WageRte * TranShkGridTerm) + 1.0), 1.0, ) LsrTerm[0, 0] = 1.0 LbrTerm = 1.0 - LsrTerm # Calculate market resources in terminal period, which is consumption mNrmTerm = bNrmGridTerm + LbrTerm * WageRte * TranShkGridTerm cNrmTerm = mNrmTerm # Consume everything we have # Make a bilinear interpolation to represent the labor and consumption functions LbrFunc_terminal = BilinearInterp(LbrTerm, bNrmGrid, TranShkGrid) cFunc_terminal = BilinearInterp(cNrmTerm, bNrmGrid, TranShkGrid) # Compute the composite good x_T = c_T * z_T**LbrCost at the terminal solution xEffTerm = LsrTerm**LbrCost * cNrmTerm vNvrsFunc_terminal = BilinearInterp(xEffTerm, bNrmGrid, TranShkGrid) vFunc_terminal = ValueFuncCRRA(vNvrsFunc_terminal, self.CRRA) # Use the envelope condition at the terminal solution to construct the marginal value function vPterm = LsrTerm**LbrCost * CRRAutilityP(xEffTerm, gam=self.CRRA) vPnvrsTerm = CRRAutilityP_inv( vPterm, gam=self.CRRA ) # Evaluate the inverse of the CRRA marginal utility function at a given marginal value, vP vPnvrsFunc_terminal = BilinearInterp(vPnvrsTerm, bNrmGrid, TranShkGrid) vPfunc_terminal = MargValueFuncCRRA( vPnvrsFunc_terminal, self.CRRA) # Get the Marginal Value function bNrmMin_terminal = ConstantFunction( 0.0 ) # Trivial function that returns the same real output for any input self.solution_terminal = ConsumerLaborSolution( cFunc=cFunc_terminal, LbrFunc=LbrFunc_terminal, vFunc=vFunc_terminal, vPfunc=vPfunc_terminal, bNrmMin=bNrmMin_terminal, )
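# --- Illustrative sketch (not part of the method above) ---
# The closed form for terminal leisure used above,
# LsrTerm = min((LbrCost/(1+LbrCost)) * (b/(W*theta) + 1), 1), comes from combining the
# terminal budget constraint c = b + (1-z)*W*theta with the intratemporal condition
# c = (W*theta/LbrCost)*z.  A small plain-numpy check with made-up numbers:
import numpy as np

LbrCost, WageRte, theta = 0.35, 1.2, 0.9
b = np.linspace(0.0, 3.0, 7)                      # terminal bank balances

z_closed = np.minimum((LbrCost / (1.0 + LbrCost)) * (b / (WageRte * theta) + 1.0), 1.0)
c = b + (1.0 - z_closed) * WageRte * theta        # terminal consumption from the budget

# Wherever leisure is interior (< 1), the intratemporal condition should hold exactly
interior = z_closed < 1.0
assert np.allclose(c[interior], (WageRte * theta / LbrCost) * z_closed[interior])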
def solve_ConsRepAgent( solution_next, DiscFac, CRRA, IncShkDstn, CapShare, DeprFac, PermGroFac, aXtraGrid ): """ Solve one period of the simple representative agent consumption-saving model. Parameters ---------- solution_next : ConsumerSolution Solution to the next period's problem (i.e. previous iteration). DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. IncShkDstn : distribution.Distribution A discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: permanent shocks, transitory shocks. CapShare : float Capital's share of income in Cobb-Douglas production function. DeprFac : float Depreciation rate of capital. PermGroFac : float Expected permanent income growth factor at the end of this period. aXtraGrid : np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. In this model, the minimum acceptable level is always zero. Returns ------- solution_now : ConsumerSolution Solution to this period's problem (new iteration). """ # Unpack next period's solution and the income distribution vPfuncNext = solution_next.vPfunc ShkPrbsNext = IncShkDstn.pmf PermShkValsNext = IncShkDstn.X[0] TranShkValsNext = IncShkDstn.X[1] # Make tiled versions of end-of-period assets, shocks, and probabilities aNrmNow = aXtraGrid aNrmCount = aNrmNow.size ShkCount = ShkPrbsNext.size aNrm_tiled = np.tile(np.reshape(aNrmNow, (aNrmCount, 1)), (1, ShkCount)) # Tile arrays of the income shocks and put them into useful shapes PermShkVals_tiled = np.tile( np.reshape(PermShkValsNext, (1, ShkCount)), (aNrmCount, 1) ) TranShkVals_tiled = np.tile( np.reshape(TranShkValsNext, (1, ShkCount)), (aNrmCount, 1) ) ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext, (1, ShkCount)), (aNrmCount, 1)) # Calculate next period's capital-to-permanent-labor ratio under each combination # of end-of-period assets and shock realization kNrmNext = aNrm_tiled / (PermGroFac * PermShkVals_tiled) # Calculate next period's market resources KtoLnext = kNrmNext / TranShkVals_tiled RfreeNext = 1.0 - DeprFac + CapShare * KtoLnext ** (CapShare - 1.0) wRteNext = (1.0 - CapShare) * KtoLnext ** CapShare mNrmNext = RfreeNext * kNrmNext + wRteNext * TranShkVals_tiled # Calculate end-of-period marginal value of assets for the RA vPnext = vPfuncNext(mNrmNext) EndOfPrdvP = DiscFac * np.sum( RfreeNext * (PermGroFac * PermShkVals_tiled) ** (-CRRA) * vPnext * ShkPrbs_tiled, axis=1, ) # Invert the first order condition to get consumption, then find endogenous gridpoints cNrmNow = EndOfPrdvP ** (-1.0 / CRRA) mNrmNow = aNrmNow + cNrmNow # Construct the consumption function and the marginal value function cFuncNow = LinearInterp(np.insert(mNrmNow, 0, 0.0), np.insert(cNrmNow, 0, 0.0)) vPfuncNow = MargValueFuncCRRA(cFuncNow, CRRA) # Construct and return the solution for this period solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow) return solution_now
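# --- Illustrative sketch (not part of the solver above) ---
# The endogenous grid step at the end inverts the Euler equation: given the end-of-period
# marginal value EndOfPrdvP on an exogenous asset grid, consumption is
# c = EndOfPrdvP**(-1/CRRA) and the endogenous market-resources grid is m = a + c.
# A toy, self-contained numpy version with a made-up EndOfPrdvP profile:
import numpy as np

CRRA = 2.0
aGrid = np.linspace(0.1, 10.0, 30)               # exogenous end-of-period asset grid
EndOfPrdvP = 0.9 * (aGrid + 1.0) ** (-CRRA)      # hypothetical end-of-period marginal value

cNrm = EndOfPrdvP ** (-1.0 / CRRA)               # invert u'(c) = EndOfPrdvP
mNrm = aGrid + cNrm                              # endogenous gridpoints
cFunc = lambda m: np.interp(m, np.insert(mNrm, 0, 0.0), np.insert(cNrm, 0, 0.0))
print(cFunc(np.array([1.0, 5.0, 15.0])))         # consumption rule implied by this iteration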
def solve_ConsRepAgentMarkov( solution_next, MrkvArray, DiscFac, CRRA, IncShkDstn, CapShare, DeprFac, PermGroFac, aXtraGrid, ): """ Solve one period of the simple representative agent consumption-saving model. This version supports a discrete Markov process. Parameters ---------- solution_next : ConsumerSolution Solution to the next period's problem (i.e. previous iteration). MrkvArray : np.array Markov transition array between this period and next period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. IncShkDstn : [distribution.Distribution] A list of discrete approximations to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. CapShare : float Capital's share of income in Cobb-Douglas production function. DeprFac : float Depreciation rate of capital. PermGroFac : [float] Expected permanent income growth factor for each state we could be in next period. aXtraGrid : np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. In this model, the minimum acceptable level is always zero. Returns ------- solution_now : ConsumerSolution Solution to this period's problem (new iteration). """ # Define basic objects StateCount = MrkvArray.shape[0] aNrmNow = aXtraGrid aNrmCount = aNrmNow.size EndOfPrdvP_cond = np.zeros((StateCount, aNrmCount)) + np.nan # Loop over *next period* states, calculating conditional EndOfPrdvP for j in range(StateCount): # Define next-period-state conditional objects vPfuncNext = solution_next.vPfunc[j] ShkPrbsNext = IncShkDstn[j].pmf PermShkValsNext = IncShkDstn[j].X[0] TranShkValsNext = IncShkDstn[j].X[1] # Make tiled versions of end-of-period assets, shocks, and probabilities ShkCount = ShkPrbsNext.size aNrm_tiled = np.tile(np.reshape(aNrmNow, (aNrmCount, 1)), (1, ShkCount)) # Tile arrays of the income shocks and put them into useful shapes PermShkVals_tiled = np.tile( np.reshape(PermShkValsNext, (1, ShkCount)), (aNrmCount, 1) ) TranShkVals_tiled = np.tile( np.reshape(TranShkValsNext, (1, ShkCount)), (aNrmCount, 1) ) ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext, (1, ShkCount)), (aNrmCount, 1)) # Calculate next period's capital-to-permanent-labor ratio under each combination # of end-of-period assets and shock realization kNrmNext = aNrm_tiled / (PermGroFac[j] * PermShkVals_tiled) # Calculate next period's market resources KtoLnext = kNrmNext / TranShkVals_tiled RfreeNext = 1.0 - DeprFac + CapShare * KtoLnext ** (CapShare - 1.0) wRteNext = (1.0 - CapShare) * KtoLnext ** CapShare mNrmNext = RfreeNext * kNrmNext + wRteNext * TranShkVals_tiled # Calculate end-of-period marginal value of assets for the RA vPnext = vPfuncNext(mNrmNext) EndOfPrdvP_cond[j, :] = DiscFac * np.sum( RfreeNext * (PermGroFac[j] * PermShkVals_tiled) ** (-CRRA) * vPnext * ShkPrbs_tiled, axis=1, ) # Apply the Markov transition matrix to get unconditional end-of-period marginal value EndOfPrdvP = np.dot(MrkvArray, EndOfPrdvP_cond) # Construct the consumption function and marginal value function for each discrete state cFuncNow_list = [] vPfuncNow_list = [] for i in range(StateCount): # Invert the first order condition to get consumption, then find endogenous gridpoints cNrmNow = EndOfPrdvP[i, :] ** (-1.0 / CRRA) mNrmNow = aNrmNow + cNrmNow # Construct the consumption function and the marginal value function cFuncNow_list.append( LinearInterp(np.insert(mNrmNow, 0, 0.0), 
np.insert(cNrmNow, 0, 0.0)) ) vPfuncNow_list.append(MargValueFuncCRRA(cFuncNow_list[-1], CRRA)) # Construct and return the solution for this period solution_now = ConsumerSolution(cFunc=cFuncNow_list, vPfunc=vPfuncNow_list) return solution_now
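# --- Illustrative sketch (not part of the solver above) ---
# The key Markov step above is EndOfPrdvP = np.dot(MrkvArray, EndOfPrdvP_cond): each row
# of the transition matrix mixes the next-state-conditional marginal values into the
# expectation relevant for the corresponding current state.  A tiny plain-numpy example
# with made-up numbers:
import numpy as np

MrkvArray = np.array([[0.9, 0.1],
                      [0.4, 0.6]])                       # rows sum to one
EndOfPrdvP_cond = np.array([[1.0, 0.8, 0.6],             # conditional on next state 0
                            [2.0, 1.5, 1.1]])            # conditional on next state 1
EndOfPrdvP = MrkvArray @ EndOfPrdvP_cond                 # expectation by current state
print(EndOfPrdvP)   # row i is the probability-weighted mix for current state i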