def defUtilityFuncs(self):
    '''
    Defines CRRA utility function for this period (and its derivatives,
    and their inverses), saving them as attributes of self for other
    methods to use.  Note: the input here is xi*c - v(L), not c itself
    (GHH preferences with preference shock xi).

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    ConsPerfForesightSolver.defUtilityFuncs(self)
    self.uPinv = lambda u: utilityP_inv(u, gam=self.CRRA)
    self.uPinvP = lambda u: utilityP_invP(u, gam=self.CRRA)
    self.uinvP = lambda u: utility_invP(u, gam=self.CRRA)
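

# A minimal sketch (illustration only; the helper name below is hypothetical,
# not part of the source) of the CRRA closed forms behind the pseudo-inverse
# helpers defined above, assuming gam != 1.  With u(c) = c**(1-gam)/(1-gam)
# and u'(c) = c**(-gam):
#   uPinv(x)  = x**(-1/gam)                 (inverse of marginal utility)
#   uPinvP(x) = (-1/gam) * x**(-1/gam - 1)  (derivative of that inverse)
#   uinvP(x)  = ((1-gam)*x)**(gam/(1-gam))  (derivative of the inverse of u)
def _crra_inverse_examples(x, gam):
    uPinv = x ** (-1. / gam)
    uPinvP = (-1. / gam) * x ** (-1. / gam - 1.)
    uinvP = ((1. - gam) * x) ** (gam / (1. - gam))
    return uPinv, uPinvP, uinvP
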

def solveConsPortfolio(solution_next, ShockDstn, IncomeDstn, RiskyDstn, LivPrb,
                       DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt, aXtraGrid,
                       ShareGrid, vFuncBool, AdjustPrb, DiscreteShareBool,
                       ShareLimit, IndepDstnBool):
    '''
    Solve the one period problem for a portfolio-choice consumer.

    Parameters
    ----------
    solution_next : PortfolioSolution
        Solution to next period's problem.
    ShockDstn : [np.array]
        List with four arrays: discrete probabilities, permanent income shocks,
        transitory income shocks, and risky returns.  This is only used if the
        input IndepDstnBool is False, indicating that income and return
        distributions can't be assumed to be independent.
    IncomeDstn : DiscreteDistribution
        Discrete distribution of permanent and transitory income shocks, with
        attributes pmf (probabilities) and X (shock values).  This is only used
        if the input IndepDstnBool is True, indicating that income and return
        distributions are independent.
    RiskyDstn : DiscreteDistribution
        Discrete distribution of risky asset returns, with attributes pmf and X.
        This is only used if the input IndepDstnBool is True, indicating that
        income and return distributions are independent.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt : float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with.  In this model, it is *required* to be zero.
    aXtraGrid : np.array
        Array of "extra" end-of-period asset values -- assets above the
        absolute minimum acceptable level.
    ShareGrid : np.array
        Array of risky portfolio shares on which to define the interpolation
        of the consumption function when Share is fixed.
    vFuncBool : boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    AdjustPrb : float
        Probability that the agent will be able to update his portfolio share.
    DiscreteShareBool : bool
        Indicator for whether risky portfolio share should be optimized on the
        continuous [0,1] interval using the FOC (False), or instead only
        selected from the discrete set of values in ShareGrid (True).  If True,
        then vFuncBool must also be True.
    ShareLimit : float
        Limiting lower bound of risky portfolio share as mNrm approaches infinity.
    IndepDstnBool : bool
        Indicator for whether the income and risky return distributions are
        independent of each other, which can speed up the expectations step.

    Returns
    -------
    solution_now : PortfolioSolution
        The solution to the single period consumption-saving with portfolio
        choice problem.  Includes consumption and risky share functions both
        for when the agent can adjust his portfolio share (Adj) and for when
        he can't (Fxd).
    '''
    # Make sure the individual is liquidity constrained.  Allowing a consumer to
    # borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
    if BoroCnstArt != 0.0:
        raise ValueError('PortfolioConsumerType must have BoroCnstArt=0.0!')

    # Make sure that if risky portfolio share is optimized only discretely, then
    # the value function is also constructed (else this task would be impossible).
    if (DiscreteShareBool and (not vFuncBool)):
        raise ValueError(
            'PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!'
        )
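
    # Solution method overview: work backward from next period's (marginal) value
    # functions by (1) taking expectations over the shock distribution(s) to get
    # end-of-period (marginal) value as a function of end-of-period assets aNrm
    # and the risky share, (2) choosing the optimal risky share at each aNrm
    # gridpoint (on the discrete ShareGrid or on [0,1] via the FOC), and then
    # (3) inverting the consumption FOC (endogenous grid method) to construct
    # this period's consumption, share, and value functions.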

    # Define temporary functions for utility and its derivative and inverse
    u = lambda x: utility(x, CRRA)
    uP = lambda x: utilityP(x, CRRA)
    uPinv = lambda x: utilityP_inv(x, CRRA)
    n = lambda x: utility_inv(x, CRRA)
    nP = lambda x: utility_invP(x, CRRA)

    # Unpack next period's solution
    vPfuncAdj_next = solution_next.vPfuncAdj
    dvdmFuncFxd_next = solution_next.dvdmFuncFxd
    dvdsFuncFxd_next = solution_next.dvdsFuncFxd
    vFuncAdj_next = solution_next.vFuncAdj
    vFuncFxd_next = solution_next.vFuncFxd

    # Major method fork: (in)dependent risky asset return and income distributions
    if IndepDstnBool:  # If the distributions ARE independent...
        # Unpack the shock distribution
        IncPrbs_next = IncomeDstn.pmf
        PermShks_next = IncomeDstn.X[0]
        TranShks_next = IncomeDstn.X[1]
        Rprbs_next = RiskyDstn.pmf
        Risky_next = RiskyDstn.X
        zero_bound = (np.min(TranShks_next) == 0.)  # Flag for whether the natural borrowing constraint is zero
        RiskyMax = np.max(Risky_next)

        # bNrm represents R*a, balances after asset return shocks but before income.
        # This just uses the highest risky return as a rough shifter for the aXtraGrid.
        if zero_bound:
            aNrmGrid = aXtraGrid
            bNrmGrid = np.insert(RiskyMax * aXtraGrid, 0,
                                 np.min(Risky_next) * aXtraGrid[0])
        else:
            aNrmGrid = np.insert(aXtraGrid, 0, 0.0)  # Add an asset point at exactly zero
            bNrmGrid = RiskyMax * np.insert(aXtraGrid, 0, 0.0)

        # Get grid and shock sizes, for easier indexing
        aNrm_N = aNrmGrid.size
        bNrm_N = bNrmGrid.size
        Share_N = ShareGrid.size
        Income_N = IncPrbs_next.size
        Risky_N = Rprbs_next.size

        # Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncomeDstn
        bNrm_tiled = np.tile(np.reshape(bNrmGrid, (bNrm_N, 1, 1)), (1, Share_N, Income_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)), (bNrm_N, 1, Income_N))
        IncPrbs_tiled = np.tile(np.reshape(IncPrbs_next, (1, 1, Income_N)), (bNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Income_N)), (bNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Income_N)), (bNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        mNrm_next = bNrm_tiled / (PermShks_tiled * PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (1. - AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (1. - AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next
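
        # All tiled arrays above share the 3-D layout (bNrm, Share, income shock),
        # so expectations over the income shocks below are simple probability-
        # weighted sums along axis=2.
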
        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate intermediate marginal value of bank balances by taking expectations over income shocks
        temp_fac_A = uP(PermShks_tiled * PermGroFac)  # Will use this in a couple places
        dvdb_intermed = np.sum(IncPrbs_tiled * temp_fac_A * dvdm_next, axis=2)
        dvdbNvrs_intermed = uPinv(dvdb_intermed)
        dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid, ShareGrid)
        dvdbFunc_intermed = MargValueFunc2D(dvdbNvrsFunc_intermed, CRRA)

        # Calculate intermediate value by taking expectations over income shocks
        temp_fac_B = (PermShks_tiled * PermGroFac) ** (1. - CRRA)  # Will use this below
        if vFuncBool:
            v_intermed = np.sum(IncPrbs_tiled * temp_fac_B * v_next, axis=2)
            vNvrs_intermed = n(v_intermed)
            vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid, ShareGrid)
            vFunc_intermed = ValueFunc2D(vNvrsFunc_intermed, CRRA)

        # Calculate intermediate marginal value of risky portfolio share by taking expectations
        dvds_intermed = np.sum(IncPrbs_tiled * temp_fac_B * dvds_next, axis=2)
        dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid)

        # Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)), (1, Share_N, Risky_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)), (aNrm_N, 1, Risky_N))
        Rprbs_tiled = np.tile(np.reshape(Rprbs_next, (1, 1, Risky_N)), (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Risky_N)), (aNrm_N, Share_N, 1))

        # Calculate future realizations of bank balances bNrm
        Share_next = Share_tiled
        Rxs = Risky_tiled - Rfree
        Rport = Rfree + Share_next * Rxs
        bNrm_next = Rport * aNrm_tiled

        # Evaluate realizations of value and marginal value after asset returns are realized
        dvdb_next = dvdbFunc_intermed(bNrm_next, Share_next)
        dvds_next = dvdsFunc_intermed(bNrm_next, Share_next)
        if vFuncBool:
            v_next = vFunc_intermed(bNrm_next, Share_next)
        else:
            v_next = np.zeros_like(dvdb_next)

        # Calculate end-of-period marginal value of assets by taking expectations
        EndOfPrddvda = DiscFac * LivPrb * np.sum(Rprbs_tiled * Rport * dvdb_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)

        # Calculate end-of-period value by taking expectations
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(Rprbs_tiled * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            Rprbs_tiled * (Rxs * aNrm_tiled * dvdb_next + dvds_next), axis=2)

    else:  # If the distributions are NOT independent...
        # Unpack the shock distribution
        ShockPrbs_next = ShockDstn[0]
        PermShks_next = ShockDstn[1]
        TranShks_next = ShockDstn[2]
        Risky_next = ShockDstn[3]
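
        # With a joint distribution, each atom carries a (permanent shock,
        # transitory shock, risky return) triple, so a single probability-
        # weighted sum over the shock dimension handles the whole expectation;
        # there is no intermediate step over bank balances as in the
        # independent case above.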
        zero_bound = (np.min(TranShks_next) == 0.)  # Flag for whether the natural borrowing constraint is zero

        # Make tiled arrays to calculate future realizations of mNrm and Share; dimension order: mNrm, Share, shock
        if zero_bound:
            aNrmGrid = aXtraGrid
        else:
            aNrmGrid = np.insert(aXtraGrid, 0, 0.0)  # Add an asset point at exactly zero
        aNrm_N = aNrmGrid.size
        Share_N = ShareGrid.size
        Shock_N = ShockPrbs_next.size
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)), (1, Share_N, Shock_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)), (aNrm_N, 1, Shock_N))
        ShockPrbs_tiled = np.tile(np.reshape(ShockPrbs_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Shock_N)), (aNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        Rport = (1. - Share_tiled) * Rfree + Share_tiled * Risky_tiled
        mNrm_next = Rport * aNrm_tiled / (PermShks_tiled * PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (1. - AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (1. - AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next

        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate end-of-period marginal value of assets by taking expectations
        temp_fac_A = uP(PermShks_tiled * PermGroFac)  # Will use this in a couple places
        EndOfPrddvda = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * Rport * temp_fac_A * dvdm_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)
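
        # EndOfPrddvda is the end-of-period marginal value of assets,
        # DiscFac * LivPrb * E[Rport * (PermGroFac*PermShk)**(-CRRA) * dvdm_next];
        # applying the inverse marginal utility (uPinv) gives the consumption
        # level that satisfies the first order condition at each (aNrm, Share)
        # gridpoint, which is used in the endogenous grid step below.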
        # Calculate end-of-period value by taking expectations
        temp_fac_B = (PermShks_tiled * PermGroFac) ** (1. - CRRA)  # Will use this below
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(
                ShockPrbs_tiled * temp_fac_B * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        Rxs = Risky_tiled - Rfree
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * (Rxs * aNrm_tiled * temp_fac_A * dvdm_next + temp_fac_B * dvds_next), axis=2)

    # Major method fork: discrete vs continuous choice of risky portfolio share
    if DiscreteShareBool:
        # Optimization of Share on the discrete set ShareGrid
        opt_idx = np.argmax(EndOfPrdv, axis=1)
        Share_now = ShareGrid[opt_idx]  # Best portfolio share is one with highest value
        cNrmAdj_now = EndOfPrddvdaNvrs[np.arange(aNrm_N), opt_idx]  # Take cNrm at that index as well
        if not zero_bound:
            Share_now[0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[0, -1]  # Consumption when aNrm=0 does not depend on Share

    else:
        # Optimization of Share on continuous interval [0,1]
        # For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them
        FOC_s = EndOfPrddvds
        Share_now = np.zeros_like(aNrmGrid)  # Initialize to putting everything in safe asset
        cNrmAdj_now = np.zeros_like(aNrmGrid)
        constrained = FOC_s[:, -1] > 0.  # If agent wants to put more than 100% into risky asset, he is constrained
        Share_now[constrained] = 1.0
        if not zero_bound:
            Share_now[0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[0, -1]  # Consumption when aNrm=0 does not depend on Share
        cNrmAdj_now[constrained] = EndOfPrddvdaNvrs[constrained, -1]  # Get consumption when share-constrained

        # For each value of aNrm, find the value of Share such that FOC-Share == 0.
        # This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
        crossing = np.logical_and(FOC_s[:, 1:] <= 0., FOC_s[:, :-1] >= 0.)
        for j in range(aNrm_N):
            if Share_now[j] == 0.:
                try:
                    idx = np.argwhere(crossing[j, :])[0][0]
                    bot_s = ShareGrid[idx]
                    top_s = ShareGrid[idx + 1]
                    bot_f = FOC_s[j, idx]
                    top_f = FOC_s[j, idx + 1]
                    bot_c = EndOfPrddvdaNvrs[j, idx]
                    top_c = EndOfPrddvdaNvrs[j, idx + 1]
                    alpha = 1. - top_f / (top_f - bot_f)
                    Share_now[j] = (1. - alpha) * bot_s + alpha * top_s
                    cNrmAdj_now[j] = (1. - alpha) * bot_c + alpha * top_c
                except:
                    print('No optimal controls found for a=' + str(aNrmGrid[j]))

    # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
    mNrmAdj_now = aNrmGrid + cNrmAdj_now
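
    # In the discrete-share case below, the optimal share is a step function of
    # mNrm: each chosen share value is repeated at two nearly identical mNrm
    # points (the midpoints between adjacent endogenous gridpoints and those
    # midpoints scaled up by a tiny factor), so LinearInterp traces out
    # near-vertical jumps between the discrete shares.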
    # Construct the risky share function when the agent can adjust
    if DiscreteShareBool:
        mNrmAdj_mid = (mNrmAdj_now[1:] + mNrmAdj_now[:-1]) / 2
        mNrmAdj_plus = mNrmAdj_mid * (1. + 1e-12)
        mNrmAdj_comb = (np.transpose(np.vstack((mNrmAdj_mid, mNrmAdj_plus)))).flatten()
        mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb, 0, 0.0), mNrmAdj_now[-1])
        Share_comb = (np.transpose(np.vstack((Share_now, Share_now)))).flatten()
        ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb)
    else:
        if zero_bound:
            Share_lower_bound = ShareLimit
        else:
            Share_lower_bound = 1.0
        Share_now = np.insert(Share_now, 0, Share_lower_bound)
        ShareFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0),
                                        Share_now,
                                        intercept_limit=ShareLimit,
                                        slope_limit=0.0)

    # Construct the consumption function when the agent can adjust
    cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0)
    cFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0), cNrmAdj_now)

    # Construct the marginal value (of mNrm) function when the agent can adjust
    vPfuncAdj_now = MargValueFunc(cFuncAdj_now, CRRA)

    # Construct the consumption function when the agent *can't* adjust the risky share, as well
    # as the marginal value of Share function
    cFuncFxd_by_Share = []
    dvdsFuncFxd_by_Share = []
    for j in range(Share_N):
        cNrmFxd_temp = EndOfPrddvdaNvrs[:, j]
        mNrmFxd_temp = aNrmGrid + cNrmFxd_temp
        cFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(cNrmFxd_temp, 0, 0.0)))
        dvdsFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(EndOfPrddvds[:, j], 0, EndOfPrddvds[0, j])))
    cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid)
    dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid)

    # The share function when the agent can't adjust his portfolio is trivial
    ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2)

    # Construct the marginal value of mNrm function when the agent can't adjust his share
    dvdmFuncFxd_now = MargValueFunc2D(cFuncFxd_now, CRRA)

    # If the value function has been requested, construct it now
    if vFuncBool:
        # First, make an end-of-period value function over aNrm and Share
        EndOfPrdvNvrsFunc = BilinearInterp(EndOfPrdvNvrs, aNrmGrid, ShareGrid)
        EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc, CRRA)

        # Construct the value function when the agent can adjust his portfolio
        mNrm_temp = aXtraGrid  # Just use aXtraGrid as our grid of mNrm values
        cNrm_temp = cFuncAdj_now(mNrm_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        Share_temp = ShareFuncAdj_now(mNrm_temp)
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncAdj = CubicInterp(
            np.insert(mNrm_temp, 0, 0.0),               # x_list
            np.insert(vNvrs_temp, 0, 0.0),              # f_list
            np.insert(vNvrsP_temp, 0, vNvrsP_temp[0]))  # dfdx_list
        vFuncAdj_now = ValueFunc(vNvrsFuncAdj, CRRA)  # Re-curve the pseudo-inverse value function

        # Construct the value function when the agent *can't* adjust his portfolio
        mNrm_temp = np.tile(np.reshape(aXtraGrid, (aXtraGrid.size, 1)), (1, Share_N))
        Share_temp = np.tile(np.reshape(ShareGrid, (1, Share_N)), (aXtraGrid.size, 1))
        cNrm_temp = cFuncFxd_now(mNrm_temp, Share_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncFxd_by_Share = []
        for j in range(Share_N):
            vNvrsFuncFxd_by_Share.append(
                CubicInterp(
                    np.insert(mNrm_temp[:, 0], 0, 0.0),                   # x_list
                    np.insert(vNvrs_temp[:, j], 0, 0.0),                  # f_list
                    np.insert(vNvrsP_temp[:, j], 0, vNvrsP_temp[0, j])))  # dfdx_list
        vNvrsFuncFxd = LinearInterpOnInterp1D(vNvrsFuncFxd_by_Share, ShareGrid)
        vFuncFxd_now = ValueFunc2D(vNvrsFuncFxd, CRRA)
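
        # Note: both value functions above are built by interpolating the
        # pseudo-inverse vNvrs = u^{-1}(v), which is nearly linear in m under
        # CRRA utility and therefore interpolates much more accurately than v
        # itself; ValueFunc and ValueFunc2D re-apply the utility curvature
        # when evaluated.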
    else:  # If vFuncBool is False, fill in dummy values
        vFuncAdj_now = None
        vFuncFxd_now = None

    # Create and return this period's solution
    return PortfolioSolution(cFuncAdj=cFuncAdj_now,
                             ShareFuncAdj=ShareFuncAdj_now,
                             vPfuncAdj=vPfuncAdj_now,
                             vFuncAdj=vFuncAdj_now,
                             cFuncFxd=cFuncFxd_now,
                             ShareFuncFxd=ShareFuncFxd_now,
                             dvdmFuncFxd=dvdmFuncFxd_now,
                             dvdsFuncFxd=dvdsFuncFxd_now,
                             vFuncFxd=vFuncFxd_now)
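

# A minimal, self-contained sketch (illustration only, not part of the solver
# above; the function name, grid, and FOC values are hypothetical) of the
# zero-crossing interpolation used in the continuous-share branch: given the
# marginal value of the risky share evaluated on ShareGrid at one asset
# gridpoint, find where it changes sign and linearly interpolate the optimal
# share between the bracketing gridpoints.
def _example_share_from_FOC():
    import numpy as np
    ShareGrid = np.linspace(0.0, 1.0, 6)                  # hypothetical share grid
    FOC_s = np.array([0.9, 0.5, 0.2, -0.1, -0.4, -0.8])   # hypothetical FOC values
    crossing = np.logical_and(FOC_s[1:] <= 0., FOC_s[:-1] >= 0.)
    idx = np.argwhere(crossing)[0][0]                     # first sign change
    bot_s, top_s = ShareGrid[idx], ShareGrid[idx + 1]
    bot_f, top_f = FOC_s[idx], FOC_s[idx + 1]
    alpha = 1. - top_f / (top_f - bot_f)                  # weight on the upper gridpoint
    return (1. - alpha) * bot_s + alpha * top_s           # interpolated optimal share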