Code Example #1
    def def_BoroCnst(self, BoroCnstArt):
        """
        Defines the constrained portion of the consumption function as cFuncNowCnst,
        an attribute of self.

        Parameters
        ----------
        BoroCnstArt : float or None
            Borrowing constraint for the minimum allowable assets to end the
            period with.  If it is less than the natural borrowing constraint,
            then it is irrelevant; BoroCnstArt=None indicates no artificial
            borrowing constraint.

        Returns
        -------
        None
        """
        # Make temporary grids of income shocks and next period income values
        ShkCount = self.TranShkValsNext.size
        pLvlCount = self.pLvlGrid.size
        PermShkVals_temp = np.tile(
            np.reshape(self.PermShkValsNext, (1, ShkCount)), (pLvlCount, 1))
        TranShkVals_temp = np.tile(
            np.reshape(self.TranShkValsNext, (1, ShkCount)), (pLvlCount, 1))
        pLvlNext_temp = (np.tile(
            np.reshape(self.pLvlNextFunc(self.pLvlGrid), (pLvlCount, 1)),
            (1, ShkCount),
        ) * PermShkVals_temp)

        # Find the natural borrowing constraint for each persistent income level
        aLvlMin_candidates = (self.mLvlMinNext(pLvlNext_temp) -
                              TranShkVals_temp * pLvlNext_temp) / self.Rfree
        aLvlMinNow = np.max(aLvlMin_candidates, axis=1)
        self.BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid, 0, 0.0),
                                        np.insert(aLvlMinNow, 0, 0.0))

        # Define the minimum allowable mLvl by pLvl as the greater of the natural and artificial borrowing constraints
        if self.BoroCnstArt is not None:
            self.BoroCnstArt = LinearInterp(np.array([0.0, 1.0]),
                                            np.array([0.0, self.BoroCnstArt]))
            self.mLvlMinNow = UpperEnvelope(self.BoroCnstArt, self.BoroCnstNat)
        else:
            self.mLvlMinNow = self.BoroCnstNat

        # Define the constrained consumption function as "consume all" shifted by mLvlMin
        cFuncNowCnstBase = BilinearInterp(
            np.array([[0.0, 0.0], [1.0, 1.0]]),
            np.array([0.0, 1.0]),
            np.array([0.0, 1.0]),
        )
        self.cFuncNowCnst = VariableLowerBoundFunc2D(cFuncNowCnstBase,
                                                     self.mLvlMinNow)
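
The natural borrowing constraint computed above is the tightest end-of-period asset level that still guarantees mLvl >= mLvlMin next period under every income draw. A minimal NumPy sketch with hypothetical grids (identity pLvlNextFunc, unit permanent shocks, and a zero next-period mLvlMin; none of these values come from the example above):

import numpy as np

# Hypothetical inputs: 3 persistent income levels, 2 transitory shock values
pLvlGrid = np.array([0.5, 1.0, 2.0])
TranShkVals = np.array([0.2, 1.8])
Rfree = 1.03

# Assume pLvlNextFunc is the identity and permanent shocks equal 1, so next
# period's persistent income is just today's grid, repeated once per shock
pLvlNext = np.tile(pLvlGrid.reshape(3, 1), (1, 2))
mLvlMinNext = np.zeros_like(pLvlNext)  # assume no borrowing allowed next period

# End-of-period assets that just guarantee mLvl >= mLvlMin next period, per shock
aLvlMin_candidates = (mLvlMinNext - TranShkVals * pLvlNext) / Rfree
aLvlMinNow = np.max(aLvlMin_candidates, axis=1)  # tightest (largest) candidate per pLvl
print(aLvlMinNow)  # roughly [-0.097, -0.194, -0.388] with these toy numbers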
Code Example #2
class GenIncProcessConsumerType(IndShockConsumerType):
    """
    A consumer type with idiosyncratic shocks to persistent and transitory income.
    His problem is defined by a sequence of income distributions, survival
    probabilities, and persistent income growth functions, as well as time invariant
    values for risk aversion, discount factor, the interest rate, the grid of
    end-of-period assets, and an artificial borrowing constraint.

    See init_explicit_perm_inc for a dictionary of the
    keywords that should be passed to the constructor.

    Parameters
    ----------
    cycles : int
        Number of times the sequence of periods should be solved.
    """

    cFunc_terminal_ = BilinearInterp(np.array([[0.0, 0.0], [1.0, 1.0]]),
                                     np.array([0.0, 1.0]), np.array([0.0,
                                                                     1.0]))
    solution_terminal_ = ConsumerSolution(cFunc=cFunc_terminal_,
                                          mNrmMin=0.0,
                                          hNrm=0.0,
                                          MPCmin=1.0,
                                          MPCmax=1.0)

    state_vars = ['pLvl', "mLvl", 'aLvl']

    def __init__(self, **kwds):
        params = init_explicit_perm_inc.copy()
        params.update(kwds)

        # Initialize a basic ConsumerType
        IndShockConsumerType.__init__(self, **params)
        self.solve_one_period = make_one_period_oo_solver(
            ConsGenIncProcessSolver)

        # a poststate?
        self.state_now['aLvl'] = None
        self.state_prev['aLvl'] = None

        # better way to do this...
        self.state_now["mLvl"] = None
        self.state_prev["mLvl"] = None

    def pre_solve(self):
        #        AgentType.pre_solve()
        self.update_solution_terminal()

    def update(self):
        """
        Update the income process, the assets grid, the persistent income grid,
        and the terminal solution.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        IndShockConsumerType.update(self)
        self.update_pLvlNextFunc()
        self.update_pLvlGrid()

    def update_solution_terminal(self):
        """
        Update the terminal period solution.  This method should be run when a
        new AgentType is created or when CRRA changes.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.solution_terminal.vFunc = ValueFuncCRRA(self.cFunc_terminal_,
                                                     self.CRRA)
        self.solution_terminal.vPfunc = MargValueFuncCRRA(
            self.cFunc_terminal_, self.CRRA)
        self.solution_terminal.vPPfunc = MargMargValueFuncCRRA(
            self.cFunc_terminal_, self.CRRA)
        self.solution_terminal.hNrm = 0.0  # Don't track normalized human wealth
        self.solution_terminal.hLvl = lambda p: np.zeros_like(p)
        # But do track absolute human wealth by persistent income
        self.solution_terminal.mLvlMin = lambda p: np.zeros_like(p)
        # And minimum allowable market resources by perm inc

    def update_pLvlNextFunc(self):
        """
        A dummy method that creates a trivial pLvlNextFunc attribute that has
        no persistent income dynamics.  This method should be overwritten by
        subclasses in order to make (e.g.) an AR1 income process.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        pLvlNextFuncBasic = LinearInterp(np.array([0.0, 1.0]),
                                         np.array([0.0, 1.0]))
        self.pLvlNextFunc = self.T_cycle * [pLvlNextFuncBasic]
        self.add_to_time_vary("pLvlNextFunc")

    def install_retirement_func(self):
        """
        Installs a special pLvlNextFunc representing retirement in the correct
        element of self.pLvlNextFunc.  Draws on the attributes T_retire and
        pLvlNextFuncRet.  If T_retire is zero or pLvlNextFuncRet does not
        exist, this method does nothing.  Should only be called from within the
        method update_pLvlNextFunc, which ensures that time is flowing forward.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if (not hasattr(self, "pLvlNextFuncRet")) or self.T_retire == 0:
            return
        t = self.T_retire
        self.pLvlNextFunc[t] = self.pLvlNextFuncRet

    def update_pLvlGrid(self):
        """
        Update the grid of persistent income levels.  Currently only works for
        infinite horizon models (cycles=0) and lifecycle models (cycles=1).  Not
        clear what to do about cycles>1 because the distribution of persistent
        income will be different within a period depending on how many cycles
        have elapsed.  This method uses a simulation approach to generate the
        pLvlGrid at each period of the cycle, drawing on the initial distribution
        of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        LivPrbAll = np.array(self.LivPrb)

        # Simulate the distribution of persistent income levels by t_cycle in a lifecycle model
        if self.cycles == 1:
            pLvlNow = Lognormal(self.pLvlInitMean,
                                sigma=self.pLvlInitStd,
                                seed=31382).draw(self.AgentCount)
            pLvlGrid = []  # empty list of time-varying persistent income grids
            # Calculate distribution of persistent income in each period of lifecycle
            for t in range(len(self.PermShkStd)):
                if t > 0:
                    PermShkNow = self.PermShkDstn[t -
                                                  1].draw(N=self.AgentCount)
                    pLvlNow = self.pLvlNextFunc[t - 1](pLvlNow) * PermShkNow
                pLvlGrid.append(
                    get_percentiles(pLvlNow, percentiles=self.pLvlPctiles))

        # Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle)
        elif self.cycles == 0:
            T_long = 1000  # Number of periods to simulate to get to "stationary" distribution
            pLvlNow = Lognormal(mu=self.pLvlInitMean,
                                sigma=self.pLvlInitStd,
                                seed=31382).draw(self.AgentCount)
            t_cycle = np.zeros(self.AgentCount, dtype=int)
            for t in range(T_long):
                LivPrb = LivPrbAll[
                    t_cycle]  # Determine who dies and replace them with newborns
                draws = Uniform(seed=t).draw(self.AgentCount)
                who_dies = draws > LivPrb
                pLvlNow[who_dies] = Lognormal(self.pLvlInitMean,
                                              self.pLvlInitStd,
                                              seed=t + 92615).draw(
                                                  np.sum(who_dies))
                t_cycle[who_dies] = 0

                for j in range(self.T_cycle):  # Update persistent income
                    these = t_cycle == j
                    PermShkTemp = self.PermShkDstn[j].draw(N=np.sum(these))
                    pLvlNow[these] = self.pLvlNextFunc[j](
                        pLvlNow[these]) * PermShkTemp
                t_cycle = t_cycle + 1
                t_cycle[t_cycle == self.T_cycle] = 0

            # We now have a "long run stationary distribution", extract percentiles
            pLvlGrid = []  # empty list of time-varying persistent income grids
            for t in range(self.T_cycle):
                these = t_cycle == t
                pLvlGrid.append(
                    get_percentiles(pLvlNow[these],
                                    percentiles=self.pLvlPctiles))

        # Throw an error if cycles>1
        else:
            assert False, "Can only handle cycles=0 or cycles=1!"

        # Store the result and add attribute to time_vary
        self.pLvlGrid = pLvlGrid
        self.add_to_time_vary("pLvlGrid")

    def sim_birth(self, which_agents):
        """
        Makes new consumers for the given indices.  Initialized variables include aLvl and pLvl, as
        well as time variables t_age and t_cycle.  Normalized assets and persistent income levels
        are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        """
        # Get and store states for newly born agents
        N = np.sum(which_agents)  # Number of new consumers to make
        aNrmNow_new = Lognormal(self.aNrmInitMean,
                                self.aNrmInitStd,
                                seed=self.RNG.randint(0, 2**31 - 1)).draw(N)
        self.state_now['pLvl'][which_agents] = Lognormal(
            self.pLvlInitMean,
            self.pLvlInitStd,
            seed=self.RNG.randint(0, 2**31 - 1)).draw(N)
        self.state_now['aLvl'][
            which_agents] = aNrmNow_new * self.state_now['pLvl'][which_agents]
        self.t_age[
            which_agents] = 0  # How many periods since each agent was born
        self.t_cycle[
            which_agents] = 0  # Which period of the cycle each agent is currently in

    def transition(self):
        """
        Calculates updated values of normalized market resources
        and persistent income level for each
        agent.  Uses pLvlNow, aLvlNow, PermShkNow, TranShkNow.

        Parameters
        ----------
        None

        Returns
        -------
        pLvlNow
        mLvlNow
        """
        aLvlPrev = self.state_prev['aLvl']
        RfreeNow = self.get_Rfree()

        # Calculate new states: normalized market resources
        # and persistent income level
        pLvlNow = np.zeros_like(aLvlPrev)

        for t in range(self.T_cycle):
            these = t == self.t_cycle
            pLvlNow[these] = (
                self.pLvlNextFunc[t - 1](self.state_prev['pLvl'][these]) *
                self.shocks['PermShk'][these])

        #state value
        bLvlNow = RfreeNow * aLvlPrev  # Bank balances before labor income

        # Market resources after income - state value
        mLvlNow = bLvlNow + \
                  self.shocks['TranShk'] * \
                  pLvlNow

        return (pLvlNow, mLvlNow)

    def get_controls(self):
        """
        Calculates consumption for each consumer of this type using the consumption functions.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        cLvlNow = np.zeros(self.AgentCount) + np.nan
        MPCnow = np.zeros(self.AgentCount) + np.nan

        for t in range(self.T_cycle):
            these = t == self.t_cycle
            cLvlNow[these] = self.solution[t].cFunc(
                self.state_now["mLvl"][these], self.state_now['pLvl'][these])
            MPCnow[these] = self.solution[t].cFunc.derivativeX(
                self.state_now["mLvl"][these], self.state_now['pLvl'][these])
        self.controls["cLvl"] = cLvlNow
        self.MPCnow = MPCnow

    def get_poststates(self):
        """
        Calculates end-of-period assets for each consumer of this type.
        Identical to version in IndShockConsumerType but uses Lvl rather than Nrm variables.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.state_now['aLvl'] = self.state_now["mLvl"] - self.controls["cLvl"]
        # moves now to prev
        AgentType.get_poststates(self)
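
For context, a typical way to use this consumer type in HARK looks roughly like the sketch below. The import path and the cycles=0 call are assumptions based on the class shown above and on standard HARK usage, not a verified recipe for any specific release:

# Rough usage sketch; assumes HARK's standard module layout for this model.
from HARK.ConsumptionSaving.ConsGenIncProcessModel import GenIncProcessConsumerType

agent = GenIncProcessConsumerType(cycles=0)  # infinite-horizon version
agent.update()   # rebuild the income process, pLvlNextFunc, and pLvlGrid
agent.solve()    # backward induction using the solve_one_period set in __init__

# The solved consumption function is defined over (mLvl, pLvl)
cLvl = agent.solution[0].cFunc(2.0, 1.0)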
Code Example #3
File: ConsLaborModel.py  Project: alanlujan91/HARK
    def update_solution_terminal(self):
        """
        Updates the terminal period solution and solves for optimal consumption
        and labor when there is no future.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        t = -1
        TranShkGrid = self.TranShkGrid[t]
        LbrCost = self.LbrCost[t]
        WageRte = self.WageRte[t]

        bNrmGrid = np.insert(
            self.aXtraGrid, 0, 0.0
        )  # Add a point at b_t = 0 to make sure that bNrmGrid goes down to 0
        bNrmCount = bNrmGrid.size  # e.g., 201 gridpoints
        TranShkCount = TranShkGrid.size  # e.g., 7 transitory shock values
        bNrmGridTerm = np.tile(
            np.reshape(bNrmGrid, (bNrmCount, 1)),
            (1, TranShkCount
             ))  # Replicated bNrmGrid for each transitory shock theta_t
        TranShkGridTerm = np.tile(
            TranShkGrid, (bNrmCount, 1)
        )  # Tile the grid of transitory shocks for the terminal solution. (201,7)

        # Array of labor (leisure) values for terminal solution
        LsrTerm = np.minimum(
            (LbrCost / (1.0 + LbrCost)) * (bNrmGridTerm /
                                           (WageRte * TranShkGridTerm) + 1.0),
            1.0,
        )
        LsrTerm[0, 0] = 1.0
        LbrTerm = 1.0 - LsrTerm

        # Calculate market resources in terminal period, which is consumption
        mNrmTerm = bNrmGridTerm + LbrTerm * WageRte * TranShkGridTerm
        cNrmTerm = mNrmTerm  # Consume everything we have

        # Make a bilinear interpolation to represent the labor and consumption functions
        LbrFunc_terminal = BilinearInterp(LbrTerm, bNrmGrid, TranShkGrid)
        cFunc_terminal = BilinearInterp(cNrmTerm, bNrmGrid, TranShkGrid)

        # Compute the effective consumption value using consumption value and labor value at the terminal solution
        xEffTerm = LsrTerm**LbrCost * cNrmTerm
        vNvrsFunc_terminal = BilinearInterp(xEffTerm, bNrmGrid, TranShkGrid)
        vFunc_terminal = ValueFuncCRRA(vNvrsFunc_terminal, self.CRRA)

        # Using the envelope condition at the terminal solution to estimate the marginal value function
        vPterm = LsrTerm**LbrCost * CRRAutilityP(xEffTerm, gam=self.CRRA)
        vPnvrsTerm = CRRAutilityP_inv(
            vPterm, gam=self.CRRA
        )  # Evaluate the inverse of the CRRA marginal utility function at a given marginal value, vP

        vPnvrsFunc_terminal = BilinearInterp(vPnvrsTerm, bNrmGrid, TranShkGrid)
        vPfunc_terminal = MargValueFuncCRRA(
            vPnvrsFunc_terminal, self.CRRA)  # Get the Marginal Value function

        bNrmMin_terminal = ConstantFunction(
            0.0
        )  # Trivial function that returns the same real output for any input

        self.solution_terminal = ConsumerLaborSolution(
            cFunc=cFunc_terminal,
            LbrFunc=LbrFunc_terminal,
            vFunc=vFunc_terminal,
            vPfunc=vPfunc_terminal,
            bNrmMin=bNrmMin_terminal,
        )
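
The closed-form leisure expression used for the terminal period above can be checked on toy arrays. The parameter values and grids below are hypothetical and chosen only to illustrate the broadcasting and the cap at full leisure:

import numpy as np

LbrCost, WageRte = 0.5, 1.0                         # hypothetical labor cost and wage rate
bNrmGrid = np.array([0.0, 1.0, 2.0]).reshape(3, 1)  # bank balances as a column
TranShkGrid = np.array([0.8, 1.2]).reshape(1, 2)    # transitory shocks as a row

# Interior leisure choice, capped at 1.0 (supply no labor when wealthy enough)
LsrTerm = np.minimum((LbrCost / (1.0 + LbrCost)) *
                     (bNrmGrid / (WageRte * TranShkGrid) + 1.0), 1.0)
LbrTerm = 1.0 - LsrTerm
mNrmTerm = bNrmGrid + LbrTerm * WageRte * TranShkGrid  # consumed entirely in the last period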
Code Example #4
def solveConsPortfolio(solution_next, ShockDstn, IncomeDstn, RiskyDstn, LivPrb,
                       DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt,
                       aXtraGrid, ShareGrid, vFuncBool, AdjustPrb,
                       DiscreteShareBool, ShareLimit, IndepDstnBool):
    '''
    Solve the one period problem for a portfolio-choice consumer.
    
    Parameters
    ----------
    solution_next : PortfolioSolution
        Solution to next period's problem.
    ShockDstn : [np.array]
        List with four arrays: discrete probabilities, permanent income shocks,
        transitory income shocks, and risky returns.  This is only used if the
        input IndepDstnBool is False, indicating that income and return distributions
        can't be assumed to be independent.
    IncomeDstn : [np.array]
        List with three arrays: discrete probabilities, permanent income shocks,
        and transitory income shocks.  This is only used if the input IndepDstnBool
        is True, indicating that income and return distributions are independent.
    RiskyDstn : [np.array]
        List with two arrays: discrete probabilities and risky asset returns. This
        is only used if the input IndepDstnBool is True, indicating that income
        and return distributions are independent.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with.  In this model, it is *required* to be zero.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    ShareGrid : np.array
        Array of risky portfolio shares on which to define the interpolation
        of the consumption function when Share is fixed.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    AdjustPrb : float
        Probability that the agent will be able to update his portfolio share.
    DiscreteShareBool : bool
        Indicator for whether risky portfolio share should be optimized on the
        continuous [0,1] interval using the FOC (False), or instead only selected
        from the discrete set of values in ShareGrid (True).  If True, then
        vFuncBool must also be True.
    ShareLimit : float
        Limiting lower bound of risky portfolio share as mNrm approaches infinity.
    IndepDstnBool : bool
        Indicator for whether the income and risky return distributions are
        independent of each other, which can speed up the expectations step.

    Returns
    -------
    solution_now : PortfolioSolution
        The solution to the single period consumption-saving with portfolio choice
        problem.  Includes two consumption and risky share functions: one for when
        the agent can adjust his portfolio share (Adj) and one for when he can't (Fxd).
    '''
    # Make sure the individual is liquidity constrained.  Allowing a consumer to
    # borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
    if BoroCnstArt != 0.0:
        raise ValueError('PortfolioConsumerType must have BoroCnstArt=0.0!')

    # Make sure that if risky portfolio share is optimized only discretely, then
    # the value function is also constructed (else this task would be impossible).
    if (DiscreteShareBool and (not vFuncBool)):
        raise ValueError(
            'PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!'
        )

    # Define temporary functions for utility and its derivative and inverse
    u = lambda x: utility(x, CRRA)
    uP = lambda x: utilityP(x, CRRA)
    uPinv = lambda x: utilityP_inv(x, CRRA)
    n = lambda x: utility_inv(x, CRRA)
    nP = lambda x: utility_invP(x, CRRA)

    # Unpack next period's solution
    vPfuncAdj_next = solution_next.vPfuncAdj
    dvdmFuncFxd_next = solution_next.dvdmFuncFxd
    dvdsFuncFxd_next = solution_next.dvdsFuncFxd
    vFuncAdj_next = solution_next.vFuncAdj
    vFuncFxd_next = solution_next.vFuncFxd

    # Major method fork: (in)dependent risky asset return and income distributions
    if IndepDstnBool:  # If the distributions ARE independent...
        # Unpack the shock distribution
        IncPrbs_next = IncomeDstn.pmf
        PermShks_next = IncomeDstn.X[0]
        TranShks_next = IncomeDstn.X[1]
        Rprbs_next = RiskyDstn.pmf
        Risky_next = RiskyDstn.X
        zero_bound = (
            np.min(TranShks_next) == 0.
        )  # Flag for whether the natural borrowing constraint is zero
        RiskyMax = np.max(Risky_next)

        # bNrm represents R*a, balances after asset return shocks but before income.
        # This just uses the highest risky return as a rough shifter for the aXtraGrid.
        if zero_bound:
            aNrmGrid = aXtraGrid
            bNrmGrid = np.insert(RiskyMax * aXtraGrid, 0,
                                 np.min(Risky_next) * aXtraGrid[0])
        else:
            aNrmGrid = np.insert(aXtraGrid, 0,
                                 0.0)  # Add an asset point at exactly zero
            bNrmGrid = RiskyMax * np.insert(aXtraGrid, 0, 0.0)

        # Get grid and shock sizes, for easier indexing
        aNrm_N = aNrmGrid.size
        bNrm_N = bNrmGrid.size
        Share_N = ShareGrid.size
        Income_N = IncPrbs_next.size
        Risky_N = Rprbs_next.size

        # Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncomeDstn
        bNrm_tiled = np.tile(np.reshape(bNrmGrid, (bNrm_N, 1, 1)),
                             (1, Share_N, Income_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (bNrm_N, 1, Income_N))
        IncPrbs_tiled = np.tile(np.reshape(IncPrbs_next, (1, 1, Income_N)),
                                (bNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Income_N)),
                                 (bNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Income_N)),
                                 (bNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        mNrm_next = bNrm_tiled / (PermShks_tiled * PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (
                1. -
                AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(
            mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (
                1. -
                AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next

        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate intermediate marginal value of bank balances by taking expectations over income shocks
        temp_fac_A = uP(PermShks_tiled *
                        PermGroFac)  # Will use this in a couple places
        dvdb_intermed = np.sum(IncPrbs_tiled * temp_fac_A * dvdm_next, axis=2)
        dvdbNvrs_intermed = uPinv(dvdb_intermed)
        dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid,
                                               ShareGrid)
        dvdbFunc_intermed = MargValueFunc2D(dvdbNvrsFunc_intermed, CRRA)

        # Calculate intermediate value by taking expectations over income shocks
        temp_fac_B = (PermShks_tiled * PermGroFac)**(1. - CRRA
                                                     )  # Will use this below
        if vFuncBool:
            v_intermed = np.sum(IncPrbs_tiled * temp_fac_B * v_next, axis=2)
            vNvrs_intermed = n(v_intermed)
            vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid,
                                                ShareGrid)
            vFunc_intermed = ValueFunc2D(vNvrsFunc_intermed, CRRA)

        # Calculate intermediate marginal value of risky portfolio share by taking expectations
        dvds_intermed = np.sum(IncPrbs_tiled * temp_fac_B * dvds_next, axis=2)
        dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid)

        # Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)),
                             (1, Share_N, Risky_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (aNrm_N, 1, Risky_N))
        Rprbs_tiled = np.tile(np.reshape(Rprbs_next, (1, 1, Risky_N)),
                              (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Risky_N)),
                              (aNrm_N, Share_N, 1))

        # Calculate future realizations of bank balances bNrm
        Share_next = Share_tiled
        Rxs = Risky_tiled - Rfree
        Rport = Rfree + Share_next * Rxs
        bNrm_next = Rport * aNrm_tiled

        # Evaluate realizations of value and marginal value after asset returns are realized
        dvdb_next = dvdbFunc_intermed(bNrm_next, Share_next)
        dvds_next = dvdsFunc_intermed(bNrm_next, Share_next)
        if vFuncBool:
            v_next = vFunc_intermed(bNrm_next, Share_next)
        else:
            v_next = np.zeros_like(dvdb_next)

        # Calculate end-of-period marginal value of assets by taking expectations
        EndOfPrddvda = DiscFac * LivPrb * np.sum(
            Rprbs_tiled * Rport * dvdb_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)

        # Calculate end-of-period value by taking expectations
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(Rprbs_tiled * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            Rprbs_tiled * (Rxs * aNrm_tiled * dvdb_next + dvds_next), axis=2)

    else:  # If the distributions are NOT independent...
        # Unpack the shock distribution
        ShockPrbs_next = ShockDstn[0]
        PermShks_next = ShockDstn[1]
        TranShks_next = ShockDstn[2]
        Risky_next = ShockDstn[3]
        zero_bound = (
            np.min(TranShks_next) == 0.
        )  # Flag for whether the natural borrowing constraint is zero

        # Make tiled arrays to calculate future realizations of mNrm and Share; dimension order: mNrm, Share, shock
        if zero_bound:
            aNrmGrid = aXtraGrid
        else:
            aNrmGrid = np.insert(aXtraGrid, 0,
                                 0.0)  # Add an asset point at exactly zero
        aNrm_N = aNrmGrid.size
        Share_N = ShareGrid.size
        Shock_N = ShockPrbs_next.size
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)),
                             (1, Share_N, Shock_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (aNrm_N, 1, Shock_N))
        ShockPrbs_tiled = np.tile(np.reshape(ShockPrbs_next, (1, 1, Shock_N)),
                                  (aNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Shock_N)),
                                 (aNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Shock_N)),
                                 (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Shock_N)),
                              (aNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        Rport = (1. - Share_tiled) * Rfree + Share_tiled * Risky_tiled
        mNrm_next = Rport * aNrm_tiled / (PermShks_tiled *
                                          PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (
                1. -
                AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(
            mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (
                1. -
                AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next

        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate end-of-period marginal value of assets by taking expectations
        temp_fac_A = uP(PermShks_tiled *
                        PermGroFac)  # Will use this in a couple places
        EndOfPrddvda = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * Rport * temp_fac_A * dvdm_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)

        # Calculate end-of-period value by taking expectations
        temp_fac_B = (PermShks_tiled * PermGroFac)**(1. - CRRA
                                                     )  # Will use this below
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(
                ShockPrbs_tiled * temp_fac_B * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        Rxs = Risky_tiled - Rfree
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * (Rxs * aNrm_tiled * temp_fac_A * dvdm_next +
                               temp_fac_B * dvds_next),
            axis=2)

    # Major method fork: discrete vs continuous choice of risky portfolio share
    if DiscreteShareBool:  # Optimization of Share on the discrete set ShareGrid
        opt_idx = np.argmax(EndOfPrdv, axis=1)
        Share_now = ShareGrid[
            opt_idx]  # Best portfolio share is one with highest value
        cNrmAdj_now = EndOfPrddvdaNvrs[np.arange(
            aNrm_N), opt_idx]  # Take cNrm at that index as well
        if not zero_bound:
            Share_now[
                0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[
                0, -1]  # Consumption when aNrm=0 does not depend on Share

    else:  # Optimization of Share on continuous interval [0,1]
        # For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them
        FOC_s = EndOfPrddvds
        Share_now = np.zeros_like(
            aNrmGrid)  # Initialize to putting everything in safe asset
        cNrmAdj_now = np.zeros_like(aNrmGrid)
        constrained = FOC_s[:,
                            -1] > 0.  # If agent wants to put more than 100% into risky asset, he is constrained
        Share_now[constrained] = 1.0
        if not zero_bound:
            Share_now[
                0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[
                0, -1]  # Consumption when aNrm=0 does not depend on Share
        cNrmAdj_now[constrained] = EndOfPrddvdaNvrs[
            constrained, -1]  # Get consumption when share-constrained

        # For each value of aNrm, find the value of Share such that FOC-Share == 0.
        # This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
        crossing = np.logical_and(FOC_s[:, 1:] <= 0., FOC_s[:, :-1] >= 0.)
        for j in range(aNrm_N):
            if Share_now[j] == 0.:
                try:
                    idx = np.argwhere(crossing[j, :])[0][0]
                    bot_s = ShareGrid[idx]
                    top_s = ShareGrid[idx + 1]
                    bot_f = FOC_s[j, idx]
                    top_f = FOC_s[j, idx + 1]
                    bot_c = EndOfPrddvdaNvrs[j, idx]
                    top_c = EndOfPrddvdaNvrs[j, idx + 1]
                    alpha = 1. - top_f / (top_f - bot_f)
                    Share_now[j] = (1. - alpha) * bot_s + alpha * top_s
                    cNrmAdj_now[j] = (1. - alpha) * bot_c + alpha * top_c
                except:
                    print('No optimal controls found for a=' +
                          str(aNrmGrid[j]))

    # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
    mNrmAdj_now = aNrmGrid + cNrmAdj_now

    # Construct the risky share function when the agent can adjust
    if DiscreteShareBool:
        mNrmAdj_mid = (mNrmAdj_now[1:] + mNrmAdj_now[:-1]) / 2
        mNrmAdj_plus = mNrmAdj_mid * (1. + 1e-12)
        mNrmAdj_comb = (np.transpose(np.vstack(
            (mNrmAdj_mid, mNrmAdj_plus)))).flatten()
        mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb, 0, 0.0),
                                 mNrmAdj_now[-1])
        Share_comb = (np.transpose(np.vstack(
            (Share_now, Share_now)))).flatten()
        ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb)
    else:
        if zero_bound:
            Share_lower_bound = ShareLimit
        else:
            Share_lower_bound = 1.0
        Share_now = np.insert(Share_now, 0, Share_lower_bound)
        ShareFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0),
                                        Share_now,
                                        intercept_limit=ShareLimit,
                                        slope_limit=0.0)

    # Construct the consumption function when the agent can adjust
    cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0)
    cFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0), cNrmAdj_now)

    # Construct the marginal value (of mNrm) function when the agent can adjust
    vPfuncAdj_now = MargValueFunc(cFuncAdj_now, CRRA)

    # Construct the consumption function when the agent *can't* adjust the risky share, as well
    # as the marginal value of Share function
    cFuncFxd_by_Share = []
    dvdsFuncFxd_by_Share = []
    for j in range(Share_N):
        cNrmFxd_temp = EndOfPrddvdaNvrs[:, j]
        mNrmFxd_temp = aNrmGrid + cNrmFxd_temp
        cFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(cNrmFxd_temp, 0, 0.0)))
        dvdsFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(EndOfPrddvds[:, j], 0, EndOfPrddvds[0, j])))
    cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid)
    dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid)

    # The share function when the agent can't adjust his portfolio is trivial
    ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2)

    # Construct the marginal value of mNrm function when the agent can't adjust his share
    dvdmFuncFxd_now = MargValueFunc2D(cFuncFxd_now, CRRA)

    # If the value function has been requested, construct it now
    if vFuncBool:
        # First, make an end-of-period value function over aNrm and Share
        EndOfPrdvNvrsFunc = BilinearInterp(EndOfPrdvNvrs, aNrmGrid, ShareGrid)
        EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc, CRRA)

        # Construct the value function when the agent can adjust his portfolio
        mNrm_temp = aXtraGrid  # Just use aXtraGrid as our grid of mNrm values
        cNrm_temp = cFuncAdj_now(mNrm_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        Share_temp = ShareFuncAdj_now(mNrm_temp)
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncAdj = CubicInterp(
            np.insert(mNrm_temp, 0, 0.0),  # x_list
            np.insert(vNvrs_temp, 0, 0.0),  # f_list
            np.insert(vNvrsP_temp, 0, vNvrsP_temp[0]))  # dfdx_list
        vFuncAdj_now = ValueFunc(
            vNvrsFuncAdj, CRRA)  # Re-curve the pseudo-inverse value function

        # Construct the value function when the agent *can't* adjust his portfolio
        mNrm_temp = np.tile(np.reshape(aXtraGrid, (aXtraGrid.size, 1)),
                            (1, Share_N))
        Share_temp = np.tile(np.reshape(ShareGrid, (1, Share_N)),
                             (aXtraGrid.size, 1))
        cNrm_temp = cFuncFxd_now(mNrm_temp, Share_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncFxd_by_Share = []
        for j in range(Share_N):
            vNvrsFuncFxd_by_Share.append(
                CubicInterp(
                    np.insert(mNrm_temp[:, 0], 0, 0.0),  # x_list
                    np.insert(vNvrs_temp[:, j], 0, 0.0),  # f_list
                    np.insert(vNvrsP_temp[:, j], 0,
                              vNvrsP_temp[0, j])))  # dfdx_list
        vNvrsFuncFxd = LinearInterpOnInterp1D(vNvrsFuncFxd_by_Share, ShareGrid)
        vFuncFxd_now = ValueFunc2D(vNvrsFuncFxd, CRRA)

    else:  # If vFuncBool is False, fill in dummy values
        vFuncAdj_now = None
        vFuncFxd_now = None

    # Create and return this period's solution
    return PortfolioSolution(cFuncAdj=cFuncAdj_now,
                             ShareFuncAdj=ShareFuncAdj_now,
                             vPfuncAdj=vPfuncAdj_now,
                             vFuncAdj=vFuncAdj_now,
                             cFuncFxd=cFuncFxd_now,
                             ShareFuncFxd=ShareFuncFxd_now,
                             dvdmFuncFxd=dvdmFuncFxd_now,
                             dvdsFuncFxd=dvdsFuncFxd_now,
                             vFuncFxd=vFuncFxd_now)
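
Two pieces of algebra recur throughout the solver above: the portfolio return written as the risk-free rate plus the share times the excess return, and the linear interpolation of the share-FOC zero crossing in the continuous-share branch. A small self-contained sketch with made-up numbers:

import numpy as np

# Portfolio return decomposition (toy values, not taken from the solver)
Rfree = 1.02
Risky = np.array([0.85, 1.00, 1.25])  # hypothetical risky return realizations
Share = 0.6                           # a candidate risky share
Rxs = Risky - Rfree                   # excess return on the risky asset
Rport = Rfree + Share * Rxs           # same as (1 - Share) * Rfree + Share * Risky
assert np.allclose(Rport, (1. - Share) * Rfree + Share * Risky)

# Zero crossing of the FOC in the share dimension, interpolated as in the code above
bot_s, top_s = 0.4, 0.6     # bracketing share gridpoints
bot_f, top_f = 0.03, -0.05  # FOC positive below the optimum, negative above
alpha = 1. - top_f / (top_f - bot_f)
Share_opt = (1. - alpha) * bot_s + alpha * top_s  # = 0.475, where the linearized FOC hits zero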
Code Example #5
File: test_interpolation.py  Project: HTS420/HARK_TS
    def test_same_length(self):
        bilinear = BilinearInterp(self.f_array, self.x_array, self.y_array)
        self.assertEqual(bilinear(2, 2), 4.0)
        bilinear = BilinearInterp(self.f_array, self.x_array, self.y_array_t)
        self.assertEqual(bilinear(2, 2), 4.0)
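
The fixture this test relies on is not shown on this page. A minimal setup consistent with the assertions would use f(x, y) = x * y on a shared grid, so the interpolant is exact at grid points; y_array_t is presumably the same grid stored in a different form, so it is omitted from this hypothetical sketch:

import numpy as np
from HARK.interpolation import BilinearInterp

# Hypothetical fixture: f(x, y) = x * y, so bilinear(2, 2) recovers 2 * 2 = 4.0 exactly
x_array = np.array([0.0, 1.0, 2.0, 3.0])
y_array = np.array([0.0, 1.0, 2.0, 3.0])
f_array = np.outer(x_array, y_array)

bilinear = BilinearInterp(f_array, x_array, y_array)
assert bilinear(2, 2) == 4.0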