Example #1
 def test_same_length(self):
     cube = CubicInterp(self.x_list, self.z_list, self.dydx_list)
     self.assertEqual(cube(1.5), 2.25)
     cube = CubicInterp(self.x_array, self.z_array, self.dydx_array)
     self.assertEqual(cube(1.5), 2.25)
     cube = CubicInterp(self.x_array_t, self.z_array_t, self.dydx_array_t)
     self.assertEqual(cube(1.5), 2.25)
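The setUp fixture that defines self.x_list, self.z_list, and self.dydx_list is not shown. The assertions hold for any data sampled from a quadratic, because a cubic Hermite interpolant built with exact derivative data reproduces polynomials of degree three or lower. A minimal, hypothetical stand-in for the missing fixture (names and values assumed, not taken from the actual test suite):

import numpy as np
from HARK.interpolation import CubicInterp

# Hypothetical fixture: sample f(x) = x**2 and its derivative on a small grid.
x_list = [0.0, 1.0, 2.0, 3.0]
z_list = [x ** 2 for x in x_list]
dydx_list = [2.0 * x for x in x_list]

x_array = np.array(x_list)
z_array = x_array ** 2
dydx_array = 2.0 * x_array

# CubicInterp reproduces the quadratic exactly between gridpoints,
# so cube(1.5) evaluates to 2.25 as the assertions above expect.
cube = CubicInterp(x_list, z_list, dydx_list)
print(cube(1.5))  # 2.25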
Example #2
    def make_EndOfPrdvFuncCond(self):
        """
        Construct the end-of-period value function conditional on next period's
        state.  NOTE: It might be possible to eliminate this method and replace
        it with ConsIndShockSolver.make_EndOfPrdvFunc, but the self.X_cond
        variables must be renamed.

        Parameters
        ----------
        none

        Returns
        -------
        EndofPrdvFunc_cond : ValueFuncCRRA
            The end-of-period value function conditional on a particular state
            occurring in the next period.
        """
        VLvlNext = (
            self.PermShkVals_temp ** (1.0 - self.CRRA)
            * self.PermGroFac ** (1.0 - self.CRRA)
        ) * self.vFuncNext(self.mNrmNext)
        EndOfPrdv_cond = self.DiscFacEff * np.sum(VLvlNext * self.ShkPrbs_temp, axis=0)
        EndOfPrdvNvrs_cond = self.uinv(EndOfPrdv_cond)
        EndOfPrdvNvrsP_cond = self.EndOfPrdvP_cond * self.uinvP(EndOfPrdv_cond)
        EndOfPrdvNvrs_cond = np.insert(EndOfPrdvNvrs_cond, 0, 0.0)
        EndOfPrdvNvrsP_cond = np.insert(EndOfPrdvNvrsP_cond, 0, EndOfPrdvNvrsP_cond[0])
        aNrm_temp = np.insert(self.aNrm_cond, 0, self.BoroCnstNat)
        EndOfPrdvNvrsFunc_cond = CubicInterp(
            aNrm_temp, EndOfPrdvNvrs_cond, EndOfPrdvNvrsP_cond
        )
        EndofPrdvFunc_cond = ValueFuncCRRA(EndOfPrdvNvrsFunc_cond, self.CRRA)
        return EndofPrdvFunc_cond
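Several examples in this collection use the same "decurve/recurve" pattern that appears here: the CRRA value function is strongly curved, so it is passed through the inverse utility function before interpolation (uinv, with the chain-rule factor uinvP for the derivative inputs) and then re-wrapped with ValueFuncCRRA. A minimal plain-numpy sketch, with the standard CRRA inverse-utility formulas written out rather than taken from HARK, shows why the transformed object is far easier to interpolate:

import numpy as np

rho = 2.0                                                   # CRRA coefficient, assumed for illustration
u = lambda c: c ** (1.0 - rho) / (1.0 - rho)                # CRRA utility
uinv = lambda v: ((1.0 - rho) * v) ** (1.0 / (1.0 - rho))   # inverse utility
uinvP = lambda v: ((1.0 - rho) * v) ** (rho / (1.0 - rho))  # derivative of the inverse

a_grid = np.linspace(0.5, 10.0, 20)
v = 1.5 * u(a_grid)        # a stand-in "curved" end-of-period value function
vNvrs = uinv(v)            # "decurved" value: exactly linear in a here, easy to interpolate
dNvrs_dv = uinvP(v)        # chain-rule factor: the examples above build vNvrsP = vP * uinvP(v)
print(vNvrs[:3], dNvrs_dv[:3])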
Example #3
    def makeEndOfPrdvPfuncCond(self):
        '''
        Construct the end-of-period marginal value function conditional on next
        period's state.

        Parameters
        ----------
        None

        Returns
        -------
        EndofPrdvPfunc_cond : MargValueFunc
            The end-of-period marginal value function conditional on a particular
            state occurring in the succeeding period.
        '''
        # Get data to construct the end-of-period marginal value function (conditional on next state)
        self.aNrm_cond = self.prepareToCalcEndOfPrdvP()
        self.EndOfPrdvP_cond = self.calcEndOfPrdvPcond()
        EndOfPrdvPnvrs_cond = self.uPinv(self.EndOfPrdvP_cond) # "decurved" marginal value
        if self.CubicBool:
            EndOfPrdvPP_cond = self.calcEndOfPrdvPP()
            EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond*self.uPinvP(self.EndOfPrdvP_cond) # "decurved" marginal marginal value

        # Construct the end-of-period marginal value function conditional on the next state.
        if self.CubicBool:
            EndOfPrdvPnvrsFunc_cond = CubicInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
                                                  EndOfPrdvPnvrsP_cond,lower_extrap=True)
        else:
            EndOfPrdvPnvrsFunc_cond = LinearInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
                                                   lower_extrap=True)
        EndofPrdvPfunc_cond = MargValueFunc(EndOfPrdvPnvrsFunc_cond,self.CRRA) # "recurve" the interpolated marginal value function
        return EndofPrdvPfunc_cond
    def post_solve(self):
        """
        This method adds consumption at m=0 to the list of stable arm points,
        then constructs the consumption function as a cubic interpolation over
        those points.  Should be run after the backshooting routine is complete.

        Parameters
        ----------
        none

        Returns
        -------
        none
        """
        # Add bottom point to the stable arm points
        self.solution[0].mNrm_list.insert(0, 0.0)
        self.solution[0].cNrm_list.insert(0, 0.0)
        self.solution[0].MPC_list.insert(0, self.MPCmax)

        # Construct an interpolation of the consumption function from the stable arm points
        self.solution[0].cFunc = CubicInterp(
            self.solution[0].mNrm_list,
            self.solution[0].cNrm_list,
            self.solution[0].MPC_list,
            self.PFMPC * (self.h - 1.0),
            self.PFMPC,
        )
        self.solution[0].cFunc_U = lambda m: self.PFMPC * m
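The two extra positional arguments passed to CubicInterp here, self.PFMPC * (self.h - 1.0) and self.PFMPC, are its intercept_limit and slope_limit parameters (the same roles the names cFuncLimitIntercept and cFuncLimitSlope suggest in Example #13): the linear function the interpolant should approach when extrapolating above the highest gridpoint. A self-contained sketch with made-up data, not the model objects above, shows the construction and the extrapolation behavior:

import numpy as np
from HARK.interpolation import CubicInterp

# Made-up consumption-function-like data: c(m) increasing and concave, MPC falling toward 0.5
m_grid = np.array([0.0, 1.0, 2.0, 4.0, 8.0])
c_grid = np.array([0.0, 0.9, 1.6, 2.9, 5.2])
MPC_grid = np.array([1.0, 0.8, 0.7, 0.6, 0.55])

intercept_limit = 1.0  # hypothetical limiting intercept (the role MPCmin * hNrm plays in the models above)
slope_limit = 0.5      # hypothetical limiting slope (the role MPCmin plays above)

cFunc = CubicInterp(m_grid, c_grid, MPC_grid, intercept_limit, slope_limit)
m_big = np.array([10.0, 50.0, 200.0])
print(cFunc(m_big))                           # extrapolated values above the grid
print(intercept_limit + slope_limit * m_big)  # the limiting line they should approach for large m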
Example #5
    def make_EndOfPrdvFuncCond(self):
        """
        Construct the end-of-period value function conditional on next period's
        state.

        Parameters
        ----------
        none

        Returns
        -------
        EndOfPrdvFunc_cond : ValueFuncCRRA
            The end-of-period value function conditional on a particular state
            occurring in the next period.
        """
        def v_lvl_next(shocks, a_nrm):
            return (shocks[0]**(1.0 - self.CRRA) *
                    self.PermGroFac**(1.0 - self.CRRA)) * self.vFuncNext(
                        self.m_nrm_next(shocks, a_nrm))

        EndOfPrdv_cond = self.DiscFacEff * calc_expectation(
            self.IncShkDstn, v_lvl_next, self.aNrmNow)
        EndOfPrdvNvrs = self.uinv(
            EndOfPrdv_cond)  # value transformed through inverse utility
        EndOfPrdvNvrsP = self.EndOfPrdvP_cond * self.uinvP(EndOfPrdv_cond)
        EndOfPrdvNvrs = np.insert(EndOfPrdvNvrs, 0, 0.0)
        EndOfPrdvNvrsP = np.insert(
            EndOfPrdvNvrsP, 0, EndOfPrdvNvrsP[0]
        )  # This is a very good approximation, vNvrsPP = 0 at the asset minimum
        aNrm_temp = np.insert(self.aNrmNow, 0, self.BoroCnstNat)
        EndOfPrdvNvrsFunc = CubicInterp(aNrm_temp, EndOfPrdvNvrs,
                                        EndOfPrdvNvrsP)
        EndOfPrdvFunc_cond = ValueFuncCRRA(EndOfPrdvNvrsFunc, self.CRRA)

        return EndOfPrdvFunc_cond
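The calc_expectation call above integrates v_lvl_next over the income-shock distribution at every point of aNrmNow. Conceptually it is a probability-weighted sum of the function evaluated at each discrete shock realization; a plain-numpy sketch of that computation, using hypothetical shock atoms and probabilities rather than the actual IncShkDstn object:

import numpy as np

# Hypothetical discrete income-shock distribution: rows are (PermShk, TranShk) atoms
shock_atoms = np.array([[0.9, 0.7],
                        [1.0, 1.0],
                        [1.1, 1.3]])
shock_prbs = np.array([0.25, 0.50, 0.25])

a_nrm = np.array([0.5, 1.0, 2.0])         # end-of-period normalized assets
Rfree, PermGroFac, CRRA = 1.03, 1.01, 2.0
vFuncNext = lambda m: -1.0 / m            # stand-in next-period value function

def v_lvl_next(shock, a):
    perm, tran = shock
    m_next = Rfree / (PermGroFac * perm) * a + tran   # the role of self.m_nrm_next above
    return (perm * PermGroFac) ** (1.0 - CRRA) * vFuncNext(m_next)

# Expectation over the shocks at each asset gridpoint: what the calc_expectation call returns here
EndOfPrdv = sum(p * v_lvl_next(s, a_nrm) for s, p in zip(shock_atoms, shock_prbs))
print(EndOfPrdv)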
Example #6
    def make_vFunc(self, solution):
        """
        Construct the value function for each current state.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to the single period consumption-saving problem. Must
            have a consumption function cFunc (using cubic or linear splines) as
            a list with elements corresponding to the current Markov state.  E.g.
            solution.cFunc[0] is the consumption function when in the i=0 Markov
            state this period.

        Returns
        -------
        vFuncNow : [ValueFuncCRRA]
            A list of value functions (defined over normalized market resources
            m) for each current period Markov state.
        """
        vFuncNow = []  # Initialize an empty list of value functions
        # Loop over each current period state and construct the value function
        for i in range(self.StateCount):
            # Make state-conditional grids of market resources and consumption
            mNrmMin = self.mNrmMin_list[i]
            mGrid = mNrmMin + self.aXtraGrid
            cGrid = solution.cFunc[i](mGrid)
            aGrid = mGrid - cGrid

            # Calculate end-of-period value at each gridpoint
            EndOfPrdv_all = np.zeros((self.StateCount, self.aXtraGrid.size))
            for j in range(self.StateCount):
                if self.possible_transitions[i, j]:
                    EndOfPrdv_all[j, :] = self.EndOfPrdvFunc_list[j](aGrid)
            EndOfPrdv = np.dot(self.MrkvArray[i, :], EndOfPrdv_all)

            # Calculate (normalized) value and marginal value at each gridpoint
            vNrmNow = self.u(cGrid) + EndOfPrdv
            vPnow = self.uP(cGrid)

            # Make a "decurved" value function with the inverse utility function
            vNvrs = self.uinv(vNrmNow)  # value transformed through inverse utility
            vNvrsP = vPnow * self.uinvP(vNrmNow)
            mNrm_temp = np.insert(mGrid, 0, mNrmMin)  # add the lower bound
            vNvrs = np.insert(vNvrs, 0, 0.0)
            vNvrsP = np.insert(
                vNvrsP, 0, self.MPCmaxEff[i] ** (-self.CRRA / (1.0 - self.CRRA))
            )
            MPCminNvrs = self.MPCminNow[i] ** (-self.CRRA / (1.0 - self.CRRA))
            vNvrsFunc_i = CubicInterp(
                mNrm_temp, vNvrs, vNvrsP, MPCminNvrs * self.hNrmNow[i], MPCminNvrs
            )

            # "Recurve" the decurved value function and add it to the list
            vFunc_i = ValueFuncCRRA(vNvrsFunc_i, self.CRRA)
            vFuncNow.append(vFunc_i)
        return vFuncNow
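The value inserted at the bottom of vNvrsP, MPCmaxEff[i] ** (-CRRA / (1.0 - CRRA)), is the slope of the pseudo-inverse value function at the lower bound of m, and MPCminNvrs plays the analogous role as the limiting slope at the top. A heuristic way to see this: near the bound the value function behaves approximately like v(m) = kappa**(-rho) * u(m - mNrmMin), with kappa the MPC there, so uinv(v) is approximately kappa**(-rho/(1-rho)) * (m - mNrmMin) and its slope is kappa**(-rho/(1-rho)). A quick numerical check of that relation, a sketch with assumed kappa and rho rather than HARK code:

import numpy as np

rho, kappa = 2.0, 0.9                        # assumed CRRA coefficient and MPC at the bound
u = lambda c: c ** (1.0 - rho) / (1.0 - rho)
uinv = lambda v: ((1.0 - rho) * v) ** (1.0 / (1.0 - rho))

m = np.array([1e-6, 2e-6])                   # distances just above the lower bound of m
v = kappa ** (-rho) * u(m)                   # approximate value near the constraint
slope = np.diff(uinv(v)) / np.diff(m)        # numerical slope of the "decurved" value
print(slope, kappa ** (-rho / (1.0 - rho)))  # both equal 0.81 here: the formulas agree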
Example #7
    def makevFunc(self, solution):
        '''
        Make the beginning-of-period value function (unconditional on the shock).

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, which must include the
            consumption function.

        Returns
        -------
        vFuncNow : ValueFunc
            A representation of the value function for this period, defined over
            normalized market resources m: v = vFuncNow(m).
        '''
        # Compute expected value and marginal value on a grid of market resources,
        # accounting for all of the discrete preference shocks
        PrefShkCount = self.PrefShkVals.size
        mNrm_temp = self.mNrmMinNow + self.aXtraGrid
        vNrmNow = np.zeros_like(mNrm_temp)
        vPnow = np.zeros_like(mNrm_temp)
        for j in range(PrefShkCount):
            this_shock = self.PrefShkVals[j]
            this_prob = self.PrefShkPrbs[j]
            cNrmNow = solution.cFunc(mNrm_temp,
                                     this_shock * np.ones_like(mNrm_temp))
            aNrmNow = mNrm_temp - cNrmNow
            vNrmNow += this_prob * (this_shock * self.u(cNrmNow) +
                                    self.EndOfPrdvFunc(aNrmNow))
            vPnow += this_prob * this_shock * self.uP(cNrmNow)

        # Construct the beginning-of-period value function
        vNvrs = self.uinv(vNrmNow)  # value transformed through inverse utility
        vNvrsP = vPnow * self.uinvP(vNrmNow)
        mNrm_temp = np.insert(mNrm_temp, 0, self.mNrmMinNow)
        vNvrs = np.insert(vNvrs, 0, 0.0)
        vNvrsP = np.insert(vNvrsP, 0,
                           self.MPCmaxEff**(-self.CRRA / (1.0 - self.CRRA)))
        MPCminNvrs = self.MPCminNow**(-self.CRRA / (1.0 - self.CRRA))
        vNvrsFuncNow = CubicInterp(mNrm_temp, vNvrs, vNvrsP,
                                   MPCminNvrs * self.hNrmNow, MPCminNvrs)
        vFuncNow = ValueFunc(vNvrsFuncNow, self.CRRA)
        return vFuncNow
Example #8
    def makeCubiccFunc(self,mNrm,cNrm):
        '''
        Make a cubic interpolation to represent the (unconstrained) consumption
        function conditional on the current period state.

        Parameters
        ----------
        mNrm : np.array
            Array of normalized market resource values for interpolation.
        cNrm : np.array
            Array of normalized consumption values for interpolation.

        Returns
        -------
        cFuncUnc: an instance of HARK.interpolation.CubicInterp
        '''
        cFuncUnc = CubicInterp(mNrm,cNrm,self.MPC_temp_j,self.MPCminNow_j*self.hNrmNow_j,
                               self.MPCminNow_j)
        return cFuncUnc
    def make_cubic_cFunc(self, mLvl, pLvl, cLvl):
        """
        Makes a quasi-cubic spline interpolation of the unconstrained consumption
        function for this period.  Function is cubic splines with respect to mLvl,
        but linear in pLvl.

        Parameters
        ----------
        mLvl : np.array
            Market resource points for interpolation.
        pLvl : np.array
            Persistent income level points for interpolation.
        cLvl : np.array
            Consumption points for interpolation.

        Returns
        -------
        cFuncUnc : CubicInterp
            The unconstrained consumption function for this period.
        """
        # Calculate the MPC at each gridpoint
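        # By the endogenous grid method's first-order condition, uP(c(a)) = EndOfPrdvP(a) at each
        # gridpoint; differentiating gives uPP(c) * dc/da = EndOfPrdvPP, so dcda = EndOfPrdvPP / uPP(c).
        # Since m = a + c on the endogenous grid, dm/da = 1 + dc/da, hence MPC = dc/dm = dcda / (dcda + 1).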
        EndOfPrdvPP = (self.DiscFacEff * self.Rfree * self.Rfree * np.sum(
            self.vPPfuncNext(self.mLvlNext, self.pLvlNext) * self.ShkPrbs_temp,
            axis=0,
        ))
        dcda = EndOfPrdvPP / self.uPP(np.array(cLvl[1:, 1:]))
        MPC = dcda / (dcda + 1.0)
        MPC = np.concatenate((np.reshape(MPC[:, 0], (MPC.shape[0], 1)), MPC),
                             axis=1)
        # Stick an extra MPC value at bottom; MPCmax doesn't work
        MPC = np.concatenate((self.MPCminNow * np.ones(
            (1, self.aXtraGrid.size + 1)), MPC),
                             axis=0)

        # Make cubic consumption function with respect to mLvl for each persistent income level
        cFunc_by_pLvl_list = []  # list of consumption functions for each pLvl
        for j in range(pLvl.shape[0]):
            pLvl_j = pLvl[j, 0]
            m_temp = mLvl[j, :] - self.BoroCnstNat(pLvl_j)
            c_temp = cLvl[
                j, :]  # Make a cubic consumption function for this pLvl
            MPC_temp = MPC[j, :]
            if pLvl_j > 0:
                cFunc_by_pLvl_list.append(
                    CubicInterp(
                        m_temp,
                        c_temp,
                        MPC_temp,
                        lower_extrap=True,
                        slope_limit=self.MPCminNow,
                        intercept_limit=self.MPCminNow * self.hLvlNow(pLvl_j),
                    ))
            else:  # When pLvl=0, cFunc is linear
                cFunc_by_pLvl_list.append(
                    LinearInterp(m_temp, c_temp, lower_extrap=True))
        pLvl_list = pLvl[:, 0]
        cFuncUncBase = LinearInterpOnInterp1D(
            cFunc_by_pLvl_list, pLvl_list)  # Combine all linear cFuncs
        cFuncUnc = VariableLowerBoundFunc2D(cFuncUncBase, self.BoroCnstNat)
        # Re-adjust for lower bound of natural borrowing constraint
        return cFuncUnc
    def make_vFunc(self, solution):
        """
        Creates the value function for this period, defined over market resources
        m and persistent income p.  self must have the attribute EndOfPrdvFunc in
        order to execute.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, which must include the
            consumption function.

        Returns
        -------
        vFuncNow : ValueFuncCRRA
            A representation of the value function for this period, defined over
            market resources m and persistent income p: v = vFuncNow(m,p).
        """
        mSize = self.aXtraGrid.size
        pSize = self.pLvlGrid.size

        # Compute expected value and marginal value on a grid of market resources
        pLvl_temp = np.tile(self.pLvlGrid,
                            (mSize, 1))  # Tile pLvl across m values
        mLvl_temp = (np.tile(self.mLvlMinNow(self.pLvlGrid), (mSize, 1)) +
                     np.tile(np.reshape(self.aXtraGrid, (mSize, 1)),
                             (1, pSize)) * pLvl_temp)
        cLvlNow = solution.cFunc(mLvl_temp, pLvl_temp)
        aLvlNow = mLvl_temp - cLvlNow
        vNow = self.u(cLvlNow) + self.EndOfPrdvFunc(aLvlNow, pLvl_temp)
        vPnow = self.uP(cLvlNow)

        # Calculate pseudo-inverse value and its first derivative (wrt mLvl)
        vNvrs = self.uinv(vNow)  # value transformed through inverse utility
        vNvrsP = vPnow * self.uinvP(vNow)

        # Add data at the lower bound of m
        mLvl_temp = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),
                                               (1, pSize)), mLvl_temp),
                                   axis=0)
        vNvrs = np.concatenate((np.zeros((1, pSize)), vNvrs), axis=0)
        vNvrsP = np.concatenate((np.reshape(vNvrsP[0, :],
                                            (1, vNvrsP.shape[1])), vNvrsP),
                                axis=0)

        # Add data at the lower bound of p
        MPCminNvrs = self.MPCminNow**(-self.CRRA / (1.0 - self.CRRA))
        m_temp = np.reshape(mLvl_temp[:, 0], (mSize + 1, 1))
        mLvl_temp = np.concatenate((m_temp, mLvl_temp), axis=1)
        vNvrs = np.concatenate((MPCminNvrs * m_temp, vNvrs), axis=1)
        vNvrsP = np.concatenate((MPCminNvrs * np.ones((mSize + 1, 1)), vNvrsP),
                                axis=1)

        # Construct the pseudo-inverse value function
        vNvrsFunc_list = []
        for j in range(pSize + 1):
            pLvl = np.insert(self.pLvlGrid, 0, 0.0)[j]
            vNvrsFunc_list.append(
                CubicInterp(
                    mLvl_temp[:, j] - self.mLvlMinNow(pLvl),
                    vNvrs[:, j],
                    vNvrsP[:, j],
                    MPCminNvrs * self.hLvlNow(pLvl),
                    MPCminNvrs,
                ))
        vNvrsFuncBase = LinearInterpOnInterp1D(
            vNvrsFunc_list, np.insert(self.pLvlGrid, 0,
                                      0.0))  # Value function "shifted"
        vNvrsFuncNow = VariableLowerBoundFunc2D(vNvrsFuncBase, self.mLvlMinNow)

        # "Re-curve" the pseudo-inverse value function into the value function
        vFuncNow = ValueFuncCRRA(vNvrsFuncNow, self.CRRA)
        return vFuncNow
    def make_EndOfPrdvFunc(self, EndOfPrdvP):
        """
        Construct the end-of-period value function for this period, storing it
        as an attribute of self for use by other methods.

        Parameters
        ----------
        EndOfPrdvP : np.array
            Array of end-of-period marginal value of assets corresponding to the
            asset values in self.aLvlNow x self.pLvlGrid.

        Returns
        -------
        none
        """
        vLvlNext = self.vFuncNext(
            self.mLvlNext,
            self.pLvlNext)  # value in many possible future states
        EndOfPrdv = self.DiscFacEff * np.sum(
            vLvlNext * self.ShkPrbs_temp,
            axis=0)  # expected value, averaging across states
        EndOfPrdvNvrs = self.uinv(
            EndOfPrdv)  # value transformed through inverse utility
        EndOfPrdvNvrsP = EndOfPrdvP * self.uinvP(EndOfPrdv)

        # Add points at mLvl=zero
        EndOfPrdvNvrs = np.concatenate((np.zeros(
            (self.pLvlGrid.size, 1)), EndOfPrdvNvrs),
                                       axis=1)
        if hasattr(self, "MedShkDstn"):
            EndOfPrdvNvrsP = np.concatenate((np.zeros(
                (self.pLvlGrid.size, 1)), EndOfPrdvNvrsP),
                                            axis=1)
        else:
            EndOfPrdvNvrsP = np.concatenate(
                (
                    np.reshape(EndOfPrdvNvrsP[:, 0], (self.pLvlGrid.size, 1)),
                    EndOfPrdvNvrsP,
                ),
                axis=1,
            )
            # This is a very good approximation, vNvrsPP = 0 at the asset minimum
        aLvl_temp = np.concatenate(
            (
                np.reshape(self.BoroCnstNat(self.pLvlGrid),
                           (self.pLvlGrid.size, 1)),
                self.aLvlNow,
            ),
            axis=1,
        )

        # Make an end-of-period value function for each persistent income level in the grid
        EndOfPrdvNvrsFunc_list = []
        for p in range(self.pLvlGrid.size):
            EndOfPrdvNvrsFunc_list.append(
                CubicInterp(
                    aLvl_temp[p, :] - self.BoroCnstNat(self.pLvlGrid[p]),
                    EndOfPrdvNvrs[p, :],
                    EndOfPrdvNvrsP[p, :],
                ))
        EndOfPrdvNvrsFuncBase = LinearInterpOnInterp1D(EndOfPrdvNvrsFunc_list,
                                                       self.pLvlGrid)

        # Re-adjust the combined end-of-period value function to account for the natural borrowing constraint shifter
        EndOfPrdvNvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvNvrsFuncBase,
                                                     self.BoroCnstNat)
        self.EndOfPrdvFunc = ValueFuncCRRA(EndOfPrdvNvrsFunc, self.CRRA)
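The two-dimensional constructions in this example all follow the same pattern: build a one-dimensional interpolant for each pLvl gridpoint, combine them with LinearInterpOnInterp1D (which interpolates linearly between those 1D functions in the p direction), and then shift the result by the natural borrowing constraint with VariableLowerBoundFunc2D. A self-contained sketch of the first two steps with toy data, not the model objects above:

import numpy as np
from HARK.interpolation import LinearInterp, LinearInterpOnInterp1D

# Toy 1D functions at two income levels: f(x, p=1) = x and f(x, p=2) = 2*x
x_grid = np.array([0.0, 1.0, 2.0, 3.0])
f_at_p1 = LinearInterp(x_grid, 1.0 * x_grid)
f_at_p2 = LinearInterp(x_grid, 2.0 * x_grid)

f2D = LinearInterpOnInterp1D([f_at_p1, f_at_p2], np.array([1.0, 2.0]))

# Evaluating at p = 1.5 mixes the two 1D functions equally: 0.5*x + 0.5*(2*x) = 1.5*x
print(f2D(np.array([1.0, 2.0]), np.array([1.5, 1.5])))  # approximately [1.5, 3.0]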
Example #12
def solveConsPortfolio(solution_next, ShockDstn, IncomeDstn, RiskyDstn, LivPrb,
                       DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt,
                       aXtraGrid, ShareGrid, vFuncBool, AdjustPrb,
                       DiscreteShareBool, ShareLimit, IndepDstnBool):
    '''
    Solve the one period problem for a portfolio-choice consumer.
    
    Parameters
    ----------
    solution_next : PortfolioSolution
        Solution to next period's problem.
    ShockDstn : [np.array]
        List with four arrays: discrete probabilities, permanent income shocks,
        transitory income shocks, and risky returns.  This is only used if the
        input IndepDstnBool is False, indicating that income and return distributions
        can't be assumed to be independent.
    IncomeDstn : [np.array]
        List with three arrays: discrete probabilities, permanent income shocks,
        and transitory income shocks.  This is only used if the input IndepDstnBool
        is True, indicating that income and return distributions are independent.
    RiskyDstn : [np.array]
        List with two arrays: discrete probabilities and risky asset returns. This
        is only used if the input IndepDstnBool is True, indicating that income
        and return distributions are independent.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with.  In this model, it is *required* to be zero.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    ShareGrid : np.array
        Array of risky portfolio shares on which to define the interpolation
        of the consumption function when Share is fixed.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    AdjustPrb : float
        Probability that the agent will be able to update his portfolio share.
    DiscreteShareBool : bool
        Indicator for whether risky portfolio share should be optimized on the
        continuous [0,1] interval using the FOC (False), or instead only selected
        from the discrete set of values in ShareGrid (True).  If True, then
        vFuncBool must also be True.
    ShareLimit : float
        Limiting lower bound of risky portfolio share as mNrm approaches infinity.
    IndepDstnBool : bool
        Indicator for whether the income and risky return distributions are
        independent of each other, which can speed up the expectations step.

    Returns
    -------
    solution_now : PortfolioSolution
        The solution to the single period consumption-saving with portfolio choice
        problem.  Includes two consumption and risky share functions: one for when
        the agent can adjust his portfolio share (Adj) and when he can't (Fxd).
    '''
    # Make sure the individual is liquidity constrained.  Allowing a consumer to
    # borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
    if BoroCnstArt != 0.0:
        raise ValueError('PortfolioConsumerType must have BoroCnstArt=0.0!')

    # Make sure that if risky portfolio share is optimized only discretely, then
    # the value function is also constructed (else this task would be impossible).
    if (DiscreteShareBool and (not vFuncBool)):
        raise ValueError(
            'PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!'
        )

    # Define temporary functions for utility and its derivative and inverse
    u = lambda x: utility(x, CRRA)
    uP = lambda x: utilityP(x, CRRA)
    uPinv = lambda x: utilityP_inv(x, CRRA)
    n = lambda x: utility_inv(x, CRRA)
    nP = lambda x: utility_invP(x, CRRA)

    # Unpack next period's solution
    vPfuncAdj_next = solution_next.vPfuncAdj
    dvdmFuncFxd_next = solution_next.dvdmFuncFxd
    dvdsFuncFxd_next = solution_next.dvdsFuncFxd
    vFuncAdj_next = solution_next.vFuncAdj
    vFuncFxd_next = solution_next.vFuncFxd

    # Major method fork: (in)dependent risky asset return and income distributions
    if IndepDstnBool:  # If the distributions ARE independent...
        # Unpack the shock distribution
        IncPrbs_next = IncomeDstn.pmf
        PermShks_next = IncomeDstn.X[0]
        TranShks_next = IncomeDstn.X[1]
        Rprbs_next = RiskyDstn.pmf
        Risky_next = RiskyDstn.X
        zero_bound = (
            np.min(TranShks_next) == 0.
        )  # Flag for whether the natural borrowing constraint is zero
        RiskyMax = np.max(Risky_next)

        # bNrm represents R*a, balances after asset return shocks but before income.
        # This just uses the highest risky return as a rough shifter for the aXtraGrid.
        if zero_bound:
            aNrmGrid = aXtraGrid
            bNrmGrid = np.insert(RiskyMax * aXtraGrid, 0,
                                 np.min(Risky_next) * aXtraGrid[0])
        else:
            aNrmGrid = np.insert(aXtraGrid, 0,
                                 0.0)  # Add an asset point at exactly zero
            bNrmGrid = RiskyMax * np.insert(aXtraGrid, 0, 0.0)

        # Get grid and shock sizes, for easier indexing
        aNrm_N = aNrmGrid.size
        bNrm_N = bNrmGrid.size
        Share_N = ShareGrid.size
        Income_N = IncPrbs_next.size
        Risky_N = Rprbs_next.size

        # Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncomeDstn
        bNrm_tiled = np.tile(np.reshape(bNrmGrid, (bNrm_N, 1, 1)),
                             (1, Share_N, Income_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (bNrm_N, 1, Income_N))
        IncPrbs_tiled = np.tile(np.reshape(IncPrbs_next, (1, 1, Income_N)),
                                (bNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Income_N)),
                                 (bNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Income_N)),
                                 (bNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        mNrm_next = bNrm_tiled / (PermShks_tiled * PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (
                1. -
                AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(
            mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (
                1. -
                AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next

        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate intermediate marginal value of bank balances by taking expectations over income shocks
        temp_fac_A = uP(PermShks_tiled *
                        PermGroFac)  # Will use this in a couple places
        dvdb_intermed = np.sum(IncPrbs_tiled * temp_fac_A * dvdm_next, axis=2)
        dvdbNvrs_intermed = uPinv(dvdb_intermed)
        dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid,
                                               ShareGrid)
        dvdbFunc_intermed = MargValueFunc2D(dvdbNvrsFunc_intermed, CRRA)

        # Calculate intermediate value by taking expectations over income shocks
        temp_fac_B = (PermShks_tiled * PermGroFac)**(1. - CRRA
                                                     )  # Will use this below
        if vFuncBool:
            v_intermed = np.sum(IncPrbs_tiled * temp_fac_B * v_next, axis=2)
            vNvrs_intermed = n(v_intermed)
            vNvrsFunc_intermed = BilinearInterp(vNvrs_intermed, bNrmGrid,
                                                ShareGrid)
            vFunc_intermed = ValueFunc2D(vNvrsFunc_intermed, CRRA)

        # Calculate intermediate marginal value of risky portfolio share by taking expectations
        dvds_intermed = np.sum(IncPrbs_tiled * temp_fac_B * dvds_next, axis=2)
        dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid)

        # Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)),
                             (1, Share_N, Risky_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (aNrm_N, 1, Risky_N))
        Rprbs_tiled = np.tile(np.reshape(Rprbs_next, (1, 1, Risky_N)),
                              (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Risky_N)),
                              (aNrm_N, Share_N, 1))

        # Calculate future realizations of bank balances bNrm
        Share_next = Share_tiled
        Rxs = Risky_tiled - Rfree
        Rport = Rfree + Share_next * Rxs
        bNrm_next = Rport * aNrm_tiled

        # Evaluate realizations of value and marginal value after asset returns are realized
        dvdb_next = dvdbFunc_intermed(bNrm_next, Share_next)
        dvds_next = dvdsFunc_intermed(bNrm_next, Share_next)
        if vFuncBool:
            v_next = vFunc_intermed(bNrm_next, Share_next)
        else:
            v_next = np.zeros_like(dvdb_next)

        # Calculate end-of-period marginal value of assets by taking expectations
        EndOfPrddvda = DiscFac * LivPrb * np.sum(
            Rprbs_tiled * Rport * dvdb_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)

        # Calculate end-of-period value by taking expectations
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(Rprbs_tiled * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            Rprbs_tiled * (Rxs * aNrm_tiled * dvdb_next + dvds_next), axis=2)

    else:  # If the distributions are NOT independent...
        # Unpack the shock distribution
        ShockPrbs_next = ShockDstn[0]
        PermShks_next = ShockDstn[1]
        TranShks_next = ShockDstn[2]
        Risky_next = ShockDstn[3]
        zero_bound = (
            np.min(TranShks_next) == 0.
        )  # Flag for whether the natural borrowing constraint is zero

        # Make tiled arrays to calculate future realizations of mNrm and Share; dimension order: mNrm, Share, shock
        if zero_bound:
            aNrmGrid = aXtraGrid
        else:
            aNrmGrid = np.insert(aXtraGrid, 0,
                                 0.0)  # Add an asset point at exactly zero
        aNrm_N = aNrmGrid.size
        Share_N = ShareGrid.size
        Shock_N = ShockPrbs_next.size
        aNrm_tiled = np.tile(np.reshape(aNrmGrid, (aNrm_N, 1, 1)),
                             (1, Share_N, Shock_N))
        Share_tiled = np.tile(np.reshape(ShareGrid, (1, Share_N, 1)),
                              (aNrm_N, 1, Shock_N))
        ShockPrbs_tiled = np.tile(np.reshape(ShockPrbs_next, (1, 1, Shock_N)),
                                  (aNrm_N, Share_N, 1))
        PermShks_tiled = np.tile(np.reshape(PermShks_next, (1, 1, Shock_N)),
                                 (aNrm_N, Share_N, 1))
        TranShks_tiled = np.tile(np.reshape(TranShks_next, (1, 1, Shock_N)),
                                 (aNrm_N, Share_N, 1))
        Risky_tiled = np.tile(np.reshape(Risky_next, (1, 1, Shock_N)),
                              (aNrm_N, Share_N, 1))

        # Calculate future realizations of market resources
        Rport = (1. - Share_tiled) * Rfree + Share_tiled * Risky_tiled
        mNrm_next = Rport * aNrm_tiled / (PermShks_tiled *
                                          PermGroFac) + TranShks_tiled
        Share_next = Share_tiled

        # Evaluate realizations of marginal value of market resources next period
        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
        if AdjustPrb < 1.:
            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next)
            dvdm_next = AdjustPrb * dvdmAdj_next + (
                1. -
                AdjustPrb) * dvdmFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvdm_next = dvdmAdj_next

        # Evaluate realizations of marginal value of risky share next period
        dvdsAdj_next = np.zeros_like(
            mNrm_next)  # No marginal value of Share if it's a free choice!
        if AdjustPrb < 1.:
            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next)
            dvds_next = AdjustPrb * dvdsAdj_next + (
                1. -
                AdjustPrb) * dvdsFxd_next  # Combine by adjustment probability
        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
            dvds_next = dvdsAdj_next

        # If the value function has been requested, evaluate realizations of value
        if vFuncBool:
            vAdj_next = vFuncAdj_next(mNrm_next)
            if AdjustPrb < 1.:
                vFxd_next = vFuncFxd_next(mNrm_next, Share_next)
                v_next = AdjustPrb * vAdj_next + (1. - AdjustPrb) * vFxd_next
            else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
                v_next = vAdj_next
        else:
            v_next = np.zeros_like(dvdm_next)  # Trivial array

        # Calculate end-of-period marginal value of assets by taking expectations
        temp_fac_A = uP(PermShks_tiled *
                        PermGroFac)  # Will use this in a couple places
        EndOfPrddvda = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * Rport * temp_fac_A * dvdm_next, axis=2)
        EndOfPrddvdaNvrs = uPinv(EndOfPrddvda)

        # Calculate end-of-period value by taking expectations
        temp_fac_B = (PermShks_tiled * PermGroFac)**(1. - CRRA
                                                     )  # Will use this below
        if vFuncBool:
            EndOfPrdv = DiscFac * LivPrb * np.sum(
                ShockPrbs_tiled * temp_fac_B * v_next, axis=2)
            EndOfPrdvNvrs = n(EndOfPrdv)

        # Calculate end-of-period marginal value of risky portfolio share by taking expectations
        Rxs = Risky_tiled - Rfree
        EndOfPrddvds = DiscFac * LivPrb * np.sum(
            ShockPrbs_tiled * (Rxs * aNrm_tiled * temp_fac_A * dvdm_next +
                               temp_fac_B * dvds_next),
            axis=2)

    # Major method fork: discrete vs continuous choice of risky portfolio share
    if DiscreteShareBool:  # Optimization of Share on the discrete set ShareGrid
        opt_idx = np.argmax(EndOfPrdv, axis=1)
        Share_now = ShareGrid[
            opt_idx]  # Best portfolio share is one with highest value
        cNrmAdj_now = EndOfPrddvdaNvrs[np.arange(
            aNrm_N), opt_idx]  # Take cNrm at that index as well
        if not zero_bound:
            Share_now[
                0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[
                0, -1]  # Consumption when aNrm=0 does not depend on Share

    else:  # Optimization of Share on continuous interval [0,1]
        # For values of aNrm at which the agent wants to put more than 100% into risky asset, constrain them
        FOC_s = EndOfPrddvds
        Share_now = np.zeros_like(
            aNrmGrid)  # Initialize to putting everything in safe asset
        cNrmAdj_now = np.zeros_like(aNrmGrid)
        constrained = FOC_s[:,
                            -1] > 0.  # If agent wants to put more than 100% into risky asset, he is constrained
        Share_now[constrained] = 1.0
        if not zero_bound:
            Share_now[
                0] = 1.  # aNrm=0, so there's no way to "optimize" the portfolio
            cNrmAdj_now[0] = EndOfPrddvdaNvrs[
                0, -1]  # Consumption when aNrm=0 does not depend on Share
        cNrmAdj_now[constrained] = EndOfPrddvdaNvrs[
            constrained, -1]  # Get consumption when share-constrained

        # For each value of aNrm, find the value of Share such that FOC-Share == 0.
        # This loop can probably be eliminated, but it's such a small step that it won't speed things up much.
        crossing = np.logical_and(FOC_s[:, 1:] <= 0., FOC_s[:, :-1] >= 0.)
        for j in range(aNrm_N):
            if Share_now[j] == 0.:
                try:
                    idx = np.argwhere(crossing[j, :])[0][0]
                    bot_s = ShareGrid[idx]
                    top_s = ShareGrid[idx + 1]
                    bot_f = FOC_s[j, idx]
                    top_f = FOC_s[j, idx + 1]
                    bot_c = EndOfPrddvdaNvrs[j, idx]
                    top_c = EndOfPrddvdaNvrs[j, idx + 1]
                    alpha = 1. - top_f / (top_f - bot_f)
                    Share_now[j] = (1. - alpha) * bot_s + alpha * top_s
                    cNrmAdj_now[j] = (1. - alpha) * bot_c + alpha * top_c
                except:
                    print('No optimal controls found for a=' +
                          str(aNrmGrid[j]))

    # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio
    mNrmAdj_now = aNrmGrid + cNrmAdj_now

    # Construct the risky share function when the agent can adjust
    if DiscreteShareBool:
        mNrmAdj_mid = (mNrmAdj_now[1:] + mNrmAdj_now[:-1]) / 2
        mNrmAdj_plus = mNrmAdj_mid * (1. + 1e-12)
        mNrmAdj_comb = (np.transpose(np.vstack(
            (mNrmAdj_mid, mNrmAdj_plus)))).flatten()
        mNrmAdj_comb = np.append(np.insert(mNrmAdj_comb, 0, 0.0),
                                 mNrmAdj_now[-1])
        Share_comb = (np.transpose(np.vstack(
            (Share_now, Share_now)))).flatten()
        ShareFuncAdj_now = LinearInterp(mNrmAdj_comb, Share_comb)
    else:
        if zero_bound:
            Share_lower_bound = ShareLimit
        else:
            Share_lower_bound = 1.0
        Share_now = np.insert(Share_now, 0, Share_lower_bound)
        ShareFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0),
                                        Share_now,
                                        intercept_limit=ShareLimit,
                                        slope_limit=0.0)

    # Construct the consumption function when the agent can adjust
    cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0)
    cFuncAdj_now = LinearInterp(np.insert(mNrmAdj_now, 0, 0.0), cNrmAdj_now)

    # Construct the marginal value (of mNrm) function when the agent can adjust
    vPfuncAdj_now = MargValueFunc(cFuncAdj_now, CRRA)

    # Construct the consumption function when the agent *can't* adjust the risky share, as well
    # as the marginal value of Share function
    cFuncFxd_by_Share = []
    dvdsFuncFxd_by_Share = []
    for j in range(Share_N):
        cNrmFxd_temp = EndOfPrddvdaNvrs[:, j]
        mNrmFxd_temp = aNrmGrid + cNrmFxd_temp
        cFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(cNrmFxd_temp, 0, 0.0)))
        dvdsFuncFxd_by_Share.append(
            LinearInterp(np.insert(mNrmFxd_temp, 0, 0.0),
                         np.insert(EndOfPrddvds[:, j], 0, EndOfPrddvds[0, j])))
    cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid)
    dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid)

    # The share function when the agent can't adjust his portfolio is trivial
    ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2)

    # Construct the marginal value of mNrm function when the agent can't adjust his share
    dvdmFuncFxd_now = MargValueFunc2D(cFuncFxd_now, CRRA)

    # If the value function has been requested, construct it now
    if vFuncBool:
        # First, make an end-of-period value function over aNrm and Share
        EndOfPrdvNvrsFunc = BilinearInterp(EndOfPrdvNvrs, aNrmGrid, ShareGrid)
        EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc, CRRA)

        # Construct the value function when the agent can adjust his portfolio
        mNrm_temp = aXtraGrid  # Just use aXtraGrid as our grid of mNrm values
        cNrm_temp = cFuncAdj_now(mNrm_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        Share_temp = ShareFuncAdj_now(mNrm_temp)
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncAdj = CubicInterp(
            np.insert(mNrm_temp, 0, 0.0),  # x_list
            np.insert(vNvrs_temp, 0, 0.0),  # f_list
            np.insert(vNvrsP_temp, 0, vNvrsP_temp[0]))  # dfdx_list
        vFuncAdj_now = ValueFunc(
            vNvrsFuncAdj, CRRA)  # Re-curve the pseudo-inverse value function

        # Construct the value function when the agent *can't* adjust his portfolio
        mNrm_temp = np.tile(np.reshape(aXtraGrid, (aXtraGrid.size, 1)),
                            (1, Share_N))
        Share_temp = np.tile(np.reshape(ShareGrid, (1, Share_N)),
                             (aXtraGrid.size, 1))
        cNrm_temp = cFuncFxd_now(mNrm_temp, Share_temp)
        aNrm_temp = mNrm_temp - cNrm_temp
        v_temp = u(cNrm_temp) + EndOfPrdvFunc(aNrm_temp, Share_temp)
        vNvrs_temp = n(v_temp)
        vNvrsP_temp = uP(cNrm_temp) * nP(v_temp)
        vNvrsFuncFxd_by_Share = []
        for j in range(Share_N):
            vNvrsFuncFxd_by_Share.append(
                CubicInterp(
                    np.insert(mNrm_temp[:, 0], 0, 0.0),  # x_list
                    np.insert(vNvrs_temp[:, j], 0, 0.0),  # f_list
                    np.insert(vNvrsP_temp[:, j], 0,
                              vNvrsP_temp[0, j])))  # dfdx_list
        vNvrsFuncFxd = LinearInterpOnInterp1D(vNvrsFuncFxd_by_Share, ShareGrid)
        vFuncFxd_now = ValueFunc2D(vNvrsFuncFxd, CRRA)

    else:  # If vFuncBool is False, fill in dummy values
        vFuncAdj_now = None
        vFuncFxd_now = None

    # Create and return this period's solution
    return PortfolioSolution(cFuncAdj=cFuncAdj_now,
                             ShareFuncAdj=ShareFuncAdj_now,
                             vPfuncAdj=vPfuncAdj_now,
                             vFuncAdj=vFuncAdj_now,
                             cFuncFxd=cFuncFxd_now,
                             ShareFuncFxd=ShareFuncFxd_now,
                             dvdmFuncFxd=dvdmFuncFxd_now,
                             dvdsFuncFxd=dvdsFuncFxd_now,
                             vFuncFxd=vFuncFxd_now)
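The continuous-share branch above locates the optimal portfolio share by linearly interpolating the first-order condition between the two gridpoints where it changes sign. A standalone sketch of that step with hypothetical FOC values, not HARK code:

import numpy as np

ShareGrid = np.linspace(0.0, 1.0, 6)                 # candidate risky shares
FOC_s = np.array([0.9, 0.5, 0.1, -0.2, -0.6, -1.0])  # hypothetical FOC values at those shares

# Find the bracket where the FOC crosses zero from above, then interpolate linearly within it
idx = np.argwhere(np.logical_and(FOC_s[1:] <= 0.0, FOC_s[:-1] >= 0.0))[0][0]
bot_s, top_s = ShareGrid[idx], ShareGrid[idx + 1]
bot_f, top_f = FOC_s[idx], FOC_s[idx + 1]
alpha = 1.0 - top_f / (top_f - bot_f)                # weight on the upper bracket point
Share_opt = (1.0 - alpha) * bot_s + alpha * top_s
print(Share_opt)                                     # about 0.467, where the linearized FOC is zero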
Example #13
    def post_solve(self):
        self.solution_fast = deepcopy(self.solution)

        if self.cycles == 0:
            cycles = 1
        else:
            cycles = self.cycles
            self.solution[-1] = self.solution_terminal_cs

        for i in range(cycles):
            for j in range(self.T_cycle):
                solution = self.solution[i * self.T_cycle + j]

                # Define the borrowing constraint (limiting consumption function)
                cFuncNowCnst = LinearInterp(
                    np.array([solution.mNrmMin, solution.mNrmMin + 1]),
                    np.array([0.0, 1.0]),
                )

                """
                Constructs a basic solution for this period, including the consumption
                function and marginal value function.
                """

                if self.CubicBool:
                    # Makes a cubic spline interpolation of the unconstrained consumption
                    # function for this period.
                    cFuncNowUnc = CubicInterp(
                        solution.mNrm,
                        solution.cNrm,
                        solution.MPC,
                        solution.cFuncLimitIntercept,
                        solution.cFuncLimitSlope,
                    )
                else:
                    # Makes a linear interpolation to represent the (unconstrained) consumption function.
                    # Construct the unconstrained consumption function
                    cFuncNowUnc = LinearInterp(
                        solution.mNrm,
                        solution.cNrm,
                        solution.cFuncLimitIntercept,
                        solution.cFuncLimitSlope,
                    )

                # Combine the constrained and unconstrained functions into the true consumption function
                cFuncNow = LowerEnvelope(cFuncNowUnc, cFuncNowCnst)

                # Make the marginal value function and the marginal marginal value function
                vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA)

                # Pack up the solution and return it
                consumer_solution = ConsumerSolution(
                    cFunc=cFuncNow,
                    vPfunc=vPfuncNow,
                    mNrmMin=solution.mNrmMin,
                    hNrm=solution.hNrm,
                    MPCmin=solution.MPCmin,
                    MPCmax=solution.MPCmax,
                )

                if self.vFuncBool:
                    vNvrsFuncNow = CubicInterp(
                        solution.mNrmGrid,
                        solution.vNvrs,
                        solution.vNvrsP,
                        solution.MPCminNvrs * solution.hNrm,
                        solution.MPCminNvrs,
                    )
                    vFuncNow = ValueFuncCRRA(vNvrsFuncNow, self.CRRA)

                    consumer_solution.vFunc = vFuncNow

                if self.CubicBool or self.vFuncBool:
                    _searchFunc = (
                        _find_mNrmStECubic if self.CubicBool else _find_mNrmStELinear
                    )
                    # Add mNrmStE to the solution and return it
                    consumer_solution.mNrmStE = _add_mNrmStEIndNumba(
                        self.PermGroFac[j],
                        self.Rfree,
                        solution.Ex_IncNext,
                        solution.mNrmMin,
                        solution.mNrm,
                        solution.cNrm,
                        solution.MPC,
                        solution.MPCmin,
                        solution.hNrm,
                        _searchFunc,
                    )

                self.solution[i * self.T_cycle + j] = consumer_solution
Example #14
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import CubicHermiteSpline

from HARK.interpolation import CubicInterp, CubicHermiteInterp

# %% [markdown]
# ### Creating a HARK wrapper for scipy's CubicHermiteSpline
#
# The class CubicHermiteInterp in HARK.interpolation implements a HARK wrapper for scipy's CubicHermiteSpline. A HARK wrapper is needed because of the way interpolators are used in solution methods across HARK, and in particular because of the `distance_criteria` attribute used for VFI convergence.

# %% pycharm={"name": "#%%\n"}
x = np.linspace(0, 10, num=11, endpoint=True)
y = np.cos(-(x**2) / 9.0)
dydx = 2.0 * x / 9.0 * np.sin(-(x**2) / 9.0)

f = CubicInterp(x, y, dydx, lower_extrap=True)
f2 = CubicHermiteSpline(x, y, dydx)
f3 = CubicHermiteInterp(x, y, dydx, lower_extrap=True)
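
# %% [markdown]
# As a quick numerical sanity check, the three interpolators can be compared on points inside the grid; since they impose the same cubic Hermite conditions, they should agree to floating-point precision there. (The test points below are chosen only for illustration.)

# %% pycharm={"name": "#%%\n"}
x_test = np.linspace(0.5, 9.5, num=19)
print(np.max(np.abs(f(x_test) - f2(x_test))))   # HARK CubicInterp vs. scipy
print(np.max(np.abs(f3(x_test) - f2(x_test))))  # HARK CubicHermiteInterp wrapper vs. scipy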

# %% [markdown]
# The three interpolators created above are:
# 1. **CubicInterp** from HARK.interpolation
# 2. **CubicHermiteSpline** from scipy.interpolate
# 3. **CubicHermiteInterp**, the new hybrid implemented in HARK.interpolation
#
# Below we see that they behave in much the same way.

# %% pycharm={"name": "#%%\n"}
xnew = np.linspace(0, 10, num=41, endpoint=True)

plt.plot(x, y, "o", xnew, f(xnew), "-", xnew, f2(xnew), "--", xnew, f3(xnew), ":")
plt.legend(["data", "CubicInterp", "CubicHermiteSpline", "CubicHermiteInterp"], loc="best")
plt.show()