Example #1
    def ee_error(self):
        """
        Computes the euler equation error over the entire state space.
        
        *Output
            * Log10 euler_error
            * max Log10 euler error
            * average Log10 euler error
        """

        # a. initialize
        euler_error = np.zeros((self.Nz, self.Ns))

        # b. helper function
        u_prime = lambda c: c**(-self.sigma)

        u_prime_inv = lambda x: x**(-1 / self.sigma)

        # c. calculate euler error at all grid points

        for i_z, z in enumerate(self.grid_z):  #current income shock
            for i_s, s0 in enumerate(self.grid_sav):  #current asset level

                # i. interpolate savings policy function grid point

                a_plus = interp(self.grid_sav, self.pol_sav[i_z, :], s0)

                # liquidity constrained, do not calculate error
                if a_plus <= 0:
                    euler_error[i_z, i_s] = np.nan

                # interior solution
                else:

                    # ii. current consumption and initialize expected marginal utility
                    c = (1 + self.r) * s0 + self.w * z - a_plus

                    avg_marg_c_plus = 0

                    # iii. expected marginal utility
                    for i_zz, z_plus in enumerate(
                            self.grid_z):  #next period productivity

                        c_plus = (
                            1 + self.r) * a_plus + self.w * z_plus - interp(
                                self.grid_sav, self.pol_sav[i_zz, :], a_plus)

                        #expectation of marginal utility of consumption
                        avg_marg_c_plus += self.pi[i_z, i_zz] * u_prime(c_plus)

                    # iv. compute euler error
                    euler_error[i_z, i_s] = 1 - u_prime_inv(
                        self.beta * (1 + self.r) * avg_marg_c_plus) / c

        # d. transform euler error with log_10, then take max and average
        euler_error = np.log10(np.abs(euler_error))
        max_error = np.nanmax(euler_error)
        avg_error = np.nanmean(euler_error)

        return euler_error, max_error, avg_error
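A quick usage sketch (hypothetical; assumes a solved model instance `hh` exposing the attributes used above). As a rule of thumb, a log10 Euler error of -3 corresponds to a one-dollar mistake per thousand dollars of consumption:

errors, max_err, avg_err = hh.ee_error()
print("max log10 Euler error:", max_err)
print("avg log10 Euler error:", avg_err)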
Example #2
def MC(popu,
       π_star,
       w_vals,
       ζ_vals,
       δ_vals,
       Γ_star,
       P_ζ_cdfs,
       P_δ,
       μ,
       π,
       r,
       R,
       maxiter=1000,
       tol=1e-5,
       verbose=True):
    """
    Monte Carlo simulation.
    """

    # π is the true probability of invention success, not the perceived one

    N = popu.shape[0]

    # is there any gain by taking draws together outside the loop?
    ι_draw_rvs = np.random.random((maxiter, N))
    δ_draw_rvs = np.random.random((maxiter, N))
    π_draw_rvs = np.random.random((maxiter, N))
    ζ_draw_rvs = np.random.random((maxiter, N))

    ζ_i_draw_rvs = np.zeros((maxiter, N), dtype=np.int64)
    _generate_sample_paths(P_ζ_cdfs, popu[:, 1], ζ_draw_rvs, ζ_i_draw_rvs)
    popu[:, 1] = ζ_i_draw_rvs[-1, :]

    for i in range(maxiter):
        popu_old = np.copy(popu)
        for j in prange(N):
            ι = 0 if ι_draw_rvs[i, j] < μ else 1

            # update w
            w = popu[j, 0]
            ζ_i = ζ_i_draw_rvs[i, j]
            ζ = ζ_vals[ζ_i]

            k_tilde = interp(w_vals, π_star[ι, ζ_i, :, 1], w)
            b = interp(w_vals, π_star[ι, ζ_i, :, 2], w)

            δ_i = 0 if δ_draw_rvs[i, j] < P_δ[0] else 1
            δ = δ_vals[δ_i]

            next_w = (1 + r) * δ * k_tilde + (R - (1 + r) * δ) * b

            # the invention decision
            p_invention = interp(w_vals, π_star[ι, ζ_i, :, 0], w)
            if p_invention > 0.5:
                if π_draw_rvs[i, j] < π:
                    next_w += Γ_star

            popu[j, 0] = next_w

        # convergence check (assumed; popu_old, tol, and verbose are
        # otherwise unused in this excerpt)
        max_diff = np.max(np.abs(popu - popu_old))
        if max_diff < tol:
            if verbose:
                print("Converged at iteration", i)
            break

    return popu
Example #3
def objective(c, v, y):
    """
    The right hand side of the Bellman equation
    """
    # First turn v into a function via interpolation
    v_func = lambda x: interp(y_grid, v, x)
    return u(c) + β * np.mean(v_func(f(y - c) * shocks))
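This objective is typically maximized over consumption at each grid point; a minimal sketch using brute-force grid search (hypothetical helper; `y_grid`, `u`, `f`, `β`, and `shocks` are assumed from the surrounding lecture code):

import numpy as np

def maximize_objective(v, y, n=200):
    # grid search over feasible consumption (0, y]
    c_grid = np.linspace(1e-10, y, n)
    vals = np.array([objective(c, v, y) for c in c_grid])
    i_max = np.argmax(vals)
    return c_grid[i_max], vals[i_max]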
Example #4
    def __init__(self, speclist, xtype="frequency", xarr=None, force=False, **kwargs):

        if xarr is None:
            self.xarr = speclist[0].xarr
        else:
            self.xarr = xarr

        self.units = speclist[0].units
        self.header = speclist[0].header
        self.parse_header(self.header)

        for spec in speclist:
            if not isinstance(spec, Spectrum):
                raise TypeError("Must create an ObsBlock with a list of spectra.")
            if not np.array_equal(spec.xarr, self.xarr):
                if not force:
                    raise ValueError("Mismatch between X axes in ObsBlock")
            if spec.units != self.units:
                raise ValueError("Mismatched units")

        if force:
            self.speclist = [interpolation.interp(spec, self) for spec in speclist]
        else:
            self.speclist = speclist
        self.nobs = len(self.speclist)

        # Create a 2-dimensional array of the data
        self.data = np.array([sp.data for sp in self.speclist]).swapaxes(0, 1).squeeze()
        self.error = np.array([sp.error for sp in self.speclist]).swapaxes(0, 1).squeeze()

        self.plotter = plotters.Plotter(self)
        self._register_fitters()
        self.specfit = fitters.Specfit(self, Registry=self.Registry)
        self.baseline = baseline.Baseline(self)
Example #5
def folded_cpl_evolution(
    energy,
    time,
    peak_flux,
    ep_start,
    ep_tau,
    emin,
    emax,
    alpha,
    redshift,
    Nrest,
    gamma,
    response,
):
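    # interpolate the instrument response, assumed stored as
    # response = (energies, effective_area), at each input energy,
    # then fold it with the redshift-corrected CPL spectral evolution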

    return interp(response[0], response[1], energy) * corr_cpl_evolution(
        energy,
        time,
        peak_flux,
        ep_start,
        ep_tau,
        emin,
        emax,
        alpha,
        redshift,
        Nrest,
        gamma,
    )
Example #6
    def compute_aggregates(self, popu):
        """
        compute aggregates using stationary distribution.
        """

        # simplify notation
        π_star, w_vals, ζ_vals = self.π_star, self.hh.w_vals, self.hh.ζ_vals

        # K_tilde and B
        aggregates = np.zeros(2)
        for h_i in range(self.hh.π.size):
            for ζ_i in range(len(ζ_vals)):
                w_subsample = popu[popu[:, 1, h_i] == ζ_i, 0, h_i]
                ζ_weight = len(w_subsample) / len(popu)
                for ι_i in range(2):
                    # compute K_tilde and B
                    for i in range(2):
                        aggregates[i] += self.hh.α[h_i] * self.hh.P_ι[ι_i] * ζ_weight * \
                            interp(w_vals, π_star[ι_i, ζ_i, :, i+1],
                                   w_subsample).mean()

        # K = K_tilde - B
        K = aggregates[0] - aggregates[1]
        B = aggregates[1]

        return K, B
Example #7
def vec_eval(p):
    N = p.shape[0]
    out = np.zeros(N)
    for n in range(N):
        z1 = p[n, 0]
        z2 = p[n, 1]
        out[n] = interp(x1, x2, y, z1, z2)
    return out
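A hypothetical setup for the two-dimensional call above (`x1`, `x2`, and `y` are assumed module-level globals; the multilinear overload of `interp` takes the two grids, the value array, and one coordinate per axis):

import numpy as np
from interpolation import interp

x1 = np.linspace(0.0, 1.0, 50)        # grid along the first axis
x2 = np.linspace(0.0, 1.0, 60)        # grid along the second axis
y = np.outer(np.sin(x1), np.cos(x2))  # values on the product grid

p = np.random.rand(1000, 2)           # query points in [0, 1]^2
values = vec_eval(p)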
Example #8
def _find_mNrmStE(m, Rfree, PermGroFac, mNrm, cNrm, Ex_IncNext):
    # Make a linear function of all combinations of c and m that yield mNext = mNow
    mZeroChange = (1.0 - PermGroFac / Rfree) * m + (PermGroFac / Rfree) * Ex_IncNext

    # Find the steady state level of market resources
    res = interp(mNrm, cNrm, m) - mZeroChange
    # A zero of this is SS market resources
    return res
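The steady state is then a zero of this residual in m; a sketch with SciPy's bracketing root finder (the bracket endpoints are an assumption):

from scipy.optimize import brentq

mNrmStE = brentq(_find_mNrmStE, mNrm[0], mNrm[-1],
                 args=(Rfree, PermGroFac, mNrm, cNrm, Ex_IncNext))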
Example #9
def objective(c, σ, y):
    """
    The right hand side of the operator
    """
    # First turn σ into a function via interpolation
    σ_func = lambda x: interp(grid, σ, x)
    vals = u_prime(σ_func(f(y - c) * shocks)) * f_prime(y - c) * shocks
    return u_prime(c) - β * np.mean(vals)
Example #10
    def solve_egm(self, pol_cons_old, ret_ss, w_ss):
        """
        Endogenous grid method to help solve the household problem
        """

        # a. initialize
        c_tilde = np.empty((self.Nz, self.Ns))
        a_star = np.empty((self.Nz, self.Ns))
        pol_cons = np.empty((self.Nz, self.Ns))

        for i_z in range(self.Nz):

            # b. find RHS of euler equation (step 3 in EGM algo)
            avg_marg_u_plus = np.zeros(self.Ns)
            for i_zz in range(self.Nz):

                # i. future consumption
                c_plus = pol_cons_old[i_zz, :]

                # ii. future marginal utility
                marg_u_plus = self.u_prime(c_plus)

                # iii. average marginal utility
                weight = self.pi[i_z, i_zz]

                avg_marg_u_plus += weight * marg_u_plus

            ee_rhs = (1 + ret_ss) * self.beta * avg_marg_u_plus

            # c. find current consumption (step 4 in EGM algo)
            c_tilde[i_z, :] = self.u_prime_inv(ee_rhs)

            # d. get the endogenous grid of the value of assets today (step 5 in EGM algo)
            a_star[i_z, :] = (c_tilde[i_z, :] + self.grid_sav -
                              self.grid_z[i_z] * w_ss) / (1 + ret_ss)

            # e. update new consumption policy guess on savings grid
            for i_s, v_s in enumerate(self.grid_sav):
                if v_s <= a_star[i_z, 0]:
                    # borrowing constrained, outside the grid range on the left:
                    # c = (1 + r)a + w*z - a_min at the binding constraint
                    pol_cons[i_z, i_s] = ((1 + ret_ss) * v_s +
                                          self.grid_z[i_z] * w_ss -
                                          self.grid_sav[0])

                elif v_s >= a_star[i_z, -1]:
                    # linearly extrapolate, outside the grid range on the right
                    slope = ((c_tilde[i_z, -1] - c_tilde[i_z, -2]) /
                             (a_star[i_z, -1] - a_star[i_z, -2]))
                    pol_cons[i_z, i_s] = (c_tilde[i_z, -1] +
                                          (v_s - a_star[i_z, -1]) * slope)

                else:
                    # linearly interpolate, inside the grid range
                    pol_cons[i_z, i_s] = interp(a_star[i_z, :],
                                                c_tilde[i_z, :], v_s)

        return pol_cons, a_star
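A sketch of the outer fixed-point loop this method usually sits in (the instance name `hh`, `maxit`, `tol`, and the initial guess are assumptions):

pol_cons = np.ones((hh.Nz, hh.Ns))  # hypothetical initial guess
for it in range(maxit):
    pol_cons_new, _ = hh.solve_egm(pol_cons, ret_ss, w_ss)
    if np.max(np.abs(pol_cons_new - pol_cons)) < tol:
        break
    pol_cons = pol_cons_new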
Example #11
def correlate(spectrum1, spectrum2, range=None, units=None, errorweight=False):
    """
    Cross-correlate spectrum1 with spectrum2

    """

    if range is not None:
        spectrum1 = spectrum1.slice(*range, units=units)
        spectrum2 = spectrum2.slice(*range, units=units)

    if not (spectrum1.xarr.shape == spectrum2.xarr.shape) or not all(
            spectrum1.xarr == spectrum2.xarr):
        spectrum2 = interpolation.interp(spectrum2, spectrum1)

    data1 = spectrum1.data
    data2 = spectrum2.data

    xcorr = np.correlate(data1, data2, mode='same')

    # very simple propagation of error
    # each element is multiplied, multiplicative error is given such that (sigma_xy/xy)**2 = (sigma_x/x)**2 + (sigma_y/y)**2
    # error = (np.correlate( (spectrum1.error/spectrum1.data)**2 , np.ones(xcorr.shape), mode='same') +
    #          np.correlate( (spectrum2.error/spectrum2.data)**2 , np.ones(xcorr.shape), mode='same'))**0.5 * xcorr
    # That approach sucks - what if data == 0?
    #
    # this might be more correct: http://arxiv.org/pdf/1006.4069v1.pdf eqn 4
    # but it doesn't quite fit my naive expectations so:
    error = (np.correlate(
        (spectrum1.error)**2, np.ones(xcorr.shape), mode='same') +
             np.correlate(
                 (spectrum2.error)**2, np.ones(xcorr.shape), mode='same'))**0.5

    xarr = spectrum1.xarr
    xrange = xarr.max() - xarr.min()
    xmin = -xrange / 2.
    xmax = xrange / 2.
    offset_values = np.linspace(xmin, xmax, len(xarr))

    offset_xarr = units_module.SpectroscopicAxis(offset_values,
                                                 unit=xarr.units)

    header = headers.intersection(spectrum1.header, spectrum2.header)
    header.update('CRPIX1', 1)
    try:
        header.update('CRVAL1', xmin)
    except ValueError:
        header.update('CRVAL1', xmin.tolist())
    header.update('CDELT1', offset_xarr.cdelt())

    return classes.XCorrSpectrum(xarr=offset_xarr,
                                 data=xcorr,
                                 header=header,
                                 error=error)
Example #12
    def evaluate(self, v):

        out = interp(self.x, self.y, v)

        # set outside bounds to zero

        for i in range(v.shape[0]):
            if v[i] > self.xmax:
                out[i] = 0.
            elif v[i] < self.xmin:
                out[i] = 0.

        return out
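The explicit loop is presumably written for Numba's benefit; in plain NumPy the same bounds clamp can be vectorized (hypothetical equivalent):

def evaluate_vectorized(x, y, v, xmin, xmax):
    out = interp(x, y, v)
    out[(v < xmin) | (v > xmax)] = 0.0  # zero outside the interpolation bounds
    return out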
Example #13
def folded_cpl_evolution(
    energy,
    time,
    peak_flux,
    ep,
    alpha,
    emin,
    emax,
    response,
):

    return interp(response[0], response[1], energy) * cpl_evolution(
        energy, time, peak_flux, ep, alpha, emin, emax)
Example #14
    def evaluate(self, x, NH, redshift):

        if isinstance(x, astropy_units.Quantity):

            _unit = astropy_units.cm**2
            _y_unit = astropy_units.dimensionless_unscaled
            _x = x.value
            _redshift = redshift.value
        else:

            _unit = 1.0
            _y_unit = 1.0
            _redshift = redshift
            _x = x

        xsect_interp = interp(self.xsect_ene, self.xsect_val,
                              _x * (1 + _redshift))

        spec = _numba_eval(NH, xsect_interp) * _y_unit

        return spec
Example #15
    def T(f):
        """
        The Lucas operator
        """

        # Turn f into a function
        Af = lambda x: interp(grid, f, x)

        Tf = np.empty_like(f)
        # Apply the T operator to f using Monte Carlo integration
        for i in prange(len(grid)):
            y = grid[i]
            Tf[i] = h[i] + β * np.mean(Af(y**α * z_vec))

        return Tf
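The operator T is then iterated to its fixed point, roughly as follows (the tolerance and initial guess are assumptions):

f = np.ones_like(grid)  # initial guess for the price function
error, tol = 1.0, 1e-7
while error > tol:
    Tf = T(f)
    error = np.max(np.abs(Tf - f))
    f = Tf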
Example #16
def correlate(spectrum1, spectrum2, range=None, units=None, errorweight=False):
    """
    Cross-correlate spectrum1 with spectrum2

    """

    if range is not None:
        spectrum1 = spectrum1.slice(*range, units=units)
        spectrum2 = spectrum2.slice(*range, units=units)

    if not (spectrum1.xarr.shape == spectrum2.xarr.shape) or not all(spectrum1.xarr == spectrum2.xarr):
        spectrum2 = interpolation.interp(spectrum2, spectrum1)

    data1 = spectrum1.data
    data2 = spectrum2.data

    xcorr = np.correlate(data1, data2, mode='same')

    # very simple propagation of error
    # each element is multiplied, multiplicative error is given such that (sigma_xy/xy)**2 = (sigma_x/x)**2 + (sigma_y/y)**2
    # error = (np.correlate( (spectrum1.error/spectrum1.data)**2 , np.ones(xcorr.shape), mode='same') + 
    #          np.correlate( (spectrum2.error/spectrum2.data)**2 , np.ones(xcorr.shape), mode='same'))**0.5 * xcorr
    # That approach sucks - what if data == 0?
    #
    # this might be more correct: http://arxiv.org/pdf/1006.4069v1.pdf eqn 4
    # but it doesn't quite fit my naive expectations so:
    error = (np.correlate( (spectrum1.error)**2 , np.ones(xcorr.shape), mode='same') + 
             np.correlate( (spectrum2.error)**2 , np.ones(xcorr.shape), mode='same'))**0.5
    
    xarr = spectrum1.xarr
    xrange = xarr.max()-xarr.min()
    xmin = -xrange/2.
    xmax =  xrange/2.
    offset_values = np.linspace(xmin, xmax, len(xarr))

    offset_xarr = units_module.SpectroscopicAxis(offset_values, unit=xarr.unit) 

    header = headers.intersection(spectrum1.header, spectrum2.header)
    header.update('CRPIX1',1)
    try:
        header.update('CRVAL1',xmin)
    except ValueError:
        header.update('CRVAL1',xmin.tolist())
    header.update('CDELT1',offset_xarr.cdelt())

    return classes.XCorrSpectrum(xarr=offset_xarr, data=xcorr, header=header, error=error)
Example #17
    def __init__(self,
                 speclist,
                 xtype='frequency',
                 xarr=None,
                 force=False,
                 **kwargs):

        if xarr is None:
            self.xarr = speclist[0].xarr
        else:
            self.xarr = xarr

        self.units = speclist[0].units
        self.header = speclist[0].header
        self.parse_header(self.header)

        for spec in speclist:
            if not isinstance(spec, Spectrum):
                raise TypeError(
                    "Must create an ObsBlock with a list of spectra.")
            if not np.array_equal(spec.xarr, self.xarr):
                if not force:
                    raise ValueError("Mismatch between X axes in ObsBlock")
            if spec.units != self.units:
                raise ValueError("Mismatched units")

        if force:
            self.speclist = [
                interpolation.interp(spec, self) for spec in speclist
            ]
        else:
            self.speclist = speclist
        self.nobs = len(self.speclist)

        # Create a 2-dimensional array of the data
        self.data = np.array([sp.data
                              for sp in self.speclist]).swapaxes(0,
                                                                 1).squeeze()
        self.error = np.array([sp.error
                               for sp in self.speclist]).swapaxes(0,
                                                                  1).squeeze()

        self.plotter = plotters.Plotter(self)
        self._register_fitters()
        self.specfit = fitters.Specfit(self, Registry=self.Registry)
        self.baseline = baseline.Baseline(self)
Example #18
def folded_cpl_evolution(
    energy,
    time,
    peak_flux,
    ep_start,
    ep_tau,
    alpha,
    trise,
    tdecay,
    emin,
    emax,
    response,
    z,
):

    return interp(response[0], response[1], energy) * cpl_evolution(
        energy, time, peak_flux, ep_start, ep_tau, alpha, trise, tdecay, emin,
        emax, z)
Example #19
def euler_eq_residual(a_plus, params_eer):
    """
    Returns the difference between the LHS and RHS of the Euler Equation.
    
    *Input
        - a_plus : current savings

    *Output
        - Returns euler equation residual
    """

    # a. Initialize
    a, z, pol_sav_old, i_z, r, w, beta, sigma, pi, grid_z, grid_a = params_eer

    Nz = len(grid_z)
    avg_marg_u_plus = 0

    # b. current consumption
    c = (1 + r) * a + w * z - a_plus

    # c. expected marginal utility from consumption next period
    for i_zz in prange(Nz):

        # i. consumption next period
        c_plus = (1 + r) * a_plus + w * grid_z[i_zz] - interp(
            grid_a, pol_sav_old[i_zz, :], a_plus)

        # ii. marginal utility next period
        marg_u_plus = u_prime(c_plus, sigma)

        # iii. calculate expected marginal utility
        weight = pi[i_z, i_zz]

        avg_marg_u_plus += weight * marg_u_plus

    # d. RHS of the euler equation
    ee_rhs = (1 + r) * beta * avg_marg_u_plus

    return u_prime(c, sigma) - ee_rhs
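Optimal savings at a given state are then a zero of this residual; a sketch with a bracketing root finder (the bracket is an assumption and params_eer is assembled exactly as unpacked above):

from scipy.optimize import brentq

a_plus_star = brentq(euler_eq_residual, grid_a[0], grid_a[-1],
                     args=(params_eer,))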
Example #20
def household_opt_givenLoc(price_vector, individual_attributes, gridded_city_index, location_t1, migration_prop, iceberg_prop):
    util_data_givenLoc, Q_data_givenLoc, tau_pts_givenLoc, tau_bounds = household_generate_interps_givenLoc(price_vector, individual_attributes, gridded_city_index, location_t1, migration_prop, iceberg_prop)

    # Now I will do the interpolation that has bothered me since
    grid_tau_a, grid_tau_b, grid_tau_c, grid_tau_d, grid_tau_e = tau_pts_givenLoc

    q_func = lambda x: interp(grid_tau_a, grid_tau_b, grid_tau_c, grid_tau_d, grid_tau_e, Q_data_givenLoc, x)

    x0 = np.zeros(5)  ## Set to 5 for number of choice cities

    # Set bounds for investments
    tau_lower_bound, tau_upper_bound = tau_bounds
    tau_bounds_array = np.array([tau_lower_bound, tau_upper_bound]).T

    tauQgrid_args = (grid_tau_a, grid_tau_b, grid_tau_c, grid_tau_d, grid_tau_e, util_data_givenLoc)

    util_optimizer = scipy.optimize.minimize(utility_objective_givenLoc, x0, args=tauQgrid_args)
    # util_optimizer = nelder_mead(utility_objective_givenLoc, x0, bounds=tau_bounds_array, args=tauQgrid_args)

    OPTIMAL_utility_givenLoc, OPTIMAL_tau_adj_t1_givenLoc = util_optimizer.fun, util_optimizer.x

    # calculate the optimal q calculated via interpolation
    OPTIMAL_q = q_func(OPTIMAL_tau_adj_t1_givenLoc)

    # change optimal tau_t1 from 5 into proper length, filling the non-gridded cities tau as 0
    OPTIMAL_tau_citylen_t1_givenLoc = np.zeros(city_count)

    place_hold_counter = 0
    for city_index in gridded_city_index:
        OPTIMAL_tau_citylen_t1_givenLoc[city_index] = OPTIMAL_tau_adj_t1_givenLoc[place_hold_counter]
        place_hold_counter += 1

    # calculate the optimal consumption
    OPTIMAL_investment_grid = np.array(list(OPTIMAL_tau_citylen_t1_givenLoc) + [OPTIMAL_q])
    OPTIMAL_consumptions_givenLoc = household_calculate_feasible_givenLoc(price_vector, individual_attributes, OPTIMAL_investment_grid, location_t1, migration_prop, iceberg_prop)

    return OPTIMAL_utility_givenLoc, OPTIMAL_consumptions_givenLoc
Example #21
def simulate_MarkovChain(pol_cons, pol_sav, params_sim):
    """
    Simulates markov chain for T periods for N households and calculates the euler equation error for all 
    individuals at each point in time. In addition, it checks the grid size by raising an exception if more than 1% of
    households are at the maximum value (right edge) of the grid. The algorithm is from Ch.7 in Heer and Maussner.
    
    *Input
        - pol_cons: consumption policy function 
        - pol_sav: savings policy function 
        - params_sim: model parameters
    
    *Output
        - sim_c: consumption profile
        - sim_sav: savings (a') profile
        - sim_z: Income shock profile.
        - sim_m: cash-on-hand profile ((1+r)a + w*z)
        - euler_error_sim : Euler equation error at each point in the simulation
    """

    # 1. initialization

    a0, r, w, simN, simT, grid_z, grid_sav, sigma, beta, pi, shock_history = params_sim

    sim_sav = np.zeros((simT, simN))
    sim_c = np.zeros((simT, simN))
    sim_m = np.zeros((simT, simN))
    sim_z = np.zeros((simT, simN), np.float64)
    sim_z_idx = np.zeros((simT, simN), np.int32)
    edge = 0
    euler_error_sim = np.empty((simT, simN)) * np.nan

    # 2. helper functions

    # savings policy function interpolant
    polsav_interp = lambda a, z: interp(grid_sav, pol_sav[z, :], a)

    # marginal utility
    u_prime = lambda c: c**(-sigma)

    #inverse marginal utility
    u_prime_inv = lambda x: x**(-1 / sigma)

    # 3. simulate markov chain
    for t in range(simT):  #time

        for i in prange(simN):  #individual hh

            # a. states
            if t == 0:
                a_lag = a0[i]
            else:
                a_lag = sim_sav[t - 1, i]

            # b. shock realization.
            sim_z_idx[t, i] = shock_history[t, i]
            sim_z[t, i] = grid_z[sim_z_idx[t, i]]

            # c. income
            y = w * sim_z[t, i]

            # d. cash-on-hand path
            sim_m[t, i] = (1 + r) * a_lag + y

            # e. savings path
            sim_sav[t, i] = polsav_interp(a_lag, sim_z_idx[t, i])
            if sim_sav[t, i] < grid_sav[0]:
                sim_sav[t, i] = grid_sav[0]  #ensure constraint binds

            # f. consumption path

            sim_c[t, i] = sim_m[t, i] - sim_sav[t, i]

            # g. error evaluation

            check_out = False
            if sim_sav[t, i] == pol_sav[sim_z_idx[t, i], -1]:
                edge = edge + 1
                check_out = True

            constrained = False
            if sim_sav[t, i] == grid_sav[0]:
                constrained = True

            if sim_c[t, i] < sim_m[t, i] and not constrained and not check_out:

                avg_marg_c_plus = 0

                for i_zz in range(len(grid_z)):  #next period productivity

                    sav_int = polsav_interp(sim_sav[t, i], i_zz)
                    if sav_int < grid_sav[0]:
                        sav_int = grid_sav[0]  #ensure constraint binds

                    # use the clamped interpolant so the constraint binds
                    c_plus = (1 + r) * sim_sav[t, i] + w * grid_z[i_zz] - sav_int

                    #expectation of marginal utility of consumption
                    avg_marg_c_plus += pi[sim_z_idx[t, i],
                                          i_zz] * u_prime(c_plus)

                #euler error
                euler_error_sim[t, i] = np.abs(
                    1 - (u_prime_inv(beta *
                                     (1 + r) * avg_marg_c_plus) / sim_c[t, i]))

    # 4. transform euler error to log_10
    euler_error_sim = np.log10(np.abs(euler_error_sim))

    # 5. grid size evaluation
    frac_outside = edge / grid_sav.size
    if frac_outside > 0.01:
        raise Exception('Increase grid size!')

    return sim_c, sim_sav, sim_z, sim_m, euler_error_sim
Example #22
def egm_algo(pol_cons_old, params_egm):
    """
    Endogenous grid method to help solve the household problem.
    
    *Input
        - pol_cons_old: consumption policy function from previous iteration.
        - params_egm: model parameters
        
    *Output
        - pol_cons: updated consumption policy function
        - a_star: endogenous grid 
    """

    # a. initialize
    r, w, beta, pi, grid_sav, grid_z, sigma, maxit, tol = params_egm

    Nz = len(grid_z)
    Ns = len(grid_sav)
    c_tilde = np.zeros((Nz, Ns))
    a_star = np.zeros((Nz, Ns))
    pol_cons = np.zeros((Nz, Ns))

    # b. helper functions
    u_prime = lambda c: c**(-sigma)

    u_prime_inv = lambda x: x**(-1 / sigma)

    for i_z in range(Nz):

        # c. find RHS of euler equation (step 3 in EGM algo)
        avg_marg_u_plus = np.zeros(Ns)

        for i_zz in range(Nz):

            # i. future consumption
            c_plus = pol_cons_old[i_zz, :]

            # ii. future marginal utility
            marg_u_plus = u_prime(c_plus)

            # iii. average marginal utility
            weight = pi[i_z, i_zz]

            avg_marg_u_plus += weight * marg_u_plus

        ee_rhs = (1 + r) * beta * avg_marg_u_plus

        # d. find current consumption (step 4 EGM algo)
        c_tilde[i_z, :] = u_prime_inv(ee_rhs)

        # e. get the endogenous grid of the value of assets today (step 5 EGM algo)
        a_star[i_z, :] = (c_tilde[i_z, :] + grid_sav - grid_z[i_z] * w) / (1 + r)

        # f. update new consumption policy guess on savings grid
        for i_s, v_s in enumerate(grid_sav):

            if v_s <= a_star[i_z, 0]:
                # borrowing constrained, outside the grid range on the left:
                # c = (1 + r)a + w*z - a_min at the binding constraint
                pol_cons[i_z, i_s] = (1 + r) * v_s + grid_z[i_z] * w - grid_sav[0]

            elif v_s >= a_star[i_z, -1]:
                # linearly extrapolate, outside the grid range on the right
                slope = ((c_tilde[i_z, -1] - c_tilde[i_z, -2]) /
                         (a_star[i_z, -1] - a_star[i_z, -2]))
                pol_cons[i_z, i_s] = c_tilde[i_z, -1] + (v_s - a_star[i_z, -1]) * slope

            else:
                # linearly interpolate, inside the grid range
                pol_cons[i_z, i_s] = interp(a_star[i_z, :], c_tilde[i_z, :], v_s)

    return pol_cons, a_star
Example #23
def simulate_MarkovChain( 
    a0,
    z0,
    sim_ret,
    sim_w,
    simN,
    simT,
    grid_z,
    grid_a,
    pol_cons,
    pol_sav,
    pi,
    sigma,
    beta,
    a_bar,
    seed,
        ):
    
    """
    Simulates markov chain for T periods for N households. Also checks 
    the grid size by ensuring that no more than 1% of households are at
    the maximum value of the grid.
    
    *Output
        - sim_c: consumption profile
        - sim_sav: savings (a') profile
        - sim_z: income profile index, 0 for low state, 1 for high state
        - sim_m: cash-on-hand profile ((1+r)a + w*z)
        - euler_error : Euler equation error at each point in the simulation
    """
    
    # 1. initialization
    np.random.seed(seed)
    
    sim_sav = np.zeros((simT,simN))
    sim_c = np.zeros((simT,simN))
    sim_m = np.zeros((simT,simN))
    sim_z = np.zeros((simT,simN), np.int32)
    sim_z_idx = np.zeros((simT,simN), np.int32)
    euler_error = np.empty((simT,simN)) * np.nan
    edge = 0
    
    # 2. helper functions
    
    # savings policy function interpolant
    polsav_interp = lambda a, z: interp(grid_a, pol_sav[z, :], a)
    
    # marginal utility
    u_prime = lambda c : c**(-sigma)
    
    #inverse marginal utility
    u_prime_inv = lambda x : x ** (-1/sigma)
    
    # 3. simulate markov chain
    for t in range(simT):   #time

        draw = np.linspace(0, 1, simN)
        np.random.shuffle(draw)
        
        for i in prange(simN):  #individual

            # a. states 
            if t == 0:
                z_lag = np.int32(z0[i])
                a_lag = a0[i]
            else:
                z_lag = sim_z[t-1,i]
                a_lag = sim_sav[t-1,i]
                
            # b. shock realization. 0 for low state. 1 for high state.
            if draw[i] <= pi[z_lag, 1]:     #state transition condition
                sim_z[t,i] = 1
                sim_z_idx[t,i] = 0  #state row index
            else:
                sim_z[t,i] = 0
                sim_z_idx[t,i] = 1
                
            # c. income
            y = sim_w*grid_z[sim_z[t,i]]
            
            # d. cash-on-hand path
            sim_m[t, i] = (1 + sim_ret) * a_lag + y
            
            # e. consumption path
            sim_c[t,i] = sim_m[t, i] - polsav_interp(a_lag,sim_z[t,i])
            
            # f. savings path
            sim_sav[t,i] = sim_m[t, i] - sim_c[t,i]
            
            # g. error evaluation
            
            check_out=False
            if sim_sav[t,i] == pol_sav[sim_z_idx[t,i],-1]:
                edge = edge + 1
                check_out=True
                
            constrained=False
            if sim_sav[t,i] == 0:
                constrained=True
            
                
            if sim_c[t,i] < sim_m[t,i] and not constrained and not check_out:
                
                #all possible consumption choices tomorrow (2 in total)
                c_plus = np.empty(len(grid_z))
                
                for iz in range(len(grid_z)):
                    c_plus[iz] = (1 + sim_ret) * sim_sav[t,i] + sim_w*grid_z[iz] - polsav_interp(sim_sav[t,i],iz)
                
                #expectation of marginal utility of consumption
                avg_marg_c_plus = np.dot(pi[sim_z_idx[t,i],:], u_prime(c_plus))
                
                #euler error
                euler_error[t,i] = np.abs(1 - (u_prime_inv(beta*(1+sim_ret)*avg_marg_c_plus) / sim_c[t,i]))
            
            
    # 4. grid size evaluation
    frac_outside = edge/grid_a.size
    if frac_outside > 0.01:
        raise Exception('Increase grid size!')
    

    return sim_c, sim_sav, sim_z, sim_m, euler_error
Example #24
get_coeffs(A, (1, 2))

##########
# interp #
##########

### 1d interpolation

from interpolation import interp

x = np.linspace(0, 1, 100)**2  # non-uniform points
y = np.linspace(0, 1, 100)  # values

# interpolate at one point:
interp(x, y, 0.5)

# or at many points:
u = np.linspace(0, 1, 1000)  # points
interp(x, y, u)

# one can iterate at low cost since the function is jitable:
from numba import njit


@njit
def vec_eval(u):
    N = u.shape[0]
    out = np.zeros(N)
    for n in range(N):
        out[n] = interp(x, y, u[n])
    return out
Example #25
def vec_eval(u):
    N = u.shape[0]
    out = np.zeros(N)
    for n in range(N):
        out[n] = interp(x, y, u[n])
    return out
Example #26
            axs[i].set_ylabel('Ln(P)')
            j += 1

    ###################################################################################

        print('done')

    elif plotb == True:

        #plt.scatter(np.log(pr[0]),np.log(pr[1]), label = j,s =3)

        plt.rcParams.update({'font.size': 32})

        #plt.loglog(pr[0],pr[1], label = j)#,s =1)

        a = inter.interp(np.log(pr[0]), np.log(pr[1]))

        plt.rcParams.update({'font.size': 32})

        plt.scatter(a[0], a[1], s=1, c='black')

        plt.scatter(np.log(pr[0]), np.log(pr[1]), s=100, label=j)

        #plt.scatter(np.log(p2[0]),np.log(p2[1]), s =1, label = 'No scaling (a = 1)')

        plt.xlabel('ln(Avalanche size s)')

        plt.ylabel('ln(Probability of avalanche s)')

        plt.title('Probabilities of avalanche s for different L')
Example #27
File: indexing.py Project: koenvo/dimarray
def reindex_axis(self, values, axis=0, method="exact", repna=True, fill_value=np.nan, tol=TOLERANCE, use_pandas=None):
    """ reindex an array along an axis

    Input:
        - values : array-like or Axis: new axis values
        - axis   : axis number or name
        - method : "exact" (default), "nearest", "interp" 
        - repna: if False, raise an error when an axis value is not present;
                       otherwise just replace it with NaN. Default is True
        - fill_value: value to use instead of missing data
        - tol: re-index with a particular tolerance (can take longer)
        - use_pandas, optional: bool : if True, convert to pandas for re-indexing.
            Defaults to False if any special option (method, tol) is set, if
            modulo axes are present, or if pandas is not installed.

    Output:
        - DimArray

    Examples:
    ---------

    Basic reindexing: fill missing values with NaN
    >>> a = da.DimArray([1,2,3],('x0', [1,2,3]))
    >>> b = da.DimArray([3,4],('x0',[1,3]))
    >>> b.reindex_axis([1,2,3])
    dimarray: 2 non-null elements (1 null)
    dimensions: 'x0'
    0 / x0 (3): 1 to 3
    array([  3.,  nan,   4.])

    Or replace with anything else, like -9999
    >>> b.reindex_axis([1,2,3], fill_value=-9999)
    dimarray: 3 non-null elements (0 null)
    dimensions: 'x0'
    0 / x0 (3): 1 to 3
    array([    3, -9999,     4])

    "nearest" mode
    >>> b.reindex_axis([0,1,2,3], method='nearest') # out-of-bound to NaN
    dimarray: 3 non-null elements (1 null)
    dimensions: 'x0'
    0 / x0 (4): 0 to 3
    array([ nan,   3.,   3.,   4.])

    "interp" mode
    >>> b.reindex_axis([0,1,2,3], method='interp') # out-of-bound to NaN
    dimarray: 3 non-null elements (1 null)
    dimensions: 'x0'
    0 / x0 (4): 0 to 3
    array([ nan,  3. ,  3.5,  4. ])
    """
    if isinstance(values, Axis):
        newaxis = values
        values = newaxis.values
        axis = newaxis.name

    axis_id = self.axes.get_idx(axis)
    ax = self.axes[axis_id]  # Axis object
    axis_nm = ax.name

    # do nothing if axis is same or only None element
    if ax.values[0] is None or np.all(values == ax.values):
        return self

    # check whether pandas can be used for re-indexing
    if use_pandas is None:
        use_pandas = get_option("optim.use_pandas")

    # ...any special option activated?
    if (
        method != "exact" or tol is not None or ax.tol is not None or ax.modulo is not None or self.ndim > 4
    ):  # pandas defined up to 4-D
        use_pandas = False

    # ...is pandas installed?
    try:
        import pandas
    except ImportError:
        use_pandas = False

    # re-index using pandas
    if use_pandas:
        pandasobj = self.to_pandas()
        newpandas = pandasobj.reindex_axis(values, axis=axis_id, fill_value=fill_value)
        newobj = self.from_pandas(newpandas)  # use class method from_pandas
        newobj._metadata = self._metadata  # add metadata back
        newobj.axes[axis_id].name = axis_nm  # give back original name

    # indices along which to sample
    elif method == "exact":
        newobj = take_na(self, values, axis=axis, repna=repna, fill_value=fill_value)

    elif method in ("nearest", "interp"):
        from interpolation import interp

        newobj = interp(self, values, axis=axis, method=method, repna=repna)

    else:
        raise ValueError("invalid reindex_axis method: " + repr(method))

    # assert np.all((np.isnan(ax0.values) | (ax0.values == ax1.values))), "pb when reindexing"
    return newobj
Example #28
def simulate_MonteCarlo(a0, z0, sim_ret, sim_w, simN, simT, grid_z, grid_a,
                        pol_cons, pol_sav, pi, shock_matrix):
    """
    Monte Carlo simulation for T periods for N households. Also checks 
    the grid size by ensuring that no more than 1% of households are at
    the maximum value of the grid.
    
    *Output
        - sim_k : aggregate capital (total savings in previous period)
        - sim_sav: current savings (a') profile
        - sim_z: income profile 
        - sim_c: consumption profile
        - sim_m: cash-on-hand profile ((1+r)a + w*z)
    """

    # 1. initialization
    sim_sav = np.zeros(simN)
    sim_c = np.zeros(simN)
    sim_m = np.zeros(simN)
    sim_z = np.zeros(simN, np.float64)
    sim_z_idx = np.zeros(simN, np.int32)
    sim_k = np.zeros(simT)
    edge = 0

    # 2. savings policy function interpolant
    polsav_interp = lambda a, z: interp(grid_a, pol_sav[z, :], a)

    # 3. simulate markov chain
    for t in range(simT):  #time

        #calculate cross-sectional moments
        if t <= 0:
            sim_k[t] = np.mean(a0)
        else:
            sim_k[t] = np.mean(sim_sav)

        for i in prange(simN):  #individual

            # a. states
            if t == 0:
                a_lag = a0[i]
            else:
                a_lag = sim_sav[i]

            # b. shock realization
            sim_z_idx[i] = shock_matrix[t, i]
            sim_z[i] = grid_z[sim_z_idx[i]]

            # c. income
            y = sim_w[t] * sim_z[i]

            # d. cash-on-hand
            sim_m[i] = (1 + sim_ret[t]) * a_lag + y

            # e. consumption path
            sim_c[i] = sim_m[i] - polsav_interp(a_lag, sim_z_idx[i])

            if sim_c[i] == pol_cons[sim_z_idx[i], -1]:
                edge = edge + 1

            # f. savings path
            sim_sav[i] = sim_m[i] - sim_c[i]

    # 4. grid size evaluation
    frac_outside = edge / grid_a.size
    if frac_outside > 0.01:
        print('\nIncrease grid size!')

    return sim_k, sim_sav, sim_z, sim_c, sim_m
Example #29
def simulate_MonteCarlo(pol_cons, pol_sav, r, w, params_sim):
    """
    Monte Carlo simulation for T periods for N households. Also checks 
    the grid size by ensuring that no more than 1% of households are at
    the maximum value of the grid.
    
    *Output
        - sim_k : aggregate capital (total savings in previous period)
        - sim_sav: current savings (a') profile
        - sim_z: income profile index, 0 for low state, 1 for high state
        - sim_c: consumption profile
        - sim_m: cash-on-hand profile ((1+r)a + w*z)
    """

    # a. initialization
    a0, simN, simT, grid_z, grid_a, sigma, beta, pi, shock_history = params_sim

    sim_sav = np.zeros(simN)
    sim_c = np.zeros(simN)
    sim_m = np.zeros(simN)
    sim_z = np.zeros(simN)
    sim_z_idx = np.zeros(simN, np.int32)
    sim_k = np.zeros(simT)
    euler_error_sim = np.empty(simN) * np.nan
    edge = 0

    # b. helper functions

    # savings policy function interpolant
    polsav_interp = lambda a, z: interp(grid_a, pol_sav[z, :], a)

    # marginal utility
    u_prime = lambda c: c**(-sigma)

    #inverse marginal utility
    u_prime_inv = lambda x: x**(-1 / sigma)

    # c. simulate markov chain
    for t in range(simT):  #time

        #calculate cross-sectional moments for agg. capital
        if t <= 0:
            sim_k[t] = np.mean(a0)
        else:
            sim_k[t] = np.mean(sim_sav)

        for i in prange(simN):  #individual

            # i. states
            if t == 0:
                a_lag = a0[i]

            else:
                a_lag = sim_sav[i]

            # ii. shock realization.
            sim_z_idx[i] = shock_history[t, i]
            sim_z[i] = grid_z[sim_z_idx[i]]

            # iii. income
            y = w * sim_z[i]

            # iv. cash-on-hand
            sim_m[i] = (1 + r) * a_lag + y

            # v. savings path
            sim_sav[i] = polsav_interp(a_lag, sim_z_idx[i])
            if sim_sav[i] < grid_a[0]:
                sim_sav[i] = grid_a[0]  #ensure constraint binds

            # vi. consumption path

            sim_c[i] = sim_m[i] - sim_sav[i]

            # vii. error evaluation

            check_out = False
            if sim_sav[i] >= pol_sav[sim_z_idx[i], -1]:
                edge = edge + 1
                check_out = True

            constrained = False
            if sim_sav[i] == grid_a[0]:
                constrained = True

            if sim_c[i] < sim_m[i] and not constrained and not check_out:

                avg_marg_c_plus = 0

                for i_zz in range(len(grid_z)):  #next period productivity

                    sav_int = polsav_interp(sim_sav[i], i_zz)
                    if sav_int < grid_a[0]:
                        sav_int = grid_a[0]  #ensure constraint binds

                    c_plus = (1 + r) * sim_sav[i] + w * grid_z[i_zz] - sav_int

                    #expectation of marginal utility of consumption
                    avg_marg_c_plus += pi[sim_z_idx[i], i_zz] * u_prime(c_plus)

                #euler error
                euler_error_sim[i] = 1 - (
                    u_prime_inv(beta * (1 + r) * avg_marg_c_plus) / sim_c[i])

    # d. transform euler error to log_10
    euler_error_sim = np.log10(np.abs(euler_error_sim))

    # e. grid size evaluation
    frac_outside = edge / grid_a.size
    if frac_outside > 0.01:
        raise Exception('Increase grid size!')

    return sim_k, sim_sav, sim_z, sim_c, sim_m, euler_error_sim
Example #30
    def eigen_stationary_density(self):
        """
        Solve for the exact stationary density. First constructs the Nz*Ns by Nz*Ns transition matrix Q(a',z'; a,z) 
        from state (a,z) to (a',z'). Then obtains the eigenvector associated with the unique eigenvalue equal to 1. 
        This eigenvector (renormalized so that it sums to one) is the unique stationary density function.
        
        Note: About 99% of the computation time is spent on the eigenvalue calculation. For now there is no
        way to speed this function up, as numba only supports np.linalg.eig() when there is no domain change
        (e.g. real numbers to real numbers). Here there is a domain change, since some eigenvalues and eigenvector
        elements are complex.

        *Output
            * stationary_pdf: stationary density function
            * Q: transition matrix
        """

        # a. initialize transition matrix
        Q = np.zeros((self.Nz * self.Na_fine, self.Nz * self.Na_fine))

        # b. interpolate and construct transition matrix
        for i_z in range(self.Nz):  #current productivity
            for i_a, a0 in enumerate(self.grid_a_fine):

                # i. interpolate
                a_intp = interp(self.grid_a, self.pol_sav[i_z, :], a0)

                #take the grid index to the right. a_intp lies between grid_a_fine[j-1] and grid_a_fine[j].
                j = np.sum(self.grid_a_fine <= a_intp)

                #less than or equal to lowest grid value
                if a_intp <= self.grid_a_fine[0]:
                    p = 0

                #more than or equal to greatest grid value
                elif a_intp >= self.grid_a_fine[-1]:
                    p = 1
                    j = j - 1  #since right index is outside the grid make it the max index

                #inside grid
                else:
                    p = (a_intp - self.grid_a_fine[j - 1]) / (
                        self.grid_a_fine[j] - self.grid_a_fine[j - 1])

                # ii. transition matrix
                na = i_z * self.Na_fine  #minimum row index

                for i_zz in range(self.Nz):  #next productivity state
                    ma = i_zz * self.Na_fine  #minimum column index

                    Q[na + i_a, ma + j] = p * self.pi[i_z, i_zz]
                    Q[na + i_a, ma + j - 1] = (1.0 - p) * self.pi[i_z, i_zz]

        # iii. ensure that the rows sum up to 1
        assert np.allclose(Q.sum(axis=1), np.ones(
            self.Nz *
            self.Na_fine)), "Transition matrix error: Rows do not sum to 1"

        # c. get the eigenvector
        eigen_val, eigen_vec = np.linalg.eig(
            Q.T)  #transpose Q for eig function.

        # i. find column index for eigen value equal to 1
        idx = np.argmin(np.abs(eigen_val - 1.0))

        eigen_vec_stat = np.copy(eigen_vec[:, idx])

        # ii. ensure complex arguments of any complex numbers are small and convert to real numbers

        if np.max(np.abs(np.imag(eigen_vec_stat))) < 1e-6:
            eigen_vec_stat = np.real(
                eigen_vec_stat
            )  # drop the complex argument of any complex numbers.

        else:
            raise Exception(
                "Stationary eigenvector error: Maximum complex argument greater than 0.000001. Use a different distribution solution method."
            )

        # d. obtain stationary density from stationary eigenvector

        # i. reshape
        stationary_pdf = eigen_vec_stat.reshape(self.Nz, self.Na_fine)

        # ii. stationary distribution by percent
        stationary_pdf = stationary_pdf / np.sum(np.sum(stationary_pdf,
                                                        axis=0))

        return stationary_pdf, Q
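Since Q is large and sparse, the eigenvector step can often be accelerated outside numba with a sparse shift-invert solve; a sketch (Q, Nz, and Na_fine as constructed above):

import scipy.sparse as sp
import scipy.sparse.linalg as spla

Q_sparse = sp.csr_matrix(Q)
# eigenvector of Q.T for the eigenvalue closest to 1
eigval, eigvec = spla.eigs(Q_sparse.T, k=1, sigma=1.0)
stationary_pdf = np.real(eigvec[:, 0]).reshape(Nz, Na_fine)
stationary_pdf = stationary_pdf / stationary_pdf.sum()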
Example #31
def discrete_stationary_density(pol_sav, params_discrete):
    """
    Discrete approximation of the density function. Approximates the stationary joint density through forward 
    iteration and linear interpolation over a discretized state space. By default the code uses a finer grid than 
    the one in the solution but one could use the same grid here. The algorithm is from Ch.7 in Heer and Maussner.
    
    *Input
        - pol_sav: savings policy function
        - params_discrete: model parameters
        
    *Output
        - stationary_pdf: joint stationary density function
        - it: number of iterations
    """

    # a. initialize

    grid_a, grid_a_fine, Nz, pi, pi_stat, maxit, tol = params_discrete

    Na_fine = len(grid_a_fine)

    # initial guess uniform distribution
    stationary_pdf_old = np.ones((Na_fine, Nz)) / Na_fine
    stationary_pdf_old = stationary_pdf_old * np.transpose(pi_stat)
    stationary_pdf_old = stationary_pdf_old.T

    # b. fixed point iteration
    for it in range(maxit):  # iteration

        stationary_pdf = np.zeros((Nz, Na_fine))  # distribution in period t+1

        for iz in range(Nz):  # iteration over productivity types in period t

            for ia, a0 in enumerate(
                    grid_a_fine):  # iteration over assets in period t

                # i. interpolate

                a_intp = interp(grid_a, pol_sav[iz, :],
                                a0)  # linear interpolation for a'(z, a)

                # ii. obtain distribution in period t+1

                #left edge of the grid
                if a_intp <= grid_a_fine[0]:
                    for izz in range(Nz):
                        stationary_pdf[izz, 0] = stationary_pdf[
                            izz, 0] + stationary_pdf_old[iz, ia] * pi[iz, izz]

                #right edge of the grid
                elif a_intp >= grid_a_fine[-1]:
                    for izz in range(Nz):
                        stationary_pdf[izz, -1] = stationary_pdf[
                            izz, -1] + stationary_pdf_old[iz, ia] * pi[iz, izz]

                #inside the grid range, linearly interpolate
                else:

                    j = np.sum(
                        grid_a_fine <= a_intp
                    )  # a_intp lies between grid_a_fine[j-1] and grid_a_fine[j]

                    p0 = (a_intp - grid_a_fine[j - 1]) / (grid_a_fine[j] -
                                                          grid_a_fine[j - 1])

                    for izz in range(Nz):

                        stationary_pdf[izz, j] = stationary_pdf[
                            izz,
                            j] + p0 * stationary_pdf_old[iz, ia] * pi[iz, izz]
                        stationary_pdf[izz, j - 1] = stationary_pdf[
                            izz, j - 1] + (1 - p0) * stationary_pdf_old[
                                iz, ia] * pi[iz, izz]

        #stationary distribution by percent
        stationary_pdf = stationary_pdf / np.sum(np.sum(stationary_pdf,
                                                        axis=0))

        # iii. calculate supremum norm
        dist = np.abs(stationary_pdf - stationary_pdf_old).max()

        if dist < tol:
            break

        else:
            stationary_pdf_old = np.copy(stationary_pdf)

    return stationary_pdf, it
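A hypothetical call, with the parameter tuple laid out exactly as it is unpacked above:

params_discrete = (grid_a, grid_a_fine, Nz, pi, pi_stat, maxit, tol)
stationary_pdf, it = discrete_stationary_density(pol_sav, params_discrete)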
Example #32
        def __call__(self, v):

            return interp(self._x, self._y, v)
    def F(K, S_bar, tol):
        """
        First-stage profits minus the cost of generation capital,
        for a given level of storage capital and generation capital.

        Parameters
        ----------
        K :    float
                generation capital 
        S_bar:  float
                 storage capacity
        tol:    float
                  tolerance for the time-iteration fixed point

        Returns
        ----------
        Pi_hat - r_k:  float
                        net present discounted value of profits
        """

        print(K)

        # initial guess of value function is pricing function saved from
        # previous evaluation of G operator
        v_init = config.rho_global_old

        # set up grid based on value of S_bar
        config.grid = np.linspace(grid_min_s, S_bar, grid_size)

        # calculate pricing function
        rho_star = TC_star(v_init, K, S_bar, tol, config.grid)
        config.rho_global_old = rho_star

        T = int(TS_length)

        s = np.zeros(T)
        priceT = np.zeros(T)
        d = np.zeros(T)
        gen = np.zeros(T)
        integrand = np.zeros(T)

        # supply and shocks for simulation
        shock_index = np.arange(len(shock_X))
        shocks = np.random.choice(shock_index, T, p=P)

        # generate sequence of price and demand
        s[0] = S_bar / 2
        e = np.zeros(T)
        z = np.zeros(T)

        for i in range(T):
            zi = shock_X[shocks[i], 0]
            ei = shock_X[shocks[i], 1]

            priceT[i] = interp(config.grid, rho_star[shocks[i]], s[i])
            d[i] = p_inv(ei, priceT[i], D_bar)
            gen[i] = zi * K
            if i < T - 1:
                s[i + 1] = np.max(
                    [grid_min_s, (1 - delta_storage) * s[i] - d[i] + gen[i]])
                s[i + 1] = np.min([s[i + 1], S_bar])

            integrand[i] = priceT[i] * zi

        # integrate to expected price
        Eprice = np.mean(integrand)
        err = (1 / (1 - beta)) * Eprice - r_k

        print("Error for operator F for generatot value %s operator is %s" %
              (K, err))

        return err
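In this setup F(K) is presumably driven to zero by an outer root finder over generation capital; a sketch (the bracket endpoints K_low and K_high and the inner tolerance are hypothetical):

from scipy.optimize import brentq

K_star = brentq(F, K_low, K_high, args=(S_bar, tol_inner))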