Example #1
def cycle_hook(state, i):
    A = 6
    if i % 100 == 0:
        p = Parameters.policy(state)

        # a1
        plt.plot(
            Parameters.beta * State.w(state).numpy() *
            (1 - Parameters.beta**(A - 1)) / (1 - Parameters.beta**A),
            PolicyState.a1(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a1.png')
        plt.close()

        # a2
        plt.plot(
            Parameters.beta * (State.r(state) * State.K2(state)).numpy() *
            (1 - Parameters.beta**(A - 2)) / (1 - Parameters.beta**(A - 1)),
            PolicyState.a2(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a2.png')
        plt.close()

        plt.plot(
            Parameters.beta * (State.r(state) * State.K3(state)).numpy() *
            (1 - Parameters.beta**(A - 3)) / (1 - Parameters.beta**(A - 2)),
            PolicyState.a3(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a3.png')
        plt.close()

        plt.plot(
            Parameters.beta * (State.r(state) * State.K4(state)).numpy() *
            (1 - Parameters.beta**(A - 4)) / (1 - Parameters.beta**(A - 3)),
            PolicyState.a4(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a4.png')
        plt.close()

        plt.plot(
            Parameters.beta * (State.r(state) * State.K5(state)).numpy() *
            (1 - Parameters.beta**(A - 5)) / (1 - Parameters.beta**(A - 4)),
            PolicyState.a5(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a5.png')
        plt.close()
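
The five plotting blocks above differ only in the generation index, so the hook can be collapsed into a loop. A minimal refactoring sketch (plot_policies is not part of the original hook; it only assumes the State.K{i} and PolicyState.a{i} accessors used above): generation 1 saves out of wage income, generations 2-5 out of capital income, and each learned policy is plotted against the closed-form log-utility benchmark.

def plot_policies(state, p, A=6):
    for i in range(1, A):
        # income of generation i: wages for the youngest, capital income after
        if i == 1:
            income = State.w(state).numpy()
        else:
            income = (State.r(state) * getattr(State, "K" + str(i))(state)).numpy()
        # closed-form log-utility savings rule, as in the blocks above
        benchmark = (Parameters.beta * income *
                     (1 - Parameters.beta**(A - i)) / (1 - Parameters.beta**(A - i + 1)))
        plt.plot(benchmark, getattr(PolicyState, "a" + str(i))(p), 'bs')
        plt.savefig(Parameters.LOG_DIR + '/a' + str(i) + '.png')
        plt.close()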
Example #2
def equations(state, policy_state):
    E_t = State.E_t_gen(state, policy_state)
    
    loss_dict = {}

    delta_1 = Definitions.delta_1(state, policy_state)
    
    # original equation
    loss_dict['eq_1'] = E_t(lambda s, ps: PolicyState.Cy(policy_state) * PolicyState.Cy(ps) * PolicyState.lambday(policy_state) - PolicyState.Cy(ps) * State.nux(state) + PolicyState.Cy(policy_state) * beta * b_habit * State.nux(s))
    
    # original eq 1
    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - (State.nux(state) / (PolicyState.Cy(policy_state) )) + beta * b_habit * E_t(lambda s, ps: State.nux (s) / (PolicyState.Cy(ps)))    
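    # Multiplying the original eq 1 through by Cy_t and Cy_{t+1} inside E_t
    # removes the divisions: Cy_t * Cy_{t+1} * lambday_t - Cy_{t+1} * nux_t
    # + beta * b_habit * Cy_t * nux_{t+1} = 0, which is the form used above.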
        
    loss_dict['eq_2'] = PolicyState.lambday(policy_state) * PolicyState.Ry(policy_state) - PolicyState.muy(policy_state) * (delta_1 + delta_2 *(PolicyState.uy(policy_state) - 1.0)) 
    
    loss_dict['eq_3'] = PolicyState.lambday(policy_state) - beta * (1.0 + PolicyState.iy(policy_state)) * E_t(lambda s, ps: (1.0/(1.0 + PolicyState.piy(ps) )))
    
    loss_dict['eq_4'] = PolicyState.lambday(policy_state) - PolicyState.muy(policy_state) * State.Zx(state) * ((1.0 - kappa/2.0 * (PolicyState.Iy(policy_state) - 1.0 )**2.0) - kappa * (PolicyState.Iy(policy_state) - 1.0) * PolicyState.Iy(policy_state)) - beta * E_t(lambda s, ps: PolicyState.muy(ps) * State.Zx(s) * kappa * (PolicyState.Iy(ps) - 1.0) * PolicyState.Iy(ps)**2.0 )
    
    loss_dict['eq_5'] = PolicyState.muy(policy_state) - beta * E_t(lambda s, ps: PolicyState.muy(ps) * PolicyState.Ry(ps) * PolicyState.uy(ps) + PolicyState.muy(ps) * (1.0 - (delta_0 + delta_1 * (PolicyState.uy(ps) - 1.0) + delta_2/2.0 * (PolicyState.uy(ps) - 1.0)**2.0 )))   
    
    loss_dict['eq_6'] = PolicyState.h1y(policy_state) * PolicyState.whashy(policy_state)**(eps_w * (1.0 + chi)) - State.nux(state) * State.psix(state) * PolicyState.wy(policy_state)**(eps_w * (1.0 + chi)) * PolicyState.Ny(policy_state)**(1.0 + chi) - phi_w * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_w * eps_w * ( 1.0 + chi )) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_w * (1.0 + chi)) * PolicyState.whashy(ps)**(eps_w * (1.0 + chi)) * PolicyState.h1y(ps))
 
    loss_dict['eq_7'] = PolicyState.h2y(policy_state) * PolicyState.whashy(policy_state)**eps_w - PolicyState.lambday(policy_state) * PolicyState.wy(policy_state)**eps_w * PolicyState.Ny(policy_state) - phi_w * beta * (1.0 - PolicyState.piy(policy_state))**(zeta_w * (1.0 - eps_w)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_w -1.0 ) * PolicyState.whashy(ps)**eps_w * PolicyState.h2y(ps))
    
    loss_dict['eq_8'] = PolicyState.whashy(policy_state) * PolicyState.h2y(policy_state) - ( eps_w / (eps_w - 1.0)) * PolicyState.h1y(policy_state)
    
    loss_dict['eq_9'] = PolicyState.wy(policy_state) *  PolicyState.Ny(policy_state) - ((1.0 - alpha) / alpha) * PolicyState.Khaty(policy_state) *  PolicyState.Ry(policy_state)
    
    loss_dict['eq_10'] = (1.0 - alpha) * State.Ax(state) * PolicyState.mcy(policy_state) * PolicyState.Khaty(policy_state)**alpha - PolicyState.wy(policy_state) * PolicyState.Ny(policy_state)**alpha
        
    loss_dict['eq_11'] = PolicyState.x1y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.mcy(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_p * eps_p) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**eps_p * PolicyState.x1y(ps) )
    
    loss_dict['eq_12'] = PolicyState.x2y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * ( 1.0 + PolicyState.piy(policy_state))**(zeta_p *(1.0 - eps_p)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_p - 1.0) * PolicyState.x2y(ps))
    
    loss_dict['eq_13'] = ((1.0 + PolicyState.pihashy(policy_state)) * PolicyState.x2y(policy_state) - (eps_p / (eps_p - 1.0)) * (1.0 + PolicyState.piy(policy_state))* PolicyState.x1y(policy_state))

    loss_dict['eq_14'] = PolicyState.Yy(policy_state) - PolicyState.Cy(policy_state) - b_habit * State.Cx(state) - PolicyState.Iy(policy_state) * State.Ix(state) - State.Gx(state) 
    
    loss_dict['eq_15'] = PolicyState.Ky(policy_state) - State.Zx(state) * (1.0 - kappa/2.0 * (PolicyState.Iy(policy_state) - 1.0 )**2.0) * PolicyState.Iy(policy_state)*State.Ix(state) - (1.0 - (delta_0 + delta_1 * (PolicyState.uy(policy_state) - 1.0) + delta_2/2.0 * (PolicyState.uy(policy_state) - 1.0)**2.0)) * State.Kx(state)       
    
    loss_dict['eq_16'] = State.Ax(state) * PolicyState.Khaty(policy_state)**alpha * PolicyState.Ny(policy_state)**(1.0 - alpha) - F_prod - PolicyState.Yy(policy_state) * PolicyState.nupy(policy_state)
    
    loss_dict['eq_17'] = PolicyState.Khaty(policy_state) - PolicyState.uy(policy_state) * State.Kx(state)
    
    loss_dict['eq_18'] = PolicyState.nupy(policy_state) * (1.0 + PolicyState.piy(policy_state))**(- eps_p) - (1.0 - phi_p) * (1.0 + PolicyState.pihashy(policy_state))**(- eps_p) - (1.0 + State.pix(state))**(-zeta_p*eps_p) * phi_p * State.nupx(state)
    
    loss_dict['eq_19'] =  (1.0 + PolicyState.piy(policy_state))**(1.0 - eps_p) - (1.0 - phi_p) * (1.0 + PolicyState.pihashy(policy_state) )**(1.0 - eps_p) - (1.0 + State.pix(state))**(zeta_p * (1.0 - eps_p)) * phi_p
    
    loss_dict['eq_20'] = PolicyState.wy(policy_state)**(1.0 - eps_w) - (1.0 - phi_w)*PolicyState.whashy(policy_state)**(1.0 - eps_w) - (1.0 + State.pix(state))**(zeta_w * (1.0 - eps_w)) * phi_w * (1.0 + PolicyState.piy(policy_state))**(eps_w - 1.0) * State.wx(state)**(1.0 - eps_w)

    loss_dict['eq_21'] = PolicyState.iy(policy_state) - tf.math.maximum((1.0 - rho_i) * i_ss + rho_i * State.ix(state) +  (1.0 - rho_i) * (phi_pi * (PolicyState.piy(policy_state) - pi_ss) + phi_y * (tf.math.log(PolicyState.Yy(policy_state)) - tf.math.log(State.Yx(state))) + State.mx(state)), i_LB )

    return loss_dict
def nupy_norm(state, policy_state):
    return PolicyState.nupy(policy_state) * nupy_ss
def Yy_norm(state, policy_state):
    return PolicyState.Yy(policy_state) * Yy_ss
def x2y_norm(state, policy_state):
    return PolicyState.x2y(policy_state) * x2y_ss
def muy_norm(state, policy_state):
    return PolicyState.muy(policy_state) * muy_ss
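
Each *_norm helper rescales a network output that is trained to sit near 1.0 back into levels using a steady-state constant; rate-like variables (Ry, pihashy, see below) are shifted by 1.0 as well. A generic form of the pattern (a sketch; make_norm is not part of the original code):

def make_norm(getter, ss, is_rate=False):
    # Level variables scale multiplicatively; rates are recentered around 0.
    if is_rate:
        return lambda state, policy_state: getter(policy_state) * (ss + 1.0) - 1.0
    return lambda state, policy_state: getter(policy_state) * ss

# e.g. Yy_norm = make_norm(PolicyState.Yy, Yy_ss)
#      Ry_norm = make_norm(PolicyState.Ry, Ry_ss, is_rate=True)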
Example #7
def equations(state, policy_state):
    E_t = State.E_t_gen(state, policy_state)

    loss_dict = {}

    weight = 1.0
    weight2 = 1.0

    Ky = Definitions.Ky(state, policy_state)
    iy = Definitions.iy(state, policy_state)

    #original equation
    loss_dict['eq_1'] = 1000 * E_t(
        lambda s, ps: PolicyState.Cy(policy_state) * PolicyState.Cy(
            ps) * PolicyState.lambday(policy_state) - PolicyState.Cy(
                ps) * State.nux(state) + PolicyState.Cy(
                    policy_state) * beta * b_habit * State.nux(s))

    # original eq 1
    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - (State.nux(state) / (PolicyState.Cy(policy_state) )) + beta * b_habit * E_t(lambda s, ps: State.nux (s) / (PolicyState.Cy(ps)))

    # original eq 1 1/C
    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - (State.nux(state) / (PolicyState.Cy(policy_state) )) + beta * b_habit * E_t(lambda s, ps: State.nux (s) / ((weight*(PolicyState.Cy(ps)) + (1-weight)*0.025)))

    # debug: replace (PolicyState.Cy(ps)) with (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) -- E()
    #loss_dict['eq_1'] = E_t(lambda s, ps: PolicyState.Cy(policy_state) * (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) * PolicyState.lambday(policy_state) - (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) * State.nux(state) + PolicyState.Cy(policy_state) * beta * b_habit * State.nux (s))

    #loss_dict['eq_2'] = PolicyState.lambday(policy_state) - beta * (1.0 + PolicyState.iy(policy_state)) * E_t(lambda s, ps: PolicyState.lambday(ps)*(1.0/(1.0 + PolicyState.piy(ps) )))

    # debug: replace PolicyState.lambday(ps) with (weight*(PolicyState.lambday(ps)) + (1-weight)*2.6206)
    loss_dict['eq_2'] = PolicyState.lambday(policy_state) - beta * (
        1.0 + iy) * E_t(lambda s, ps: (weight * (PolicyState.lambday(ps)) +
                                       (1 - weight) * 2.6206) *
                        (1.0 / (1.0 + PolicyState.piy(ps))))

    loss_dict['eq_3'] = PolicyState.lambday(
        policy_state
    ) - PolicyState.muy(policy_state) * State.Zx(state) * (
        (1.0 - kappa / 2.0 *
         (PolicyState.Iy(policy_state) / State.Ix(state) - 1.0)**2.0) - kappa *
        (PolicyState.Iy(policy_state) / State.Ix(state) - 1.0) *
        PolicyState.Iy(policy_state) / State.Ix(state)) - beta * E_t(
            lambda s, ps: (weight * (PolicyState.muy(ps)) +
                           (1 - weight) * 2.6206) * State.Zx(s) * kappa *
            ((weight2 * (PolicyState.Iy(ps)) +
              (1 - weight2) * 0.1881) / PolicyState.Iy(policy_state) - 1.0) *
            ((weight2 * (PolicyState.Iy(ps)) +
              (1 - weight2) * 0.1881) / PolicyState.Iy(policy_state))**2.0)

    # debug: replace PolicyState.muy(ps) with (weight*(PolicyState.muy(ps)) + (1-weight)*2.6206)
    loss_dict['eq_4'] = PolicyState.muy(policy_state) - beta * E_t(
        lambda s, ps: PolicyState.lambday(ps) * PolicyState.Ry(ps) +
        (weight * (PolicyState.muy(ps)) +
         (1 - weight) * 2.6206) * (1.0 - delta_0))

    loss_dict['eq_5'] = 10 * (
        PolicyState.h1y(policy_state) * PolicyState.whashy(policy_state)**
        (eps_w * (1.0 + chi)) -
        State.nux(state) * State.psix(state) * PolicyState.wy(policy_state)**
        (eps_w * (1.0 + chi)) * PolicyState.Ny(policy_state)**(1.0 + chi) -
        phi_w * beta * (1.0 + PolicyState.piy(policy_state))**
        (-zeta_w * eps_w *
         (1.0 + chi)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**
                            (eps_w * (1.0 + chi)) * PolicyState.whashy(ps)**
                            (eps_w * (1.0 + chi)) * PolicyState.h1y(ps)))

    loss_dict['eq_6'] = 10 * (
        PolicyState.h2y(policy_state) * PolicyState.whashy(policy_state)**eps_w
        - PolicyState.lambday(policy_state) * PolicyState.wy(policy_state)**
        eps_w * PolicyState.Ny(policy_state) - phi_w * beta *
        (1.0 - PolicyState.piy(policy_state))**(zeta_w * (1.0 - eps_w)) *
        E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_w - 1.0) *
            PolicyState.whashy(ps)**eps_w * PolicyState.h2y(ps)))

    loss_dict['eq_7'] = PolicyState.whashy(policy_state) * PolicyState.h2y(
        policy_state) - (eps_w / (eps_w - 1.0)) * PolicyState.h1y(policy_state)

    loss_dict['eq_8'] = PolicyState.wy(policy_state) * PolicyState.Ny(
        policy_state) - (
            (1.0 - alpha) / alpha) * Ky * PolicyState.Ry(policy_state)

    loss_dict['eq_9'] = (1.0 - alpha) * State.Ax(state) * PolicyState.mcy(
        policy_state) * Ky**alpha - PolicyState.wy(
            policy_state) * PolicyState.Ny(policy_state)**alpha

    loss_dict['eq_10'] = 100 * (
        PolicyState.x1y(policy_state) - PolicyState.lambday(policy_state) *
        PolicyState.mcy(policy_state) * PolicyState.Yy(policy_state) -
        phi_p * beta *
        (1.0 + PolicyState.piy(policy_state))**(-zeta_p * eps_p) *
        E_t(lambda s, ps:
            (1.0 + PolicyState.piy(ps))**eps_p * PolicyState.x1y(ps)))

    loss_dict['eq_11'] = 10 * (
        PolicyState.x2y(policy_state) - PolicyState.lambday(policy_state) *
        PolicyState.Yy(policy_state) - phi_p * beta *
        (1.0 + PolicyState.piy(policy_state))**
        (zeta_p *
         (1.0 - eps_p)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**
                              (eps_p - 1.0) * PolicyState.x2y(ps)))

    loss_dict['eq_12'] = 100 * (
        (1.0 + PolicyState.pihashy(policy_state)) *
        PolicyState.x2y(policy_state) - (eps_p / (eps_p - 1.0)) *
        (1.0 + PolicyState.piy(policy_state)) * PolicyState.x1y(policy_state))

    loss_dict['eq_13'] = PolicyState.Yy(policy_state) - PolicyState.Cy(
        policy_state) - b_habit * State.Cx(state) - PolicyState.Iy(
            policy_state) - State.Gx(state)

    #original Eq.
    #loss_dict['eq_14'] = PolicyState.Ky(policy_state) - State.Zx(state) * (1.0 - kappa/2.0 * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0 )**2.0) * PolicyState.Iy(policy_state) - (1.0 - delta_0) * State.Kx(state)

    loss_dict['eq_14'] = State.Ax(state) * Ky**alpha * PolicyState.Ny(
        policy_state)**(1.0 - alpha) - F_prod - PolicyState.Yy(
            policy_state) * PolicyState.nupy(policy_state)

    loss_dict['eq_15'] = PolicyState.nupy(policy_state) * (
        1.0 + PolicyState.piy(policy_state))**(-eps_p) - (1.0 - phi_p) * (
            1.0 + PolicyState.pihashy(policy_state))**(-eps_p) - (
                1.0 + State.pix(state))**(-zeta_p *
                                          eps_p) * phi_p * State.nupx(state)

    loss_dict['eq_16'] = (1.0 + PolicyState.piy(policy_state))**(
        1.0 -
        eps_p) - (1.0 - phi_p) * (1.0 + PolicyState.pihashy(policy_state))**(
            1.0 - eps_p) - (1.0 + State.pix(state))**(zeta_p *
                                                      (1.0 - eps_p)) * phi_p

    loss_dict['eq_17'] = PolicyState.wy(policy_state)**(1.0 - eps_w) - (
        1.0 - phi_w) * PolicyState.whashy(policy_state)**(1.0 - eps_w) - (
            1.0 + State.pix(state))**(zeta_w * (1.0 - eps_w)) * phi_w * (
                1.0 + PolicyState.piy(policy_state))**(
                    eps_w - 1.0) * State.wx(state)**(1.0 - eps_w)

    #original Eq., substituted
    #loss_dict['eq_18'] = PolicyState.iy(policy_state) - tf.math.maximum((1.0 - rho_i) * i_ss + rho_i * State.ix(state) +  (1.0 - rho_i) * (phi_pi * (PolicyState.piy(policy_state) - pi_ss) + phi_y * (tf.math.log(PolicyState.Yy(policy_state)) - tf.math.log(State.Yx(state))) + State.mx(state)), i_LB )

    return loss_dict
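
The "debug: replace X with weight*X + (1-weight)*X_ss" substitutions above all apply the same convex combination, damping the next-period terms inside E_t toward hard-coded steady-state values. A minimal helper capturing that pattern (a sketch; blend is not part of the original code, and the steady-state constants are the ones written inline above):

def blend(value, steady_state, weight=1.0):
    # Convex combination used in the debug variants of the equations above:
    # weight = 1.0 recovers the original equation, weight < 1.0 pulls the
    # term toward its steady-state value.
    return weight * value + (1.0 - weight) * steady_state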


def r(state, policy_state=None):
    # gross return on capital: marginal product plus undepreciated stock
    # (the def header was missing in the listing; it is reconstructed from the
    # symmetric w/Y definitions below and the call r(s, ps) further down)
    return Parameters.alpha * State.TFP(state) * Definitions.K_total(
        state, None)**(Parameters.alpha - 1) + (1 - State.depr(state))

def w(state, policy_state=None):
    return (1 - Parameters.alpha) * State.TFP(state) * Definitions.K_total(
        state, None)**Parameters.alpha


def Y(state, policy_state=None):
    return State.TFP(state) * Definitions.K_total(
        state, None)**Parameters.alpha + (
            1 - State.depr(state)) * Definitions.K_total(state, None)


# consumption definitions
for i in range(1, 7):
    # only youngest generation has labour income
    if i == 1:
        setattr(sys.modules[__name__], "c" + str(i),
                lambda s, ps: w(s, ps) - PolicyState.a1(ps))
    if i > 1 and i < 6:
        setattr(sys.modules[__name__], "c" + str(i),
                (lambda ind: lambda s, ps: r(s, ps) * getattr(
                    State, "K" + str(ind))
                 (s) - getattr(PolicyState, "a" + str(ind))(ps))(i))
    if i == 6:
        # consume everything
        setattr(sys.modules[__name__], "c" + str(i),
                lambda s, ps: r(s, ps) * State.K6(s))
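
The (lambda ind: ...)(i) wrapper in the middle branch is what makes each generated function see its own index: Python closures bind the loop variable by reference, so without the wrapper every c2..c5 would use the final value of i (here 6). A standalone illustration of the pitfall, independent of the model code:

# Late binding vs. immediate binding of a loop variable (sketch):
buggy = [lambda: i for i in range(3)]                     # each returns 2
fixed = [(lambda ind: lambda: ind)(i) for i in range(3)]  # returns 0, 1, 2
print([f() for f in buggy], [f() for f in fixed])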
def whashy_norm(state, policy_state):
    return PolicyState.whashy(policy_state) * whashy_ss
def wy_norm(state, policy_state):
    return PolicyState.wy(policy_state) * wy_ss
def Iy_norm(state, policy_state):
    return PolicyState.Iy(policy_state) * Iy_ss
def Ry_norm(state, policy_state):
    return PolicyState.Ry(policy_state) * (Ry_ss + 1.0) - 1.0
def pihashy_norm(state, policy_state):
    return PolicyState.pihashy(policy_state) * (pihashy_ss + 1.0) - 1.0
def Cy_norm(state, policy_state):
    return PolicyState.Cy(policy_state) * Cy_ss
def equations(state, policy_state):
    E_t = State.E_t_gen(state, policy_state)
    
    loss_dict = {}

    delta_1 = 1.0/beta - (1.0 - delta_0)
    #delta_1 = Definitions.delta_1(state, policy_state)
    
    weight = 0.6

    # original equation
    #loss_dict['eq_1'] = tf.exp(E_t(lambda s, ps: PolicyState.Cy(policy_state) * PolicyState.Cy(ps) * PolicyState.lambday(policy_state) - PolicyState.Cy(ps) * State.nux(state) + PolicyState.Cy(policy_state) * beta * b_habit * State.nux(s))) - 1.0

    # original eq 1
    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - (State.nux(state) / (PolicyState.Cy(policy_state))) + beta * b_habit * E_t(lambda s, ps: State.nux(s) / (PolicyState.Cy(ps)))

    # original eq 1 1/C
    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - (State.nux(state) / (PolicyState.Cy(policy_state))) + beta * b_habit * E_t(lambda s, ps: State.nux(s) / ((weight*(PolicyState.Cy(ps)) + (1-weight)*0.025)))

    # debug: replace (PolicyState.Cy(ps)) with (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) -- E()
    loss_dict['eq_1'] = E_t(lambda s, ps: PolicyState.Cy(policy_state) * (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) * PolicyState.lambday(policy_state) - (weight*(PolicyState.Cy(ps)) + (1-weight)*0.025) * State.nux(state) + PolicyState.Cy(policy_state) * beta * b_habit * State.nux(s))
    
    loss_dict['eq_2'] = PolicyState.lambday(policy_state) * PolicyState.Ry(policy_state) - PolicyState.muy(policy_state) * (delta_1 + delta_2 *(PolicyState.uy(policy_state) - 1.0)) 
    
    #loss_dict['eq_3'] = PolicyState.lambday(policy_state) - beta * (1.0 + PolicyState.iy(policy_state)) * E_t(lambda s, ps: PolicyState.lambday(ps)*(1.0/(1.0 + PolicyState.piy(ps) )))        
    
    # debug: replace PolicyState.piy(ps) with (weight*(PolicyState.piy(ps)) + (1-weight)*0.005)        
    # debug: replace PolicyState.lambday(ps) with (weight*(PolicyState.lambday(ps)) + (1-weight)*2.6206) 
    loss_dict['eq_3'] = PolicyState.lambday(policy_state) - beta * (1.0 + PolicyState.iy(policy_state)) * E_t(lambda s, ps: PolicyState.lambday(ps)*(1.0/(1.0 + (weight*(PolicyState.piy(ps)) + (1-weight)*0.005))))
    
    #loss_dict['eq_4'] = PolicyState.lambday(policy_state) - PolicyState.muy(policy_state) * State.Zx(state) * ((1.0 - kappa/2.0 * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0 )**2.0) - kappa * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0) * PolicyState.Iy(policy_state)/State.Ix(state)) - beta * E_t(lambda s, ps: PolicyState.muy(ps) * State.Zx(s) * kappa * (PolicyState.Iy(ps)/PolicyState.Iy(policy_state) - 1.0) * (PolicyState.Iy(ps)/PolicyState.Iy(policy_state))**2.0 )
    
    # debug: replace PolicyState.Iy(ps) with (weight*(PolicyState.Iy(ps)) + (1-weight)*0.1881)  
    # debug: replace PolicyState.muy(ps) with (weight*(PolicyState.muy(ps)) + (1-weight)*2.6206) 
    loss_dict['eq_4'] = PolicyState.lambday(policy_state) - PolicyState.muy(policy_state) * State.Zx(state) * ((1.0 - kappa/2.0 * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0 )**2.0) - kappa * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0) * PolicyState.Iy(policy_state)/State.Ix(state)) - beta * E_t(lambda s, ps: (weight*(PolicyState.muy(ps)) + (1-weight)*2.6206) * State.Zx(s) * kappa * ((weight*(PolicyState.Iy(ps)) + (1-weight)*0.1881)/PolicyState.Iy(policy_state) - 1.0) * ((weight*(PolicyState.Iy(ps)) + (1-weight)*0.1881)/PolicyState.Iy(policy_state))**2.0 )
    
    #loss_dict['eq_5'] = PolicyState.muy(policy_state) - beta * E_t(lambda s, ps: PolicyState.lambday(ps) * PolicyState.Ry(ps) * PolicyState.uy(ps) + PolicyState.muy(ps) * (1.0 - (delta_0 + delta_1 * (PolicyState.uy(ps) - 1.0) + delta_2/2.0 * (PolicyState.uy(ps) - 1.0)**2.0 )))   

    # debug: replace PolicyState.muy(ps) with (weight*(PolicyState.muy(ps)) + (1-weight)*2.6206) 
    # debug: replace PolicyState.uy(ps) with (weight*(PolicyState.uy(ps)) + (1-weight)*1.0) 
    # debug: replace PolicyState.lambday(ps) with (weight*(PolicyState.lambday(ps)) + (1-weight)*2.6206)     
    loss_dict['eq_5'] = PolicyState.muy(policy_state) - beta * E_t(lambda s, ps: (weight*(PolicyState.lambday(ps)) + (1-weight)*2.6206) * PolicyState.Ry(ps) * PolicyState.uy(ps) + (weight*(PolicyState.muy(ps)) + (1-weight)*2.6206) * (1.0 - (delta_0 + delta_1 * ((weight*(PolicyState.uy(ps)) + (1-weight)*1.0) - 1.0) + delta_2/2.0 * ((weight*(PolicyState.uy(ps)) + (1-weight)*1.0) - 1.0)**2.0 )))  
    
    #loss_dict['eq_6'] = PolicyState.h1y(policy_state) * PolicyState.whashy(policy_state)**(eps_w * (1.0 + chi)) - State.nux(state) * State.psix(state) * PolicyState.wy(policy_state)**(eps_w * (1.0 + chi)) * PolicyState.Ny(policy_state)**(1.0 + chi) - phi_w * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_w * eps_w * ( 1.0 + chi )) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_w * (1.0 + chi)) * PolicyState.whashy(ps)**(eps_w * (1.0 + chi)) * PolicyState.h1y(ps))
 
    # debug: replace PolicyState.whashy(ps) with (weight*(PolicyState.whashy(ps)) + (1-weight)*1.969) 
    # debug: replace PolicyState.h1y(ps) with (weight*(PolicyState.h1y(ps)) + (1-weight)*1.6711)     
    # debug: replace PolicyState.piy(ps) with (weight*(PolicyState.piy(ps)) + (1-weight)*0.005)         
    loss_dict['eq_6'] = PolicyState.h1y(policy_state) * PolicyState.whashy(policy_state)**(eps_w * (1.0 + chi)) - State.nux(state) * State.psix(state) * PolicyState.wy(policy_state)**(eps_w * (1.0 + chi)) * PolicyState.Ny(policy_state)**(1.0 + chi) - phi_w * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_w * eps_w * ( 1.0 + chi )) * E_t(lambda s, ps: (1.0 + (weight*(PolicyState.piy(ps)) + (1-weight)*0.005))**(eps_w * (1.0 + chi)) * (weight*(PolicyState.whashy(ps)) + (1-weight)*1.969)**(eps_w * (1.0 + chi)) * (weight*(PolicyState.h1y(ps)) + (1-weight)*1.6711))
 
    #loss_dict['eq_7'] = PolicyState.h2y(policy_state) * PolicyState.whashy(policy_state)**eps_w - PolicyState.lambday(policy_state) * PolicyState.wy(policy_state)**eps_w * PolicyState.Ny(policy_state) - phi_w * beta * (1.0 - PolicyState.piy(policy_state))**(zeta_w * (1.0 - eps_w)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_w -1.0 ) * PolicyState.whashy(ps)**eps_w * PolicyState.h2y(ps))
    
    # debug: replace PolicyState.whashy(ps) with (weight*(PolicyState.whashy(ps)) + (1-weight)*1.969) 
    # debug: replace PolicyState.h2y(ps) with (weight*(PolicyState.h2y(ps)) + (1-weight)*2.8441)         
    # debug: replace PolicyState.piy(ps) with (weight*(PolicyState.piy(ps)) + (1-weight)*0.005)             
    loss_dict['eq_7'] = PolicyState.h2y(policy_state) * PolicyState.whashy(policy_state)**eps_w - PolicyState.lambday(policy_state) * PolicyState.wy(policy_state)**eps_w * PolicyState.Ny(policy_state) - phi_w * beta * (1.0 - PolicyState.piy(policy_state))**(zeta_w * (1.0 - eps_w)) * E_t(lambda s, ps: (1.0 + (weight*(PolicyState.piy(ps)) + (1-weight)*0.005))**(eps_w -1.0 ) * (weight*(PolicyState.whashy(ps)) + (1-weight)*1.969)**eps_w * (weight*(PolicyState.h2y(ps)) + (1-weight)*2.8441))
    
    loss_dict['eq_8'] = PolicyState.whashy(policy_state) * PolicyState.h2y(policy_state) - ( eps_w / (eps_w - 1.0)) * PolicyState.h1y(policy_state)
    
    loss_dict['eq_9'] = PolicyState.wy(policy_state) *  PolicyState.Ny(policy_state) - ((1.0 - alpha) / alpha) * PolicyState.Khaty(policy_state) *  PolicyState.Ry(policy_state)
    
    loss_dict['eq_10'] = (1.0 - alpha) * State.Ax(state) * PolicyState.mcy(policy_state) * PolicyState.Khaty(policy_state)**alpha - PolicyState.wy(policy_state) * PolicyState.Ny(policy_state)**alpha
        
    #loss_dict['eq_11'] = PolicyState.x1y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.mcy(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_p * eps_p) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**eps_p * PolicyState.x1y(ps) )

    # debug: replace PolicyState.x1y(ps) with (weight*(PolicyState.x1y(ps)) + (1-weight)*5.3470) 
    # debug: replace PolicyState.piy(ps) with (weight*(PolicyState.piy(ps)) + (1-weight)*0.005)                 
    loss_dict['eq_11'] = PolicyState.x1y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.mcy(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * (1.0 + PolicyState.piy(policy_state))**(-zeta_p * eps_p) * E_t(lambda s, ps: (1.0 + (weight*(PolicyState.piy(ps)) + (1-weight)*0.005))**eps_p * (weight*(PolicyState.x1y(ps)) + (1-weight)*5.3470))

    
    #loss_dict['eq_12'] = PolicyState.x2y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * ( 1.0 + PolicyState.piy(policy_state))**(zeta_p *(1.0 - eps_p)) * E_t(lambda s, ps: (1.0 + PolicyState.piy(ps))**(eps_p - 1.0) * PolicyState.x2y(ps))

    # debug: replace PolicyState.x2y(ps) with (weight*(PolicyState.x2y(ps)) + (1-weight)*6.6009)  
    # debug: replace PolicyState.piy(ps) with (weight*(PolicyState.piy(ps)) + (1-weight)*0.005)                     
    loss_dict['eq_12'] = PolicyState.x2y(policy_state) - PolicyState.lambday(policy_state) * PolicyState.Yy(policy_state) - phi_p * beta * ( 1.0 + PolicyState.piy(policy_state))**(zeta_p *(1.0 - eps_p)) * E_t(lambda s, ps: (1.0 + (weight*(PolicyState.piy(ps)) + (1-weight)*0.005))**(eps_p - 1.0) * (weight*(PolicyState.x2y(ps)) + (1-weight)*6.6009))    
    
    loss_dict['eq_13'] = (1.0 + PolicyState.pihashy(policy_state)) * PolicyState.x2y(policy_state) - (eps_p / (eps_p - 1.0)) * (1.0 + PolicyState.piy(policy_state))* PolicyState.x1y(policy_state)

    loss_dict['eq_14'] = PolicyState.Yy(policy_state) - PolicyState.Cy(policy_state) - b_habit * State.Cx(state) - PolicyState.Iy(policy_state) - State.Gx(state) 
    
    loss_dict['eq_15'] = PolicyState.Ky(policy_state) - State.Zx(state) * (1.0 - kappa/2.0 * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0 )**2.0) * PolicyState.Iy(policy_state) - (1.0 - (delta_0 + delta_1 * (PolicyState.uy(policy_state) - 1.0) + delta_2/2.0 * (PolicyState.uy(policy_state) - 1.0)**2.0)) * State.Kx(state)       
    
    #loss_dict['eq_15'] = tf.square(PolicyState.Ky(policy_state) - State.Zx(state) * (1.0 - kappa/2.0 * (PolicyState.Iy(policy_state)/State.Ix(state) - 1.0 )**2.0) * PolicyState.Iy(policy_state) - (1.0 - (delta_0 + delta_1 * (PolicyState.uy(policy_state) - 1.0) + delta_2/2.0 * (PolicyState.uy(policy_state) - 1.0)**2.0)) * State.Kx(state))

    loss_dict['eq_16'] = State.Ax(state) * PolicyState.Khaty(policy_state)**alpha * PolicyState.Ny(policy_state)**(1.0 - alpha) - F_prod - PolicyState.Yy(policy_state) * PolicyState.nupy(policy_state)
    
    loss_dict['eq_17'] = PolicyState.Khaty(policy_state) - PolicyState.uy(policy_state) * State.Kx(state)
    
    loss_dict['eq_18'] = PolicyState.nupy(policy_state) * (1.0 + PolicyState.piy(policy_state))**(- eps_p) - (1.0 - phi_p) * (1.0 + PolicyState.pihashy(policy_state))**(- eps_p) - (1.0 + State.pix(state))**(-zeta_p*eps_p) * phi_p * State.nupx(state)
    
    loss_dict['eq_19'] =  (1.0 + PolicyState.piy(policy_state))**(1.0 - eps_p) - (1.0 - phi_p) * (1.0 + PolicyState.pihashy(policy_state) )**(1.0 - eps_p) - (1.0 + State.pix(state))**(zeta_p * (1.0 - eps_p)) * phi_p
    
    loss_dict['eq_20'] = PolicyState.wy(policy_state)**(1.0 - eps_w) - (1.0 - phi_w)*PolicyState.whashy(policy_state)**(1.0 - eps_w) - (1.0 + State.pix(state))**(zeta_w * (1.0 - eps_w)) * phi_w * (1.0 + PolicyState.piy(policy_state))**(eps_w - 1.0) * State.wx(state)**(1.0 - eps_w)

    loss_dict['eq_21'] = PolicyState.iy(policy_state) - tf.math.maximum((1.0 - rho_i) * i_ss + rho_i * State.ix(state) +  (1.0 - rho_i) * (phi_pi * (PolicyState.piy(policy_state) - pi_ss) + phi_y * (tf.math.log(PolicyState.Yy(policy_state)) - tf.math.log(State.Yx(state))) + State.mx(state)), i_LB )

    return loss_dict
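
Written out, eq_21 (identical in the earlier example's equations) is a Taylor rule with interest-rate smoothing, an output-growth term, a monetary shock, and a lower bound; reading the x-suffixed states (ix, Yx, mx) as last period's values, consistent with their role elsewhere in these examples:

$$
i_t = \max\Big\{ (1-\rho_i)\, i^{ss} + \rho_i\, i_{t-1}
      + (1-\rho_i)\big[\phi_\pi (\pi_t - \pi^{ss})
      + \phi_y (\ln Y_t - \ln Y_{t-1}) + m_t\big],\; i^{LB} \Big\}
$$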
Example #16
def equations(state, policy_state):
    
    E_t = State.E_t_gen(state, policy_state)
    
    loss_dict = {}

    delta_1 = Definitions.delta_1(state, policy_state)

    #loss_dict['eq_1'] = PolicyState.lambday(policy_state) - 2.62
    loss_dict['eq_1'] = PolicyState.lambday(policy_state) - 1.0

    #loss_dict['eq_2'] = PolicyState.muy(policy_state) - 2.62 
    loss_dict['eq_2'] = PolicyState.muy(policy_state) - 1.0 
    
    #loss_dict['eq_3'] = PolicyState.Cy(policy_state) - 0.0247 
    loss_dict['eq_3'] = PolicyState.Cy(policy_state) - 1.0 

    #loss_dict['eq_4'] = PolicyState.piy(policy_state) - 0.005 
    loss_dict['eq_4'] = PolicyState.piy(policy_state) - 1.0 


    #loss_dict['eq_5'] = PolicyState.pihashy(policy_state) - 0.0176 
    loss_dict['eq_5'] = PolicyState.pihashy(policy_state) - 1.0 


    #loss_dict['eq_6'] = PolicyState.Ry(policy_state) - 0.03
    loss_dict['eq_6'] = PolicyState.Ry(policy_state) - 1.0


    #loss_dict['eq_8'] = PolicyState.uy(policy_state) - 1.0 

    #loss_dict['eq_7'] = PolicyState.Iy(policy_state) - 0.185 
    loss_dict['eq_7'] = PolicyState.Iy(policy_state) - 1.0 

    #loss_dict['eq_8'] = PolicyState.wy(policy_state) - 1.1913 
    loss_dict['eq_8'] = PolicyState.wy(policy_state) - 1.0 

    #loss_dict['eq_9'] = PolicyState.whashy(policy_state) - 1.1969 
    loss_dict['eq_9'] = PolicyState.whashy(policy_state) - 1.0 

    #loss_dict['eq_10'] = PolicyState.h1y(policy_state) - 1.6711 
    loss_dict['eq_10'] = PolicyState.h1y(policy_state) - 1.0 

    #loss_dict['eq_11'] = PolicyState.h2y(policy_state) - 2.8441
    loss_dict['eq_11'] = PolicyState.h2y(policy_state) - 1.0

    #loss_dict['eq_12'] = PolicyState.Ny(policy_state) - 0.4394 
    loss_dict['eq_12'] = PolicyState.Ny(policy_state) - 1.0 

    #loss_dict['eq_15'] = PolicyState.Khaty(policy_state) - 7.47

    #loss_dict['eq_16'] = PolicyState.Ky(policy_state) - 7.47 

    #loss_dict['eq_13'] = PolicyState.mcy(policy_state) - 0.727 
    loss_dict['eq_13'] = PolicyState.mcy(policy_state) - 1.0 

    #loss_dict['eq_14'] = PolicyState.x1y(policy_state) - 5.34 
    loss_dict['eq_14'] = PolicyState.x1y(policy_state) - 1.0 

    #loss_dict['eq_15'] = PolicyState.x2y(policy_state) - 6.60 
    loss_dict['eq_15'] = PolicyState.x2y(policy_state) - 1.0 

    #loss_dict['eq_16'] = PolicyState.Yy(policy_state) - 0.747 
    loss_dict['eq_16'] = PolicyState.Yy(policy_state) - 1.0 

    #loss_dict['eq_17'] = PolicyState.nupy(policy_state) - 1.0006 
    loss_dict['eq_17'] = PolicyState.nupy(policy_state) - 1.0 
    
    return loss_dict
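
Example #16 pins every policy output to 1.0, i.e. its normalized steady-state value, which serves as a sanity check that the network and loss plumbing work before the full equilibrium conditions are switched on. A minimal sketch of how such a loss_dict might be reduced to a scalar training loss (an assumed mean-squared aggregation, not necessarily what the surrounding framework does):

import tensorflow as tf

def total_loss(loss_dict):
    # Sum of mean-squared residuals over all equations.
    return tf.add_n([tf.reduce_mean(tf.square(v)) for v in loss_dict.values()])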
def h2y_norm(state, policy_state):
    return PolicyState.h2y(policy_state) * h2y_ss
def K_total_next(state, policy_state):
    return PolicyState.a1(policy_state) + PolicyState.a2(
        policy_state) + PolicyState.a3(policy_state) + PolicyState.a4(
            policy_state) + PolicyState.a5(policy_state)
def Ny_norm(state, policy_state):
    return PolicyState.Ny(policy_state) * Ny_ss
def mcy_norm(state, policy_state):
    return PolicyState.mcy(policy_state) * mcy_ss
def lambday_norm(state, policy_state):
    return PolicyState.lambday(policy_state) * lambday_ss