def __init__(self, last=None, first=None, start=None, pay_rate=None, social=None):
    if (last is None) and (first is None) and (start is None) and \
            (pay_rate is None) and (social is None):
        self.last = input("Input last name:").capitalize()
        self.first = input("Input first name:").capitalize()
        self.start = input("Input start year:")
        self.pay_rate = float(input("Input pay_rate:"))
        self.social = SS()
    else:
        self.last = last.capitalize()
        self.first = first.capitalize()
        self.start = start
        self.pay_rate = pay_rate
        self.social = SS(social)
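# A minimal usage sketch for the dual-mode constructor above, assuming it
# belongs to a hypothetical `Employee` class and that `SS` wraps a Social
# Security number. With no arguments the constructor prompts interactively;
# with arguments it builds the record directly:
# emp = Employee(last="smith", first="jane", start="2020",
#                pay_rate=25.0, social="123-45-6789")
# print(emp.last, emp.first, emp.pay_rate)   # -> Smith Jane 25.0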
def set_ss(self, ss, source="self"):
    if len(ss) == 1:
        self.ss = len(self) * ss
    else:
        self.ss = ss
    # Infer the Martini backbone secondary structure types
    self.ssclass, self.sstypes = SS.ssClassification(self.ss, source)
def get_TPI(params, bvec):
    start_time = time.time()
    (T, beta, sigma, nvec, L, A, alpha, delta, b_ss, K_ss, maxiter_TPI,
        mindist_TPI, xi) = params
    abs2 = 1
    tpi_iter = 0
    L = ss.get_L(nvec)
    K1 = ss.get_K(bvec)[0]
    Kpath_old = np.zeros(T + 1)
    Kpath_old[:-1] = getPath(K1, K_ss, T)
    Kpath_old[-1] = K_ss
    while abs2 > mindist_TPI and tpi_iter < maxiter_TPI:
        tpi_iter += 1
        w_path = ss.get_w((A, alpha), Kpath_old, L)
        r_path = ss.get_r((A, alpha, delta), Kpath_old, L)
        b = np.zeros([2, T + 1])
        # print(bvec)
        b[:, 0] = bvec
        b32 = opt.root(b3_error, b[1, 0],
                       args=(nvec[1:], beta, sigma, b[0, 0], w_path[:2],
                             r_path[:2]))
        b[1, 1] = b32.x
        for t in range(T - 1):
            bvec_guess = np.array([b[0, t], b[1, t + 1]])
            bt = opt.root(zero_func, bvec_guess,
                          (nvec, beta, sigma, w_path[t:t + 3],
                           r_path[t + 1:t + 3]))
            bgrid = np.eye(2, dtype=bool)
            b[:, t + 1:t + 3] = bgrid * bt.x + b[:, t + 1:t + 3]
        # Calculate the implied capital stock from conjecture and the error
        Kpath_new = b.sum(axis=0)
        abs2 = ((Kpath_old[:] - Kpath_new[:]) ** 2).sum()
        # Update guess
        Kpath_old = xi * Kpath_new + (1 - xi) * Kpath_old
        print('iteration:', tpi_iter, ' squared distance: ', abs2)
    tpi_time = time.time() - start_time
    return Kpath_old, r_path, w_path
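# A minimal, self-contained sketch of the damped fixed-point update used in
# get_TPI above: guess a path, compute the implied path, and blend the two
# with weight xi until the squared distance falls below a tolerance. The
# quadratic map used here is illustrative only, not the model's capital map.
import numpy as np

def damped_iteration(f, x_old, xi=0.2, tol=1e-9, maxiter=500):
    """Solve x = f(x) by damped successive approximation."""
    for it in range(maxiter):
        x_new = f(x_old)
        dist = ((x_old - x_new) ** 2).sum()
        if dist < tol:
            break
        x_old = xi * x_new + (1 - xi) * x_old
    return x_old, it, dist

x_star, niter, dist = damped_iteration(lambda x: 0.5 * (x + 2.0 / x),
                                       x_old=np.array([1.0]))
print(x_star, niter, dist)  # converges to sqrt(2)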
def model_moments(init_vals, args):
    '''
    Compute model labor-supply moments: steady-state labor supply as a
    share of the time endowment l_tilde.
    '''
    l_tilde = args[3]
    ss_output = SS.get_SS(init_vals, args, graphs=False)
    n_ss = ss_output["n_ss"]
    return n_ss / l_tilde
def send_mail():
    uuid = request.args.get('uuid')
    ugid = request.args.get('ugid')
    people = db.get_people(ugid, uuid)
    if email_check(people):
        status = SS.gen_people(people)
        if status == 200:
            db.group_sent(ugid)
            return (jsonify({'status': status}))
    abort(400)
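# A minimal sketch of exercising the endpoint above with Flask's test client,
# assuming `app` is the Flask application and the handler is registered at a
# hypothetical path '/send_mail'. Both query parameters are required; a
# failed email check or a non-200 generation status yields a 400.
# with app.test_client() as client:
#     resp = client.get('/send_mail?uuid=abc123&ugid=grp42')
#     print(resp.status_code, resp.get_json())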
def b3_error(b3, *args):
    nvec, beta, sigma, b2, w_path, r_path = args
    n2, n3 = nvec
    w1, w2 = w_path
    r1, r2 = r_path
    c2 = (1 + r1) * b2 + w1 * n2 - b3
    c3 = (1 + r2) * b3 + w2 * n3
    muc2, muc3 = ss.get_MUc(np.array([c2, c3]), sigma)
    error = muc2 - beta * (1 + r2) * muc3
    return error
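# A self-contained check of the Euler condition in b3_error above, with the
# CRRA marginal utility written out (MU(c) = c**(-sigma)) instead of calling
# ss.get_MUc. The parameter values are illustrative, not calibrated.
import numpy as np
import scipy.optimize as opt

def b3_error_standalone(b3, n2, n3, beta, sigma, b2, w1, w2, r1, r2):
    c2 = (1 + r1) * b2 + w1 * n2 - b3
    c3 = (1 + r2) * b3 + w2 * n3
    return c2 ** (-sigma) - beta * (1 + r2) * c3 ** (-sigma)

sol = opt.root(b3_error_standalone, 0.1,
               args=(1.0, 0.2, 0.96, 2.5, 0.5, 1.2, 1.2, 0.05, 0.05))
print(sol.x, sol.success)  # optimal b3 and convergence flag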
def minstat(chi_guesses, *args):
    '''
    --------------------------------------------------------------------
    This function generates the weighted sum of squared differences
    between the model and data moments.

    INPUTS:
    chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n
                  stacked together
    arg         = length 6 tuple, variables needed for minimizer

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    SS.run_SS()
    calc_moments()

    OBJECTS CREATED WITHIN FUNCTION:
    ss_output     = dictionary, variables from SS of model
    model_moments = [J+2+S,] array, moments from the model solution
    distance      = scalar, weighted, squared deviation between data
                    and model moments

    RETURNS: distance
    --------------------------------------------------------------------
    '''
    (data_moments, W, income_tax_params, ss_params, iterative_params,
        chi_params, baseline_dir) = args
    J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
        g_n_ss, tau_payroll, tau_bq, rho, omega_SS, lambdas, \
        imm_rates, e, retire, mean_income_data, h_wealth, p_wealth,\
        m_wealth, b_ellipse, upsilon = ss_params
    chi_b = chi_guesses[:J]
    chi_n = chi_guesses[J:]
    chi_params = (chi_b, chi_n)
    ss_output = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params, True, baseline_dir)
    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)

    # distance with levels
    distance = np.dot(
        np.dot((np.array(model_moments) - np.array(data_moments)).T, W),
        np.array(model_moments) - np.array(data_moments))
    # distance = ((np.array(model_moments) -
    #              np.array(data_moments))**2).sum()
    print('DATA and MODEL DISTANCE: ', distance)

    # # distance with percentage diffs
    # distance = (((model_moments - data_moments)/data_moments)**2).sum()

    return distance
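# A small numeric check of the weighted distance used in minstat above:
# distance = (m - d)' W (m - d). With W = identity this reduces to the plain
# sum of squared deviations. The moment values are made up for illustration.
import numpy as np

model_m = np.array([0.30, 0.25, 0.45])
data_m = np.array([0.28, 0.27, 0.45])
W = np.identity(3)
diff = model_m - data_m
distance = np.dot(np.dot(diff.T, W), diff)
print(distance)                                 # 0.0008
print(np.isclose(distance, (diff ** 2).sum()))  # True for W = I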
def LfEulerSys(bvec, *args):
    '''
    --------------------------------------------------------------------
    Generates vector of all Euler errors for a given bvec, which errors
    characterize all optimal lifetime decisions, where p is an integer
    in [2, S] representing the remaining periods of life
    --------------------------------------------------------------------
    INPUTS:
    bvec       = (p-1,) vector, remaining lifetime savings decisions
                 where p is the number of remaining periods
    args       = length 7 tuple, (beta, sigma, beg_wealth, nvec, rpath,
                 wpath, EulDiff)
    beta       = scalar in [0,1), discount factor
    sigma      = scalar > 0, coefficient of relative risk aversion
    beg_wealth = scalar, wealth at the beginning of first age
    nvec       = (p,) vector, remaining exogenous labor supply
    rpath      = (p,) vector, interest rates over remaining life
    wpath      = (p,) vector, wages rates over remaining life

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    get_cvec_lf()
    c5ssf.get_b_errors()

    OBJECTS CREATED WITHIN FUNCTION:
    bvec2        = (p,) vector, remaining savings including initial
                   savings
    cvec         = (p,) vector, remaining lifetime consumption levels
                   implied by bvec2
    c_cnstr      = (p,) Boolean vector, =True if c_{s,t}<=0
    b_err_params = length 2 tuple, (beta, sigma)
    b_err_vec    = (p-1,) vector, Euler errors from lifetime consumption
                   vector

    FILES CREATED BY THIS FUNCTION: None

    RETURNS: b_err_vec
    --------------------------------------------------------------------
    '''
    beta, sigma, beg_wealth, nvec, rpath, wpath, EulDiff = args
    bvec2 = np.append(beg_wealth, bvec)
    cvec, c_cnstr = get_cvec_lf(rpath, wpath, nvec, bvec2)
    b_err_params = (beta, sigma)
    b_err_vec = ss.get_b_errors(b_err_params, rpath[1:], cvec, c_cnstr,
                                EulDiff)
    return b_err_vec
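# A minimal sketch of the CRRA Euler errors computed downstream of
# LfEulerSys, assuming MU(c) = c**(-sigma). The "difference" form is
# MU(c_s) - beta*(1+r_{s+1})*MU(c_{s+1}); a zero vector means the savings
# decisions are lifetime-optimal. Inputs are illustrative.
import numpy as np

def crra_euler_errors(cvec, rpath_tp1, beta, sigma):
    mu = cvec ** (-sigma)
    return mu[:-1] - beta * (1 + rpath_tp1) * mu[1:]

cvec = np.array([1.0, 1.02, 0.95])
errs = crra_euler_errors(cvec, np.array([0.05, 0.05]), 0.96, 2.5)
print(errs)  # one error per adjacent consumption pair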
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
import sys
import os
import SS as ss  # local steady-state module

# Household parameters
S = int(80)
beta_annual = 0.96
beta = beta_annual ** (80 / S)
sigma = 2.5
ncutper = round((2 / 3) * S)
nvec = np.ones(S)
nvec[ncutper:] = 0.2
L = ss.get_L(nvec)

# Firm parameters
A = 1.0
alpha = 0.35
delta_annual = 0.05
delta = 1 - ((1 - delta_annual) ** (80 / S))

# SS parameters
SS_solve = True
SS_tol = 1e-13
SS_graphs = True
SS_EulDiff = True

# TPI parameters
T = 320
TPI_solve = True
TPI_tol = 1e-13
maxiter_TPI = 200
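# A quick consistency check on the period-length conversions above: with S
# model periods spanning 80 years, each period is 80/S years, so the
# per-period discount factor and depreciation rate must compound back to
# their annual counterparts over a model period.
print(np.isclose(beta ** (S / 80), beta_annual))              # True
print(np.isclose((1 - delta) ** (S / 80), 1 - delta_annual))  # True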
bvec_guess1 = (2,) vector, guess for steady-state bvec (b1, b2)
b_cnstr     = (2,) Boolean vector, =True if b_s causes negative
              consumption c_s <= 0 or negative aggregate capital stock
              K <= 0
c_cnstr     = (3,) Boolean vector, =True for elements of negative
              consumption c_s <= 0
K_cnstr     = Boolean, =True if K <= 0
bvec_guess2 = (2,) vector, guess for steady-state bvec (b1, b2)
bvec_guess3 = (2,) vector, guess for steady-state bvec (b1, b2)
------------------------------------------------------------------------
'''
f_params = (nvec, A, alpha, delta)

bvec_guess1 = np.array([1.0, 1.2])
b_cnstr, c_cnstr, K_cnstr = ss.feasible(f_params, bvec_guess1)
print('bvec_guess1', bvec_guess1)
print('c_cnstr', c_cnstr)
print('K_cnstr', K_cnstr)

bvec_guess2 = np.array([0.06, -0.001])
b_cnstr, c_cnstr, K_cnstr = ss.feasible(f_params, bvec_guess2)
print('bvec_guess2', bvec_guess2)
print('c_cnstr', c_cnstr)
print('K_cnstr', K_cnstr)

bvec_guess3 = np.array([0.1, 0.1])
b_cnstr, c_cnstr, K_cnstr = ss.feasible(f_params, bvec_guess3)
print('bvec_guess3', bvec_guess3)
print('c_cnstr', c_cnstr)
print('K_cnstr', K_cnstr)
def chi_estimate(income_tax_params, ss_params, iterative_params,
                 chi_guesses, baseline_dir="./OUTPUT"):
    '''
    --------------------------------------------------------------------
    This function calls others to obtain the data moments and then runs
    the simulated method of moments estimation by calling the
    minimization routine.

    INPUTS:
    income_tax_parameters = length 4 tuple, (analytical_mtrs,
                            etr_params, mtrx_params, mtry_params)
    ss_parameters         = length 21 tuple, (J, S, T, BW, beta, sigma,
                            alpha, Z, delta, ltilde, nu, g_y, g_n_ss,
                            tau_payroll, retire, mean_income_data,
                            h_wealth, p_wealth, m_wealth, b_ellipse,
                            upsilon)
    iterative_params      = [2,] vector, vector with max iterations and
                            tolerance for SS solution
    chi_guesses           = [J+S,] vector, initial guesses of chi_b and
                            chi_n stacked together
    baseline_dir          = string, path where baseline results located

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    wealth.compute_wealth_moments()
    labor.labor_data_moments()
    minstat()

    OBJECTS CREATED WITHIN FUNCTION:
    wealth_moments     = [J+2,] array, wealth moments from data
    labor_moments      = [S,] array, labor moments from data
    data_moments       = [J+2+S,] array, wealth and labor moments
                         stacked
    bnds               = [S+J,] array, bounds for parameter estimates
    chi_guesses_flat   = [J+S,] vector, initial guesses of chi_b and
                         chi_n stacked
    min_arg            = length 6 tuple, variables needed for minimizer
    est_output         = dictionary, output from minimizer
    chi_params         = [J+S,] vector, parameters estimates for chi_b
                         and chi_n stacked
    objective_func_min = scalar, minimum of statistical objective
                         function

    OUTPUT:
    ./baseline_dir/Calibration/chi_estimation.pkl

    RETURNS: chi_params
    --------------------------------------------------------------------
    '''
    # unpack tuples of parameters
    J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
        g_n_ss, tau_payroll, tau_bq, rho, omega_SS, lambdas, \
        imm_rates, e, retire, mean_income_data, h_wealth, p_wealth,\
        m_wealth, b_ellipse, upsilon = ss_params
    chi_b_guess, chi_n_guess = chi_guesses
    flag_graphs = False
    # specify bootstrap iterations
    n = 10000

    # Generate wealth data moments
    scf, data = wealth.get_wealth_data()
    wealth_moments = wealth.compute_wealth_moments(scf, lambdas, J)

    # Generate labor data moments
    cps = labor.get_labor_data()
    labor_moments = labor.compute_labor_moments(cps, S)

    # combine moments
    data_moments = list(wealth_moments.flatten()) + list(
        labor_moments.flatten())

    # determine weighting matrix
    optimal_weight = False
    if optimal_weight:
        VCV_wealth_moments = wealth.VCV_moments(scf, n, lambdas, J)
        VCV_labor_moments = labor.VCV_moments(cps, n, lambdas, S)
        VCV_data_moments = np.zeros((J + 2 + S, J + 2 + S))
        VCV_data_moments[:J + 2, :J + 2] = VCV_wealth_moments
        VCV_data_moments[J + 2:, J + 2:] = VCV_labor_moments
        W = np.linalg.inv(VCV_data_moments)
        # np.savetxt('VCV_data_moments.csv', VCV_data_moments)
    else:
        W = np.identity(J + 2 + S)

    # call minimizer
    # Need (1e-12, None) S+J times
    bnds = np.tile(np.array([1e-12, None]), (S + J, 1))
    chi_guesses_flat = list(chi_b_guess.flatten()) + list(
        chi_n_guess.flatten())
    min_args = data_moments, W, income_tax_params, ss_params, \
        iterative_params, chi_guesses_flat, baseline_dir
    # est_output = opt.minimize(minstat, chi_guesses_flat,
    #                           args=(min_args), method="L-BFGS-B",
    #                           bounds=bnds, tol=1e-15,
    #                           options={'maxfun': 1, 'maxiter': 1,
    #                                    'maxls': 1})
    # est_output = opt.minimize(minstat, chi_guesses_flat,
    #                           args=(min_args), method="L-BFGS-B",
    #                           bounds=bnds, tol=1e-15)
    # chi_params = est_output.x
    # objective_func_min = est_output.fun
    #
    # # pickle output
    utils.mkdirs(os.path.join(baseline_dir, "Calibration"))
    # est_dir = os.path.join(baseline_dir,
    #                        "Calibration/chi_estimation.pkl")
    # pickle.dump(est_output, open(est_dir, "wb"))
    #
    # # save data and model moments and min stat to csv
    # # to then put in table of paper
    chi_params = chi_guesses_flat
    chi_b = chi_params[:J]
    chi_n = chi_params[J:]
    chi_params_list = (chi_b, chi_n)
    ss_output = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params_list, True, baseline_dir)
    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)
    # # make dataframe for results
    # columns = ['data_moment', 'model_moment', 'minstat']
    # moment_fit = pd.DataFrame(index=range(0, J+2+S), columns=columns)
    # moment_fit = moment_fit.fillna(0)  # with 0s rather than NaNs
    # moment_fit['data_moment'] = data_moments
    # moment_fit['model_moment'] = model_moments
    # moment_fit['minstat'] = objective_func_min
    # est_dir = os.path.join(baseline_dir,
    #                        "Calibration/moment_results.pkl")
    # moment_fit.to_csv(est_dir)

    # calculate std errors with two-sided numerical derivatives
    h = 0.0001  # pct change in parameter
    model_moments_low = np.zeros((len(chi_params), len(model_moments)))
    model_moments_high = np.zeros((len(chi_params), len(model_moments)))
    for i in range(len(chi_params)):
        # perturb one parameter at a time, using fresh copies so the
        # perturbations do not accumulate or alias chi_params
        chi_params_low = np.array(chi_params)
        chi_params_low[i] = chi_params[i] * (1 - h)
        chi_b = chi_params_low[:J]
        chi_n = chi_params_low[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(income_tax_params, ss_params,
                              iterative_params, chi_params_list, True,
                              baseline_dir)
        model_moments_low[i, :] = calc_moments(ss_output, omega_SS,
                                               lambdas, S, J)
        chi_params_high = np.array(chi_params)
        chi_params_high[i] = chi_params[i] * (1 + h)
        chi_b = chi_params_high[:J]
        chi_n = chi_params_high[J:]
        chi_params_list = (chi_b, chi_n)
        ss_output = SS.run_SS(income_tax_params, ss_params,
                              iterative_params, chi_params_list, True,
                              baseline_dir)
        model_moments_high[i, :] = calc_moments(ss_output, omega_SS,
                                                lambdas, S, J)
    deriv_moments = (np.asarray(model_moments_high) -
                     np.asarray(model_moments_low)).T / (
                         2. * h * np.asarray(chi_params))
    VCV_params = np.linalg.inv(
        np.dot(np.dot(deriv_moments.T, W), deriv_moments))
    std_errors_chi = (np.diag(VCV_params)) ** (1 / 2.)
    sd_dir = os.path.join(baseline_dir, "Calibration/chi_std_errors.pkl")
    pickle.dump(std_errors_chi, open(sd_dir, "wb"))
    np.savetxt('chi_std_errors.csv', std_errors_chi)

    return chi_params
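# A compact, self-contained sketch of the delta-method standard errors
# computed above: perturb each parameter by +/- h percent, form the Jacobian
# of moments with two-sided differences, and invert G'WG. The quadratic
# "model" below is illustrative only, not the OLG model's moment map.
import numpy as np

def smm_std_errors(moment_func, params, W, h=1e-4):
    params = np.asarray(params, dtype=float)
    m0 = np.asarray(moment_func(params))
    G = np.zeros((len(m0), len(params)))
    for i in range(len(params)):
        lo, hi = params.copy(), params.copy()
        lo[i] = params[i] * (1 - h)
        hi[i] = params[i] * (1 + h)
        G[:, i] = (np.asarray(moment_func(hi)) -
                   np.asarray(moment_func(lo))) / (2 * h * params[i])
    VCV = np.linalg.inv(G.T @ W @ G)
    return np.sqrt(np.diag(VCV))

se = smm_std_errors(lambda p: np.array([p[0] ** 2, p[0] * p[1], p[1]]),
                    [2.0, 3.0], np.identity(3))
print(se)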
if not os.access(ss_output_dir, os.F_OK):
    os.makedirs(ss_output_dir)
ss_outputfile = os.path.join(ss_output_dir, 'ss_vars.pkl')
ss_paramsfile = os.path.join(ss_output_dir, 'ss_args.pkl')

# Compute steady-state solution
if SS_solve:
    print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
    # Make initial guess for rh_ss, rf_ss, and q_ss
    rh_ss_guess = 0.04
    rf_ss_guess = 0.04
    q_ss_guess = 1.0
    ss_args = (nhvec, nfvec, beta, sigma, alpha_h, phi_h, Z_h, gamma_h,
               delta_h, alpha_f, phi_f, Z_f, gamma_f, delta_f,
               SS_tol_outer, SS_tol_inner, xi_SS, SS_maxiter, SS_EulDiff)
    ss_output = ss.get_SS(rh_ss_guess, rf_ss_guess, q_ss_guess, ss_args,
                          SS_graphs)
    # Save ss_output as pickle
    pickle.dump(ss_output, open(ss_outputfile, 'wb'))
    pickle.dump(ss_args, open(ss_paramsfile, 'wb'))
# Don't compute steady-state, get it from pickle
else:
    # Make sure that the SS output files exist
    ss_vars_exst = os.path.exists(ss_outputfile)
    ss_args_exst = os.path.exists(ss_paramsfile)
    if (not ss_vars_exst) or (not ss_args_exst):
        # If the files don't exist, stop the program and run the steady-
        # state solution first
        err_msg = ('ERROR: The SS output files do not exist and ' +
                   'SS_solve=False. Must set SS_solve=True and ' +
# b = 0.501
# upsilon = 1.553
ellip_init = np.array([0.2, 1.0])
Frisch = 0.8
scale_param = 1.0
cfe_params = np.array([Frisch, scale_param])
b, upsilon = elp.fit_ellip_CFE(ellip_init, cfe_params, l_tilde, True)
lambdas = np.array([0.3, 0.3, 0.2, 0.1, 0.1])
J = lambdas.shape[0]
age_wgts = np.ones(S) * (1 / S)
age_wgts_80 = np.ones(80) * (1 / 80)
emat = abil.get_e_interp(S, age_wgts, age_wgts_80, lambdas, True)

# Firm parameters
A = 1.0
alpha = 0.35
delta_annual = 0.05
delta = 1 - ((1 - delta_annual) ** (80 / S))

# SS parameters
SS_graph = True
K_init = 100.0
L_init = 50.0
KL_init = np.array([K_init, L_init])
ss_args = (KL_init, beta, sigma, emat, chi_n_vec, l_tilde, b, upsilon,
           lambdas, S, J, alpha, A, delta)
ss_output = ss.get_SS(ss_args, SS_graph)
pickle.dump(ss_output, open('ss_output.pkl', 'wb'))
ss_output_dir = os.path.join(cur_path, ss_output_fldr)
if not os.access(ss_output_dir, os.F_OK):
    os.makedirs(ss_output_dir)
ss_outputfile = os.path.join(ss_output_dir, 'ss_vars.pkl')
ss_paramsfile = os.path.join(ss_output_dir, 'ss_args.pkl')

# Compute steady-state solution
ss_args = (S, beta, sigma, l_tilde, b_ellip, upsilon, chi_n_vec, A,
           alpha, delta, SS_BsctTol, SS_EulTol, SS_EulDiff, xi_SS,
           SS_maxiter)
if SS_solve:
    print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
    rss_init = 0.06
    print('Solving SS outer loop using root finder on r.')
    ss_output = ss.get_SS(rss_init, ss_args, SS_graphs)
    # Save ss_output as pickle
    pickle.dump(ss_output, open(ss_outputfile, 'wb'))
    pickle.dump(ss_args, open(ss_paramsfile, 'wb'))
# Don't compute steady-state, get it from pickle
else:
    # Make sure that the SS output files exist
    ss_vars_exst = os.path.exists(ss_outputfile)
    ss_args_exst = os.path.exists(ss_paramsfile)
    if (not ss_vars_exst) or (not ss_args_exst):
        # If the files don't exist, stop the program and run the steady-
        # state solution first
        err_msg = ('ERROR: The SS output files do not exist and ' +
                   'SS_solve=False. Must set SS_solve=True and ' +
if not ss_output_exst:
    # If the files don't exist, stop the program
    err_msg = ('ERROR: The SS output files do not exist')
    raise ValueError(err_msg)
else:
    ss_output = pickle.load(open(ss_outputfile, 'rb'))
ss_results.append(ss_output)

tp_output_fldr = 'OUTPUT/TP/' + demog_type
tp_output_dir = os.path.join(cur_path, tp_output_fldr)
tp_outputfile = os.path.join(tp_output_dir, 'tp_vars.pkl')
# Make sure that the TPI output files exist
tp_output_exst = os.path.exists(tp_outputfile)
if not tp_output_exst:
    # If the files don't exist, stop the program
    err_msg = ('ERROR: The TP output files do not exist')
    raise ValueError(err_msg)
else:
    tp_output = pickle.load(open(tp_outputfile, 'rb'))
tp_results.append(tp_output)

graph = False
if graph:
    p = params.parameters(demog_type)
    tp.create_graphs(tp_output, p)

tp.tp_pct_change_graphs(tp_results[0], tp_results[1:], p, figure_labels)
ss.ss_pct_change_graphs(ss_results[0], ss_results[1:], p, figure_labels)
def solve_PE(w0, tax_params, hh_params, firm_params, fin_frictions,
             grid_params, output_dir, guid='baseline',
             plot_results=False):
    '''
    ------------------------------------------------------------------------
    Solve partial equilibrium model
    ------------------------------------------------------------------------
    '''
    # set directory specific to this model run
    output_path = os.path.join(output_dir, 'PE', guid)
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)

    # unpack dictionaries
    tau_l = tax_params['tau_l']
    tau_i = tax_params['tau_i']
    tau_d = tax_params['tau_d']
    tau_g = tax_params['tau_g']
    tau_c = tax_params['tau_c']
    f_e = tax_params['f_e']
    f_b = tax_params['f_b']
    tax_tuple = (tau_l, tau_i, tau_d, tau_g, tau_c, f_e, f_b)
    alpha_k = firm_params['alpha_k']
    alpha_l = firm_params['alpha_l']
    delta = firm_params['delta']
    psi = firm_params['psi']
    mu = firm_params['mu']
    rho = firm_params['rho']
    sigma_eps = firm_params['sigma_eps']
    eta0 = fin_frictions['eta0']
    eta1 = fin_frictions['eta1']
    eta2 = fin_frictions['eta2']
    theta = fin_frictions['theta']
    sizez = grid_params['sizez']
    num_sigma = grid_params['num_sigma']
    dens_k = grid_params['dens_k']
    lb_k = grid_params['lb_k']
    sizeb = grid_params['sizeb']
    lb_b = grid_params['lb_b']
    ub_b = grid_params['ub_b']
    beta = hh_params['beta']
    h = hh_params['h']

    # compute equilibrium interest rate
    r = ((1 / beta) - 1) / (1 - tau_i)
    # compute the firm's discount factor
    betafirm = (1 / (1 + (r * ((1 - tau_i) / (1 - tau_g)))))

    '''
    ------------------------------------------------------------------------
    Compute grids for z, k, b
    ------------------------------------------------------------------------
    '''
    firm_params_k = (betafirm, delta, alpha_k, alpha_l)
    Pi, zgrid = grids.discrete_z(rho, mu, sigma_eps, num_sigma, sizez)
    kgrid, sizek, kstar, ub_k = grids.discrete_k(w0, firm_params_k,
                                                 zgrid, sizez, dens_k,
                                                 lb_k)
    bgrid = grids.discrete_b(lb_b, ub_b, sizeb, w0, firm_params_k, zgrid,
                             tau_c, theta, ub_k)
    # grid_params = (zgrid, sizez, Pi, kgrid, sizek, kstar, bgrid, sizeb)

    '''
    ------------------------------------------------------------------------
    Solve for partial equilibrium
    ------------------------------------------------------------------------
    '''
    # initial guess at Value Function
    VF_initial = np.zeros((sizez, sizek, sizeb))
    # initial guess at stationary distribution
    Gamma_initial = np.ones((sizez, sizek, sizeb)) * (1 / (sizek * sizez *
                                                           sizeb))
    op, e, l_d, y, eta, collateral_constraint =\
        VFI.get_firmobjects(r, w0, zgrid, kgrid, bgrid, alpha_k, alpha_l,
                            delta, psi, eta0, eta1, eta2, theta, sizez,
                            sizek, sizeb, tax_tuple)
    VF, PF_k, PF_b, optK, optI, optB =\
        VFI.VFI(e, eta, collateral_constraint, betafirm, delta, kgrid,
                bgrid, Pi, sizez, sizek, sizeb, tax_tuple, VF_initial)
    # print('Policy function, debt: ', PF_b[0, int(np.ceil(sizek/2)), :])
    # print('Policy function, debt: ', PF_b[5, int(np.ceil(sizek/2)), :])
    # print('Policy function, debt: ', PF_b[-1, int(np.ceil(sizek/2)), :])
    #
    # print('Policy function, debt: ', PF_b[0, -3, :])
    # print('Policy function, debt: ', PF_b[5, -3, :])
    # print('Policy function, debt: ', PF_b[-1, -3, :])
    #
    # print('Policy function, debt: ', PF_b[0, 3, :])
    # print('Policy function, debt: ', PF_b[5, 3, :])
    # print('Policy function, debt: ', PF_b[-1, 3, :])
    #
    # print('bgrid = ', bgrid)
    # print('Policy function, investment: ', optI[0, :, :])
    # print('Policy function, investment: ', optI[5, :, :])
    # print('Policy function, investment: ', optI[-1, :, :])
    # quit()
    #
    # print('VF: ', VF[0, -3, :])
    # print('VF: ', VF[5, -3, :])
    # print('VF: ', VF[-1, -3, :])
    #
    # print('Collateral Constraint = ',
    #       collateral_constraint[-1, -3, :, -1, :])
    # print('Collateral Constraint = ',
    #       collateral_constraint[-1, -3, -3, -1, :])
    Gamma = SS.find_SD(PF_k, PF_b, Pi, sizez, sizek, sizeb, Gamma_initial)
    # print('SD over z and b: ', Gamma.sum(axis=1))

    '''
    ------------------------------------------------------------------------
    Compute model moments
    ------------------------------------------------------------------------
    '''
    output_vars = (optK, optI, optB, op, e, l_d, y, eta, VF, PF_k, PF_b,
                   Gamma)
    k_params = (kgrid, sizek, dens_k, kstar)
    z_params = (Pi, zgrid, sizez)
    b_params = (bgrid, sizeb)
    model_moments =\
        moments.firm_moments(w0, r, delta, psi, h, k_params, z_params,
                             b_params, tax_tuple, output_vars, output_dir,
                             print_moments=True)

    if plot_results:
        '''
        --------------------------------------------------------------------
        Plot results
        --------------------------------------------------------------------
        '''
        # plots.firm_plots(delta, k_params, z_params, output_vars,
        #                  output_dir)

    # create dictionaries of output, params, grids, moments
    # output_dict = {'optK': optK, 'optI': optI, 'optB': optB, 'op': op,
    #                'e': e, 'eta': eta, 'VF': VF, 'PF_k': PF_k,
    #                'PF_b': PF_b, 'Gamma': Gamma}
    output_dict = {'optK': optK, 'optI': optI, 'optB': optB, 'VF': VF,
                   'PF_k': PF_k, 'PF_b': PF_b, 'Gamma': Gamma}
    param_dict = {'w': w0, 'r': r, 'tax_params': tax_params,
                  'hh_params': hh_params, 'firm_params': firm_params,
                  'fin_frictions': fin_frictions,
                  'grid_params': grid_params}
    grid_dict = {'zgrid': zgrid, 'sizez': sizez, 'Pi': Pi,
                 'kgrid': kgrid, 'sizek': sizek, 'kstar': kstar,
                 'bgrid': bgrid, 'sizeb': sizeb}
    model_out_dict = {'params': param_dict, 'grid': grid_dict,
                      'moments': model_moments, 'output': output_dict}

    # Save pickle of model output
    pkl_path = os.path.join(output_path, 'model_output.pkl')
    pickle.dump(model_out_dict, open(pkl_path, 'wb'))
L = nvec.sum()

# Firm parameters
A = 1.0
alpha = 0.35
delta_annual = 0.05
delta = 1 - ((1 - delta_annual) ** 20)

'''
Problem 1: Checking if feasible() works properly
'''
b_guess_1 = np.array([1.0, 1.2])
b_guess_2 = np.array([0.06, -0.001])
b_guess_3 = np.array([0.1, 0.1])
f_params = (nvec, A, alpha, delta)
results1 = np.array(ss.feasible(f_params, b_guess_1))
results2 = np.array(ss.feasible(f_params, b_guess_2))
results3 = np.array(ss.feasible(f_params, b_guess_3))
print(results1)
print(results2)
print(results3)

'''
Problem 2: Solving steady state
'''
# SS parameters
SS_tol = 1e-13
SS_graphs = False
bvec_guess = np.array([0.1, 0.1])
f_params = (nvec, A, alpha, delta)
b_cnstr, c_cnstr, K_cnstr = ss.feasible(f_params, bvec_guess)
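# A minimal sketch of what a feasibility check like ss.feasible does in a
# 3-period model with 2 savings decisions: given a guess (b2, b3), flag any
# implied consumption c_s <= 0 and a non-positive aggregate capital stock
# K <= 0. The prices below are placeholders, not the model's equilibrium
# wage and interest rate.
import numpy as np

def feasible_sketch(bvec, nvec, w=1.2, r=0.05):
    b2, b3 = bvec
    K = bvec.sum()
    c1 = w * nvec[0] - b2
    c2 = w * nvec[1] + (1 + r) * b2 - b3
    c3 = w * nvec[2] + (1 + r) * b3
    c_cnstr = np.array([c1, c2, c3]) <= 0
    K_cnstr = K <= 0
    return c_cnstr, K_cnstr

print(feasible_sketch(np.array([0.06, -0.001]), np.array([1.0, 1.0, 0.2])))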
print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
rss_init = 0.06
c1_init = 0.1
if calibrate_n:
    # y_bar_data / y_bar_model when setting chi_n_vec = 1
    factor_init = 59415.5012
    init_vals = (rss_init, c1_init, factor_init)
    ss_args = (S, beta, sigma, l_tilde, b_ellip, upsilon, A, alpha,
               delta, SS_BsctTol, Factor_tol, SS_EulTol, SS_EulDiff,
               xi_SS, xi_factor, SS_maxiter)
else:
    init_vals = (rss_init, c1_init)
    ss_args = (S, beta, sigma, l_tilde, b_ellip, upsilon, chi_n_vec, A,
               alpha, delta, SS_BsctTol, SS_EulTol, SS_EulDiff, xi_SS,
               SS_maxiter)
print('Solving SS outer loop using bisection method on r.')
ss_output = ss.get_SS(init_vals, ss_args, calibrate_n, SS_graphs)
# Save ss_output as pickle
pickle.dump(ss_output, open(ss_outputfile, 'wb'))
pickle.dump(ss_args, open(ss_paramsfile, 'wb'))
# Don't compute steady-state, get it from pickle
else:
    # Make sure that the SS output files exist
    ss_vars_exst = os.path.exists(ss_outputfile)
    ss_args_exst = os.path.exists(ss_paramsfile)
    if (not ss_vars_exst) or (not ss_args_exst):
        # If the files don't exist, stop the program and run the steady-
        # state solution first
        err_msg = ('ERROR: The SS output files do not exist and ' +
                   'SS_solve=False. Must set SS_solve=True and ' +
sigma = 1.5  # CRRA
beta = 0.8  # discount rate
alpha = 0.3  # capital share of output
delta = 0.1  # rate of depreciation
A = 1.0  # TFP
T = 20  # number of periods until SS

# exogenous labor supply
n = np.array([1.0, 1.0, 0.2])

# parameter for convergence of GE loop
xi = 0.1

# solve the SS
ss_params = (beta, sigma, n, alpha, A, delta, xi)
r_init = 1 / beta - 1
r_ss, b_sp1_ss, euler_errors_ss = SS.solve_ss(r_init, ss_params)
print('SS interest rate is ', r_ss)
print('Maximum Euler error in the SS is ',
      np.absolute(euler_errors_ss).max())

# solve the time path
r_path_init = np.ones(T) * r_ss
# initial distribution of savings, determined before t=1
b_sp1_pre = 1.1 * b_sp1_ss
# NOTE: if the initial distribution of savings is the same as the SS
# value, then the path of interest rates should equal the SS value in
# each period...
tpi_params = (beta, sigma, n, alpha, A, delta, T, xi, b_sp1_pre, r_ss,
              b_sp1_ss)
r_path, euler_errors_path = TPI.solve_tp(r_path_init, tpi_params)
print('Maximum Euler error along the time path is ',
      np.absolute(euler_errors_path).max())
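# A sketch of the sanity check suggested in the NOTE above: starting the
# economy exactly at the steady-state savings distribution should return a
# flat interest-rate path. This reuses TPI.solve_tp from this script and
# assumes it accepts the same argument tuple.
tpi_params_ss_start = (beta, sigma, n, alpha, A, delta, T, xi, b_sp1_ss,
                       r_ss, b_sp1_ss)
r_path_flat, _ = TPI.solve_tp(np.ones(T) * r_ss, tpi_params_ss_start)
print('Flat path recovered: ', np.allclose(r_path_flat, r_ss))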
def minstat(chi_guesses, *args):
    '''
    --------------------------------------------------------------------
    This function generates the weighted sum of squared differences
    between the model and data moments.

    INPUTS:
    chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n
                  stacked together
    arg         = length 6 tuple, variables needed for minimizer

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
    SS.run_SS()
    calc_moments()

    OBJECTS CREATED WITHIN FUNCTION:
    ss_output     = dictionary, variables from SS of model
    model_moments = [J+2+S,] array, moments from the model solution
    distance      = scalar, weighted, squared deviation between data
                    and model moments

    RETURNS: distance
    --------------------------------------------------------------------
    '''
    (data_moments, W, income_tax_params, ss_params, iterative_params,
        chi_params, baseline_dir) = args
    J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
        g_n_ss, tau_payroll, tau_bq, rho, omega_SS, lambdas, \
        imm_rates, e, retire, mean_income_data, h_wealth, p_wealth,\
        m_wealth, b_ellipse, upsilon = ss_params
    chi_b = chi_guesses[:J]
    chi_n = chi_guesses[J:]
    chi_params = (chi_b, chi_n)
    ss_output = SS.run_SS(income_tax_params, ss_params, iterative_params,
                          chi_params, True, baseline_dir)
    model_moments = calc_moments(ss_output, omega_SS, lambdas, S, J)

    # distance with levels; penalize guesses for which the SS solver
    # failed to converge
    if ss_output['ss_flag'] == 0:
        distance = np.dot(
            np.dot((np.array(model_moments) -
                    np.array(data_moments)).T, W),
            np.array(model_moments) - np.array(data_moments))
    else:
        distance = 1e14
    # distance = ((np.array(model_moments) -
    #              np.array(data_moments))**2).sum()
    print('DATA and MODEL DISTANCE: ', distance)

    # save results along the way
    bh_along = np.reshape(np.append(chi_guesses, distance), (1, 88))
    # x = chi_guesses
    # f = distance
    # fun_dict = {'x': x, 'f': f}
    # bh_out = np.append(bh_output, bh_along, axis=1)
    # bh_output = bh_out
    # print(bh_output.shape)
    columns = list(range(88))
    df = pd.DataFrame(bh_along, columns=columns)
    bh_output.loc[len(bh_output), :] = bh_along
    pickle.dump(bh_output, open("estimation_output_along.pkl", "wb"))

    # # distance with percentage diffs
    # distance = (((model_moments - data_moments)/data_moments)**2).sum()

    return distance
A = 1.0
delta_annual = 0.05
delta = 1.0 - ((1.0 - delta_annual) ** (yrs_live / S))

# SS Parameters
ss_solve = True
ss_max_iter = 400
ss_tol = 1e-13
xi_ss = 0.1
if ss_solve:
    c1_guess = 0.5
    r_old = 0.5
    params = (beta, sigma, S, ltilde, b, upsilon, chi_n_vec, A, alpha,
              delta, ss_max_iter, ss_tol, xi_ss)
    (r_ss, w_ss, c_ss, n_ss, b_ss, K_ss, L_ss, C_ss, Y_ss, b_err, n_err,
        b_last) = ss.get_SS(c1_guess, r_old, params)
    ss.create_graphs(c_ss, b_ss, n_ss)
    print(r_ss, w_ss, K_ss, L_ss, Y_ss, C_ss)
    print("savings euler error is {}".format(b_err))
    print("labor supply euler error is {}".format(n_err))
    print("final period saving is {}".format(b_last))
    print("resource constraint error is {}".format(
        Y_ss - C_ss - delta * K_ss))

# TPI Parameters
tpi_solve = True
T1 = 60
T2 = 90
tpi_max_iter = 500
tpi_tol = 1e-12
xi_tpi = 0.3
b1vec = 1.08 * b_ss
A = 1.0
delta_annual = 0.05
delta = 1.0 - ((1.0 - delta_annual) ** (yrs_live / S))

# SS Parameters
ss_solve = True
ss_max_iter = 400
ss_tol = 1e-9
xi_ss = 0.1
if ss_solve:
    c1_guess = 0.5
    r_old = 0.5
    params = (S, beta, S, sigma, l_ub, b, upsilon, chi, A, alpha, delta,
              ss_max_iter, ss_tol, xi_ss)
    (r_ss, w_ss, c_ss, n_ss, b_ss, K_ss, L_ss, b_err, n_err,
        b_last) = ss.get_SS(c1_guess, r_old, params)
    C_ss = utils.get_C(c_ss)
    Y_ss = utils.get_Y(K_ss, L_ss, (A, alpha))
    cnt_err = Y_ss - C_ss - delta * K_ss
    # ss.create_graphs(c_ss, b_ss, n_ss)
    # ss.write_csv(r_ss, w_ss, K_ss, L_ss, Y_ss, C_ss, b_err, n_err,
    #              b_last, cnt_err)

# TPI Parameters
tpi_solve = True
T1 = 60
T2 = 90
tpi_max_iter = 500
tpi_tol = 1e-12
xi_tpi = 0.3
b1vec = 1.08 * b_ss
             f_args)
BQ_args = (lambdas, omega_SS, mort_rates, g_n_SS)
BQ_init = aggr.get_BQ(bmat_init, rss_init, BQ_args)
factor_init = 1.0
init_calc = True
mean_ydata = calibrate.get_avg_ydata()
chi_n_vec = 1.0 * np.ones(S)
ss_init_vals = (rss_init, BQ_init, factor_init, bmat_init, nmat_init)
ss_args_noclb = (J, E, S, lambdas, emat, mort_rates, imm_rates_adj,
                 omega_SS, g_n_SS, zeta_mat, chi_n_vec, chi_b_vec, beta,
                 sigma, l_tilde, b_ellip, upsilon, g_y, Z, gamma, delta,
                 SS_tol_outer, SS_tol_inner, xi_SS, SS_maxiter,
                 mean_ydata, init_calc)
print(ss_args_noclb)
ss_output_noclb = ss.get_SS(ss_init_vals, ss_args_noclb, False)
# Save ss_output as pickle
pickle.dump(ss_output_noclb, open(ss_outfile_noclb, 'wb'))
pickle.dump(ss_args_noclb, open(ss_prmfile_noclb, 'wb'))

# Update initial guess for factor to factor from chi_n_vec = 1 sol'n
lambda_mat = np.tile(lambdas.reshape((1, J)), (S, 1))
omega_mat = np.tile(omega_SS.reshape((S, 1)), (1, J))
avg_inc_model = \
    (omega_mat * lambda_mat *
     (ss_output_noclb['r_ss'] * ss_output_noclb['b_ss'] +
      ss_output_noclb['w_ss'] * emat *
      ss_output_noclb['n_ss'])).sum()
print('average_income_model: ', avg_inc_model)
factor_init = mean_ydata / avg_inc_model
print('factor_init: ', factor_init)
# import packages
import numpy as np
import SS

# set model parameters
sigma = 1.5  # CRRA
beta = 0.8  # discount rate
alpha = 0.3  # capital share of output
delta = 0.1  # rate of depreciation
A = 1.0  # TFP

# exogenous labor supply
n = np.array([1.0, 1.0, 0.2])

# parameter for convergence of GE loop
xi = 0.1

# solve the SS
ss_params = (beta, sigma, n, alpha, A, delta, xi)
r_init = 1 / beta - 1
r_ss = SS.solve_ss(r_init, ss_params)

# solve the time path
import SS
import Xbar
import file_concat

adultdriver = file_concat.file_concat(
    'vehicle-2014-small-adultdriver-new.csv', 'accidents-2014-small.csv')
N = 166519
r = 3
v1 = r - 1  # between-groups degrees of freedom
v2 = N - r  # within-groups degrees of freedom
[Xbar, counter] = Xbar.averages(adultdriver)
SSb = SS.ssb(Xbar, counter)
SSW = SS.ssw(adultdriver, Xbar) - SSb
Fcalc = (SSb * v2) / (SSW * v1)
print(Fcalc)
print(Xbar, N)
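# The statistic above is the one-way ANOVA F ratio,
# F = (SSB / v1) / (SSW / v2), which is algebraically the same as
# (SSb * v2) / (SSW * v1). A minimal sketch of turning such a statistic
# into a p-value with scipy's F distribution; the numbers are illustrative.
from scipy import stats

SSb_ex, SSW_ex, v1_ex, v2_ex = 12.5, 480.0, 2, 166516
F_ex = (SSb_ex / v1_ex) / (SSW_ex / v2_ex)
p_value = stats.f.sf(F_ex, v1_ex, v2_ex)  # P(F > F_ex)
print(F_ex, p_value)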
def solve_GE(w0, tax_params, hh_params, firm_params, fin_frictions,
             grid_params, output_dir, guid='baseline',
             plot_results=False):
    # set directory specific to this model run
    output_path = os.path.join(output_dir, 'GE', guid)
    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)

    # unpack dictionaries
    tau_l = tax_params['tau_l']
    tau_i = tax_params['tau_i']
    tau_d = tax_params['tau_d']
    tau_g = tax_params['tau_g']
    tau_c = tax_params['tau_c']
    f_e = tax_params['f_e']
    f_b = tax_params['f_b']
    tax_tuple = (tau_l, tau_i, tau_d, tau_g, tau_c, f_e, f_b)
    alpha_k = firm_params['alpha_k']
    alpha_l = firm_params['alpha_l']
    delta = firm_params['delta']
    psi = firm_params['psi']
    mu = firm_params['mu']
    rho = firm_params['rho']
    sigma_eps = firm_params['sigma_eps']
    eta0 = fin_frictions['eta0']
    eta1 = fin_frictions['eta1']
    eta2 = fin_frictions['eta2']
    theta = fin_frictions['theta']
    sizez = grid_params['sizez']
    num_sigma = grid_params['num_sigma']
    dens_k = grid_params['dens_k']
    lb_k = grid_params['lb_k']
    sizeb = grid_params['sizeb']
    lb_b = grid_params['lb_b']
    ub_b = grid_params['ub_b']
    beta = hh_params['beta']
    h = hh_params['h']

    # compute equilibrium interest rate
    r = ((1 / beta) - 1) / (1 - tau_i)
    # compute the firm's discount factor
    betafirm = (1 / (1 + (r * ((1 - tau_i) / (1 - tau_g)))))

    '''
    ------------------------------------------------------------------------
    Compute grids for z, k, b
    ------------------------------------------------------------------------
    '''
    firm_params_k = (betafirm, delta, alpha_k, alpha_l)
    Pi, zgrid = grids.discrete_z(rho, mu, sigma_eps, num_sigma, sizez)
    kgrid, sizek, kstar, ub_k = grids.discrete_k(w0, firm_params_k,
                                                 zgrid, sizez, dens_k,
                                                 lb_k)
    bgrid = grids.discrete_b(lb_b, ub_b, sizeb, w0, firm_params_k, zgrid,
                             tau_c, theta, ub_k)
    # grid_params = (zgrid, sizez, Pi, kgrid, sizek, kstar, bgrid, sizeb)

    '''
    ------------------------------------------------------------------------
    Solve for general equilibrium
    ------------------------------------------------------------------------
    '''
    # initial guess at Value Function
    VF_initial = np.zeros((sizez, sizek, sizeb))
    # initial guess at stationary distribution
    Gamma_initial = np.ones((sizez, sizek, sizeb)) * (1 / (sizek * sizez *
                                                           sizeb))
    gr_args = (r, alpha_k, alpha_l, delta, psi, betafirm, kgrid, zgrid,
               bgrid, Pi, eta0, eta1, eta2, theta, sizek, sizez, sizeb,
               h, tax_tuple, VF_initial, Gamma_initial)
    start_time = time.time()
    w = SS.golden_ratio_eqm(0.8, 1.6, gr_args, tolerance=1e-4)
    end_time = time.time()
    print('Solving the GE model took ', end_time - start_time,
          ' seconds to solve')
    print('SS wage rate: ', w)

    '''
    ------------------------------------------------------------------------
    Find model outputs given eq'm wage rate
    ------------------------------------------------------------------------
    '''
    op, e, l_d, y, eta, collateral_constraint =\
        VFI.get_firmobjects(r, w, zgrid, kgrid, bgrid, alpha_k, alpha_l,
                            delta, psi, eta0, eta1, eta2, theta, sizez,
                            sizek, sizeb, tax_tuple)
    VF, PF_k, PF_b, optK, optI, optB =\
        VFI.VFI(e, eta, collateral_constraint, betafirm, delta, kgrid,
                bgrid, Pi, sizez, sizek, sizeb, tax_tuple, VF_initial)
    Gamma = SS.find_SD(PF_k, PF_b, Pi, sizez, sizek, sizeb, Gamma_initial)

    '''
    ------------------------------------------------------------------------
    Compute model moments
    ------------------------------------------------------------------------
    '''
    output_vars = (optK, optI, optB, op, e, l_d, y, eta, VF, PF_k, PF_b,
                   Gamma)
    k_params = (kgrid, sizek, dens_k, kstar)
    z_params = (Pi, zgrid, sizez)
    b_params = (bgrid, sizeb)
    model_moments =\
        moments.firm_moments(w, r, delta, psi, h, k_params, z_params,
                             b_params, tax_tuple, output_vars, output_dir,
                             print_moments=True)

    if plot_results:
        '''
        --------------------------------------------------------------------
        Plot results
        --------------------------------------------------------------------
        '''
        # plots.firm_plots(delta, k_params, z_params, output_vars,
        #                  output_dir)

    # create dictionaries of output, params, grids, moments
    # output_dict = {'optK': optK, 'optI': optI, 'optB': optB, 'op': op,
    #                'e': e, 'eta': eta, 'VF': VF, 'PF_k': PF_k,
    #                'PF_b': PF_b, 'Gamma': Gamma}
    output_dict = {'optK': optK, 'optI': optI, 'optB': optB, 'VF': VF,
                   'PF_k': PF_k, 'PF_b': PF_b, 'Gamma': Gamma}
    param_dict = {'w': w, 'r': r, 'tax_params': tax_params,
                  'hh_params': hh_params, 'firm_params': firm_params,
                  'fin_frictions': fin_frictions,
                  'grid_params': grid_params}
    grid_dict = {'zgrid': zgrid, 'sizez': sizez, 'Pi': Pi,
                 'kgrid': kgrid, 'sizek': sizek, 'kstar': kstar,
                 'bgrid': bgrid, 'sizeb': sizeb}
    model_out_dict = {'params': param_dict, 'grid': grid_dict,
                      'moments': model_moments, 'output': output_dict}

    # Save pickle of model output
    pkl_path = os.path.join(output_path, 'model_output.pkl')
    pickle.dump(model_out_dict, open(pkl_path, 'wb'))
def firm_moments(w, r, delta, psi, fixed_cost, h, k_params, z_params,
                 b_params, tax_params, output_vars, output_dir,
                 print_moments=False):
    '''
    ------------------------------------------------------------------------
    Compute moments
    ------------------------------------------------------------------------
    '''
    # unpack tuples
    kgrid, sizek, dens, kstar = k_params
    Pi, zgrid, sizez = z_params
    bgrid, sizeb = b_params
    tau_l, tau_i, tau_d, tau_g, tau_c, f_e, f_b = tax_params
    (optK, optI, optB, op, e, l_d, y, eta, VF, PF_k, PF_b,
        Gamma) = output_vars
    k3grid = np.tile(np.reshape(kgrid, (1, sizek, 1)), (sizez, 1, sizeb))
    k2grid = np.tile(np.reshape(kgrid, (1, sizek)), (sizez, 1))
    op3 = np.tile(np.reshape(op, (sizez, sizek, 1)), (1, 1, sizeb))

    # Aggregate Investment Rate
    agg_IK = (optI * Gamma).sum() / (k3grid * Gamma).sum()

    # Aggregate Dividends/Earnings
    equity, div, is_constrained, is_using_equity =\
        find_equity_div(e, eta, PF_k, PF_b, sizez, sizek, sizeb)
    agg_DE = ((div * Gamma).sum() /
              (np.tile(np.reshape(op, (sizez, sizek, 1)),
                       (1, 1, sizeb)) * Gamma).sum())

    # Aggregate New Equity/Investment
    mean_SI = ((equity / optI) * Gamma).sum() / Gamma.sum()
    mean_S = (equity * Gamma).sum() / Gamma.sum()
    agg_SI = (equity * Gamma).sum() / (optI * Gamma).sum()

    # Aggregate leverage ratio
    agg_BV = (optB * Gamma).sum() / ((VF + optB) * Gamma).sum()

    # Volatility of the Investment Rate: the standard deviation of the
    # investment rate across the steady-state distribution of firms
    mean_IK = ((optI / k3grid) * Gamma).sum() / Gamma.sum()
    sd_IK = np.sqrt(((((optI / k3grid) - mean_IK) ** 2) * Gamma).sum())

    # Volatility of Earnings/Capital
    mean_EK = ((op / k2grid) * Gamma.sum(axis=2)).sum() / Gamma.sum()
    sd_EK = np.sqrt(((((op / k2grid) - mean_EK) ** 2) *
                     Gamma.sum(axis=2)).sum())

    # Volatility of leverage ratio
    mean_BV = ((optB / (VF + optB)) * Gamma).sum() / Gamma.sum()
    sd_BV = np.sqrt(((((optB / (VF + optB)) - mean_BV) ** 2) *
                     Gamma).sum())

    # Volatility of rate of new equity issues
    sd_S = np.sqrt((((equity - mean_S) ** 2) * Gamma).sum())

    # Autocorrelation of the Investment Rate, Earnings/Capital ratio,
    # and leverage ratio, and serial correlation between lagged profits
    # and leverage
    ac_IK, ac_EK, ac_BV, sc_EK_BV, ac_S =\
        find_autocorr(op, optI, optB, equity, PF_k, PF_b, VF, Gamma,
                      kgrid, Pi, sizez, sizek, sizeb, mean_IK, mean_EK,
                      mean_BV, mean_S, sd_IK, sd_EK, sd_BV, sd_S)

    # compute covariances
    cov_BV_EK = (((optB / (VF + optB)) - mean_BV) *
                 ((op3 / k3grid) - mean_EK) * Gamma).sum()
    cov_SI_EK = (((equity / optI) - mean_SI) *
                 ((op3 / k3grid) - mean_EK) * Gamma).sum()
    cov_BV_IK = (((optB / (VF + optB)) - mean_BV) *
                 ((optI / k3grid) - mean_IK) * Gamma).sum()
    cov_IK_SI = (((optI / k3grid) - mean_IK) *
                 ((equity / optI) - mean_SI) * Gamma).sum()
    cov_IK_EK = (((optI / k3grid) - mean_IK) *
                 ((op3 / k3grid) - mean_EK) * Gamma).sum()

    # compute correlations
    corr_BV_EK = cov_BV_EK / (sd_BV * sd_EK)
    corr_IK_EK = cov_IK_EK / (sd_IK * sd_EK)

    # fraction with investment "spike" (I/K > 0.2)
    inv_spike = (((optI / k3grid) > 0.2) * Gamma).sum() / Gamma.sum()

    # put these cross-sectional moments in a dictionary
    cross_section_dict = {'agg_IK': agg_IK, 'agg_DE': agg_DE,
                          'agg_SI': agg_SI, 'agg_BV': agg_BV,
                          'mean_IK': mean_IK, 'sd_IK': sd_IK,
                          'mean_EK': mean_EK, 'sd_EK': sd_EK,
                          'mean_BV': mean_BV, 'sd_BV': sd_BV,
                          'ac_IK': ac_IK, 'ac_EK': ac_EK,
                          'ac_BV': ac_BV, 'sc_EK_BV': sc_EK_BV,
                          'ac_SI': ac_S, 'cov_BV_EK': cov_BV_EK,
                          'cov_SI_EK': cov_SI_EK,
                          'cov_BV_IK': cov_BV_IK,
                          'cov_IK_SI': cov_IK_SI,
                          'cov_IK_EK': cov_IK_EK,
                          'corr_BV_EK': corr_BV_EK,
                          'corr_IK_EK': corr_IK_EK,
                          'inv_spike': inv_spike}

    # Macro aggregates:
    agg_B = (optB * Gamma).sum()
    agg_E = (op3 * Gamma).sum()
    agg_I = (optI * Gamma).sum()
    agg_K = (k3grid * Gamma).sum()
    agg_Y = (Gamma.sum(axis=2) * y).sum()
    agg_D = (div * Gamma).sum()
    agg_S = (equity * Gamma).sum()
    agg_L_d = (Gamma.sum(axis=2) * l_d).sum()
    agg_Psi = (Gamma * VFI.adj_costs(optK, k3grid, delta, psi,
                                     fixed_cost)).sum()
    agg_C = agg_Y - agg_I - agg_Psi
    agg_L_s = SS.get_L_s(w, agg_C, h, tau_l)
    mean_Q = (VF * Gamma).sum() / Gamma.sum()
    AvgQ = (VF * Gamma).sum() / (k3grid * Gamma).sum()
    agg_IIT = ((tau_d * agg_D) + (tau_l * agg_L_s) +
               (tau_i * r * agg_B) - (tau_g * agg_S))
    agg_CIT = tau_c * (agg_E - (r * f_b * agg_B) -
                       ((1 - f_e) * delta * agg_K) - f_e * agg_I)
    total_taxes = agg_CIT + agg_IIT

    # put these aggregate moments in a dictionary
    macro_dict = {'agg_B': agg_B, 'agg_E': agg_E, 'agg_I': agg_I,
                  'agg_K': agg_K, 'agg_Y': agg_Y, 'agg_D': agg_D,
                  'agg_S': agg_S, 'agg_L_d': agg_L_d,
                  'agg_Psi': agg_Psi, 'agg_C': agg_C,
                  'agg_L_s': agg_L_s, 'mean_Q': mean_Q, 'AvgQ': AvgQ,
                  'total_taxes': total_taxes, 'agg_CIT': agg_CIT,
                  'w': w, 'r': r}

    # Financing regimes
    frac_equity = (is_using_equity * Gamma).sum() / Gamma.sum()
    frac_div = ((1 - is_constrained) * Gamma).sum() / Gamma.sum()
    frac_constrained = ((is_constrained - is_using_equity) *
                        Gamma).sum() / Gamma.sum()
    frac_debt = ((optB > 0) * Gamma).sum() / Gamma.sum()
    frac_neg_debt = ((optB < 0) * Gamma).sum() / Gamma.sum()
    frac_equity_debt = (((equity > 0) & (optB > 0)) *
                        Gamma).sum() / Gamma.sum()
    frac_div_debt = (((div > 0) & (optB > 0)) * Gamma).sum() / Gamma.sum()
    frac_constrained_debt = (((equity == 0) & (div == 0) & (optB > 0)) *
                             Gamma).sum() / Gamma.sum()

    # moments by regime
    share_K_equity = ((is_using_equity * k3grid) * Gamma).sum() / agg_K
    share_K_div = (((1 - is_constrained) * k3grid) * Gamma).sum() / agg_K
    share_K_constrained = (((is_constrained - is_using_equity) * k3grid) *
                           Gamma).sum() / agg_K
    share_I_equity = ((is_using_equity * optI) * Gamma).sum() / agg_I
    share_I_div = (((1 - is_constrained) * optI) * Gamma).sum() / agg_I
    share_I_constrained = (((is_constrained - is_using_equity) * optI) *
                           Gamma).sum() / agg_I
    if agg_B != 0:
        share_B_equity = ((is_using_equity * optB) * Gamma).sum() / agg_B
        share_B_div = (((1 - is_constrained) * optB) *
                       Gamma).sum() / agg_B
        share_B_constrained = (((is_constrained - is_using_equity) *
                                optB) * Gamma).sum() / agg_B
    else:
        share_B_equity = 0
        share_B_div = 0
        share_B_constrained = 0
    IK_equity = (((is_using_equity * optI) * Gamma).sum() /
                 ((is_using_equity * k3grid) * Gamma).sum())
    IK_div = ((((1 - is_constrained) * optI) * Gamma).sum() /
              (((1 - is_constrained) * k3grid) * Gamma).sum())
    IK_constrained = ((((is_constrained - is_using_equity) * optI) *
                       Gamma).sum() /
                      (((is_constrained - is_using_equity) * k3grid) *
                       Gamma).sum())
    BV_equity = (((is_using_equity * optB) * Gamma).sum() /
                 ((is_using_equity * (VF + optB)) * Gamma).sum())
    BV_div = ((((1 - is_constrained) * optB) * Gamma).sum() /
              (((1 - is_constrained) * (VF + optB)) * Gamma).sum())
    BV_constrained = ((((is_constrained - is_using_equity) * optB) *
                       Gamma).sum() /
                      (((is_constrained - is_using_equity) *
                        (VF + optB)) * Gamma).sum())
    EK_equity = (((is_using_equity * op3) * Gamma).sum() /
                 ((is_using_equity * k3grid) * Gamma).sum())
    EK_div = ((((1 - is_constrained) * op3) * Gamma).sum() /
              (((1 - is_constrained) * k3grid) * Gamma).sum())
    EK_constrained = ((((is_constrained - is_using_equity) * op3) *
                       Gamma).sum() /
                      (((is_constrained - is_using_equity) * k3grid) *
                       Gamma).sum())
    AvgQ_equity = (((is_using_equity * VF) * Gamma).sum() /
                   ((is_using_equity * k3grid) * Gamma).sum())
    AvgQ_div = ((((1 - is_constrained) * VF) * Gamma).sum() /
                (((1 - is_constrained) * k3grid) * Gamma).sum())
    AvgQ_constrained = ((((is_constrained - is_using_equity) * VF) *
                         Gamma).sum() /
                        (((is_constrained - is_using_equity) * k3grid) *
                         Gamma).sum())

    # put these moments by regime type in a dictionary
    regimes_dict = {'frac_equity': frac_equity, 'frac_div': frac_div,
                    'frac_constrained': frac_constrained,
                    'frac_debt': frac_debt,
                    'frac_neg_debt': frac_neg_debt,
                    'frac_equity_debt': frac_equity_debt,
                    'frac_div_debt': frac_div_debt,
                    'frac_constrained_debt': frac_constrained_debt,
                    'share_K_equity': share_K_equity,
                    'share_K_div': share_K_div,
                    'share_K_constrained': share_K_constrained,
                    'share_I_equity': share_I_equity,
                    'share_I_div': share_I_div,
                    'share_I_constrained': share_I_constrained,
                    'share_B_equity': share_B_equity,
                    'share_B_div': share_B_div,
                    'share_B_constrained': share_B_constrained,
                    'IK_equity': IK_equity, 'IK_div': IK_div,
                    'IK_constrained': IK_constrained,
                    'EK_equity': EK_equity, 'EK_div': EK_div,
                    'EK_constrained': EK_constrained,
                    'BV_equity': BV_equity, 'BV_div': BV_div,
                    'BV_constrained': BV_constrained,
                    'AvgQ_equity': AvgQ_equity, 'AvgQ_div': AvgQ_div,
                    'AvgQ_constrained': AvgQ_constrained}

    if print_moments:
        print('The aggregate investment rate = ', agg_IK)
        print('The aggregate ratio of dividends to earnings = ', agg_DE)
        print('The aggregate ratio of equity to new investment = ',
              agg_SI)
        print('The volatility in the investment rate = ', sd_IK)
        print('The autocorrelation in the investment rate = ', ac_IK)
        print('The volatility of the earnings/capital ratio = ', sd_EK)
        print('The autocorrelation in the earnings/capital ratio = ',
              ac_EK)
        # print('The fraction of firms issuing equity is: ', frac_equity)
        # print('The fraction of firms who are financially constrained '
        #       'is: ', frac_constrained)
        # print('The fraction of firms distributing dividends is: ',
        #       frac_div)
        # print('Share of capital for equity regime: ', share_K_equity)
        # print('Share of capital for constrained regime: ',
        #       share_K_constrained)
        # print('Share of capital for dividend regime: ', share_K_div)
        # print('Share of investment for equity regime: ',
        #       share_I_equity)
        # print('Share of investment for constrained regime: ',
        #       share_I_constrained)
        # print('Share of investment for dividend regime: ',
        #       share_I_div)
        # print('Mean E/K for equity regime: ', EK_equity)
        # print('Mean E/K for constrained regime: ', EK_constrained)
        # print('Mean E/K for dividend regime: ', EK_div)
        # print('Mean I/K for equity regime: ', IK_equity)
        # print('Mean I/K for constrained regime: ', IK_constrained)
        # print('Mean I/K for dividend regime: ', IK_div)
        # print('Avg Q for equity regime: ', AvgQ_equity)
        # print('Avg Q for constrained regime: ', AvgQ_constrained)
        # print('Avg Q for dividend regime: ', AvgQ_div)
        print('The aggregate leverage ratio = ', agg_BV)
        print('The fraction with positive debt = ', frac_debt)
        print('The fraction with negative debt = ', frac_neg_debt)

    model_moments = {'cross_section': cross_section_dict,
                     'macro': macro_dict, 'regimes': regimes_dict}

    return model_moments
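# A tiny self-contained illustration of the distribution-weighted moments
# used throughout firm_moments: with a stationary distribution Gamma over
# states, a cross-sectional mean is sum(x * Gamma) / sum(Gamma), and the
# standard deviation uses the same weighting. Values are illustrative.
import numpy as np

x = np.array([0.10, 0.25, 0.40])   # e.g. investment rates by state
Gamma = np.array([0.5, 0.3, 0.2])  # stationary mass on each state
mean_x = (x * Gamma).sum() / Gamma.sum()
sd_x = np.sqrt((((x - mean_x) ** 2) * Gamma).sum())
print(mean_x, sd_x)  # 0.205, ~0.117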
os.makedirs(ss_output_dir)
ss_outputfile = os.path.join(ss_output_dir, 'ss_vars.pkl')
ss_paramsfile = os.path.join(ss_output_dir, 'ss_args.pkl')

# Compute steady-state solution
ss_args = (S, beta, sigma, l_tilde, b_ellip, upsilon, chi_n_vec, A,
           alpha, delta, SS_BsctTol, SS_EulTol, SS_EulDiff, xi_SS,
           SS_maxiter)
if SS_solve:
    print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
    rss_init = 0.06
    c1_init = 0.1
    init_vals = (rss_init, c1_init)
    print('Solving SS outer loop using bisection method on r.')
    ss_output = ss.get_SS(init_vals, ss_args, SS_graphs)
    # Save ss_output as pickle
    pickle.dump(ss_output, open(ss_outputfile, 'wb'))
    pickle.dump(ss_args, open(ss_paramsfile, 'wb'))
# Don't compute steady-state, get it from pickle
else:
    # Make sure that the SS output files exist
    ss_vars_exst = os.path.exists(ss_outputfile)
    ss_args_exst = os.path.exists(ss_paramsfile)
    if (not ss_vars_exst) or (not ss_args_exst):
        # If the files don't exist, stop the program and run the steady-
        # state solution first
        err_msg = ('ERROR: The SS output files do not exist and ' +
                   'SS_solve=False. Must set SS_solve=True and ' +
# Compute steady-state solution
if SS_solve:
    print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
    Kss_init = 10.0
    Lss_init = 10.0
    rss_init = 0.05
    wss_init = 1.2
    c1_init = 0.1
    init_vals = (Kss_init, Lss_init, rss_init, wss_init, c1_init)
    ss_args = (S, beta, sigma, l_tilde, b_ellip, upsilon, chi_n_vec, A,
               alpha, delta, SS_tol, SS_EulDiff, hh_fsolve, KL_outer)
    if SS_outer_root:
        print('Solving SS outer loop using root finder.')
        ss_output = ss.get_SS_root(init_vals, ss_args, SS_graphs)
    else:
        print('Solving SS outer loop using bisection method.')
        ss_output = ss.get_SS_bsct(init_vals, ss_args, SS_graphs)
    # Save ss_output as pickle
    pickle.dump(ss_output, open(ss_outputfile, 'wb'))
    pickle.dump(ss_args, open(ss_paramsfile, 'wb'))
# Don't compute steady-state, get it from pickle
else:
    # Make sure that the SS output files exist
    ss_vars_exst = os.path.exists(ss_outputfile)
    ss_args_exst = os.path.exists(ss_paramsfile)
    if (not ss_vars_exst) or (not ss_args_exst):
        # If the files don't exist, stop the program and run the steady-
import file_concat
import SS
import Xbar

adultdriver = file_concat.file_concat(
    'vehicle-2014-small-adultdriver-new.csv', 'accidents-2014-small.csv')
[Xbar, counter] = Xbar.averages(adultdriver)
S_X = SS.s(adultdriver, Xbar)
print(S_X, Xbar, counter)
bvec_guess = np.array([
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    -0.01, 0.1, 0.2, 0.23, 0.25, 0.23, 0.2, 0.1,
    0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
    0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
    0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
print(f'bvec_guess: {bvec_guess.shape}')
# f_params = (nvec, A, alpha, delta, bq_distr, beta)
# b_cnstr, c_cnstr, K_cnstr = ss.feasible(f_params, bvec_guess)

# beta, sigma, nvec, A, alpha, delta, SS_tol, chi, fert_rates,
# mort_rates, imm_rates, omega_SS, gn_SS, g_y
ss_params = (beta, sigma, nvec, A, alpha, delta, SS_tol, fert_rates,
             mort_rates, imm_rates, omega_SS, g_vec[-1], g_y)
ss_output = ss.get_SS(ss_params, bvec_guess, SS_graphs)
b_ss = np.append([0], ss_output['b_ss'])
K_ss = ss_output['K_ss']
w_ss = ss_output['w_ss']
r_ss = ss_output['r_ss']
c_ss = ss_output['c_ss']
C_ss = ss_output['C_ss']
Y_ss = ss_output['Y_ss']
EulErr_ss = np.append([0], ss_output['EulErr_ss'])
RCerr_ss = ss_output['RCerr_ss']
BQ_ss = ss_output['BQ_ss']
results = np.zeros((S, 10))
results[:, 0] = b_ss.T
cur_path = os.path.split(os.path.abspath(__file__))[0]
ss_output_fldr = 'OUTPUT/SS'
ss_output_dir = os.path.join(cur_path, ss_output_fldr)
if not os.access(ss_output_dir, os.F_OK):
    os.makedirs(ss_output_dir)
ss_outputfile = os.path.join(ss_output_dir, 'ss_vars.pkl')
ss_paramsfile = os.path.join(ss_output_dir, 'ss_args.pkl')

# Compute steady-state solution
if SS_solve:
    print('BEGIN EQUILIBRIUM STEADY-STATE COMPUTATION')
    # Make initial guess for b_ss and make sure it is feasible
    # (K >= epsilon)
    bss_guess = 0.05 * np.ones(S - 1)
    f_params = (nvec, A, alpha, delta)
    cg_cstr, Kg_cstr, bg_cstr = ss.feasible(bss_guess, f_params)
    if Kg_cstr or cg_cstr.max():
        if Kg_cstr:
            err_msg = ('ERROR: Initial guess for b_ss (bss_guess) ' +
                       'caused an infeasible value for K ' +
                       '(K < epsilon)')
            raise RuntimeError(err_msg)
        elif not Kg_cstr and cg_cstr.max():
            err_msg = ('ERROR: Initial guess for b_ss (bss_guess) ' +
                       'caused infeasible value(s) for c_s ' +
                       '(c_s <= 0)')
            print('cg_cstr: ', cg_cstr)
            raise RuntimeError(err_msg)
    else:
        ss_args = (nvec, beta, sigma, A, alpha, delta, SS_tol,
                   SS_EulDiff)
        ss_output = ss.get_SS(bss_guess, ss_args, SS_graphs)
        # Save ss_output as pickle