def __init__(self, *, base, targ_mode, compare=None, setup=None,
             base_name='Model', compare_name='Data',
             graphs_title_add=None, moments_aux=None, noplot=False):

    if setup is None:
        self.setup = ModelSetup()
    else:
        self.setup = setup  # bugfix: the passed-in setup was silently ignored

    if type(base) is str:
        self.moments = filer(base, 0, 0, repeat=False)
    else:
        self.moments = base

    self.targ_mode = targ_mode
    self.base_name = base_name
    self.compare_name = compare_name

    if graphs_title_add:
        self.graphs_title_add = '\n ' + graphs_title_add
    else:
        self.graphs_title_add = ''

    if type(compare) is str:
        targ_load = filer(compare, 0, 0, repeat=False)
        # emulating targets
        self.targets = {key: (targ_load[key], 0.0) for key in targ_load}
    else:
        self.targets = all_targets(targ_mode)

    if not noplot:
        try:
            self.print_things()
        except Exception:
            print('failed to print')

    self.moments_aux = moments_aux

    if not noplot:
        self.plot()
def __init__(self, Mlist, *args, use_pickles=False, **kwargs):

    if type(Mlist) is not list:
        Mlist = [Mlist]

    if not use_pickles:
        Agents.__init__(self, Mlist, *args, nosim=True, **kwargs)
    else:
        try:
            pmeet_exo = filer('pmeet_exo.pkl', 0, 0, repeat=False)
            ppreg_exo = filer('ppreg_exo.pkl', 0, 0, repeat=False)
            Agents.__init__(self, Mlist, *args, pmeet_exo=pmeet_exo,
                            ppreg_exo=ppreg_exo, nosim=True, **kwargs)
        except Exception:
            # fall back to rebuilding the exogenous probabilities from the model
            print('no available pickles found!')
            mdl = Mlist
            assert type(mdl) is list
            pmeet_exo = np.array(mdl[0].setup.pars['pmeet_t'][:mdl[0].setup.pars['Tmeet']])
            ppreg_exo = np.array([mdl[0].setup.upp_precomputed_fem[t][3]
                                  for t in range(mdl[0].setup.pars['Tmeet'])])
            Agents.__init__(self, Mlist, *args, pmeet_exo=pmeet_exo,
                            ppreg_exo=ppreg_exo, nosim=True, **kwargs)

    self.define_targets()
    self.run_sim()
    self.get_shares()

    if use_pickles:
        pmeet_exo = self.pmeet_exo.copy()
        ppreg_exo = self.ppreg_exo.copy()
        filer('pmeet_exo.pkl', pmeet_exo, True)
        filer('ppreg_exo.pkl', ppreg_exo, True)
        print('saved pickles')
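# Convention note, inferred from usage throughout this repo: tiktak.filer
# reads a pickle when its write flag is falsy -- filer(name, 0, 0) -- and
# writes when it is True -- filer(name, obj, True). A minimal sketch of the
# read-or-rebuild caching idiom used above; the helper name is hypothetical:
def load_or_make(name, make):
    try:
        return filer(name, 0, 0, repeat=False)
    except KeyboardInterrupt:
        raise
    except Exception:
        obj = make()             # rebuild from scratch
        filer(name, obj, True)   # cache for next time
        return obj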
def get_point(high_e, read_wisdom=False):

    if high_e:
        x = {'sigma_psi': 0.02351795998155682,
             'sigma_psi_init': 0.06319749482803798,
             'pmeet_21': 0.36231065032307264,
             'pmeet_30': 1.2185173549427066,
             'pmeet_40': 1.0,
             'preg_21': 0.006683178914886262,
             'preg_28': 0.030446109898106186,
             'preg_35': 0.026257274180916695,
             'u_shift_mar': 0.24749413577501758,
             'util_alp': 0.5739290298121097,
             'util_kap': 8.484242183675391,
             'util_qbar': 5.561954628246849,
             'disutil_marry_sm_mal': 4.408415907989258,
             'disutil_shotgun': 0.4715857724582857,
             'abortion_costs': 1.8092065614536414,
             'p_abortion_access': 0.9512267376733684,
             'u_lost_divorce': 0.8892578980806901,
             'mu_psi_init': -0.15876965206098093,
             'high education': True}

        if read_wisdom:
            try:
                print('read wisdom from file!')
                o = filer('wisdom.pkl', 0, 0, repeat=False)[0]
                x = calibration_params()[-1](o[1])
                print(x)
                print('saved distance is {}'.format(o[0]))
                x.update({'high education': True})
            except Exception:
                print('failed to read from wisdom file')

        targ_mode = 'high education'
    else:
        x = {'sigma_psi': 0.04119975516565719,
             'sigma_psi_init': 0.07184509981781,
             'pmeet_21': 0.7300641341551373,
             'pmeet_30': 0.38552526708748397,
             'pmeet_40': 1.4132304041226518,
             'preg_21': 0.1029100967053943,
             'preg_28': 0.11241132276639117,
             'preg_35': 0.11203564468462099,
             'u_shift_mar': 0.338428482678413,
             'util_alp': 0.5195282434982275,
             'util_kap': 7.152398760885778,
             'util_qbar': 0.0,
             'disutil_marry_sm_mal': 3.18966037249299,
             'disutil_shotgun': 0.3647670950676456,
             'abortion_costs': 0.2962878054482049,
             'p_abortion_access': 0.6662167114665236,
             'u_lost_divorce': 0.5275074834332285,
             'mu_psi_init': -0.24342175587968384,
             'high education': False}

        targ_mode = 'low education'

        if read_wisdom:
            try:
                print('read wisdom from file!')
                o = filer('wisdom.pkl', 0, 0, repeat=False)[0]
                x = calibration_params()[-1](o[1])
                print(x)
                print('saved distance is {}'.format(o[0]))
                x.update({'high education': False})
            except Exception:
                print('failed to read from wisdom file')

    return x, targ_mode
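# Usage sketch (not in the original file): fetch the saved calibration point
# for the college sample and inspect it. The returned targ_mode string feeds
# target_values() elsewhere in this repo.
def _get_point_example():
    x, targ_mode = get_point(high_e=True, read_wisdom=False)
    print('targeting mode: {}'.format(targ_mode))
    for key in sorted(x):
        print('  {} = {}'.format(key, x[key]))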
def produce_cf_table(educ='col'):
    names = [('BL', 'baseline'),
             ('NS', 'no social stigma'),
             ('FC', 'costless abortion'),
             ('NR', 'no remar penalty'),
             #('SD', 'no skills depreciation'),
             ('FD', 'no divorce costs'),
             ('ND', 'infinite divorce costs'),
             #('PG', 'no pay gap')
             ]

    def file_name(nm):
        return '{} {}.pkl'.format(educ, nm)

    entries = [('divorced in 10 years, kids-first',
                'divorced by years after marriage if kids first, 10'),
               ('divorced in 10 years, marriage-first',
                'divorced by years after marriage if marriage first, 10'),
               ('kids-first at 30', 'k then m in sample at 30'),
               ('unplanned pregnancies aborted', 'unplanned pregnancies aborted'),
               ('single mothers at 35', 'single mothers among mothers at 35')]

    table_cols = list()

    for ename, nm in names:
        fname = file_name(nm)
        mom = filer(fname, 0, 0)
        table_col = [r'\textbf{' + ename + r'}'] + [mom[e] for _, e in entries]
        table_cols.append(table_col)

    table_left = [r'\textbf{Experiment}'] + \
                 [r'\textit{' + e + r'}' for e, _ in entries]

    # build table rows
    table_rows = list()
    for irow in range(len(entries) + 1):
        table_row = table_left[irow]
        for icol in range(len(names)):
            try:
                table_row += ' & ' + '{:02.1f}'.format(100*table_cols[icol][irow])
            except ValueError:
                # header strings cannot be formatted as floats
                table_row += ' & ' + '{}'.format(table_cols[icol][irow])
        table_row += r' \\'
        table_rows.append(table_row)

    for r in table_rows:
        print(r)
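# Usage note (illustrative only -- the numbers below are placeholders, not
# model output). Each printed row is one LaTeX table line, e.g.:
#   \textit{divorced in 10 years, kids-first} & 12.3 & 11.0 & 9.8 & ... \\
# Entries that cannot be formatted as percentages fall through the
# ValueError branch above and are printed verbatim.
# produce_cf_table(educ='col')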
def __init__(self, nogrid=False, divorce_costs_k='Default',
             divorce_costs_nk='Default', **kwargs):
    p = dict()

    #age_begin = 21
    #age_data = 23  # age at which the data start being available
    #age_death = 76
    #age_retire = 65
    #age_fertile = 41

    T = 55
    Tret = 45   # first period when the agent is retired
    Tfert = 20  # first period when infertile
    Tdiv = 44   # first period when one cannot divorce / renegotiate
    Tmeet = 43  # first period when one stops meeting partners
    Tinc = 25   # first period where we stop tracking the income process and assume it fixed

    p['T'] = T
    p['Tret'] = Tret
    p['Tfert'] = Tfert
    p['Tsim'] = T
    p['Tmeet'] = Tmeet
    p['n_zf_t'] = [7]*Tret + [1]*(T - Tret)
    p['n_zm_t'] = [5]*Tret + [1]*(T - Tret)
    p['sigma_psi_init'] = 0.28
    p['mu_psi_init'] = 0.0
    p['sigma_psi'] = 0.11
    p['R_t'] = [1/0.96]*T
    p['n_psi'] = 17
    p['beta_t'] = [0.96]*T
    p['A'] = 1.0  # consumption in couple: c = (1/A)*[c_f^(1+rho) + c_m^(1+rho)]^(1/(1+rho))
    p['crra_power'] = 1.5
    p['couple_rts'] = 0.23
    p['sig_partner_a'] = 0.1
    p['mu_partner_a_female'] = 0.00
    p['mu_partner_a_male'] = -0.00
    p['dump_factor_z'] = 0.75
    p['sig_partner_z'] = 0.25
    p['mu_partner_z_male'] = -0.02
    p['mu_partner_z_female'] = 0.02
    p['m_bargaining_weight'] = 0.5
    p['pmeet_21'] = 0.1
    p['pmeet_30'] = 0.2
    p['pmeet_40'] = 0.1
    p['pmeet_pre25'] = None
    p['ppreg_pre25'] = None
    p['pmeet_exo'] = None
    p['ppreg_exo'] = None
    p['m_zf'] = 1.0
    p['m_zf0'] = 1.0
    p['no kids at meeting'] = True
    p['high education'] = True  # which wage trend to pick
    p['any kids'] = True
    p['z_drift'] = -0.09 if p['high education'] else -0.06
    p['wret'] = 0.8
    p['uls'] = 0.2
    p['pls'] = 1.0
    p['income_sd_mult'] = 1.0
    p['pay_gap'] = True
    p['preg_mult'] = 1.0
    p['u_shift_mar'] = 0.0
    p['u_shift_coh'] = 0.0
    p['sm_shift'] = 0.0
    p['disutil_marry_sm_fem'] = 0.0
    p['disutil_marry_sm_mal'] = 10.0
    p['disutil_shotgun'] = 2.0
    p['pmeet_multiplier_fem'] = 1.0
    p['p_to_meet_sm_if_mal'] = 0.1
    p['taste_shock_mult'] = 1.0
    p['p_abortion_access'] = 0.5
    p['abortion_costs'] = 10.0
    p['u_lost_divorce'] = 0.0
    p['child_a_cost'] = 0.0
    p['child_support_share'] = 0.2
    p['child_support_awarded_nm'] = 0.284
    p['child_support_awarded_div'] = 0.461
    p['util_lam'] = 0.7
    p['util_alp'] = 0.5
    p['util_xi'] = 1.5
    p['util_kap'] = 0.5
    p['util_qbar'] = 0.0
    p['util_out_lf'] = 0.0
    p['ppreg_sim_mult'] = 1.0
    p['tax_childless_couples'] = True
    p['tax_couples_woth_children'] = True  # key name kept as spelled elsewhere in the codebase
    p['tax_single_mothers'] = True
    p['preg_21'] = 0.01
    p['preg_28'] = 0.5
    p['preg_35'] = 0.3

    for key, value in kwargs.items():
        assert (key in p), 'wrong name?'
        p[key] = value

    if p['high education']:
        p['sig_zm'] = p['income_sd_mult']*0.16138593
        p['sig_zm_0'] = p['income_sd_mult']*0.41966813
        p['sig_zf'] = p['income_sd_mult']*p['m_zf']*0.19571624
        p['sig_zf_0'] = p['income_sd_mult']*p['m_zf0']*0.43351219
    else:
        p['sig_zm'] = p['income_sd_mult']*0.17195085
        p['sig_zm_0'] = p['income_sd_mult']*0.2268650
        p['sig_zf'] = p['income_sd_mult']*p['m_zf']*0.1762148
        p['sig_zf_0'] = p['income_sd_mult']*p['m_zf0']*0.1762148

    # initial share of single moms
    p['sm_init'] = (0.02 if p['high education'] else 0.25) if p['any kids'] else 0.0

    if p['high education']:
        # college
        m_trend_data = [3.3899509, 3.5031169, 3.6395543, 3.7025571, 3.7646729,
                        3.8168057, 3.8706297, 3.9168266, 3.9628897, 3.999365,
                        4.0401083, 4.0584735, 4.114779, 4.1344154, 4.1570761,
                        4.1703323, 4.189479, 4.1908761, 4.2096766, 4.2190858,
                        4.2284456, 4.2330354, 4.2364118, 4.2443216, 4.2449467]

        f_trend_data = [3.0796507, 3.1468105, 3.2911468, 3.4024807, 3.5485041,
                        3.6067711, 3.662677, 3.7131363, 3.7598972, 3.794152,
                        3.8221141, 3.8500988, 3.8791148, 3.8860543, 3.9176438,
                        3.9332115, 3.9479731, 3.9537252, 3.9578583, 3.9480597,
                        3.9559513, 3.9562479, 3.9585957, 3.9576563, 3.9589798]

        nm = len(m_trend_data) - 1
        nf = len(f_trend_data) - 1
        p['m_wage_trend'] = np.array([m_trend_data[min(t, nm)] for t in range(T)])
        p['f_wage_trend'] = np.array([f_trend_data[min(t, nf)] for t in range(T)])
    else:
        # no college
        m_trend_data = [3.091856, 3.130104, 3.2259613, 3.2581962, 3.2772099,
                        3.2999002, 3.3206571, 3.3314928, 3.3573047, 3.3663062,
                        3.3801406, 3.3909449, 3.4160915, 3.4358479, 3.4488938,
                        3.4534575, 3.4654005, 3.4655065, 3.4815268, 3.4859583,
                        3.4967845, 3.4972438, 3.5118738, 3.525121, 3.5271331]

        f_trend_data = [2.9056071, 2.9427025, 2.9773922, 2.999882, 3.0932755,
                        3.1129375, 3.1199322, 3.1352323, 3.1498192, 3.1606338,
                        3.168912, 3.1722982, 3.1775691, 3.1831384, 3.2040837,
                        3.1997439, 3.2122542, 3.2085543, 3.2209876, 3.2232882,
                        3.2273824, 3.2336534, 3.233879, 3.244275, 3.2527455]

        nm = len(m_trend_data) - 1
        nf = len(f_trend_data) - 1
        p['m_wage_trend'] = np.array([m_trend_data[min(t, nm)] for t in range(T)])
        p['f_wage_trend'] = np.array([f_trend_data[min(t, nf)] for t in range(T)])

    if not p['pay_gap']:
        p['sig_zf'], p['sig_zf_0'] = p['sig_zm'], p['sig_zm_0']
        p['f_wage_trend'] = p['m_wage_trend']

    p['preg_az'] = 0.00
    p['preg_azt'] = 0.00

    # get the probability of meeting, adjusting for year-period
    p['taste_shock'] = 0.0  #*p['taste_shock_mult']*0.0 #p['sigma_psi']

    p['is fertile'] = [p['any kids']]*Tfert + [False]*(T - Tfert)
    p['can divorce'] = [True]*Tdiv + [False]*(T - Tdiv)
    #p['poutsm_t'] = [p['poutsm']]*T

    p['pmeet_0'], p['pmeet_t'], p['pmeet_t2'] = prob_polyfit(
        (p['pmeet_21'], 0), (p['pmeet_30'], 9), (p['pmeet_40'], 19), max_power=2)

    p['preg_a0'], p['preg_at'], p['preg_at2'] = prob_polyfit(
        (p['preg_21'], 0), (p['preg_28'], 7), (p['preg_35'], 14), max_power=2)

    if p['pmeet_exo'] is None:
        p['pmeet_t'] = [np.clip(p['pmeet_0'] + t*p['pmeet_t'] + (t**2)*p['pmeet_t2'], 0.0, 1.0)
                        for t in range(20)] + \
                       [p['pmeet_40']]*(Tmeet - 20) + [0.0]*(T - Tmeet)
        p['pmeet_t'] = np.array(p['pmeet_t'])
        if p['pmeet_pre25'] is not None:
            p['pmeet_t'][:4] = p['pmeet_pre25']
    else:
        p['pmeet_t'] = [p['pmeet_exo'][min(t, p['pmeet_exo'].size - 1)] for t in range(Tmeet)] + \
                       [0.0]*(T - Tmeet)
        p['pmeet_t'] = np.array(p['pmeet_t'])

    p['n_psi_t'] = [p['n_psi']]*T
    p['psi_clip'] = 8.5*p['sigma_psi_init']

    p['fert_prob_t'] = [0.86*(t <= 3) + 0.78*(t > 3 and t <= 8) +
                        0.63*(t >= 9 and t <= 13) + 0.52*(t >= 14)
                        for t in range(T)]
    #p['fert_prob_t'] = [1.0]*T

    self.pars = p

    self.dtype = np.float64  # type for all floats

    # relevant for integration
    self.state_names = ['Female, single', 'Male, single', 'Female and child',
                        'Couple, no children', 'Couple and child']

    # female labor supply
    lmin = 0.2
    lmax = 1.0
    nl = 2

    ls = np.array([0.2, 1.0])  #np.linspace(lmin,lmax,nl,dtype=self.dtype)
    ps = np.array([p['pls'], 0.0])
    ls_ushift = np.array([p['util_out_lf'], 0.0])

    self.ls_levels = dict()
    self.ls_levels['Couple, no children'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Female, single'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Male, single'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Couple and child'] = ls
    self.ls_levels['Female and child'] = ls

    self.ls_ushift = dict()
    self.ls_ushift['Couple, no children'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Female, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Male, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Couple and child'] = ls_ushift
    self.ls_ushift['Female and child'] = ls_ushift

    #self.ls_utilities = np.array([p['uls'],0.0],dtype=self.dtype)
    self.ls_pdown = dict()
    self.ls_pdown['Couple, no children'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Female, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Male, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Female and child'] = ps
    self.ls_pdown['Couple and child'] = ps

    self.nls = dict()
    self.nls['Couple and child'] = len(self.ls_levels['Couple and child'])
    self.nls['Couple, no children'] = len(self.ls_levels['Couple, no children'])
    self.nls['Female and child'] = len(self.ls_levels['Female and child'])
    self.nls['Female, single'] = len(self.ls_levels['Female, single'])
    self.nls['Male, single'] = len(self.ls_levels['Male, single'])

    # Cost of Divorce
    if divorce_costs_k == 'Default':
        # by default the costs are set at the bottom
        self.divorce_costs_k = DivorceCosts(u_lost_m=self.pars['u_lost_divorce'],
                                            u_lost_f=self.pars['u_lost_divorce'])
    else:
        if isinstance(divorce_costs_k, dict):
            # you can feed in arguments to DivorceCosts
            self.divorce_costs_k = DivorceCosts(**divorce_costs_k)
        else:
            # or just the output of DivorceCosts
            assert isinstance(divorce_costs_k, DivorceCosts)
            self.divorce_costs_k = divorce_costs_k

    # Cost of Separation
    if divorce_costs_nk == 'Default':
        # by default the costs are set at the bottom
        self.divorce_costs_nk = DivorceCosts(u_lost_m=self.pars['u_lost_divorce'],
                                             u_lost_f=self.pars['u_lost_divorce'])
    else:
        if isinstance(divorce_costs_nk, dict):
            # you can feed in arguments to DivorceCosts
            self.divorce_costs_nk = DivorceCosts(**divorce_costs_nk)
        else:
            # or just the output of DivorceCosts
            assert isinstance(divorce_costs_nk, DivorceCosts)
            self.divorce_costs_nk = divorce_costs_nk

    # exogrid should be deprecated
    if not nogrid:

        exogrid = dict()

        # let's approximate three Markov chains
        # this sets up the exogenous grid
        # FIXME: this uses the number of points from the 0th entry;
        # in principle we can generalize this
        exogrid['zf_t'], exogrid['zf_t_mat'] = rouw_nonst(p['T'], p['sig_zf'], p['sig_zf_0'], p['n_zf_t'][0])
        exogrid['zm_t'], exogrid['zm_t_mat'] = rouw_nonst(p['T'], p['sig_zm'], p['sig_zm_0'], p['n_zm_t'][0])

        for t in range(Tinc, Tret):
            for key in ['zf_t', 'zf_t_mat', 'zm_t', 'zm_t_mat']:
                exogrid[key][t] = exogrid[key][Tinc]

        for t in range(Tret, T):
            exogrid['zf_t'][t] = np.array([np.log(p['wret'])])
            exogrid['zm_t'][t] = np.array([np.log(p['wret'])])
            exogrid['zf_t_mat'][t] = np.atleast_2d(1.0)
            exogrid['zm_t_mat'][t] = np.atleast_2d(1.0)

        # fix transition from non-retired to retired
        exogrid['zf_t_mat'][Tret - 1] = np.ones((p['n_zf_t'][Tret - 1], 1))
        exogrid['zm_t_mat'][Tret - 1] = np.ones((p['n_zm_t'][Tret - 1], 1))

        exogrid['psi_t'], exogrid['psi_t_mat'] = rouw_nonst(p['T'], p['sigma_psi'], p['sigma_psi_init'], p['n_psi_t'][0])
        #exogrid['psi_t'], exogrid['psi_t_mat'] = tauchen_nonst(p['T'],p['sigma_psi'],p['sigma_psi_init'],p['n_psi_t'][0],nsd=2.5,fix_0=False)
        #assert False

        zfzm, zfzmmat = combine_matrices_two_lists(exogrid['zf_t'], exogrid['zm_t'],
                                                   exogrid['zf_t_mat'], exogrid['zm_t_mat'])
        all_t, all_t_mat = combine_matrices_two_lists(zfzm, exogrid['psi_t'],
                                                      zfzmmat, exogrid['psi_t_mat'])
        all_t_mat_sparse_T = [sparse.csc_matrix(D.T) if D is not None else None
                              for D in all_t_mat]

        # create a new "bad" version of the transition matrix p(zf_t)
        zf_bad = [tauchen_drift(exogrid['zf_t'][t], exogrid['zf_t'][t + 1],
                                1.0, p['sig_zf'], p['z_drift'])
                  for t in range(self.pars['T'] - 1)] + [None]
        #zf_bad = [cut_matrix(exogrid['zf_t_mat'][t]) if t < Tret - 1
        #          else (exogrid['zf_t_mat'][t] if t < T - 1 else None)
        #          for t in range(self.pars['T'])]

        zf_t_mat_down = zf_bad

        zfzm, zfzmmat = combine_matrices_two_lists(exogrid['zf_t'], exogrid['zm_t'],
                                                   zf_t_mat_down, exogrid['zm_t_mat'])
        all_t_down, all_t_mat_down = combine_matrices_two_lists(zfzm, exogrid['psi_t'],
                                                                zfzmmat, exogrid['psi_t_mat'])
        all_t_mat_down_sparse_T = [sparse.csc_matrix(D.T) if D is not None else None
                                   for D in all_t_mat_down]

        all_t_mat_by_l_nk = [[(1 - p)*m + p*md if m is not None else None
                              for m, md in zip(all_t_mat, all_t_mat_down)]
                             for p in self.ls_pdown['Couple, no children']]

        all_t_mat_by_l_spt_nk = [[(1 - p)*m + p*md if m is not None else None
                                  for m, md in zip(all_t_mat_sparse_T, all_t_mat_down_sparse_T)]
                                 for p in self.ls_pdown['Couple, no children']]

        all_t_mat_by_l_k = [[(1 - p)*m + p*md if m is not None else None
                             for m, md in zip(all_t_mat, all_t_mat_down)]
                            for p in self.ls_pdown['Couple and child']]

        all_t_mat_by_l_spt_k = [[(1 - p)*m + p*md if m is not None else None
                                 for m, md in zip(all_t_mat_sparse_T, all_t_mat_down_sparse_T)]
                                for p in self.ls_pdown['Couple and child']]

        zf_t_mat_by_l_sk = [[(1 - p)*m + p*md if md is not None else None
                             for m, md in zip(exogrid['zf_t_mat'], zf_bad)]
                            for p in self.ls_pdown['Female and child']]

        exogrid['all_t_mat_by_l_nk'] = all_t_mat_by_l_nk
        exogrid['all_t_mat_by_l_spt_nk'] = all_t_mat_by_l_spt_nk
        exogrid['all_t_mat_by_l_k'] = all_t_mat_by_l_k
        exogrid['all_t_mat_by_l_spt_k'] = all_t_mat_by_l_spt_k
        exogrid['zf_t_mat_by_l_sk'] = zf_t_mat_by_l_sk
        exogrid['all_t'] = all_t

        Exogrid_nt = namedtuple('Exogrid_nt', exogrid.keys())

        self.exogrid = Exogrid_nt(**exogrid)
        self.pars['nexo_t'] = [v.shape[0] for v in all_t]

        self.compute_child_support_transitions(child_support_share=p['child_support_share'])
        #assert False

        # Grid Couple
        self.na = 40
        self.amin = 0
        self.amax = 1000.0
        self.agrid_c = np.linspace(self.amin**0.5, self.amax**0.5, self.na,
                                   dtype=self.dtype)**2
        #tune = 1.5
        #self.agrid_c = np.geomspace(self.amin+tune,self.amax+tune,num=self.na)-tune

        # this builds a finer grid for potential savings
        s_between = 7    # default number of points between points on agrid
        s_da_min = 0.2   # minimal step (does not create more points)
        s_da_max = 10.0  # maximal step (creates more if not enough)

        self.sgrid_c = build_s_grid(self.agrid_c, s_between, s_da_min, s_da_max)
        self.vsgrid_c = VecOnGrid(self.agrid_c, self.sgrid_c)

        # Grid Single
        self.amin_s = 0
        self.amax_s = self.amax/2.0
        self.agrid_s = self.agrid_c/2.0
        #tune_s = 1.5
        #self.agrid_s = np.geomspace(self.amin_s+tune_s,self.amax_s+tune_s,num=self.na)-tune_s

        self.sgrid_s = build_s_grid(self.agrid_s, s_between, s_da_min, s_da_max)
        self.vsgrid_s = VecOnGrid(self.agrid_s, self.sgrid_s)

        # grid for theta
        self.ntheta = 11
        self.thetamin = 0.05
        self.thetamax = 0.95
        self.thetagrid = np.linspace(self.thetamin, self.thetamax, self.ntheta,
                                     dtype=self.dtype)

        self.child_a_cost_single = np.minimum(self.agrid_s, self.pars['child_a_cost'])
        self.child_a_cost_couple = np.minimum(self.agrid_c, self.pars['child_a_cost'])

        assert self.pars['child_a_cost'] < 1e-3, 'not implemented'

        #self.vagrid_child_single = VecOnGrid(self.agrid_s, self.agrid_s - self.child_a_cost_single)
        #self.vagrid_child_couple = VecOnGrid(self.agrid_c, self.agrid_c - self.child_a_cost_couple)

        # construct a finer grid for bargaining
        ntheta_fine = 10*self.ntheta  # actual number may be a bit bigger
        self.thetagrid_fine = np.unique(np.concatenate(
            (self.thetagrid,
             np.linspace(self.thetamin, self.thetamax, ntheta_fine, dtype=self.dtype))))
        self.ntheta_fine = self.thetagrid_fine.size

        i_orig = list()

        for theta in self.thetagrid:
            assert theta in self.thetagrid_fine
            i_orig.append(np.where(self.thetagrid_fine == theta)[0])

        assert len(i_orig) == self.thetagrid.size
        # allows recovery of the original grid points on the fine grid
        self.theta_orig_on_fine = np.array(i_orig).flatten()
        self.v_thetagrid_fine = VecOnGrid(self.thetagrid, self.thetagrid_fine)
        # precomputed object for interpolation

        self.exo_grids = {'Female, single': exogrid['zf_t'],
                          'Male, single': exogrid['zm_t'],
                          'Female and child': exogrid['zf_t'],
                          'Couple and child': exogrid['all_t'],
                          'Couple, no children': exogrid['all_t']}
        self.exo_mats = {'Female, single': exogrid['zf_t_mat'],
                         'Male, single': exogrid['zm_t_mat'],
                         'Female and child': exogrid['zf_t_mat_by_l_sk'],
                         'Couple and child': exogrid['all_t_mat_by_l_k'],
                         'Couple, no children': exogrid['all_t_mat_by_l_nk']}  # sparse version?

        self.utility_shifters = {'Female, single': 0.0,
                                 'Male, single': 0.0,
                                 'Female and child': p['u_shift_mar'] + p['sm_shift'],
                                 'Couple and child': p['u_shift_mar'],
                                 'Couple, no children': p['u_shift_coh']}

        # this pre-computes transition matrices for meeting a partner
        name_fem_pkl = 'az_dist_fem.pkl' if p['high education'] else 'az_dist_fem_noc.pkl'
        name_mal_pkl = 'az_dist_mal.pkl' if p['high education'] else 'az_dist_mal_noc.pkl'

        # this is not an error, things are switched:
        # women draw from the male distribution and vice versa
        name_fem_csv = 'income_assets_distribution_male_col.csv' if p['high education'] \
            else 'income_assets_distribution_male_hs.csv'
        name_mal_csv = 'income_assets_distribution_female_col.csv' if p['high education'] \
            else 'income_assets_distribution_female_hs.csv'

        try:
            self.partners_distribution_fem = filer(name_fem_pkl, 0, 0, repeat=False)
            self.partners_distribution_mal = filer(name_mal_pkl, 0, 0, repeat=False)
        except Exception:
            print('recreating estimates...')

            est_fem = get_estimates(fname=name_fem_csv,
                                    age_start=23, age_stop=42,
                                    zlist=self.exogrid.zm_t[2:],
                                    female=False, college=p['high education'])
            filer(name_fem_pkl, est_fem, True, repeat=False)
            self.partners_distribution_fem = est_fem

            est_mal = get_estimates(fname=name_mal_csv,
                                    age_start=21, age_stop=40,
                                    zlist=self.exogrid.zf_t[0:],
                                    female=True, college=p['high education'])
            filer(name_mal_pkl, est_mal, True, repeat=False)
            self.partners_distribution_mal = est_mal

        self.build_matches()

        # building m grid
        ezfmin = min([np.min(np.exp(g + t)) for g, t in zip(exogrid['zf_t'], p['f_wage_trend'])])
        ezmmin = min([np.min(np.exp(g + t)) for g, t in zip(exogrid['zm_t'], p['m_wage_trend'])])
        ezfmax = max([np.max(np.exp(g + t)) for g, t in zip(exogrid['zf_t'], p['f_wage_trend'])])
        ezmmax = max([np.max(np.exp(g + t)) for g, t in zip(exogrid['zm_t'], p['m_wage_trend'])])

        self.money_min = 0.95*min(self.ls_levels['Female and child'])*min(ezmmin, ezfmin)
        # because female labor supply can be below full time

        mmin = self.money_min
        mmax = ezfmax + ezmmax + np.max(self.pars['R_t'])*self.amax
        mint = (ezfmax + ezmmax)  # point where the denser grid begins

        ndense = 1900
        nm = 3500

        gsparse = np.linspace(mint, mmax, nm - ndense)
        gdense = np.linspace(mmin, mint, ndense + 1)  # +1 as there is a common point

        self.mgrid = np.zeros(nm, dtype=self.dtype)
        self.mgrid[ndense:] = gsparse
        self.mgrid[:(ndense + 1)] = gdense
        self.mgrid_c = self.mgrid
        self.mgrid_s = self.mgrid
        assert np.all(np.diff(self.mgrid) > 0)

        self.u_precompute()
        self.unplanned_pregnancy_probability()
        self.compute_taxes()
        self.cupyfy()
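# Standalone smoke-test sketch (not in the original file). It mirrors the
# ModelSetup(**d) call used in the plotting script further below; parameter
# keys that contain spaces must be passed via dict unpacking.
def _setup_smoke_test():
    su = ModelSetup(**{'high education': True})
    print('couple asset grid: {} points on [{:.1f}, {:.1f}]'.format(
        su.na, su.agrid_c.min(), su.agrid_c.max()))
    print('first meeting probabilities: {}'.format(su.pars['pmeet_t'][:5]))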
def fun(x):
    assert type(x) is tuple, 'x must be a tuple!'
    action = x[0]
    args = x[1]
    assert type(action) is str, 'x[0] should be a string for action'
    assert len(x) <= 2, 'too many things in x! x is (action, args)'

    if action == 'test':
        return mdl_resid()
    elif action == 'compute':
        return mdl_resid(args)
    elif action == 'minimize':
        import dfols
        import pybobyqa

        i, N_st, xfix = args
        xl, xu, x0, keys, translator = calibration_params(xfix=xfix)

        # sort key: first element of each (f, x) tuple
        def sortFirst(val):
            return val[0]

        # get the starting point for local minimization:
        # open the file with the best solution so far
        param = filer('wisdom.pkl', 0, False)
        param.sort(key=sortFirst)
        print('f best so far is {} and x is {}'.format(param[0][0], param[0][1]))
        xm = param[0][1]

        # get the right sobol sequence point
        xt = filer('sobol.pkl', None, False)

        # determine the initial position
        dump = min(max(0.1, ((i + 1)/N_st)**(0.5)), 0.995)
        xc = dump*xm + (1 - dump)*xt[:, i]
        xc = xc.squeeze()
        print('The initial position is {}'.format(xc))

        # standard way
        def q(pt):
            try:
                ans = mdl_resid(translator(pt),
                                return_format=['scaled residuals'])[0]
            except Exception:
                print('During optimization function evaluation failed at {}'.format(pt))
                ans = np.array([1e6])
            finally:
                gc.collect()
            return ans

        res = dfols.solve(q, xc, rhobeg=0.01, rhoend=1e-4, maxfun=100,
                          bounds=(xl, xu), scaling_within_bounds=True,
                          objfun_has_noise=False, print_progress=True)
        #res = pybobyqa.solve(q, xc, rhobeg=0.001, rhoend=1e-6, maxfun=80, bounds=(xl,xu),
        #                     scaling_within_bounds=True, objfun_has_noise=False, print_progress=True)

        print(res)
        if res.flag == -1:
            raise Exception('solver returned something creepy...')

        # in principle, this can be inconsistent with the squared sum of residuals
        fbest = mdl_resid(translator(res.x))[0]

        print('fbest is {} and res.f is {}'.format(fbest, res.f))
        print('Final value is {}'.format(fbest))

        param_new = filer('wisdom.pkl', None, False)
        param_write = param_new + [(fbest, res.x)]

        # save the updated file
        param_write.sort(key=sortFirst)
        filer('wisdom.pkl', param_write, True)

        return fbest
    else:
        raise Exception('unsupported action or format')
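# Example invocations of the (action, args) tuple protocol handled above.
# The xfix contents are illustrative, not calibrated values:
#
# fun(('compute', {'sigma_psi': 0.03}))                 # residuals at a given point
# fun(('minimize', (0, 16, {'high education': True})))  # local search from sobol point 0 of 16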
def mdl_resid(x=None, targets=None, weights=w,  # w is assumed defined at module level (default weighting)
              save_to=None, load_from=None, return_format=['distance'],
              store_path=None, verbose=False, draw=False, graphs=False,
              rel_diff=False, cs_moments=False, moments_repeat=5, Tsim=30,
              moments_save_name=None):

    from model import Model
    from setup import DivorceCosts
    from simulations import Agents
    from crosssection import CrossSection
    from calibration_params import calibration_params

    if type(x) is dict:
        kwords = x
        if 'targets' in x:
            targets = x.pop('targets')
    else:
        lb, ub, xdef, keys, translator = calibration_params()
        if x is None:
            x = xdef
        kwords = translator(x)

    if verbose:
        print(kwords)

    # this is for the default model
    #dc_k  = DivorceCosts(unilateral_divorce=True,assets_kept=1.0,u_lost_m=0.00,u_lost_f=0.00,eq_split=1.0)
    #dc_nk = DivorceCosts(unilateral_divorce=True,assets_kept=1.0,u_lost_m=0.00,u_lost_f=0.00,eq_split=1.0)

    def join_path(name, path):
        return os.path.join(path, name)

    if load_from is not None:
        if type(load_from) is not list:
            load_from = [load_from]
        if store_path is not None:
            load_from = [join_path(n, store_path) for n in load_from]

    if save_to is not None:
        if type(save_to) is not list:
            save_to = [save_to]
        if store_path is not None:
            save_to = [join_path(n, store_path) for n in save_to]

    if load_from is None:
        mdl = Model(verbose=verbose, **kwords)
        mdl_list = [mdl]
    else:
        mdl_list = [dill.load(open(l, 'rb+')) for l in load_from]
        mdl = mdl_list[0]

    if save_to is not None:
        if len(save_to) > 1:
            print('warning: more than one entry in save_to, only the first is used')
        dill.dump(mdl, open(save_to[0], 'wb+'))

    np.random.seed(18)
    agents = Agents(mdl_list, verbose=verbose, fix_seed=False, T=Tsim)

    if not cs_moments:
        moments_list = [agents.compute_moments()] + \
                       [Agents(mdl_list, verbose=False, T=Tsim,
                               fix_seed=False).compute_moments()
                        for _ in range(moments_repeat - 1)]
    else:
        moments_list = [CrossSection(mdl_list, verbose=False, N_total=30000,
                                     fix_seed=False).compute_moments()
                        for _ in range(moments_repeat)]

    # average the moments over the repeated simulations
    mom = {key: np.mean([m[key] for m in moments_list], axis=0)
           for key in moments_list[0].keys()}

    #mom_join = Agents(mdl_list, N=10000, T=18, female=False, verbose=False).aux_moments()
    #mom_men = agents_extra.compute_moments()
    #mom.update(mom_join)

    if moments_save_name:  # 'is not None' can be omitted
        filer('{}.pkl'.format(moments_save_name), mom, True)

    if targets is None:
        from targets import target_values
        tar = target_values()
    elif type(targets) is str:
        from targets import target_values
        tar = target_values(mode=targets)
    else:
        tar = targets

    resid_all, resid_sc, dist = distance_to_targets(mom, tar, weights=weights,
                                                    report=verbose)

    #if verbose:
    #    print('data moments are {}'.format(dat))
    #    print('simulated moments are {}'.format(sim))

    tt = mdl_list[0].get_total_time()
    print('Distance {}, time {}'.format(dist, tt))

    out_dict = {'distance': dist,
                'all residuals': resid_all,
                'scaled residuals': resid_sc,
                'models': mdl_list,
                'agents': agents,
                'moments': mom}

    out = [out_dict[key] for key in return_format]

    del out_dict

    if 'models' not in return_format:
        for m in mdl_list:
            del m
        del mdl_list

    if 'agents' not in return_format:
        del agents

    if len(out) == 1:
        out = out[0]

    return out
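# Usage sketch: any subset of out_dict keys can be requested via return_format,
# and the outputs come back in the same order.
def _mdl_resid_example():
    # evaluates the model at the default calibration point
    dist, mom = mdl_resid(return_format=['distance', 'moments'])
    print('distance at default point: {}'.format(dist))
    return mom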
def run(resume=False, high_e=True):

    xinit, targ_mode = get_point(high_e, read_wisdom=False)
    tar = target_values(targ_mode)

    if resume:
        x_load = filer('wisdom_refined.pkl', 0, 0)
        prob_meet_load = x_load['pmeet_exo']
        prob_preg_load = x_load['ppreg_exo']
        xinit = x_load

    out, mdl, agents, res, mom = mdl_resid(
        x=xinit, targets=tar,
        return_format=['distance', 'models', 'agents',
                       'scaled residuals', 'moments'],
        verbose=False)

    print('initial distance is {}'.format(out))

    if resume:
        prob_meet_init = prob_meet_load
        prob_preg_init = prob_preg_load
    else:
        prob_meet_init = np.array(mdl[0].setup.pars['pmeet_t'][:mdl[0].setup.pars['Tmeet']])
        prob_preg_init = np.array([mdl[0].setup.upp_precomputed_fem[t][3]
                                   for t in range(mdl[0].setup.pars['Tmeet'])])

    nopt = 10
    yfactor = 1.5

    for iopt in range(nopt):

        print('running estimation round {}'.format(iopt))

        print('estimating probabilities:')

        prob_meet_est = 0.0
        prob_preg_est = 0.0
        nrep = 4 if iopt > 0 else 1
        np.random.seed(12)

        for rep in range(nrep):
            o = AgentsEst(mdl, T=30, verbose=False, fix_seed=False)
            prob_meet_est += (1/nrep)*o.pmeet_exo.copy()
            prob_preg_est += (1/nrep)*o.ppreg_exo.copy()

        print('estimated pmeet = {}'.format(prob_meet_est))
        print('estimated ppreg = {}'.format(prob_preg_est))

        # this does binary search over the mixing weight
        w = 1.0
        factor = 0.5

        ne = prob_meet_est.size
        nw = 10

        print('reference value is {}'.format(out))
        y_previous = out

        for i in range(nw):
            prob_meet_w = w*prob_meet_est + (1 - w)*prob_meet_init[:ne]
            prob_preg_w = w*prob_preg_est + (1 - w)*prob_preg_init[:ne]

            xsearch = xinit.copy()
            xsearch.update({'pmeet_exo': prob_meet_w,
                            'ppreg_exo': prob_preg_w})

            out_w = mdl_resid(x=xsearch, targets=tar,
                              return_format=['distance'], verbose=False)

            print('with weight = {}, distance is {}'.format(w, out_w))

            if out_w < yfactor*out:
                print('found a potentially improving weight for yfactor = {}'.format(yfactor))
                break
            else:
                w = factor*w
                if i < nw - 1:
                    print('trying new weight = {}'.format(w))
                else:
                    print('no luck...')

        xfix = {k: xinit[k] for k in ['pmeet_21', 'pmeet_30', 'pmeet_40',
                                      'preg_21', 'preg_28', 'preg_35']}

        lb, ub, _, keys, translator = calibration_params(xfix=xfix)

        def tr(x):
            xx = translator(x)
            xx.update({'pmeet_exo': prob_meet_w, 'ppreg_exo': prob_preg_w})
            return xx

        x0 = [xinit[key] for key in keys]
        x0, lb, ub = np.array(x0), np.array(lb), np.array(ub)

        print('starting from {}'.format(tr(x0)))

        tar = target_values('high education')

        def q(pt):
            #print('computing at point {}'.format(translator(pt)))
            try:
                ans = mdl_resid(tr(pt), return_format=['scaled residuals'])
            except BaseException as a:
                print('During optimization function evaluation failed at {}'.format(pt))
                print(a)
                ans = np.array([1e6])
            finally:
                gc.collect()
            return ans

        res = dfols.solve(q, x0, rhobeg=0.02, rhoend=1e-5, maxfun=60,
                          bounds=(lb, ub), scaling_within_bounds=True,
                          objfun_has_noise=False, npt=len(x0) + 5,
                          user_params={'restarts.use_restarts': True,
                                       'restarts.rhoend_scale': 0.5,
                                       'restarts.increase_npt': True})

        print(res)
        print('Result is {}'.format(tr(res.x)))

        filer('wisdom_refined.pkl', tr(res.x), True)
        print('wrote to the file!')

        xinit = tr(res.x)
        out, mdl, agents, res, mom = mdl_resid(
            x=xinit,
            return_format=['distance', 'models', 'agents',
                           'scaled residuals', 'moments'])

        if out > y_previous:
            print('no reduction in function value obtained')
            yfactor = 0.5*yfactor + 0.5

        y_previous = out
    'sm_shift': [0.1*sm, 0.25*sm, 0.5*sm, 0.75*sm, sm,
                 1.25*sm, 1.5*sm, 1.75*sm, 2*sm],
    'preg_mult': [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
    'poutsm': [0.05, 0.1, 0.2, 1/3, 1/2, 2/3, 0.99],
    'z_drift': [-0.4, -0.3, -0.2, -0.15, -0.1, -0.05, -0.025, 0.0]
}

inp = list()
change = list()

for par, vlist in request.items():
    for val in vlist:
        change.append((par, val))
        xin = x.copy()
        xin.update({par: val})
        inp.append(('moments', xin))

from p_client import compute_for_values
print('generated {} points'.format(len(change)))

result = compute_for_values(inp)

from tiktak import filer
filer('sens_results.py', {'init': x, 'input': change, 'output': result}, True)
def run(adj_name, fix, educ_name, resume=False, noplot=False):
    # This first takes the point x saved in estimates.py by calling
    # get_point(high_e); x is a dict of parameter names and values.
    # It then applies the adjustment fix (a dict) to the point x.
    # The resulting file name is educ_name + adj_name.
    #
    # adj_name  -- name of the adjustment
    # fix       -- dictionary containing parameters of setup.py to change
    # educ_name -- name of the education group ("col" or "hs")
    # high_e    -- input to the get_point function, True for college, False for HS

    high_e = (educ_name == 'col' or educ_name == 'college')
    low_e = (educ_name == 'hs' or educ_name == 'high school')

    assert (high_e or low_e), 'wrong specifier for education'

    x, targ_mode = get_point(high_e, read_wisdom=False)
    tar = target_values(targ_mode)

    print('\n\n\n\n\n\n')
    print('doing {} {}'.format(educ_name, adj_name))

    x_new = x.copy()
    fix = fix.copy()

    if 'multiply' in fix:
        mult = fix.pop('multiply')
        for m in mult:
            x_new[m] *= mult[m]

    x_new.update(fix)
    print(x_new)

    name = '{} {}'.format(educ_name, adj_name)
    fname = '{}.pkl'.format(name)

    mom, fp = None, None  # bugfix: keep the return statement safe if computation fails

    try:
        if resume:
            try:
                mom = filer(fname, 0, 0, repeat=False)
                skip = True
            except Exception:
                skip = False
        else:
            skip = False

        if not skip:
            print("computing {}".format(fname))
            out, mom = mdl_resid(x=x_new, targets=tar,
                                 return_format=['distance', 'moments'],
                                 verbose=False, draw=False, cs_moments=False,
                                 save_to='mdl for {}'.format(fname),
                                 moments_save_name=name,
                                 moments_repeat=5, Tsim=42)
            print("file {} saved".format(fname))
        else:
            print("file {} already exists".format(fname))

        print('done, doing fit plots')

        try:
            if adj_name == 'baseline':
                fp = FitPlots(targ_mode=targ_mode,
                              compare=None,
                              base='{} baseline.pkl'.format(educ_name),
                              compare_name='Data',
                              base_name='baseline',
                              moments_aux=None,
                              noplot=noplot)
            else:
                fp = FitPlots(targ_mode=targ_mode,
                              compare='{} baseline.pkl'.format(educ_name),
                              base=fname,
                              compare_name='baseline',
                              base_name=adj_name,
                              moments_aux=None,
                              noplot=noplot)
        except Exception:
            print('something wrong with fit plots...')

    except KeyboardInterrupt:
        raise
    except BaseException as a:
        print("file {} {}.pkl could not be produced. Exception:".format(name, adj_name))
        print(a)
        print(' ')

    return mom, fp
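# Example counterfactual call (hedged: the exact override implementing
# 'costless abortion' is an assumption here, not taken from the run scripts).
# This would produce 'col costless abortion.pkl' plus fit plots against
# 'col baseline.pkl':
#
# mom, fp = run('costless abortion', {'abortion_costs': 0.0}, 'col')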
def get_new_estimates(fname='income_assets_distribution.csv',
                      save='za_dist.pkl',
                      age_start=23, age_stop=45, weighted=True):

    print('obtaining estimates from file {}'.format(fname))

    df = pd.read_csv(fname)

    income_for_normalization = np.median(np.exp(df[df['tage'] == 27]['log_income']))

    ages_array = np.arange(age_start, age_stop + 1)
    estimates = np.zeros((ages_array.size, 6))

    for i, age in enumerate(ages_array):
        df_age = df[df['tage'] == age][['log_income', 'log_assets', 'wpfinwgt']]

        if weighted:
            weights = df_age['wpfinwgt'] / df_age['wpfinwgt'].sum()
        else:
            weights = np.ones_like(df_age['wpfinwgt'])
            weights = weights / weights.sum()

        mean_income = (df_age['log_income']*weights).sum()
        df_age['n_log_income'] = df_age['log_income'] - mean_income

        data = df_age[['n_log_income', 'log_assets']].copy()
        has_assets = data['log_assets'] > 0

        data['log_assets'] = sps.mstats.winsorize(data['log_assets'], (None, 0.1)) - \
                             np.log(income_for_normalization)
        data['n_log_income'] = sps.mstats.winsorize(data['n_log_income'], (0.05, 0.05))

        data_npar = pd.DataFrame({'z': data['n_log_income'].copy(),
                                  'a': data['log_assets'].copy()})

        def log_l(theta):
            # assets are zero with probit probability p_0 and
            # jointly lognormal with income otherwise
            mu_a = theta[0]
            sig_a = theta[1]
            sig_i = theta[2]
            rho = theta[3]
            beta_cons = theta[4]
            beta_i = theta[5]

            saa = sig_a**2
            sii = sig_i**2
            sia = sig_a*sig_i*rho

            cov = [[sii, sia], [sia, saa]]
            mu = [0, mu_a]

            li_pos = sps.multivariate_normal.logpdf(data, mean=mu, cov=cov)
            li_zero = sps.norm.logpdf(data['n_log_income'], 0.0, sig_i)

            p_0 = sps.norm.cdf(beta_cons + beta_i*data['n_log_income'])
            p_pos = 1 - p_0

            is_positive = has_assets
            li = weights*((is_positive)*(np.log(p_pos) + li_pos) +
                          (~is_positive)*(np.log(p_0) + li_zero))
            return li.sum()

        bnds_z = (None, None) if has_assets.mean() > 0 else (0.0, 0.0)

        print('\n mean log assets {}, std {}'.format(data['log_assets'].mean(),
                                                     np.std(data['log_assets'])))
        print('max assets {}'.format(np.exp(data['log_assets']).max()))

        res = spo.minimize(lambda x: -log_l(x),
                           [3.0, 0.4, 0.4, 0.0, 0.0, 0.0],
                           method='L-BFGS-B',
                           bounds=[(None, None), (1e-4, None), (1e-4, None),
                                   (-0.95, 0.95), (None, None), bnds_z])

        #print('for age = {} result is {}, percentage of non-0 is {}'.format(age,res.x,has_assets.mean()))
        estimates[i, :] = res.x
        print('estimated mean log assets {}, estimated std {}\n'.format(res.x[0], res.x[1]))

    out = {'Ages': ages_array,
           'mu_a': estimates[:, 0],
           'sig_a': estimates[:, 1],
           'sig_i': estimates[:, 2],
           'rho': estimates[:, 3],
           'beta0': estimates[:, 4],
           'beta1': estimates[:, 5]}

    if save:
        filer(save, out, True)

    return out
def conditional_normal_parameters(zval, mu_a, mu_z, sig_a, sig_z, rho):
    mean = mu_a + (sig_a/sig_z)*rho*(zval - mu_z)
    std = (1 - rho**2)**(0.5) * sig_a
    return mean, std


if __name__ == '__main__':
    dist = get_new_estimates()
    #print(dist)
    dist = filer('za_dist.pkl', 0, 0)
    #print(dist)

    tval = np.array([-2, -1, 0, 1, 2], dtype=np.float64)

    x, w = np.polynomial.hermite.hermgauss(5)
    x = x*np.sqrt(2)
    w = w/w.sum()

    # quadrature sanity checks: moments of the standard normal
    print((w*(x**2)).sum())
    print((w*(x**3)).sum())
    print((w*(x**4)).sum())

    for t in range(dist['Ages'].size):
        zval = dist['sig_i'][t]*tval
        inputs = (zval, dist['mu_a'][t], 0.0, dist['sig_a'][t],
                  dist['sig_i'][t], dist['rho'][t])
        print('inputs are {}'.format(inputs))
        mu, sig = conditional_normal_parameters(*inputs)
        share_0 = sps.norm.cdf(dist['beta0'][t] + dist['beta1'][t]*zval)
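# Standalone sanity-check sketch for conditional_normal_parameters: for a
# bivariate normal (z, a), E[a|z] = mu_a + rho*(sig_a/sig_z)*(z - mu_z) and
# Std[a|z] = sig_a*sqrt(1 - rho**2). The parameter values here are arbitrary.
def _check_conditional_normal(n=200000, seed=0):
    rng = np.random.default_rng(seed)
    mu_z, mu_a, sig_z, sig_a, rho = 0.1, 1.0, 0.5, 0.8, 0.6
    cov = [[sig_z**2, rho*sig_z*sig_a], [rho*sig_z*sig_a, sig_a**2]]
    z, a = rng.multivariate_normal([mu_z, mu_a], cov, size=n).T
    sel = np.abs(z - 0.3) < 0.01  # condition on z being close to 0.3
    m, s = conditional_normal_parameters(0.3, mu_a, mu_z, sig_a, sig_z, rho)
    print('MC mean {:.3f} vs formula {:.3f}'.format(a[sel].mean(), m))
    print('MC std  {:.3f} vs formula {:.3f}'.format(a[sel].std(), s))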
def fun(x):
    assert type(x) is tuple, 'x must be a tuple!'
    action = x[0]
    args = x[1]
    assert type(action) is str, 'x[0] should be a string for action'
    assert len(x) <= 2, 'too many things in x! x is (action, args)'

    if action == 'test':
        return mdl_resid()
    elif action == 'compute':
        return mdl_resid(args)
    elif action == 'moments':
        agents = mdl_resid(args, return_format=['agents'])
        mom = agents.compute_moments()
        return mom
    elif action == 'minimize':
        import dfols

        i, N_st, xfix = args
        xl, xu, x0, keys, translator = calibration_params(xfix=xfix)

        # sort key: first element of each (f, x) tuple
        def sortFirst(val):
            return val[0]

        # get the starting point for local minimization:
        # open the file with the best solution so far
        param = filer('wisdom.pkl', 0, False)
        param.sort(key=sortFirst)
        print('f best so far is {} and x is {}'.format(param[0][0], param[0][1]))
        xm = param[0][1]

        # get the right sobol sequence point
        xt = filer('sobol.pkl', None, False)

        # determine the initial position
        dump = min(max(0.1, ((i + 1)/N_st)**(0.5)), 0.995)
        xc = dump*xm + (1 - dump)*xt[:, i]
        xc = xc.squeeze()

        def q(pt):
            try:
                ans = mdl_resid(translator(pt), moments_repeat=3,
                                return_format=['scaled residuals'])
            except BaseException as a:
                print('During optimization function evaluation failed at {}'.format(pt))
                print(a)
                ans = np.array([1e6])
            finally:
                gc.collect()
            return ans

        # maxfun=npt assumes npt is defined at module level
        res = dfols.solve(q, xc, rhobeg=0.15, rhoend=1e-6, maxfun=npt,
                          bounds=(xl, xu),
                          #npt=len(xc)+5,
                          scaling_within_bounds=True,
                          #user_params={'tr_radius.gamma_dec':0.75,'tr_radius.gamma_inc':1.5,
                          #             'tr_radius.alpha1':0.5,'tr_radius.alpha2':0.75,
                          #             'regression.momentum_extra_steps':True,
                          #             'restarts.use_restarts':True},
                          objfun_has_noise=True)

        print(res)
        if res.flag == -1:
            raise Exception('solver returned something creepy...')

        # in principle, this can be inconsistent with the squared sum of residuals
        fbest = mdl_resid(translator(res.x))

        print('fbest is {} and res.f is {}'.format(fbest, res.f))
        print('Final value is {}'.format(fbest))

        param_new = filer('wisdom.pkl', None, False)
        param_write = param_new + [(fbest, res.x)]

        # save the updated file
        param_write.sort(key=sortFirst)
        filer('wisdom.pkl', param_write, True)

        return fbest
    else:
        raise Exception('unsupported action or format')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:25:40 2020

@author: egorkozlov
"""

import numpy as np
from tiktak import filer

V_base = filer('v_save_base.pkl', 0, 0)
V_comp = filer('v_save_counterfactual.pkl', 0, 0)

from setup import ModelSetup
import matplotlib.pyplot as plt

d = {'high education': True}
su_def = ModelSetup(**d)
agrid = su_def.agrid_s

t = 4
a0 = 0.0
ia0 = 0  #np.searchsorted(agrid,a0)
sname = 'Female, single'
oname = 'V'

zfgrid = su_def.exogrid.zf_t[t]
zftrend = su_def.pars['f_wage_trend'][t]

names = ['extra assets in base to reach 0 in compare',
def __init__(self, nogrid=False, divorce_costs_k='Default',
             divorce_costs_nk='Default', **kwargs):
    p = dict()

    T = 55
    Tret = 45   # first period when the agent is retired
    Tfert = 18  # first period when infertile
    Tdiv = 44   # first period when one cannot divorce / renegotiate
    Tmeet = 25  # first period when one stops meeting partners
    Tinc = 25   # first period where we stop tracking the income process and assume it fixed

    p['T'] = T
    p['Tret'] = Tret
    p['Tfert'] = Tfert
    p['Tsim'] = T
    p['n_zf_t'] = [7]*Tret + [1]*(T - Tret)
    p['n_zm_t'] = [5]*Tret + [1]*(T - Tret)
    p['sigma_psi_mult'] = 0.28
    p['sigma_psi'] = 0.11
    p['R_t'] = [1.025]*T
    p['n_psi'] = 15
    p['beta_t'] = [0.98]*T
    p['A'] = 1.0  # consumption in couple: c = (1/A)*[c_f^(1+rho) + c_m^(1+rho)]^(1/(1+rho))
    p['crra_power'] = 1.5
    p['couple_rts'] = 0.23
    p['sig_partner_a'] = 0.1
    p['mu_partner_a_female'] = 0.00
    p['mu_partner_a_male'] = -0.00
    p['dump_factor_z'] = 0.75
    p['sig_partner_z'] = 0.25
    p['mu_partner_z_male'] = -0.02
    p['mu_partner_z_female'] = 0.02
    p['m_bargaining_weight'] = 0.5
    p['pmeet_21'] = 0.1
    p['pmeet_28'] = 0.2
    p['pmeet_35'] = 0.1
    p['m_zf'] = 0.9
    p['m_zf0'] = 1.0
    p['z_drift'] = -0.09
    p['no kids at meeting'] = True
    p['high education'] = True  # which wage trend to pick
    p['any kids'] = True
    p['wret'] = 0.8
    p['uls'] = 0.2
    p['pls'] = 1.0
    p['income_sd_mult'] = 1.0
    p['pay_gap'] = True
    p['preg_mult'] = 1.0
    p['u_shift_mar'] = 0.0
    p['u_shift_coh'] = 0.0
    p['sm_shift'] = 0.0
    p['disutil_marry_sm_fem_coef'] = 0.0
    p['disutil_marry_sm_mal_coef'] = 10.0
    p['disutil_shotgun_coef'] = 2.0
    p['pmeet_multiplier_fem'] = 1.0
    p['p_to_meet_sm_if_mal'] = 0.1
    p['taste_shock_mult'] = 1.0
    p['p_abortion_access'] = 0.5
    p['abortion_costs_mult'] = 10.0
    p['u_lost_divorce_mult'] = 0.0
    p['child_a_cost'] = 0.0
    p['child_support_share'] = 0.0
    p['util_lam'] = 0.7
    p['util_alp'] = 0.5
    p['util_xi'] = 1.5
    p['util_kap'] = 0.5
    p['util_qbar'] = 0.0
    p['util_out_lf'] = 0.0
    p['preg_21'] = 0.01
    p['preg_28'] = 0.5
    p['preg_35'] = 0.3

    for key, value in kwargs.items():
        assert (key in p), 'wrong name?'
        p[key] = value

    if p['high education']:
        p['sig_zm'] = p['income_sd_mult']*0.16138593
        p['sig_zm_0'] = p['income_sd_mult']*0.41966813

        # FIXME: I need guidance how to pin these down
        p['sig_zf'] = p['income_sd_mult']*p['m_zf']*0.19571624
        p['sig_zf_0'] = p['income_sd_mult']*p['m_zf0']*0.43351219
    else:
        p['sig_zm'] = p['income_sd_mult']*0.2033373
        p['sig_zm_0'] = p['income_sd_mult']*0.40317171
        p['sig_zf'] = p['income_sd_mult']*p['m_zf']*0.14586778
        p['sig_zf_0'] = p['income_sd_mult']*p['m_zf0']*0.62761052

    if p['high education']:
        # college
        m_trend_data = [0.0, 0.11316599, .2496034, .31260625, .37472204,
                        .4268548, .48067884, .52687573, .57293878, .60941412,
                        .65015743, .6685226, .72482815, .74446455, .76712521,
                        .78038137, .79952806, .80092523, .81972567, .82913486,
                        .83849471, .84308452, .84646086, .85437072, .85499576]

        f_trend_data = [0.0, 0.06715984, .21149606, .32283002, .46885336,
                        .52712037, .58302632, .63348555, .68024646, .71450132,
                        .74246337, .77044807, .79946406, .80640353, .83799304,
                        .85356081, .86832235, .87407447, .87820755, .86840901,
                        .87630054, .8765972, .87894493, .87800553, .87932908]

        nm = len(m_trend_data) - 1
        nf = len(f_trend_data) - 1

        t0 = 4
        gap = 3.0340077 - 2.8180354  # male - female
        c_female = -f_trend_data[t0]
        c_male = gap - m_trend_data[t0]

        p['m_wage_trend'] = np.array([c_male + m_trend_data[min(t, nm)] for t in range(T)])
        p['f_wage_trend'] = np.array([c_female + f_trend_data[min(t, nf)] for t in range(T)])
    else:
        # no college
        p['m_wage_trend'] = np.array([-0.2424105 + 0.037659*(min(t + 2, 30) - 5)
                                      - 0.0015337*((min(t + 2, 30) - 5)**2)
                                      + 0.000026*((min(t + 2, 30) - 5)**3)
                                      for t in range(T)])
        p['f_wage_trend'] = np.array([-0.3668214 + 0.0264887*(min(t, 30) - 5)
                                      - 0.0012464*((min(t, 30) - 5)**2)
                                      + 0.0000251*((min(t, 30) - 5)**3)
                                      for t in range(T)])

    if not p['pay_gap']:
        p['sig_zf'], p['sig_zf_0'] = p['sig_zm'], p['sig_zm_0']
        p['f_wage_trend'] = p['m_wage_trend']

    # derivative parameters
    p['sigma_psi_init'] = p['sigma_psi_mult']*p['sigma_psi']
    p['disutil_marry_sm_mal'] = p['disutil_marry_sm_mal_coef']*p['u_shift_mar']
    p['disutil_marry_sm_fem'] = p['disutil_marry_sm_fem_coef']*p['u_shift_mar']
    p['disutil_shotgun'] = p['disutil_shotgun_coef']*p['sigma_psi_init']
    p['abortion_costs'] = p['abortion_costs_mult']*p['u_shift_mar']
    p['u_lost_divorce'] = p['u_lost_divorce_mult']*p['sigma_psi_init']

    p['preg_az'] = 0.00
    p['preg_azt'] = 0.00

    # get the probability of meeting, adjusting for year-period
    p['taste_shock'] = 0.0*p['taste_shock_mult']*0.0  #p['sigma_psi']

    p['is fertile'] = [p['any kids']]*Tfert + [False]*(T - Tfert)
    p['can divorce'] = [True]*Tdiv + [False]*(T - Tdiv)
    #p['poutsm_t'] = [p['poutsm']]*T

    p['pmeet_0'], p['pmeet_t'], p['pmeet_t2'] = prob_polyfit(
        (p['pmeet_21'], 0), (p['pmeet_28'], 7), (p['pmeet_35'], 14), max_power=2)

    p['preg_a0'], p['preg_at'], p['preg_at2'] = prob_polyfit(
        (p['preg_21'], 0), (p['preg_28'], 7), (p['preg_35'], 14), max_power=2)

    p['pmeet_t'] = [np.clip(p['pmeet_0'] + t*p['pmeet_t'] + (t**2)*p['pmeet_t2'], 0.0, 1.0)
                    for t in range(Tmeet)] + [0.0]*(T - Tmeet)

    p['n_psi_t'] = [p['n_psi']]*T

    self.pars = p

    self.dtype = np.float64  # type for all floats

    # relevant for integration
    self.state_names = ['Female, single', 'Male, single', 'Female and child',
                        'Couple, no children', 'Couple and child']

    # female labor supply
    lmin = 0.2
    lmax = 1.0
    nl = 2

    ls = np.array([0.2, 1.0])  #np.linspace(lmin,lmax,nl,dtype=self.dtype)
    ps = np.array([p['pls'], 0.0])
    ls_ushift = np.array([p['util_out_lf'], 0.0])

    self.ls_levels = dict()
    self.ls_levels['Couple, no children'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Female, single'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Male, single'] = np.array([1.0], dtype=self.dtype)
    self.ls_levels['Couple and child'] = ls
    self.ls_levels['Female and child'] = ls

    self.ls_ushift = dict()
    self.ls_ushift['Couple, no children'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Female, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Male, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_ushift['Couple and child'] = ls_ushift
    self.ls_ushift['Female and child'] = ls_ushift

    #self.ls_utilities = np.array([p['uls'],0.0],dtype=self.dtype)
    self.ls_pdown = dict()
    self.ls_pdown['Couple, no children'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Female, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Male, single'] = np.array([0.0], dtype=self.dtype)
    self.ls_pdown['Female and child'] = ps
    self.ls_pdown['Couple and child'] = ps

    self.nls = dict()
    self.nls['Couple and child'] = len(self.ls_levels['Couple and child'])
    self.nls['Couple, no children'] = len(self.ls_levels['Couple, no children'])
    self.nls['Female and child'] = len(self.ls_levels['Female and child'])
    self.nls['Female, single'] = len(self.ls_levels['Female, single'])
    self.nls['Male, single'] = len(self.ls_levels['Male, single'])

    # Cost of Divorce
    if divorce_costs_k == 'Default':
        # by default the costs are set at the bottom
        self.divorce_costs_k = DivorceCosts(u_lost_m=self.pars['u_lost_divorce'],
                                            u_lost_f=self.pars['u_lost_divorce'])
    else:
        if isinstance(divorce_costs_k, dict):
            # you can feed in arguments to DivorceCosts
            self.divorce_costs_k = DivorceCosts(**divorce_costs_k)
        else:
            # or just the output of DivorceCosts
            assert isinstance(divorce_costs_k, DivorceCosts)
            self.divorce_costs_k = divorce_costs_k

    # Cost of Separation
    if divorce_costs_nk == 'Default':
        # by default the costs are set at the bottom
        self.divorce_costs_nk = DivorceCosts(u_lost_m=self.pars['u_lost_divorce'],
                                             u_lost_f=self.pars['u_lost_divorce'])
    else:
        if isinstance(divorce_costs_nk, dict):
            # you can feed in arguments to DivorceCosts
            self.divorce_costs_nk = DivorceCosts(**divorce_costs_nk)
        else:
            # or just the output of DivorceCosts
            assert isinstance(divorce_costs_nk, DivorceCosts)
            self.divorce_costs_nk = divorce_costs_nk

    # exogrid should be deprecated
    if not nogrid:

        exogrid = dict()

        # let's approximate three Markov chains
        # this sets up the exogenous grid
        # FIXME: this uses the number of points from the 0th entry;
        # in principle we can generalize this
        exogrid['zf_t'], exogrid['zf_t_mat'] = rouw_nonst(
            p['T'], p['sig_zf'], p['sig_zf_0'], p['n_zf_t'][0])
        exogrid['zm_t'], exogrid['zm_t_mat'] = rouw_nonst(
            p['T'], p['sig_zm'], p['sig_zm_0'], p['n_zm_t'][0])

        for t in range(Tinc, Tret):
            for key in ['zf_t', 'zf_t_mat', 'zm_t', 'zm_t_mat']:
                exogrid[key][t] = exogrid[key][Tinc]

        for t in range(Tret, T):
            exogrid['zf_t'][t] = np.array([np.log(p['wret'])])
            exogrid['zm_t'][t] = np.array([np.log(p['wret'])])
            exogrid['zf_t_mat'][t] = np.atleast_2d(1.0)
            exogrid['zm_t_mat'][t] = np.atleast_2d(1.0)

        # fix transition from non-retired to retired
        exogrid['zf_t_mat'][Tret - 1] = np.ones((p['n_zf_t'][Tret - 1], 1))
        exogrid['zm_t_mat'][Tret - 1] = np.ones((p['n_zm_t'][Tret - 1], 1))

        exogrid['psi_t'], exogrid['psi_t_mat'] = rouw_nonst(
            p['T'], p['sigma_psi'], p['sigma_psi_init'], p['n_psi_t'][0])

        zfzm, zfzmmat = combine_matrices_two_lists(exogrid['zf_t'], exogrid['zm_t'],
                                                   exogrid['zf_t_mat'], exogrid['zm_t_mat'])
        all_t, all_t_mat = combine_matrices_two_lists(zfzm, exogrid['psi_t'],
                                                      zfzmmat, exogrid['psi_t_mat'])
        all_t_mat_sparse_T = [sparse.csc_matrix(D.T) if D is not None else None
                              for D in all_t_mat]

        # create a new "bad" version of the transition matrix p(zf_t)
        zf_bad = [tauchen_drift(exogrid['zf_t'][t], exogrid['zf_t'][t + 1],
                                1.0, p['sig_zf'], p['z_drift'])
                  for t in range(self.pars['T'] - 1)] + [None]
        #zf_bad = [cut_matrix(exogrid['zf_t_mat'][t]) if t < Tret - 1
        #          else (exogrid['zf_t_mat'][t] if t < T - 1 else None)
        #          for t in range(self.pars['T'])]

        zf_t_mat_down = zf_bad

        zfzm, zfzmmat = combine_matrices_two_lists(exogrid['zf_t'], exogrid['zm_t'],
                                                   zf_t_mat_down, exogrid['zm_t_mat'])
        all_t_down, all_t_mat_down = combine_matrices_two_lists(zfzm, exogrid['psi_t'],
                                                                zfzmmat, exogrid['psi_t_mat'])
        all_t_mat_down_sparse_T = [sparse.csc_matrix(D.T) if D is not None else None
                                   for D in all_t_mat_down]

        all_t_mat_by_l_nk = [[(1 - p)*m + p*md if m is not None else None
                              for m, md in zip(all_t_mat, all_t_mat_down)]
                             for p in self.ls_pdown['Couple, no children']]

        all_t_mat_by_l_spt_nk = [[(1 - p)*m + p*md if m is not None else None
                                  for m, md in zip(all_t_mat_sparse_T, all_t_mat_down_sparse_T)]
                                 for p in self.ls_pdown['Couple, no children']]

        all_t_mat_by_l_k = [[(1 - p)*m + p*md if m is not None else None
                             for m, md in zip(all_t_mat, all_t_mat_down)]
                            for p in self.ls_pdown['Couple and child']]

        all_t_mat_by_l_spt_k = [[(1 - p)*m + p*md if m is not None else None
                                 for m, md in zip(all_t_mat_sparse_T, all_t_mat_down_sparse_T)]
                                for p in self.ls_pdown['Couple and child']]

        zf_t_mat_by_l_sk = [[(1 - p)*m + p*md if md is not None else None
                             for m, md in zip(exogrid['zf_t_mat'], zf_bad)]
                            for p in self.ls_pdown['Female and child']]

        exogrid['all_t_mat_by_l_nk'] = all_t_mat_by_l_nk
        exogrid['all_t_mat_by_l_spt_nk'] = all_t_mat_by_l_spt_nk
        exogrid['all_t_mat_by_l_k'] = all_t_mat_by_l_k
        exogrid['all_t_mat_by_l_spt_k'] = all_t_mat_by_l_spt_k
        exogrid['zf_t_mat_by_l_sk'] = zf_t_mat_by_l_sk
        exogrid['all_t'] = all_t

        Exogrid_nt = namedtuple('Exogrid_nt', exogrid.keys())

        self.exogrid = Exogrid_nt(**exogrid)
        self.pars['nexo_t'] = [v.shape[0] for v in all_t]

        self.compute_child_support_transitions(
            child_support_share=p['child_support_share'])
        #assert False

        # Grid Couple
        self.na = 40
        self.amin = 0
        self.amax = 100
        self.agrid_c = np.linspace(self.amin**0.5, self.amax**0.5, self.na,
                                   dtype=self.dtype)**2
        #tune = 1.5
        #self.agrid_c = np.geomspace(self.amin+tune,self.amax+tune,num=self.na)-tune

        # this builds a finer grid for potential savings
        s_between = 7    # default number of points between points on agrid
        s_da_min = 0.01  # minimal step (does not create more points)
        s_da_max = 0.1   # maximal step (creates more if not enough)

        self.sgrid_c = build_s_grid(self.agrid_c, s_between, s_da_min, s_da_max)
        self.vsgrid_c = VecOnGrid(self.agrid_c, self.sgrid_c)

        # Grid Single
        self.amin_s = 0
        self.amax_s = self.amax/2.0
        self.agrid_s = self.agrid_c/2.0
        #tune_s = 1.5
        #self.agrid_s = np.geomspace(self.amin_s+tune_s,self.amax_s+tune_s,num=self.na)-tune_s

        self.sgrid_s = build_s_grid(self.agrid_s, s_between, s_da_min, s_da_max)
        self.vsgrid_s = VecOnGrid(self.agrid_s, self.sgrid_s)

        # grid for theta
        self.ntheta = 11
        self.thetamin = 0.01
        self.thetamax = 0.99
        self.thetagrid = np.linspace(self.thetamin, self.thetamax, self.ntheta,
                                     dtype=self.dtype)

        self.child_a_cost_single = np.minimum(self.agrid_s, self.pars['child_a_cost'])
        self.child_a_cost_couple = np.minimum(self.agrid_c, self.pars['child_a_cost'])

        self.vagrid_child_single = VecOnGrid(
            self.agrid_s, self.agrid_s - self.child_a_cost_single)
        self.vagrid_child_couple = VecOnGrid(
            self.agrid_c, self.agrid_c - self.child_a_cost_couple)

        # construct a finer grid for bargaining
        ntheta_fine = 10*self.ntheta  # actual number may be a bit bigger
        self.thetagrid_fine = np.unique(np.concatenate(
            (self.thetagrid,
             np.linspace(self.thetamin, self.thetamax, ntheta_fine, dtype=self.dtype))))
        self.ntheta_fine = self.thetagrid_fine.size

        i_orig = list()

        for theta in self.thetagrid:
            assert theta in self.thetagrid_fine
            i_orig.append(np.where(self.thetagrid_fine == theta)[0])

        assert len(i_orig) == self.thetagrid.size
        # allows recovery of the original grid points on the fine grid
        self.theta_orig_on_fine = np.array(i_orig).flatten()
        self.v_thetagrid_fine = VecOnGrid(self.thetagrid, self.thetagrid_fine)
        # precomputed object for interpolation

        self.exo_grids = {'Female, single': exogrid['zf_t'],
                          'Male, single': exogrid['zm_t'],
                          'Female and child': exogrid['zf_t'],
                          'Couple and child': exogrid['all_t'],
                          'Couple, no children': exogrid['all_t']}
        self.exo_mats = {'Female, single': exogrid['zf_t_mat'],
                         'Male, single': exogrid['zm_t_mat'],
                         'Female and child': exogrid['zf_t_mat_by_l_sk'],
                         'Couple and child': exogrid['all_t_mat_by_l_k'],
                         'Couple, no children': exogrid['all_t_mat_by_l_nk']}  # sparse version?

        self.utility_shifters = {'Female, single': 0.0,
                                 'Male, single': 0.0,
                                 'Female and child': p['u_shift_mar'] + p['sm_shift'],
                                 'Couple and child': p['u_shift_mar'],
                                 'Couple, no children': p['u_shift_coh']}

        # this pre-computes transition matrices for meeting a partner
        try:
            self.partners_distribution_fem = filer('az_dist_fem.pkl', 0, 0, repeat=False)
            self.partners_distribution_mal = filer('az_dist_mal.pkl', 0, 0, repeat=False)
        except Exception:
            print('recreating estimates...')

            est_fem = get_estimates(fname='income_assets_distribution_male.csv',
                                    age_start=23, age_stop=42,
                                    zlist=self.exogrid.zm_t[2:])
            filer('az_dist_fem.pkl', est_fem, True, repeat=False)
            self.partners_distribution_fem = est_fem

            est_mal = get_estimates(fname='income_assets_distribution_female.csv',
                                    age_start=21, age_stop=40,
                                    zlist=self.exogrid.zf_t[0:])
            filer('az_dist_mal.pkl', est_mal, True, repeat=False)
            self.partners_distribution_mal = est_mal

        self.build_matches()

        # building m grid
        ezfmin = min([np.min(np.exp(g + t)) for g, t in zip(exogrid['zf_t'], p['f_wage_trend'])])
        ezmmin = min([np.min(np.exp(g + t)) for g, t in zip(exogrid['zm_t'], p['m_wage_trend'])])
        ezfmax = max([np.max(np.exp(g + t)) for g, t in zip(exogrid['zf_t'], p['f_wage_trend'])])
        ezmmax = max([np.max(np.exp(g + t)) for g, t in zip(exogrid['zm_t'], p['m_wage_trend'])])

        self.money_min = 0.95*min(self.ls_levels['Female and child'])*min(ezmmin, ezfmin)
        # because female labor supply can be below full time

        mmin = self.money_min
        mmax = ezfmax + ezmmax + np.max(self.pars['R_t'])*self.amax
        mint = (ezfmax + ezmmax)  # point where the denser grid begins

        ndense = 600
        nm = 1500

        gsparse = np.linspace(mint, mmax, nm - ndense)
        gdense = np.linspace(mmin, mint, ndense + 1)  # +1 as there is a common point

        self.mgrid = np.zeros(nm, dtype=self.dtype)
        self.mgrid[ndense:] = gsparse
        self.mgrid[:(ndense + 1)] = gdense
        self.mgrid_c = self.mgrid
        self.mgrid_s = self.mgrid
        assert np.all(np.diff(self.mgrid) > 0)

        self.u_precompute()
        self.unplanned_pregnancy_probability()
        self.compute_taxes()
def __init__(self, *, base, targ_mode, compare=None, setup=None,
             base_name='Model', compare_name='Data',
             graphs_title_add=None, moments_aux=None):

    if setup is None:
        self.setup = ModelSetup()
    else:
        self.setup = setup  # bugfix: the passed-in setup was silently ignored

    if type(base) is str:
        self.moments = filer(base, 0, 0)
    else:
        self.moments = base

    self.targ_mode = targ_mode
    self.base_name = base_name
    self.compare_name = compare_name

    if graphs_title_add:
        self.graphs_title_add = '\n ' + graphs_title_add
    else:
        self.graphs_title_add = ''

    if type(compare) is str:
        targ_load = filer(compare, 0, 0)
        # emulating targets
        self.targets = {key: (targ_load[key], 0.0) for key in targ_load}
    else:
        self.targets = all_targets(targ_mode)

    try:
        self.print_things()
    except Exception:
        print('failed to print')

    try:
        self.plot_estimates()
    except Exception:
        print('failed to plot estimates')

    try:
        self.plot_hazards()
    except Exception:
        print('failed to plot hazards')

    try:
        self.plot_cumulative()
    except Exception:
        print('failed to plot cumulative')

    try:
        self.plot_by_years_after_marriage()
    except Exception:
        print('failed to plot by years after marriage')

    try:
        self.plot_kfmf()
    except Exception:
        print('failed to plot kfmf')

    if moments_aux is not None:
        try:
            self.plot_men()
        except Exception:
            print('failed to plot men')

        try:
            self.plot_kfmf_ref()
        except Exception:
            print('failed to plot ref')
           (r'\% MF women in population at 30',
            'm then k in population at 30'),
           (r'\% single mothers among mothers at 35',
            'single mothers among mothers at 35'),
           (r'\% unplanned pregnancies aborted',
            'unplanned pregnancies aborted'),
           #(r'\% with kids 5 years after marriage',
           # 'ever kids by years after marriage, 5')
           ]

table_cols = list()

for educ in ['col', 'hs']:

    def file_name(nm):
        return '{} {}.pkl'.format(educ, nm)

    for ename, nm in names:
        fname = file_name(nm)
        mom = filer(fname, 0, 0)
        table_col = [r'\textbf{' + ename + r'}'] + [mom[e] for _, e in entries]
        table_cols.append(table_col)

table_left = [r'\textbf{Experiment}'] + \
             [r'\textit{' + e + r'}' for e, _ in entries]

# build table rows
table_rows = list()
for irow in range(len(entries) + 1):
    table_row = table_left[irow]
    for icol in range(2*len(names)):
        try:
            table_row += ' & ' + '{:02.1f}'.format(100*table_cols[icol][irow])