Code example #1
def calculateKYratioDifference(sim_wealth, weights, total_output, target_KY):
    '''
    Calculates the absolute distance between the simulated capital-to-output
    ratio and the true U.S. level.
    
    Parameters
    ----------
    sim_wealth : numpy.array
        Array with simulated wealth values.
    weights : numpy.array
        List of weights for each row of sim_wealth.
    total_output : float
        Denominator for the simulated K/Y ratio.
    target_KY : float
        Actual U.S. K/Y ratio to match.
        
    Returns
    -------
    distance : float
        Absolute distance between simulated and actual K/Y ratios.
    '''
    sim_K = calcWeightedAvg(sim_wealth, weights) / (Params.l_bar)
    sim_KY = sim_K / total_output
    distance = abs(sim_KY - target_KY)
    return distance
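
This function is a natural objective for a scalar optimizer: the smaller the return value, the closer the simulated K/Y ratio is to the target. The sketch below is a minimal, self-contained illustration of the same calculation; weighted_avg and ky_ratio_distance are hypothetical stand-ins for HARK's calcWeightedAvg and the function above, l_bar is passed as an explicit argument rather than read from Params, and all numbers are toy values.

import numpy as np

def weighted_avg(data, weights):
    # Plain weighted mean, standing in for HARK's calcWeightedAvg.
    data, weights = np.asarray(data).flatten(), np.asarray(weights).flatten()
    return np.dot(data, weights) / np.sum(weights)

def ky_ratio_distance(sim_wealth, weights, total_output, target_KY, l_bar=1.0):
    # Same structure as calculateKYratioDifference, with the labor-supply
    # normalization passed explicitly.
    sim_K = weighted_avg(sim_wealth, weights) / l_bar
    sim_KY = sim_K / total_output
    return abs(sim_KY - target_KY)

# Toy call: five simulated wealth observations with equal weights.
wealth = np.array([1.2, 3.4, 0.7, 8.9, 2.1])
weights = np.ones_like(wealth) / wealth.size
print(ky_ratio_distance(wealth, weights, total_output=0.32, target_KY=10.26))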
Code example #2
def calcKappaMean(DiscFac, nabla):
    '''
    Calculates the average MPC for the given parameters.  This is a very small
    sub-function of sensitivityAnalysis.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
        
    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,
                                 bot=DiscFac - nabla,
                                 top=DiscFac + nabla)[1]
    assignBetaDistribution(est_type_list, DiscFac_list)
    multiThreadCommandsFake(est_type_list, beta_point_commands)

    kappa_all = calcWeightedAvg(
        np.vstack((this_type.kappa_history for this_type in est_type_list)),
        np.tile(Params.age_weight_all / float(Params.pref_type_count),
                Params.pref_type_count))
    return kappa_all
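
The [1] indexing shows that approxUniform returns a pair whose second element is the list of N discount-factor values spanning [DiscFac - nabla, DiscFac + nabla]; those values are assigned across the agent types, each type is solved, and the population MPC is the age- and type-weighted average of the stacked kappa_history arrays. The stand-in below sketches only the grid step, assuming equiprobable midpoints of N equal sub-intervals; uniform_grid is a hypothetical helper and HARK's actual point placement may differ.

import numpy as np

def uniform_grid(N, bot, top):
    # Assumed discretization: N equiprobable points at the midpoints of
    # N equal sub-intervals of [bot, top].
    edges = np.linspace(bot, top, N + 1)
    values = 0.5 * (edges[:-1] + edges[1:])
    probs = np.ones(N) / N
    return probs, values

DiscFac, nabla = 0.98, 0.01      # illustrative center and width
_, DiscFac_list = uniform_grid(N=7, bot=DiscFac - nabla, top=DiscFac + nabla)
print(DiscFac_list)              # seven discount factors centered on 0.98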
Code example #3
File: cstwMPC.py Project: chiomh/HARK
def calcKappaMean(DiscFac,nabla):
    '''
    Calculates the average MPC for the given parameters.  This is a very small
    sub-function of sensitivityAnalysis.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
        
    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
    assignBetaDistribution(est_type_list,DiscFac_list)
    multiThreadCommandsFake(est_type_list,beta_point_commands)
    
    kappa_all = calcWeightedAvg(np.vstack((this_type.kappa_history for this_type in est_type_list)),
                                np.tile(Params.age_weight_all/float(Params.pref_type_count),
                                        Params.pref_type_count))
    return kappa_all
Code example #4
File: cstwMPC.py Project: chiomh/HARK
def calculateKYratioDifference(sim_wealth,weights,total_output,target_KY):
    '''
    Calculates the absolute distance between the simulated capital-to-output
    ratio and the true U.S. level.
    
    Parameters
    ----------
    sim_wealth : numpy.array
        Array with simulated wealth values.
    weights : numpy.array
        List of weights for each row of sim_wealth.
    total_output : float
        Denominator for the simulated K/Y ratio.
    target_KY : float
        Actual U.S. K/Y ratio to match.
        
    Returns
    -------
    distance : float
        Absolute distance between simulated and actual K/Y ratios.
    '''
    sim_K = calcWeightedAvg(sim_wealth,weights)/(Params.l_bar)
    sim_KY = sim_K/total_output
    distance = abs(sim_KY - target_KY)
    return distance
Code example #5
def makeCSTWstats(DiscFac,
                  nabla,
                  this_type_list,
                  age_weight,
                  lorenz_distance=0.0,
                  save_name=None):
    '''
    Displays (and saves) a bunch of statistics.  Separate from makeCSTWresults()
    for compatibility with the aggregate shock model.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
    this_type_list : [cstwMPCagent]
        List of agent types in the economy.
    age_weight : np.array
        Age-conditional array of weights for the wealth data.
    lorenz_distance : float
        Distance between simulated and actual Lorenz curves, for display.
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.
        
    Returns
    -------
    none
    '''
    sim_length = this_type_list[0].sim_periods
    sim_wealth = (np.vstack(
        (this_type.W_history for this_type in this_type_list))).flatten()
    sim_wealth_short = (np.vstack(
        (this_type.W_history[0:sim_length, :]
         for this_type in this_type_list))).flatten()
    sim_kappa = (np.vstack(
        (this_type.kappa_history for this_type in this_type_list))).flatten()
    sim_income = (np.vstack((this_type.pHist[0:sim_length, :] *
                             np.asarray(this_type.TranShkHist[0:sim_length, :])
                             for this_type in this_type_list))).flatten()
    sim_ratio = (np.vstack((this_type.W_history[0:sim_length, :] /
                            this_type.pHist[0:sim_length, :]
                            for this_type in this_type_list))).flatten()
    if Params.do_lifecycle:
        sim_unemp = (np.vstack((np.vstack((
            this_type.IncUnemp == this_type.TranShkHist[0:Params.working_T, :],
            np.zeros((Params.retired_T + 1, this_type_list[0].Nagents),
                     dtype=bool)))
                                for this_type in this_type_list))).flatten()
        sim_emp = (np.vstack((np.vstack(
            (this_type.IncUnemp !=
             this_type.TranShkHist[0:Params.working_T, :],
             np.zeros((Params.retired_T + 1, this_type_list[0].Nagents),
                      dtype=bool)))
                              for this_type in this_type_list))).flatten()
        sim_ret = (np.vstack((np.vstack(
            (np.zeros((Params.working_T, this_type_list[0].Nagents),
                      dtype=bool),
             np.ones((Params.retired_T + 1, this_type_list[0].Nagents),
                     dtype=bool)))
                              for this_type in this_type_list))).flatten()
    else:
        sim_unemp = np.vstack(
            (this_type.IncUnemp == this_type.TranShkHist[0:sim_length, :]
             for this_type in this_type_list)).flatten()
        sim_emp = np.vstack(
            (this_type.IncUnemp != this_type.TranShkHist[0:sim_length, :]
             for this_type in this_type_list)).flatten()
        sim_ret = np.zeros(sim_emp.size, dtype=bool)
    sim_weight_all = np.tile(np.repeat(age_weight, this_type_list[0].Nagents),
                             Params.pref_type_count)

    if Params.do_beta_dist and Params.do_lifecycle:
        kappa_mean_by_age_type = (np.mean(np.vstack(
            (this_type.kappa_history for this_type in this_type_list)),
                                          axis=1)).reshape(
                                              (Params.pref_type_count * 3,
                                               DropoutType.T_total + 1))
        kappa_mean_by_age_pref = np.zeros(
            (Params.pref_type_count, DropoutType.T_total + 1)) + np.nan
        for j in range(Params.pref_type_count):
            kappa_mean_by_age_pref[
                j, ] = Params.d_pct * kappa_mean_by_age_type[
                    3 * j + 0, ] + Params.h_pct * kappa_mean_by_age_type[
                        3 * j + 1, ] + Params.c_pct * kappa_mean_by_age_type[
                            3 * j + 2, ]
        kappa_mean_by_age = np.mean(kappa_mean_by_age_pref, axis=0)
        kappa_lo_beta_by_age = kappa_mean_by_age_pref[0, :]
        kappa_hi_beta_by_age = kappa_mean_by_age_pref[Params.pref_type_count -
                                                      1, :]

    lorenz_fig_data = makeLorenzFig(Params.SCF_wealth, Params.SCF_weights,
                                    sim_wealth, sim_weight_all)
    mpc_fig_data = makeMPCfig(sim_kappa, sim_weight_all)

    kappa_all = calcWeightedAvg(
        np.vstack((this_type.kappa_history for this_type in this_type_list)),
        np.tile(age_weight / float(Params.pref_type_count),
                Params.pref_type_count))
    kappa_unemp = np.sum(
        sim_kappa[sim_unemp] * sim_weight_all[sim_unemp]) / np.sum(
            sim_weight_all[sim_unemp])
    kappa_emp = np.sum(sim_kappa[sim_emp] * sim_weight_all[sim_emp]) / np.sum(
        sim_weight_all[sim_emp])
    kappa_ret = np.sum(sim_kappa[sim_ret] * sim_weight_all[sim_ret]) / np.sum(
        sim_weight_all[sim_ret])

    my_cutoffs = [(0.99, 1), (0.9, 1), (0.8, 1), (0.6, 0.8), (0.4, 0.6),
                  (0.2, 0.4), (0.0, 0.2)]
    kappa_by_ratio_groups = calcSubpopAvg(sim_kappa, sim_ratio, my_cutoffs,
                                          sim_weight_all)
    kappa_by_income_groups = calcSubpopAvg(sim_kappa, sim_income, my_cutoffs,
                                           sim_weight_all)

    quintile_points = getPercentiles(sim_wealth_short,
                                     weights=sim_weight_all,
                                     percentiles=[0.2, 0.4, 0.6, 0.8])
    wealth_quintiles = np.ones(sim_wealth_short.size, dtype=int)
    wealth_quintiles[sim_wealth_short > quintile_points[0]] = 2
    wealth_quintiles[sim_wealth_short > quintile_points[1]] = 3
    wealth_quintiles[sim_wealth_short > quintile_points[2]] = 4
    wealth_quintiles[sim_wealth_short > quintile_points[3]] = 5
    MPC_cutoff = getPercentiles(sim_kappa,
                                weights=sim_weight_all,
                                percentiles=[2.0 / 3.0])
    these_quintiles = wealth_quintiles[sim_kappa > MPC_cutoff]
    these_weights = sim_weight_all[sim_kappa > MPC_cutoff]
    hand_to_mouth_total = np.sum(these_weights)
    hand_to_mouth_pct = []
    for q in range(5):
        hand_to_mouth_pct.append(
            np.sum(these_weights[these_quintiles == (q + 1)]) /
            hand_to_mouth_total)

    results_string = 'Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(
        nabla) + '\n'
    results_string += 'Lorenz distance is ' + str(lorenz_distance) + '\n'
    results_string += 'Average MPC for all consumers is ' + mystr(
        kappa_all) + '\n'
    results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(
        kappa_by_ratio_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of W/Y is ' + mystr(
        kappa_by_ratio_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of W/Y is ' + mystr(
        kappa_by_ratio_groups[6]) + '\n'
    results_string += 'Average MPC in the top percentile of y is ' + mystr(
        kappa_by_income_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of y is ' + mystr(
        kappa_by_income_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of y is ' + mystr(
        kappa_by_income_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of y is ' + mystr(
        kappa_by_income_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of y is ' + mystr(
        kappa_by_income_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of y is ' + mystr(
        kappa_by_income_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of y is ' + mystr(
        kappa_by_income_groups[6]) + '\n'
    results_string += 'Average MPC for the employed is ' + mystr(
        kappa_emp) + '\n'
    results_string += 'Average MPC for the unemployed is ' + mystr(
        kappa_unemp) + '\n'
    results_string += 'Average MPC for the retired is ' + mystr(
        kappa_ret) + '\n'
    results_string += 'Of the population with the 1/3 highest MPCs...' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[0] *
        100) + '% are in the bottom wealth quintile,' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[1] *
        100) + '% are in the second wealth quintile,' + '\n'
    results_string += mystr(hand_to_mouth_pct[2] *
                            100) + '% are in the third wealth quintile,' + '\n'
    results_string += mystr(
        hand_to_mouth_pct[3] *
        100) + '% are in the fourth wealth quintile,' + '\n'
    results_string += 'and ' + mystr(
        hand_to_mouth_pct[4] *
        100) + '% are in the top wealth quintile.' + '\n'
    print(results_string)

    if save_name is not None:
        with open('./Results/' + save_name + 'LorenzFig.txt', 'w') as f:
            my_writer = csv.writer(
                f,
                delimiter='\t',
            )
            for j in range(len(lorenz_fig_data[0])):
                my_writer.writerow([
                    lorenz_fig_data[0][j], lorenz_fig_data[1][j],
                    lorenz_fig_data[2][j]
                ])
            f.close()
        with open('./Results/' + save_name + 'MPCfig.txt', 'w') as f:
            my_writer = csv.writer(f, delimiter='\t')
            for j in range(len(mpc_fig_data[0])):
                my_writer.writerow([lorenz_fig_data[0][j], mpc_fig_data[1][j]])
            f.close()
        if Params.do_beta_dist and Params.do_lifecycle:
            with open('./Results/' + save_name + 'KappaByAge.txt', 'w') as f:
                my_writer = csv.writer(f, delimiter='\t')
                for j in range(len(kappa_mean_by_age)):
                    my_writer.writerow([
                        kappa_mean_by_age[j], kappa_lo_beta_by_age[j],
                        kappa_hi_beta_by_age[j]
                    ])
                f.close()
        with open('./Results/' + save_name + 'Results.txt', 'w') as f:
            f.write(results_string)
            f.close()
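
Two small idioms above are worth isolating. Wealth quintiles are assigned cumulatively: every observation starts in quintile 1 and its label is bumped once for each quintile boundary it exceeds. The hand-to-mouth breakdown is then a weighted count, within each quintile, of the households above the 2/3 MPC percentile. The sketch below reproduces both steps on mock, equally weighted data; np.percentile stands in for the weighted getPercentiles and every variable is illustrative.

import numpy as np

rng = np.random.default_rng(0)
wealth = rng.lognormal(size=1000)            # mock wealth observations
mpc = rng.uniform(0.05, 0.95, size=1000)     # mock MPCs
weight = np.ones(1000)                       # equal weights for illustration

# Cumulative quintile assignment: bump the label once per boundary exceeded.
cuts = np.percentile(wealth, [20, 40, 60, 80])
quintile = np.ones(wealth.size, dtype=int)
for c in cuts:
    quintile[wealth > c] += 1

# Weighted share of the top-1/3 MPC households falling in each wealth quintile.
mpc_cut = np.percentile(mpc, 100 * 2.0 / 3.0)
high = mpc > mpc_cut
shares = [weight[high & (quintile == q)].sum() / weight[high].sum()
          for q in range(1, 6)]
print(shares)    # with independent mock data, each share is roughly 0.2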
Code example #6
File: cstwMPC.py Project: chiomh/HARK
def makeCSTWstats(DiscFac,nabla,this_type_list,age_weight,lorenz_distance=0.0,save_name=None):
    '''
    Displays (and saves) a bunch of statistics.  Separate from makeCSTWresults()
    for compatibility with the aggregate shock model.
    
    Parameters
    ----------
    DiscFac : float
        Center of the uniform distribution of discount factors
    nabla : float
        Width of the uniform distribution of discount factors
    this_type_list : [cstwMPCagent]
        List of agent types in the economy.
    age_weight : np.array
        Age-conditional array of weights for the wealth data.
    lorenz_distance : float
        Distance between simulated and actual Lorenz curves, for display.
    save_name : string
        Name to save the calculated results, for later use in producing figures
        and tables, etc.
        
    Returns
    -------
    none
    '''
    sim_length = this_type_list[0].sim_periods
    sim_wealth = (np.vstack((this_type.W_history for this_type in this_type_list))).flatten()
    sim_wealth_short = (np.vstack((this_type.W_history[0:sim_length,:] for this_type in this_type_list))).flatten()
    sim_kappa = (np.vstack((this_type.kappa_history for this_type in this_type_list))).flatten()
    sim_income = (np.vstack((this_type.pHist[0:sim_length,:]*np.asarray(this_type.TranShkHist[0:sim_length,:]) for this_type in this_type_list))).flatten()
    sim_ratio = (np.vstack((this_type.W_history[0:sim_length,:]/this_type.pHist[0:sim_length,:] for this_type in this_type_list))).flatten()
    if Params.do_lifecycle:
        sim_unemp = (np.vstack((np.vstack((this_type.IncUnemp == this_type.TranShkHist[0:Params.working_T,:],np.zeros((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
        sim_emp = (np.vstack((np.vstack((this_type.IncUnemp != this_type.TranShkHist[0:Params.working_T,:],np.zeros((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
        sim_ret = (np.vstack((np.vstack((np.zeros((Params.working_T,this_type_list[0].Nagents),dtype=bool),np.ones((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
    else:
        sim_unemp = np.vstack((this_type.IncUnemp == this_type.TranShkHist[0:sim_length,:] for this_type in this_type_list)).flatten()
        sim_emp = np.vstack((this_type.IncUnemp != this_type.TranShkHist[0:sim_length,:] for this_type in this_type_list)).flatten()
        sim_ret = np.zeros(sim_emp.size,dtype=bool)
    sim_weight_all = np.tile(np.repeat(age_weight,this_type_list[0].Nagents),Params.pref_type_count)
    
    if Params.do_beta_dist and Params.do_lifecycle:
        kappa_mean_by_age_type = (np.mean(np.vstack((this_type.kappa_history for this_type in this_type_list)),axis=1)).reshape((Params.pref_type_count*3,DropoutType.T_total+1))
        kappa_mean_by_age_pref = np.zeros((Params.pref_type_count,DropoutType.T_total+1)) + np.nan
        for j in range(Params.pref_type_count):
            kappa_mean_by_age_pref[j,] = Params.d_pct*kappa_mean_by_age_type[3*j+0,] + Params.h_pct*kappa_mean_by_age_type[3*j+1,] + Params.c_pct*kappa_mean_by_age_type[3*j+2,] 
        kappa_mean_by_age = np.mean(kappa_mean_by_age_pref,axis=0)
        kappa_lo_beta_by_age = kappa_mean_by_age_pref[0,:]
        kappa_hi_beta_by_age = kappa_mean_by_age_pref[Params.pref_type_count-1,:]
    
    lorenz_fig_data = makeLorenzFig(Params.SCF_wealth,Params.SCF_weights,sim_wealth,sim_weight_all)
    mpc_fig_data = makeMPCfig(sim_kappa,sim_weight_all)
    
    kappa_all = calcWeightedAvg(np.vstack((this_type.kappa_history for this_type in this_type_list)),np.tile(age_weight/float(Params.pref_type_count),Params.pref_type_count))
    kappa_unemp = np.sum(sim_kappa[sim_unemp]*sim_weight_all[sim_unemp])/np.sum(sim_weight_all[sim_unemp])
    kappa_emp = np.sum(sim_kappa[sim_emp]*sim_weight_all[sim_emp])/np.sum(sim_weight_all[sim_emp])
    kappa_ret = np.sum(sim_kappa[sim_ret]*sim_weight_all[sim_ret])/np.sum(sim_weight_all[sim_ret])
    
    my_cutoffs = [(0.99,1),(0.9,1),(0.8,1),(0.6,0.8),(0.4,0.6),(0.2,0.4),(0.0,0.2)]
    kappa_by_ratio_groups = calcSubpopAvg(sim_kappa,sim_ratio,my_cutoffs,sim_weight_all)
    kappa_by_income_groups = calcSubpopAvg(sim_kappa,sim_income,my_cutoffs,sim_weight_all)
    
    quintile_points = getPercentiles(sim_wealth_short,weights=sim_weight_all,percentiles=[0.2, 0.4, 0.6, 0.8])
    wealth_quintiles = np.ones(sim_wealth_short.size,dtype=int)
    wealth_quintiles[sim_wealth_short > quintile_points[0]] = 2
    wealth_quintiles[sim_wealth_short > quintile_points[1]] = 3
    wealth_quintiles[sim_wealth_short > quintile_points[2]] = 4
    wealth_quintiles[sim_wealth_short > quintile_points[3]] = 5
    MPC_cutoff = getPercentiles(sim_kappa,weights=sim_weight_all,percentiles=[2.0/3.0])
    these_quintiles = wealth_quintiles[sim_kappa > MPC_cutoff]
    these_weights = sim_weight_all[sim_kappa > MPC_cutoff]
    hand_to_mouth_total = np.sum(these_weights)
    hand_to_mouth_pct = []
    for q in range(5):
        hand_to_mouth_pct.append(np.sum(these_weights[these_quintiles == (q+1)])/hand_to_mouth_total)
    
    results_string = 'Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(nabla) + '\n'
    results_string += 'Lorenz distance is ' + str(lorenz_distance) + '\n'
    results_string += 'Average MPC for all consumers is ' + mystr(kappa_all) + '\n'
    results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(kappa_by_ratio_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of W/Y is ' + mystr(kappa_by_ratio_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(kappa_by_ratio_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(kappa_by_ratio_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(kappa_by_ratio_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(kappa_by_ratio_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of W/Y is ' + mystr(kappa_by_ratio_groups[6]) + '\n'
    results_string += 'Average MPC in the top percentile of y is ' + mystr(kappa_by_income_groups[0]) + '\n'
    results_string += 'Average MPC in the top decile of y is ' + mystr(kappa_by_income_groups[1]) + '\n'
    results_string += 'Average MPC in the top quintile of y is ' + mystr(kappa_by_income_groups[2]) + '\n'
    results_string += 'Average MPC in the second quintile of y is ' + mystr(kappa_by_income_groups[3]) + '\n'
    results_string += 'Average MPC in the middle quintile of y is ' + mystr(kappa_by_income_groups[4]) + '\n'
    results_string += 'Average MPC in the fourth quintile of y is ' + mystr(kappa_by_income_groups[5]) + '\n'
    results_string += 'Average MPC in the bottom quintile of y is ' + mystr(kappa_by_income_groups[6]) + '\n'
    results_string += 'Average MPC for the employed is ' + mystr(kappa_emp) + '\n'
    results_string += 'Average MPC for the unemployed is ' + mystr(kappa_unemp) + '\n'
    results_string += 'Average MPC for the retired is ' + mystr(kappa_ret) + '\n'
    results_string += 'Of the population with the 1/3 highest MPCs...' + '\n'
    results_string += mystr(hand_to_mouth_pct[0]*100) + '% are in the bottom wealth quintile,' + '\n'
    results_string += mystr(hand_to_mouth_pct[1]*100) + '% are in the second wealth quintile,' + '\n'
    results_string += mystr(hand_to_mouth_pct[2]*100) + '% are in the third wealth quintile,' + '\n'
    results_string += mystr(hand_to_mouth_pct[3]*100) + '% are in the fourth wealth quintile,' + '\n'
    results_string += 'and ' + mystr(hand_to_mouth_pct[4]*100) + '% are in the top wealth quintile.' + '\n'
    print(results_string)
    
    if save_name is not None:
        with open('./Results/' + save_name + 'LorenzFig.txt','w') as f:
            my_writer = csv.writer(f, delimiter='\t',)
            for j in range(len(lorenz_fig_data[0])):
                my_writer.writerow([lorenz_fig_data[0][j], lorenz_fig_data[1][j], lorenz_fig_data[2][j]])
            f.close()
        with open('./Results/' + save_name + 'MPCfig.txt','w') as f:
            my_writer = csv.writer(f, delimiter='\t')
            for j in range(len(mpc_fig_data[0])):
                my_writer.writerow([lorenz_fig_data[0][j], mpc_fig_data[1][j]])
            f.close()
        if Params.do_beta_dist and Params.do_lifecycle:
            with open('./Results/' + save_name + 'KappaByAge.txt','w') as f:
                my_writer = csv.writer(f, delimiter='\t')
                for j in range(len(kappa_mean_by_age)):
                    my_writer.writerow([kappa_mean_by_age[j], kappa_lo_beta_by_age[j], kappa_hi_beta_by_age[j]])
                f.close()
        with open('./Results/' + save_name + 'Results.txt','w') as f:
            f.write(results_string)
            f.close()
Code example #7
File: cstwMPC.py Project: ganong123/HARK
        
    Returns
    -------
    kappa_all : float
        Average marginal propensity to consume in the population.
    '''
    DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
    assignBetaDistribution(est_type_list,DiscFac_list)
    multiThreadCommandsFake(est_type_list,beta_point_commands)
    
    kappa_all = calcWeightedAvg(np.vstack((this_type.kappa_history for this_type in est_type_list)),
                                np.tile(Params.age_weight_all/float(Params.pref_type_count),
                                        Params.pref_type_count))
    return kappa_all
    
    
def sensitivityAnalysis(parameter,values,is_time_vary):
    '''
    Perform a sensitivity analysis by varying a chosen parameter over given values
    and re-estimating the model at each.  Only works for perpetual youth version.
    Saves numeric results in a file named SensitivityPARAMETER.txt.
    
    Parameters
    ----------
    parameter : string
        Name of an attribute/parameter of cstwMPCagent on which to perform a
        sensitivity analysis.  The attribute should be a single float.