Example #1
    def save(self):

        filename = os.path.join(PathFinder.getSourceDir(), 'InitialGuess.pkl')

        guesses = np.empty((0, 3), dtype=object)

        if os.path.isfile(filename):
            with open(filename, 'rb') as f:
                guesses = pickle.load(f)
            # guesses is an n x 3 array of (scenario, Dynamic, Market) rows

        # Find exact match (if any), otherwise add
        found = False
        if guesses.shape[1] == 3:
            for i in range(guesses.shape[0]):
                scenario = guesses[i, 0]
                if self.scenario.isEquivalent(scenario):
                    guesses[i, :] = [scenario, self.Dynamic, self.Market]
                    found = True
                    break

        if not found:
            # np.append returns a new array, so the result must be reassigned
            guesses = np.append(guesses,
                                [[self.scenario, self.Dynamic, self.Market]],
                                axis=0)

        with open(filename, 'wb') as f:
            pickle.dump(guesses, f, protocol=pickle.HIGHEST_PROTOCOL)

        if found:
            source = 'Overwrote existing guess.'
        else:
            source = 'Added as new guess.'

        print('[INFO] Saved initial guess to file. %s \n' % source)
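A minimal, self-contained sketch of the same find-or-append pattern used by save() above, using a plain list of (scenario, Dynamic, Market) tuples instead of a NumPy object array (the helper names here are illustrative, not part of the model code):

import os
import pickle

def save_guess(filename, scenario, dynamic, market, is_equivalent):
    """Update the entry for an equivalent scenario, or append a new one."""
    entries = []
    if os.path.isfile(filename):
        with open(filename, 'rb') as f:
            entries = pickle.load(f)

    for i, (s, _, _) in enumerate(entries):
        if is_equivalent(scenario, s):
            entries[i] = (scenario, dynamic, market)  # overwrite existing guess
            break
    else:
        entries.append((scenario, dynamic, market))   # add as new guess

    with open(filename, 'wb') as f:
        pickle.dump(entries, f, protocol=pickle.HIGHEST_PROTOCOL)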
Example #2
    def fetch(self):
        filename = os.path.join(PathFinder.getSourceDir(), 'InitialGuess.pkl')
        if not os.path.isfile(filename):
            self.generate()
            return

        # Load the DB
        with open(filename, 'rb') as f:
            guesses = pickle.load(f)
        # Reminder: guesses is an n x 3 array of (scenario, Dynamic, Market) rows

        # First, find exact match (if any)
        if guesses.shape[1] == 3:
            for i in range(guesses.shape[0]):
                scenario = guesses[i, 0]
                if self.scenario.isEquivalent(scenario):
                    self.Dynamic = guesses[i, 1]
                    self.Market = guesses[i, 2]
                    return

        # Check for a non-version match as a backup
        if guesses.shape[1] == 3:
            for i in range(guesses.shape[0]):
                scenario = guesses[i, 0]
                if self.scenario.isEquivalentIgnoreVersion(scenario):
                    self.Dynamic = guesses[i, 1]
                    self.Market = guesses[i, 2]
                    print(
                        '[INFO] Using initial guess from different scenario version.\n'
                    )
                    return

        # If not found, generate
        self.generate()
Example #3
    def export(self, outputName=None):
        # If no outputName, create one from the Scenario
        if outputName is None:
            outputName = self.Description

        if not self.isSolved():
            from modelSolverModule import ModelSolver
            ModelSolver.solve(self)

        from pathFinderModule import PathFinder
        cacheDir = PathFinder.getCacheDir(self)
        outDir = PathFinder(self).getNamedOutputPath(outputName)

        print('Exporting scenario to %s \n' % outDir)
        if os.path.exists(outDir):
            shutil.rmtree(outDir)
        # cacheDir is a directory, so copy the whole tree rather than a single file
        shutil.copytree(cacheDir, outDir)
Example #4
    def isSolved(self):
        from pathFinderModule import PathFinder
        flag = os.path.exists(
            os.path.join(PathFinder.getCacheDir(self), 'solved'))
        if flag:
            # Check that the hashed location holds the correct scenario
            with open(os.path.join(PathFinder.getCacheDir(self), 'scenario.pkl'),
                      'rb') as handle:
                s = pickle.load(handle)
            flag = self.isEquivalent(Scenario(s['scenario']))
            if not flag:
                # TBD: Until caching is revised.
                raise Exception(
                    'Scenario:BAD_CACHEING - WARNING! Cached scenario at location is not this scenario.'
                )
        return flag
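isSolved combines a sentinel file with a pickled scenario check. A minimal sketch of the same cache-validation pattern; the writer side is assumed for illustration (only the reader side appears above), and the helper names are hypothetical:

import os
import pickle

def mark_solved(cache_dir, scenario):
    # Writer side (assumed): record the scenario, then drop a sentinel file.
    with open(os.path.join(cache_dir, 'scenario.pkl'), 'wb') as f:
        pickle.dump({'scenario': scenario}, f)
    open(os.path.join(cache_dir, 'solved'), 'w').close()

def is_solved(cache_dir, scenario, is_equivalent):
    # Reader side: the sentinel must exist AND the cached scenario must match.
    if not os.path.exists(os.path.join(cache_dir, 'solved')):
        return False
    with open(os.path.join(cache_dir, 'scenario.pkl'), 'rb') as f:
        cached = pickle.load(f)['scenario']
    return is_equivalent(scenario, cached)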
Example #5
    def generate(self):

        Market = {}
        Dynamic = {}
        steadyScenario = self.scenario.currentPolicy().steady()
        steady_dir = PathFinder.getCacheDir(steadyScenario)

        source = 'cached scenario'
        if os.path.isfile(os.path.join(steady_dir, 'market.pkl')):
            with open(os.path.join(steady_dir, 'market.pkl'), 'rb') as handle:
                Market = pickle.load(handle)

        if os.path.isfile(os.path.join(steady_dir, 'dynamics.pkl')):
            with open(os.path.join(steady_dir, 'dynamics.pkl'),
                      'rb') as handle:
                Dynamic = pickle.load(handle)

        if len(Market) == 0 or len(Dynamic) == 0:

            source = 'made-up numbers'

            # Load initial guesses (values come from some steady state results)
            Dynamic['outs'] = np.array([3.1980566])
            Dynamic['caps'] = np.array([9.1898354])
            Dynamic['labs'] = 0.5235
            captoout = Dynamic['caps'] / Dynamic['outs']
            debttoout = np.array([0.75])

            Market['beqs'] = np.array([0.153155])
            # capshare = (K/Y) / (K/Y + D/Y), where K/Y = captoout and D/Y = debttoout
            Market['capsharesAM'] = captoout / (captoout + debttoout)
            Market['capsharesPM'] = Market['capsharesAM']
            Market['rhos'] = 4.94974
            Market['invtocaps'] = 0.0078 + 0.056  # I/K = pop growth rate 0.0078 + depreciation

            Market['investmentToCapital0'] = 0.16
            Market['equityDividendRates'] = 0.05
            Market['worldAfterTaxReturn'] = 0.05
            Market['corpLeverageCost'] = 2
            Market['passLeverageCost'] = 2

            Dynamic['debts'] = Dynamic['outs'] * debttoout
            Dynamic['assetsAM'] = Dynamic['caps'] + Dynamic['debts']  # Assume p_K(0) = 1
            Dynamic['assetsPM'] = Dynamic['assetsAM']
            Dynamic['labeffs'] = Dynamic['caps'] / Market['rhos']
            Dynamic['investment'] = Dynamic['caps'] * Market['invtocaps']

            Dynamic['caps_foreign'] = 0

        self.Market = Market
        self.Dynamic = Dynamic

        print('[INFO] Generated new initial guess from %s. \n' % source)
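    # A quick check of the made-up-numbers arithmetic above (values from the
    # initial guesses; results rounded): captoout = 9.1898354 / 3.1980566
    # ~= 2.874, so capsharesAM = 2.874 / (2.874 + 0.75) ~= 0.793, and
    # invtocaps = 0.0078 + 0.056 = 0.0638.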
    def jenkinsTests():
        
        try:
            isHPCC      = PathFinder.isHPCCRun()

            # Run just the matching cases for now
            testNames   = ['steady', 'open_base', 'open_counter', 'closed_base', 'closed_counter']
            for o in testNames:
                if o == 'steady':
                    scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
                elif o == 'open_base':
                    scenario = Scenario(ModelTester.test_params).currentPolicy().open()
                elif o == 'open_counter':
                    scenario = Scenario(ModelTester.test_params).open()
                elif o == 'closed_base':
                    scenario = Scenario(ModelTester.test_params).currentPolicy().closed()
                elif o == 'closed_counter':
                    scenario = Scenario(ModelTester.test_params).closed()
                else:
                    scenario = None  # unknown test name

                typeDeviation = ModelTester.testOutput( scenario, o, 0 )

                if typeDeviation != ModelTester.DEVIATION_NONE:
                    if typeDeviation == ModelTester.DEVIATION_TINY and isHPCC:
                        continue
                    else:
                        exit(1)

            # Test writing the 'series' interface with the last scenario
            # Requires that 'baseline' scenario exists
            PathFinder.setToTestingMode()
            print( 'TESTING OutputWriter.writeScenarios\n' )
            ModelSolver.solve( scenario.baseline() )
            OutputWriter.writeScenarios( [scenario] )
            PathFinder.setToDevelopmentMode()

            print( 'ALL TESTS PASSED.\n' )
            exit(0)
        except Exception:
            exit(1)
    def SS_distribution(self):

        s = {}

        # Import variables common to all elements of s
        # Retirement spans ages T_work:T_life under 0-based indexing
        dist_retired = self.DIST[:, :, :, self.T_work:self.T_life, :, :, :]
        ben_retired = self.ben[:, :, :, self.T_work:self.T_life, :, :, :]
        # In NumPy, x[:] does not flatten as MATLAB's x(:) does; flatten explicitly
        dist_retired = dist_retired.flatten(order='F')
        ben_retired = ben_retired.flatten(order='F')

        # Calculate SS outlays as a percentage of GDP
        steady_dir = PathFinder.getCacheDir(self.scenario)
        with open(os.path.join(steady_dir, 'dynamics.pkl'), 'rb') as handle:
            s_dynamics = pickle.load(handle)
        s['SSbentoout'] = np.sum(ben_retired * dist_retired) / s_dynamics['outs']
        s['SStaxtoout'] = s_dynamics['ssts'] / s_dynamics['outs']

        # Table with distribution of Social Security benefits among retired households
        dist_retired0 = self.DIST[:, :, 0, self.T_work:self.T_life, :, :, :]
        dist_retired0 = dist_retired0.flatten(order='F') / np.sum(dist_retired)
        dist_retired = dist_retired / np.sum(dist_retired)
        ben_distmodel = get_moments(dist_retired, ben_retired)
        ben0 = {
            'percentile': np.sum(dist_retired0),
            'threshold': 0,
            'cumulativeShare': 0
        }
        s['ben_dist'] = pd.DataFrame(ben0, index=[0])
        # DataFrame.append does not modify in place (and is deprecated); concatenate instead
        s['ben_dist'] = pd.concat([s['ben_dist'], ben_distmodel],
                                  ignore_index=True)

        # Average asset holdings of retirees earning no SS benefits
        k_retired0 = self.karray[:, :, 0, self.T_work:self.T_life, :, :, :]
        k_retired0 = k_retired0.flatten(order='F')
        k_retired0 = k_retired0 * dist_retired0
        s['k_retired0'] = np.sum(k_retired0) / np.sum(dist_retired0) \
            / self.scenario['modelunit_dollar']

        # Average consumption of retirees earning no SS benefits
        c_retired0 = self.con[:, :, 0, self.T_work:self.T_life, :, :, :]
        c_retired0 = c_retired0.flatten(order='F')
        c_retired0 = c_retired0 * dist_retired0
        s['c_retired0'] = np.sum(c_retired0) / np.sum(dist_retired0) \
            / self.scenario['modelunit_dollar']

        return s
class ModelCalibrator:

    # Define list of parameters which define the steady state
    paramlist = ['beta', 'gamma', 'sigma', 'modelunit_dollar']

    # Define list of targets
    targetlist = ['captoout', 'labelas', 'savelas', 'outperHH']
    ntarget = len(targetlist)

    # Define number of discretization points for each dimension of the calibration grid
    ngrid = 15

    # Determine total number of calibration points
    #   There are 3 dimensions for the calibration grid -- beta, sigma, gamma
    npoint = ngrid**3

    # Define calibration point directory and calibration point file path
    pointdir = os.path.join(PathFinder.getSourceDir(), 'CalibrationPoints')
    pointfile = lambda ipoint: os.path.join(ModelCalibrator.pointdir,
                                            'point%05d.pkl' % ipoint)

    # Define the moment targets for the reports on how we did
    #   Cell array: { Variable Name, Value, Description }
    moment_targets = [['r', 0.05, 'Return on capital'],
                      ['PIT', 0.08, 'PIT/GDP'], ['SSTax', 0.05, 'SSTax/GDP'],
                      ['KbyY', 3.0, 'Capital/GDP'],
                      ['outperHH', 7.98e4, 'GDP$/adult']]
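    # Note: moment_targets rows are [name, value, description]. Code below
    # (calibrate_dollar, report_moments) looks rows up by name after converting
    # the list to a NumPy object array, e.g. (illustrative sketch):
    #
    #   targets = np.array(ModelCalibrator.moment_targets, dtype=object)
    #   row = np.where(targets[:, 0] == 'outperHH')[0][0]
    #   value = float(targets[row, 1])   # 7.98e4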

    # Define calibration points
    @staticmethod
    def definePoints():

        assert ModelCalibrator.npoint <= 75000, 'Number of calibration points exceeds HPCC task array job size limit.'

        # Clear or create calibration point directory
        if os.path.exists(ModelCalibrator.pointdir):
            shutil.rmtree(ModelCalibrator.pointdir)
        os.mkdir(ModelCalibrator.pointdir)

        # Specify parameter lower and upper bounds
        lb = {}
        ub = {}
        lb['beta'] = 0.920
        lb['gamma'] = 0.150
        lb['sigma'] = 1.20
        ub['beta'] = 1.050
        ub['gamma'] = 0.900
        ub['sigma'] = 9.00

        # Construct vectors of parameter values
        v = {}
        v['beta'] = np.linspace(lb['beta'],
                                ub['beta'],
                                num=ModelCalibrator.ngrid)
        v['gamma'] = np.linspace(lb['gamma'],
                                 ub['gamma'],
                                 num=ModelCalibrator.ngrid)
        v['sigma'] = np.logspace(np.log10(lb['sigma']),
                                 np.log10(ub['sigma']),
                                 num=ModelCalibrator.ngrid)

        # Generate calibration points as unique combinations of parameter values
        grid = {}
        (grid['beta'], grid['gamma'],
         grid['sigma']) = np.meshgrid(v['beta'], v['gamma'], v['sigma'])
        for ipoint in range(ModelCalibrator.npoint):
            params = {}
            for p in ['beta', 'gamma', 'sigma']:
                # .flat enumerates every grid combination with a single index
                params[p] = grid[p].flat[ipoint]
            with open(ModelCalibrator.pointfile(ipoint), 'wb') as f:
                pickle.dump(params, f)
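    # np.meshgrid returns three ngrid x ngrid x ngrid arrays whose .flat views
    # enumerate every (beta, gamma, sigma) combination exactly once, which is
    # what the loop above relies on. A toy check (illustrative, ngrid = 2):
    #
    #   b, g = np.meshgrid([0.92, 1.05], [0.15, 0.90])
    #   list(zip(b.flat, g.flat))
    #   # -> [(0.92, 0.15), (1.05, 0.15), (0.92, 0.90), (1.05, 0.90)]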

    # Solve calibration point
    @staticmethod
    def calibratePoint(ipoint):

        # Load parameter values for calibration point
        with open(ModelCalibrator.pointfile(ipoint), 'rb') as f:
            params = pickle.load(f)

        try:
            # Calibrate steady state on modelunit_dollar
            (targets, modelunit_dollar,
             solved) = ModelCalibrator.calibrate_dollar(params)
        except Exception:
            print(
                'Error encountered calibrating point %u:\nSaving placeholder solution values.\n'
                % ipoint)

            targets = {}
            for o in ModelCalibrator.targetlist:
                targets[o] = None
            modelunit_dollar = None
            solved = 0

        # Extend parameters structure
        params['modelunit_dollar'] = modelunit_dollar

        # Save parameters, targets, and solution condition to calibration point file
        with open(ModelCalibrator.pointfile(ipoint), 'wb') as f:
            pickle.dump(params, f)
            pickle.dump(targets, f)
            pickle.dump(solved, f)

    # Consolidate solved calibration points
    @staticmethod
    def consolidatePoints():

        # Clear or create calibration output directory
        outputdir = PathFinder.getCalibrationOutputDir()
        if os.path.exists(outputdir):
            shutil.rmtree(outputdir)
        os.mkdir(outputdir)

        paramv = {}
        targetv = {}

        # Initialize vectors of parameters, targets, and solution conditions
        for o in ModelCalibrator.paramlist:
            paramv[o] = np.empty(ModelCalibrator.npoint)
        for o in ModelCalibrator.targetlist:
            targetv[o] = np.empty(ModelCalibrator.npoint)
        solved = np.zeros(ModelCalibrator.npoint, dtype=bool)

        # Load and consolidate calibration points
        for i in range(ModelCalibrator.npoint):

            print('Reading calibration point %5d of %5d\n' %
                  (i, ModelCalibrator.npoint))

            s = {}

            with open(ModelCalibrator.pointfile(i), 'rb') as f:
                s['params'] = pickle.load(f)
                s['targets'] = pickle.load(f)
                s['solved'] = pickle.load(f)

            for o in ModelCalibrator.paramlist:
                paramv[o][i] = s['params'][o]
            for o in ModelCalibrator.targetlist:
                targetv[o][i] = s['targets'][o]
            solved[i] = bool(s['solved'])

        # Save consolidated points to calibration output directory
        with open(os.path.join(outputdir, 'calibration.pkl'), 'wb') as f:
            pickle.dump(paramv, f)
            pickle.dump(targetv, f)
            pickle.dump(solved, f)

        # Initialize plot of calibration point solution conditions
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # Determine colors
        cv = np.zeros((ModelCalibrator.npoint, 3))
        devs = np.minimum(np.abs(targetv['captoout'][solved] - 3)**0.5, 1)
        cv[solved, :] = np.column_stack(
            (devs, np.ones(devs.shape), devs)) * 180 / 256  # Gray to green
        cv[~solved, :] = np.tile(
            [200 / 256, 0, 0], (np.sum(~solved), 1))  # Red

        # Plot solution conditions
        ax.scatter(paramv['beta'],
                   paramv['gamma'],
                   paramv['sigma'],
                   s=40,
                   c=cv,
                   marker='o')

        # Format axes
        plt.axis('tight')
        ax.set_frame_on(True)
        ax.set_xlabel('beta')
        ax.set_xscale('linear')
        ax.set_xticks(np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], num=3))
        ax.set_ylabel('gamma')
        ax.set_yscale('linear')
        ax.set_yticks(np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], num=3))
        ax.set_zlabel('sigma')
        ax.set_zscale('log')
        ax.set_zticks(
            np.logspace(np.log10(ax.get_zlim()[0]),
                        np.log10(ax.get_zlim()[1]),
                        num=3))
        ax.minorticks_off()
        ax.grid(which='minor')

        # Save plot to calibration output directory
        # (matplotlib cannot write MATLAB .fig files, so save as PNG)
        fig.savefig(os.path.join(outputdir, 'conditions.png'))

    ##
    #  Single loop to calibrate on modelunit_dollar targets
    @staticmethod
    def calibrate_dollar(gridpoint):

        # Set target = $gdp/adult
        #     from Alex $79.8k for 2016
        #     REM: In moment_targets,
        #        col 1 = varname, col 2 = value, col 3 = description
        targets_array = np.array(ModelCalibrator.moment_targets, dtype=object)
        target_outperHH_index = np.where(
            targets_array[:, 0] == 'outperHH')[0][0]
        target_outperHH = float(targets_array[target_outperHH_index, 1])

        # Set initial modelunit_dollar.
        # In the future, we could apply a heuristic better initial guess.
        modelunit_dollar = 4.0e-05

        tolerance = 0.01  # as ratio
        err_size = 1
        iter_num = 1
        iter_max = 8  # iterations for modelunit_dollar

        while err_size > tolerance and iter_num <= iter_max:

            # Create Scenario to run
            # (gridpoint is the params dict loaded from the calibration point file)
            scenario = Scenario({
                'economy': 'steady',
                'beta': gridpoint['beta'],
                'gamma': gridpoint['gamma'],
                'sigma': gridpoint['sigma'],
                'modelunit_dollar': modelunit_dollar,
                'bequest_phi_1': 0
            })
            save_dir = ModelSolver.solve(scenario)

            # find target -- $gdp/pop
            with open(os.path.join(save_dir, 'paramsTargets.pkl'),
                      'rb') as handle:
                s_paramsTargets = pickle.load(handle)
            run_outperHH = s_paramsTargets['outperHH']

            err_size = abs(run_outperHH / target_outperHH - 1)
            print('...MODELUNIT_DOLLAR iteration %u   error=%f\n ' %
                  (iter_num, err_size))

            # package up answer
            targets = {
                'savelas': s_paramsTargets['savelas'],
                'labelas': s_paramsTargets['labelas'],
                'captoout': s_paramsTargets['captoout'],
                'outperHH': run_outperHH
            }

            # Update by percent shift, reduced a bit as number of
            # iterations increases. This approach slows the update rate
            # in case of slow convergence -- we're usually bouncing around then.
            exp_reduce = max(0.5, 1.0 - iter_num * 0.07)
            modelunit_dollar = modelunit_dollar * (
                (run_outperHH / target_outperHH)**exp_reduce)

            # Find if converged
            #    This only needs to be done after the loop, but
            #    we're about to wipe out the run's files.
            with open(os.path.join(save_dir, 'dynamics.pkl'), 'rb') as handle:
                s_dynamics = pickle.load(handle)
            is_converged = s_dynamics['is_converged']

            # Delete save directory along with parent directories
            shutil.rmtree(os.path.join(save_dir, '..', '..'))

            iter_num = iter_num + 1

        # Keep last successful run with modelunit_dollar
        modelunit_dollar = scenario.modelunit_dollar

        # Check solution condition.
        # Stable solution identified as:
        #  1. Robust solver convergence rate
        #  2. modelunit_dollar convergence
        is_solved = is_converged and (err_size <= tolerance)
        if iter_num > iter_max:
            print('...MODELUNIT_DOLLAR -- max iterations (%u) reached.\n' %
                  iter_max)

        return (targets, modelunit_dollar, is_solved)
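
    # The damped update above shrinks the step as iterations accumulate. A
    # worked example (illustrative numbers): if run_outperHH is 10% above
    # target on iteration 1, exp_reduce = max(0.5, 1 - 0.07) = 0.93 and
    # modelunit_dollar scales by 1.10**0.93 ~= 1.093; by iteration 8 the
    # exponent has floored at 0.5, so the same 10% error scales it by only
    # 1.10**0.5 ~= 1.049.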

    ##
    #  Print moments info on a particular steady state
    @staticmethod
    def report_moments(save_dir, targets=None):

        delimiter = '\r\n'  # end-of-line

        filepath = os.path.join(save_dir, 'iterations.csv')
        T = pd.read_csv(filepath)
        iters = T.iloc[:, 0].values
        iterations = iters[-1]

        with open(os.path.join(save_dir, 'dynamics.pkl'), 'rb') as handle:
            s_dynamics = pickle.load(handle)
        with open(os.path.join(save_dir, 'paramsTargets.pkl'), 'rb') as handle:
            s_paramsTargets = pickle.load(handle)
        with open(os.path.join(save_dir, 'market.pkl'), 'rb') as handle:
            s_markets = pickle.load(handle)

        # Define some helper vars for clarity
        pop = s_dynamics['pops']
        gdp = s_dynamics['outs']
        dollar = 1 / s_paramsTargets['modelunit_dollar']

        if targets is None:
            targets = ModelCalibrator.moment_targets
            targets = np.vstack((targets, ['labelas', 1, 'Labor elasticity']))
            targets = np.vstack((targets, ['savelas', 1,
                                           'Savings elasticity']))
        targets = np.array(targets, dtype=object)

        # helper function to format results
        myTargetPrint = (lambda lbl, modelResult, targetResult:
                         '   %20s = %f (%f) error = %0.1f%%' %
                         (lbl, modelResult, targetResult,
                          (modelResult / targetResult - 1) * 100.0))
        myParamPrint = lambda lbl, modelInput: '   %20s = %f' % (lbl,
                                                                 modelInput)

        # Make PARAMS section
        params = {
            'beta': s_paramsTargets['beta'],
            'sigma': s_paramsTargets['sigma'],
            'gamma': s_paramsTargets['gamma'],
            'model$': s_paramsTargets['modelunit_dollar']
        }

        param_part = '%s   PARAMS%s' % (delimiter, delimiter)
        for lbl, result in params.items():
            line = myParamPrint(lbl, result)
            param_part = '%s%s%s' % (param_part, line, delimiter)

        # Make structure for results
        #   targets has been passed in (or set to default)
        model_results = {
            'r': s_markets['MPKs'],
            'PIT': s_dynamics['pits'] / gdp,
            'SSTax': s_dynamics['ssts'] / gdp,
            'KbyY': s_paramsTargets['captoout'],
            'outperHH': gdp * dollar / pop,
            'labelas': s_paramsTargets['labelas'],
            'savelas': s_paramsTargets['savelas']
        }

        # Make TARGETS section
        target_part = '%s   TARGETS%s' % (delimiter, delimiter)
        for name, result in model_results.items():
            m_index = np.where(targets[:, 0] == name)[0][0]
            target = float(targets[m_index, 1])
            lbl = targets[m_index, 2]

            line = myTargetPrint(lbl, result, target)
            target_part = '%s%s%s' % (target_part, line, delimiter)

        # Make convergence part
        if s_dynamics['is_converged']:
            s_iter = 'Converged in %u iterations' % iterations
        else:
            s_iter = 'DID NOT converge in %u iterations.' % iterations

        converge_part = '%s CONVERGENCE: %s %s' % (delimiter, s_iter,
                                                   delimiter)

        # Concatenate for full report
        outstr = '%s%s%s' % (param_part, target_part, converge_part)
        return outstr

    ##
    #   Make a report of various moments for the 16 baselines
    @staticmethod
    def report_baseline_moments():

        outputfilename = os.path.join(PathFinder.getSourceDir(),
                                      'BaselineMoments.txt')
        with open(outputfilename, 'w+') as f:

            f.write('-------------BASELINE MOMENTS-------------')
            f.write('%s \r\n' % str(datetime.datetime.now()))

            # load the matrix and get inverter function
            (_, f_invert) = ParamGenerator.invert()

            # 4 x 4 grid of elasticities = 16 baselines (arange excludes the
            # stop value, so use 1.25 to include 1.0)
            for labelas in np.arange(0.25, 1.25, 0.25):
                for savelas in np.arange(0.25, 1.25, 0.25):
                    target = {'labelas': labelas, 'savelas': savelas}
                    f.write(
                        '\r\nBASELINE labor elas = %0.2f  savings elas = %0.2f \r\n'
                        % (labelas, savelas))
                    inverse = f_invert(target)

                    scenario = Scenario({
                        'economy': 'steady',
                        'beta': inverse['beta'],
                        'gamma': inverse['gamma'],
                        'sigma': inverse['sigma'],
                        'modelunit_dollar': inverse['modelunit_dollar'],
                        'bequest_phi_1': 0
                    })

                    save_dir = ModelSolver.solve(scenario)

                    targets = ModelCalibrator.moment_targets
                    targets = np.vstack(
                        (targets, ['labelas', labelas, 'Labor elasticity']))
                    targets = np.vstack(
                        (targets, ['savelas', savelas, 'Savings elasticity']))
                    outstr = ModelCalibrator.report_moments(save_dir, targets)
                    f.write('%s \r\n' % outstr)
                    f.write('-------------------------------------\r\n')

            f.write(' ==== DONE ===== \r\n')
Example #11
    def writeTransitionMatrix(scenario):

        # load solution objects
        from pathFinderModule import PathFinder
        cacheDir = PathFinder.getCacheDir(scenario)
        with open(os.path.join(cacheDir, 'decisions.pkl'), 'rb') as handle:
            OPTs = pickle.load(handle)

        # get the base output directory
        baseOutputDir = PathFinder.getTransitionMatrixOutputDir()

        # create output folder if it does not exist
        if not os.path.exists(baseOutputDir):
            os.mkdir(baseOutputDir)

        # get the tagged subfolder output directory
        outputDir = os.path.join(baseOutputDir,
                                 PathFinder.getScenarioPathTag(scenario))

        # check for whether scenario output subfolder exists
        # if it does, then this is a duplicate writing out
        if os.path.exists(outputDir):
            return None

        # check if map file exists, create it if it does not
        if not os.path.exists(os.path.join(baseOutputDir, 'map.csv')):
            fileHandle = open(os.path.join(baseOutputDir, 'map.csv'), 'w')
            for k in scenario:
                fileHandle.write(k + ',')
            fileHandle.write('\n')
            fileHandle.close()

        # append scenario info to map file by writing out to a temp file,
        # then loading the temp file back in
        with open('.temp.txt', 'w') as f:
            values = scenario.getParams()
            w = csv.DictWriter(f, values.keys())
            w.writerow(values)
        with open('.temp.txt', 'r') as f:
            text = f.read()
        os.remove('.temp.txt')
        with open(os.path.join(baseOutputDir, 'map.csv'), 'a+') as fileHandle:
            fileHandle.write(scenario.basedeftag + ',' +
                             scenario.counterdeftag + ',' + text)

        # create a folder to store output
        os.mkdir(outputDir)

        # converts a policy function into a discretized transition matrix;
        # if the policy doesn't fall neatly onto the grid, mass is split
        # between the two nearest points in proportion to distance from each
        def convertToTransitionMatrix(policy, values, dim):
            policy = policy.flatten(order='F')
            # index of the nearest grid node at or below each policy value
            discrete = np.digitize(policy, values) - 1
            distanceToBinEdge = policy - values[discrete]
            distanceToBinEdgeUpper = policy - values[discrete + 1]
            upperProbability = distanceToBinEdge / (distanceToBinEdge -
                                                    distanceToBinEdgeUpper)
            transition = np.zeros((policy.size, dim))
            rows = np.arange(grids['nz'] * grids['nk'] * grids['nb'])
            transition[rows, discrete + 1] = upperProbability
            transition[rows, discrete] = 1 - upperProbability
            return transition

        # for a given age, year, discretize assets and lifetime earning
        # average transitions. store output in `transitions` variable.
        transitions = {}

        # store grids for easy access
        from paramGeneratorModule import ParamGenerator
        grids = ParamGenerator.grids(scenario)

        for age in range(OPTs['SAVINGS'].shape[3]):
            for year in range(OPTs['SAVINGS'].shape[4]):

                # compute transition matrices for full state -> assets,
                # earnings grid
                assetsTransition = convertToTransitionMatrix(
                    OPTs['SAVINGS'][:, :, :, age, year], grids['kv'],
                    grids['nk'])

                earningsTransition = convertToTransitionMatrix(
                    OPTs['AVG_EARNINGS'][:, :, :, age, year], grids['bv'],
                    grids['nb'])

                # compute joint transition of assets and earnings
                assetEarningsTransition = (
                    np.kron(np.ones((1, grids['nb'])), assetsTransition) *
                    np.kron(earningsTransition, np.ones((1, grids['nk']))))

                # expand joint transition of asset and earnings to full
                # state space size
                assetEarningsTransition = np.kron(np.ones((1, grids['nz'])),
                                                  assetEarningsTransition)

                # get the productivity transition matrix
                productivityTransition = grids['transz']
                productivityTransition = np.squeeze(
                    productivityTransition[age, :, :])

                # expand it to the full state space size
                productivityTransition = np.kron(
                    productivityTransition,
                    np.ones((grids['nb'] * grids['nk'],
                             grids['nb'] * grids['nk'])))

                # multiply to get full transition matrix
                transitionMatrix = productivityTransition * assetEarningsTransition

                # save transition matrix into struct
                transitions['age' + str(age) + 'year' +
                            str(year)] = transitionMatrix

        with open(os.path.join(outputDir, 'data.pkl'), 'wb') as handle:
            pickle.dump(transitions, handle, protocol=pickle.HIGHEST_PROTOCOL)
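    # convertToTransitionMatrix() above splits each policy value between the
    # two nearest grid nodes in proportion to its distance from each. A toy
    # check of that weighting with np.digitize (numbers illustrative):
    #
    #   values = np.array([0.0, 1.0, 2.0])          # grid nodes
    #   policy = np.array([0.25])                   # falls between nodes 0 and 1
    #   lower = np.digitize(policy, values) - 1     # -> [0]
    #   w_upper = (policy - values[lower]) / (values[lower + 1] - values[lower])
    #   # w_upper = [0.25]: 25% of the mass moves up to node 1, 75% stays at node 0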
    def testOutput(scenario, testName, isInteractive):

        # Set to testing environment
        PathFinder.setToTestingMode()
        
        # Clear the old results and solve
        ModelSolver.removeCached(scenario)
        taggedDir = ModelSolver.solve(scenario)
        cacheDir  = PathFinder.getCacheDir(scenario)
        
        # Set to development environment 
        #   TBD: Set back to original environment?
        PathFinder.setToDevelopmentMode()

        # testSet depends on type of scenario
        if( scenario.isSteady() ):
            setNames = ['market', 'dynamics']
        elif( scenario.isCurrentPolicy() ):
            setNames = ['market', 'dynamics' ]
        else:
            setNames = ['market', 'dynamics', 'statics']
        
        # Load target values
        targetfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ModelTester.pkl')
        with open(targetfile, 'rb') as handle:
            s = pickle.load(handle)
        target = s['target']

        # Initialize match flag
        typeDeviation = ModelTester.DEVIATION_NONE

        # Define function to flag issues
        # NOTE: Relies on severity of deviation being increasing
        def flag(msg, deviation):
            nonlocal typeDeviation
            print('\t%-15s%-20s%s\n' % (setname, valuename, msg))
            if deviation > typeDeviation:
                typeDeviation = deviation

        print('\n[Test results]\n')
        output = {testName: {}}
        for i in range(len(setNames)):

            # Extract output and target values by set
            setname = setNames[i]
            with open(os.path.join(cacheDir, '%s.pkl' % setname), 'rb') as handle:
                output[testName][setname] = pickle.load(handle)
            outputset = output[testName][setname]
            targetset = target[testName][setname]

            # Iterate over target values
            for valuename in targetset.keys():

                if not valuename in outputset.keys():

                    # Flag missing value
                    flag('Not found', ModelTester.DEVIATION_FATAL)
                    continue

                if isinstance(outputset[valuename], dict):

                    # Skip checking of structs -- it is currently just
                    # priceindex which does not need to be checked
                    print('\tSkipping %s because it is a struct.\n' % valuename)
                    continue

                if np.any(np.isnan(outputset[valuename][:])):

                    # Flag NaN value
                    flag('NaN value', ModelTester.DEVIATION_FATAL)
                    continue

                if np.any(outputset[valuename].shape != targetset[valuename].shape):

                    # Flag for size mismatch
                    flag('Size mismatch', ModelTester.DEVIATION_FATAL)
                    continue

                # Classify deviation
                deviation = ModelTester.calculateDeviation(outputset[valuename][:], targetset[valuename][:])
                if deviation > 0:
                    if deviation < 1e-6:
                        flag('TINY : %06.16f%% deviation' % (deviation * 100), ModelTester.DEVIATION_TINY)
                    elif deviation < 1e-4:
                        flag('SMALL: %06.16f%% deviation' % (deviation * 100), ModelTester.DEVIATION_SMALL)
                    else:
                        flag('LARGE: %06.4f%% deviation' % (deviation * 100), ModelTester.DEVIATION_FATAL)

            # Identify new values, if any
            for valuename in outputset.keys():

                if not valuename in targetset.keys():
                    flag('New', ModelTester.DEVIATION_FATAL)

        # Check for match
        if typeDeviation == ModelTester.DEVIATION_NONE:
            print('\tTarget matched.\n\n')
        else:

            if not isInteractive: 
                print( '\tTarget not matched.\n\n' )
                return
            
            # Query user for target update
            ans = input('\n\tUpdate test target with new values? Y/[N]: ')
            if ans == 'Y':
                target[testName] = output[testName]
                with open(targetfile, 'wb') as f:
                    # Keep the same file layout that was loaded above
                    pickle.dump({'target': target}, f)
                print('\tTarget updated.\n\n')
            else:
                print('\tTarget retained.\n\n')

        return typeDeviation
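    # The deviation thresholds above define the classification used throughout
    # this tester; a compact sketch of the same mapping (the DEVIATION_*
    # constants are ModelTester's; the helper name is illustrative):
    #
    #   def classifyDeviation(deviation):
    #       if deviation == 0:     return ModelTester.DEVIATION_NONE
    #       elif deviation < 1e-6: return ModelTester.DEVIATION_TINY
    #       elif deviation < 1e-4: return ModelTester.DEVIATION_SMALL
    #       else:                  return ModelTester.DEVIATION_FATAL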
    def unanticipated_shock():
        
        # Make the baseline scenario and "non-shock" version
        t                   = ModelTester.test_params
        
        # baseline scenario is not shocked
        s_baseline          = Scenario(t).currentPolicy().baseline()
        
        # Make "non-shock" shock baseline
        t                    = s_baseline.getParams()
        t['PolicyShockYear'] = t['TransitionFirstYear'] + ModelTester.policyShockShift
        s_next               = Scenario(t)

        # Get baseline Market, Dynamic
        ModelSolver.removeCached(s_baseline)                 # Clear cached Scenario
        
        tagged_dir      = ModelSolver.solve(s_baseline)
        baseline_dir    = PathFinder.getCacheDir(s_baseline)
        with open(os.path.join(baseline_dir, 'market.pkl'), 'rb') as handle:
            baseMarket      = pickle.load(handle)
        with open(os.path.join(baseline_dir, 'dynamics.pkl'), 'rb') as handle:
            baseDynamic     = pickle.load(handle)   
        
        # Get shocked Market, Dynamic
        ModelSolver.removeCached(s_next)                     # Clear cached scenario
        
        tagged_dir      = ModelSolver.solve(s_next)
        x_dir           = PathFinder.getCacheDir(s_next)
        with open(os.path.join(x_dir, 'market.pkl'), 'rb') as handle:
            xMarket         = pickle.load(handle)
        with open(os.path.join(x_dir, 'dynamics.pkl'), 'rb') as handle:
            xDynamic        = pickle.load(handle)
        
        # Compare baseline and shocked path
        print( '\n' )
        
        def do_check (baseD, xD, dName):
            passed = 1
            for p in baseD.keys():
                valuename = p
                if (not isinstance(baseD[valuename], numbers.Number) or ('_next' in valuename)):
                    continue

                # Check for within percent tolerance, also check 
                #    within numerical deviation (this is in case div by
                #    zero or close to zero)
                # TBD: Standardize deviations and tolerances
                percentDeviation    = abs((xD[valuename] - baseD[valuename]) / baseD[valuename])
                absoluteDeviation   = abs(baseD[valuename] - xD[valuename])
                if not np.all(np.array(percentDeviation) < 1e-4):
                    if not np.all(np.array(absoluteDeviation) < 1e-13):
                        m1 = 'Max percentdev = %f' % np.max(percentDeviation)
                        m2 = 'Max abs dev = %0.14f' % np.max(absoluteDeviation)
                        print( '%s.%s outside tolerance;\t\t %s; %s \n' % (dName, valuename, m1, m2))
                        passed = 0
                
            return passed
        
        passed_market  = do_check( baseMarket , xMarket , 'Market'  )
        passed_dynamic = do_check( baseDynamic, xDynamic, 'Dynamic' )
        passed = passed_market and passed_dynamic
        if passed:
            print( 'All values within convergence tolerances.\n' )
        
        return passed
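    # do_check() accepts a value if it passes either a relative tolerance or,
    # as a fallback for baselines near zero, an absolute one. A standalone
    # sketch of that two-level test (names illustrative):
    #
    #   def within_tolerance(base, x, rel_tol=1e-4, abs_tol=1e-13):
    #       rel = np.abs((np.asarray(x) - np.asarray(base)) / np.asarray(base))
    #       if np.all(rel < rel_tol):
    #           return True
    #       return bool(np.all(np.abs(np.asarray(base) - np.asarray(x)) < abs_tol))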
    def __init__(self, scenario, DIST=None, Market=None, OPTs=None):

        if not scenario.isSteady():
            raise Exception(
                'Unable to generate income distribution moments for transition paths.'
            )

        # PARAMETERS
        pathFinder = PathFinder(scenario)

        self.scenario = scenario
        save_dir = PathFinder.getCacheDir(scenario)

        # Define time constants and grids
        timing = ParamGenerator.timing(scenario)
        grids = ParamGenerator.grids(scenario)
        T_life = timing['T_life']  # Total life years
        T_model = timing['T_model']  # Transition path model years
        Tmax_work = timing['Tmax_work']  # Largest retirement age
        ng = grids['ng']  # num groups
        nz = grids['nz']  # num labor productivity shocks
        zs = grids['zs']  # shocks grid (by demographic type and age)
        nk = grids['nk']  # num asset points
        nb = grids['nb']  # num avg. earnings points

        # Useful later for a couple of functions
        self.kv = grids['kv']
        self.karray = np.tile(np.reshape(grids['kv'], [1, nk, 1, 1, 1, 1]),
                              [nz, 1, nb, T_life, ng, T_model])
        self.T_work = Tmax_work
        self.T_life = T_life

        ## DISTRIBUTION AND POLICY FUNCTIONS

        # Import households distribution
        if DIST is None:
            with open(os.path.join(save_dir, 'distribution.pkl'),
                      'rb') as handle:
                s = pickle.load(handle)
            DIST = s['DIST']
        dist = DIST.flatten(order='F')
        if T_model == 1:
            DIST = DIST[:, :, :, :, :, np.newaxis]

        dist_l = np.zeros((nz, nk, nb, T_life, ng, T_model))
        dist_l[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng,
               0:T_model] = DIST[0:nz, 0:nk, 0:nb, 0:Tmax_work, 0:ng,
                                 0:T_model]  # Working age population
        dist_l[0:nz, 0:nk, 0:nb, Tmax_work:T_life, 0:ng,
               0:T_model] = 0  # Retired population
        dist_l = dist_l.flatten(order='F') / np.sum(dist_l)

        # Useful later for a couple of functions
        self.DIST = DIST

        # Import market variables
        if Market is None:
            with open(os.path.join(save_dir, 'market.pkl'), 'rb') as handle:
                s = pickle.load(handle)
            wages = s['wages']
            capsharesAM = s['capsharesAM']
            bondDividendRates = s['bondDividendRates']
            equityDividendRates = s['equityDividendRates']
        else:
            wages = Market['wages']
            capsharesAM = Market['capsharesAM']
            bondDividendRates = Market['bondDividendRates']
            equityDividendRates = Market['equityDividendRates']

        # Import policy functions
        f = lambda X: np.tile(np.reshape(X, [nz, nk, nb, T_life, 1, T_model]),
                              [1, 1, 1, 1, ng, 1])
        if OPTs is None:
            with open(os.path.join(save_dir, 'decisions.pkl'), 'rb') as handle:
                s = pickle.load(handle)
            s = s['OPTs']
            labinc = f(s['LABOR']) * np.tile(
                np.reshape(np.transpose(zs, [2, 1, 0]),
                           [nz, 1, 1, T_life, 1, T_model]),
                [1, nk, nb, 1, ng, 1]) * wages
            k = f(s['SAVINGS'])
            self.ben = f(s['OASI_BENEFITS'])
            self.lab = f(s['LABOR'])
            self.con = f(s['CONSUMPTION'])
        else:
            labinc = f(OPTs['LABOR']) * np.tile(
                np.reshape(np.transpose(zs, [2, 1, 0]),
                           [nz, 1, 1, T_life, 1, T_model]),
                [1, nk, nb, 1, ng, 1]) * wages
            k = f(OPTs['SAVINGS'])
            self.ben = f(OPTs['OASI_BENEFITS'])
            self.lab = f(OPTs['LABOR'])
            self.con = f(OPTs['CONSUMPTION'])

        kinc = ((1 - capsharesAM) * bondDividendRates +
                capsharesAM * equityDividendRates) * k
        totinc = labinc.flatten(order='F') + kinc.flatten(
            order='F') + self.ben.flatten(order='F')  # Total income
        labinc = labinc.flatten(order='F')  # Labor income
        k = k.flatten(order='F')  # Asset holdings for tomorrow (k')

        # DATA WEALTH AND INCOME DISTRIBUTIONS
        file = pathFinder.getMicrosimInputPath(
            'SIM_NetPersonalWealth_distribution')

        self.a_distdata = pd.read_csv(file)
        # Append a last point for the graph (DataFrame.append is deprecated)
        self.a_distdata.loc[len(self.a_distdata)] = [99.9, float('nan'), 1]

        file = pathFinder.getMicrosimInputPath(
            'SIM_PreTaxLaborInc_distribution')
        self.l_distdata = pd.read_csv(file)
        # Append a last point for the graph
        self.l_distdata.loc[len(self.l_distdata)] = [99.9, float('nan'), 1]

        # MODEL WEALTH AND INCOME DISTRIBUTIONS

        # Compute wealth distribution
        self.a_distmodel = get_moments(dist, k)
        # Gini and Lorenz curve
        (self.a_ginimodel, self.a_lorenz) = gini(dist, k)

        # Compute labor income distribution
        self.l_distmodel = get_moments(dist_l, labinc)
        # Gini and Lorenz curve
        (self.l_ginimodel, self.l_lorenz) = gini(dist_l, labinc)

        # Compute total income distribution
        self.t_distmodel = get_moments(dist, totinc)
        # Gini and Lorenz curve (over total income, matching the moments above)
        (self.t_ginimodel, self.t_lorenz) = gini(dist, totinc)
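gini() and get_moments() are referenced throughout but not shown in this listing. Below is a minimal sketch of a weighted Gini/Lorenz computation consistent with how gini(weights, values) is called here; the interface and internals are assumptions for illustration, not the model's actual implementation:

import numpy as np

def gini(weights, values):
    """Weighted Gini coefficient and Lorenz curve (sketch, assumed interface)."""
    # Sort observations by value; weights are population masses.
    order = np.argsort(values)
    w = np.asarray(weights, dtype=float)[order]
    v = np.asarray(values, dtype=float)[order]
    w = w / w.sum()

    # Lorenz curve: cumulative population share vs. cumulative value share.
    cum_pop = np.insert(np.cumsum(w), 0, 0.0)
    cum_val = np.insert(np.cumsum(w * v), 0, 0.0)
    cum_val = cum_val / cum_val[-1]
    lorenz = np.column_stack((cum_pop, cum_val))

    # Gini = 1 - 2 * area under the Lorenz curve (trapezoidal rule).
    g = 1.0 - 2.0 * np.trapz(cum_val, cum_pop)
    return g, lorenz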