def all_cases():
    testNames = ['steady', 'open_base', 'open_counter', 'closed_base', 'closed_counter']
    for o in testNames:
        if o == 'steady':
            scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
        elif o == 'open_base':
            scenario = Scenario(ModelTester.test_params).currentPolicy().open()
        elif o == 'open_counter':
            scenario = Scenario(ModelTester.test_params).open()
        elif o == 'closed_base':
            scenario = Scenario(ModelTester.test_params).currentPolicy().closed()
        elif o == 'closed_counter':
            scenario = Scenario(ModelTester.test_params).closed()
        else:
            scenario = []
        if ModelTester.testOutput(scenario, o, 0) != ModelTester.DEVIATION_NONE:
            return
def doGELoop():
    # Build baseline scenario
    scenario = Scenario(ModelTester.test_params).currentPolicy()

    # Get production parameters, remap some parameters -- this is temporary
    paramsProduction = ParamGenerator.production(scenario)
    paramsProduction['capitalShare'] = paramsProduction['alpha']
    paramsProduction['laborShare'] = 1 - paramsProduction['alpha']

    Initial = {'capital0': 10}
    wage = 1.5 * np.ones(100)
    discountRate = 0.04 * np.ones(100)
    T = len(wage)

    # Specify the tolerance levels
    tolerance = {'wage': 1e-5, 'discountRate': 1e-5}

    excessLabor = np.ones(100)
    excessShare = np.ones(100)

    # Start the iterative method
    iteration = 0
    startTime = time.time()

    while (max(excessLabor[:]) > tolerance['wage']
           or max(excessShare[1:T]) > tolerance['discountRate']):
        iteration = iteration + 1

        Market = {'wage': wage, 'discountRate': discountRate}

        # Initialize firm
        theFirm = DynamicFirmGE(Initial, Market, paramsProduction)

        laborSupply = theFirm.getLS()
        laborDemand = theFirm.getLD()
        excessLabor = abs(laborSupply - laborDemand)

        shareDemand = theFirm.getShare()
        excessShare = abs(shareDemand - 1)
        value = theFirm.getValue()

        # Update guesses with a damped proportional adjustment
        wage = (1 / (1 + ((laborSupply - laborDemand) / laborDemand) * 0.1)) * Market['wage']
        for t in range(T - 1):
            discountRate[t] = (1 / (1 + ((1 - value[t + 1]) / abs(value[t + 1])) * 0.1)) * Market['discountRate'][t]
        discountRate[T - 1] = discountRate[T - 2]

    print(time.time() - startTime)
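# NOTE (illustrative sketch, not part of the model code): the proportional updates in
# doGELoop amount to a damped fixed-point iteration -- each price guess is scaled by
# 1/(1 + 0.1 * relative excess), so a positive excess supply of labor lowers the wage
# guess by roughly a tenth of the relative gap. The helper name and sample numbers
# below are assumptions for illustration only.
import numpy as np

def damped_update(price, relative_excess, damping=0.1):
    # Scale the current guess down (up) when the relative excess is positive (negative),
    # closing a fraction `damping` of the gap per iteration.
    return price / (1 + damping * relative_excess)

# Example: labor supply exceeds demand by 20% in period 0 and falls short by 10% in period 1.
wage_guess = np.array([1.5, 1.5])
print(damped_update(wage_guess, np.array([0.2, -0.1])))  # -> [~1.4706, ~1.5152]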
def report_baseline_moments():
    outputfilename = os.path.join(PathFinder.getSourceDir(), 'BaselineMoments.txt')
    f = open(outputfilename, 'w+')

    f.write('-------------BASELINE MOMENTS-------------\r\n')
    f.write('%s \r\n' % str(datetime.datetime.now()))

    # Load the matrix and get inverter function
    (_, f_invert) = ParamGenerator.invert()

    for labelas in np.arange(0.25, 1.0, 0.25):
        for savelas in np.arange(0.25, 1.0, 0.25):
            target = {'labelas': labelas, 'savelas': savelas}
            f.write('\r\nBASELINE labor elas = %0.2f savings elas = %0.2f \r\n'
                    % (labelas, savelas))
            inverse = f_invert(target)

            scenario = Scenario({
                'economy': 'steady',
                'beta': inverse['beta'],
                'gamma': inverse['gamma'],
                'sigma': inverse['sigma'],
                'modelunit_dollar': inverse['modelunit_dollar'],
                'bequest_phi_1': 0
            })
            save_dir = ModelSolver.solve(scenario)

            targets = ModelCalibrator.moment_targets
            targets = np.vstack((targets, ['labelas', labelas, 'Labor elasticity']))
            targets = np.vstack((targets, ['savelas', savelas, 'Savings elasticity']))
            outstr = ModelCalibrator.report_moments(save_dir, targets)
            f.write('%s \r\n' % outstr)
            f.write('-------------------------------------\r\n')

    f.write(' ==== DONE ===== \r\n')
    f.close()
def getScenarios(workListFileName):
    # Get name of worklist from name of file
    workListName = os.path.split(workListFileName)[1]
    workListName = os.path.splitext(workListName)[0]

    # Force read CSV -- readtable gets confused and skips first rows sometimes.
    # Read one record per row, keyed by the header's parameter names.
    workList = pd.read_csv(workListFileName).to_dict(orient='records')
    numScenarios = len(workList)

    print('DOGEController.getScenarios: Worklist <%s> size = %d \n'
          % (workListName, numScenarios))

    # Build one Scenario per worklist row
    scenarios = []
    for i in range(numScenarios):
        scenarios.append(Scenario(workList[i]))

    return scenarios
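# NOTE (usage sketch): the worklist CSV is expected to carry one scenario per row with
# Scenario parameter names in the header. The file name, parameter values, and the
# DOGEController class reference below are assumptions for illustration; only the
# parameter names appear elsewhere in this codebase.
import pandas as pd

pd.DataFrame([
    {'economy': 'steady', 'beta': 0.99, 'gamma': 0.70, 'sigma': 2.00,
     'modelunit_dollar': 4.0e-05, 'bequest_phi_1': 0},
    {'economy': 'steady', 'beta': 0.98, 'gamma': 0.70, 'sigma': 2.00,
     'modelunit_dollar': 4.0e-05, 'bequest_phi_1': 0},
]).to_csv('steady_runs.csv', index=False)                    # hypothetical worklist file

scenarios = DOGEController.getScenarios('steady_runs.csv')   # assumes a static method
for scenario in scenarios:
    ModelSolver.solve(scenario)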
def calibrate_dollar(gridpoint):
    # Set target = $gdp/adult
    #   from Alex $79.8k for 2016
    # REM: In moment_targets,
    #   col 1 = varname, col 2 = value, col 3 = description
    target_outperHH_index = np.where(ModelCalibrator.moment_targets[:, 0] == 'outperHH')[0]
    target_outperHH = np.array([ModelCalibrator.moment_targets[target_outperHH_index, 1]])

    # Set initial modelunit_dollar.
    # In the future, we could apply a heuristic better initial guess.
    modelunit_dollar = 4.0e-05

    tolerance = 0.01    # as ratio
    err_size = 1
    iter_num = 1
    iter_max = 8        # iterations for modelunit_dollar

    while err_size > tolerance and iter_num <= iter_max:

        # Create Scenario to run
        scenario = Scenario({
            'economy': 'steady',
            'beta': gridpoint.beta,
            'gamma': gridpoint.gamma,
            'sigma': gridpoint.sigma,
            'modelunit_dollar': modelunit_dollar,
            'bequest_phi_1': 0
        })
        save_dir = ModelSolver.solve(scenario)

        # Find target -- $gdp/pop
        with open(os.path.join(save_dir, 'paramsTargets.pkl'), 'rb') as handle:
            s_paramsTargets = pickle.load(handle)
        run_outperHH = s_paramsTargets['outperHH']

        err_size = abs(run_outperHH / target_outperHH - 1)
        print('...MODELUNIT_DOLLAR iteration %u error=%f\n ' % (iter_num, err_size))

        # Package up answer
        targets = {
            'savelas': s_paramsTargets['savelas'],
            'labelas': s_paramsTargets['labelas'],
            'captoout': s_paramsTargets['captoout'],
            'outperHH': run_outperHH
        }

        # Update by percent shift, reduced a bit as number of iterations increases.
        # This approach slows the update rate in case of slow convergence -- we're
        # usually bouncing around then.
        exp_reduce = max(0.5, 1.0 - iter_num * 0.07)
        modelunit_dollar = modelunit_dollar * ((run_outperHH / target_outperHH) ** exp_reduce)

        # Find if converged
        #   This only needs to be done after the loop, but
        #   we're about to wipe out the run's files.
        with open(os.path.join(save_dir, 'dynamics.pkl'), 'rb') as handle:
            s_dynamics = pickle.load(handle)
        is_converged = s_dynamics['is_converged']

        # Delete save directory along with parent directories
        shutil.rmtree(os.path.join(save_dir, '..', '..'))

        iter_num = iter_num + 1

    # Keep last successful run with modelunit_dollar
    modelunit_dollar = scenario.modelunit_dollar

    # Check solution condition.
    # Stable solution identified as:
    #   1. Robust solver convergence rate
    #   2. modelunit_dollar convergence
    is_solved = is_converged and (err_size <= tolerance)
    if iter_num > iter_max:
        print('...MODELUNIT_DOLLAR -- max iterations (%u) reached.\n' % iter_max)

    return (targets, modelunit_dollar, is_solved)
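# NOTE (illustrative sketch): the update step in calibrate_dollar moves modelunit_dollar
# toward the $79.8k output-per-household target by a percent shift whose exponent shrinks
# with the iteration count, damping oscillation when convergence is slow. The helper name
# and sample numbers below are assumptions for illustration only.
def update_modelunit_dollar(current, run_outperHH, target_outperHH, iter_num):
    # Exponent decays from 0.93 toward a floor of 0.5 as iterations accumulate.
    exp_reduce = max(0.5, 1.0 - iter_num * 0.07)
    return current * (run_outperHH / target_outperHH) ** exp_reduce

# Example: the run's output per household comes in about 10% above target on iteration 1,
# so the scale is revised upward by a bit under 10%.
print(update_modelunit_dollar(4.0e-05, 87.8e3, 79.8e3, 1))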
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 09:58:42 2019

@author: Azanca
"""

# Run Solver with test parameters

from scenarioModule import Scenario
from modelTesterModule import ModelTester
from modelSolverModule import ModelSolver

t = ModelTester.test_params
s = Scenario(t)
ModelSolver.solve(s)
def open_counter():
    scenario = Scenario(ModelTester.test_params).open()
    testName = 'open_counter'
    ModelTester.testOutput(scenario, testName, True)
def open_base():
    scenario = Scenario(ModelTester.test_params).currentPolicy().open()
    testName = 'open_base'
    ModelTester.testOutput(scenario, testName, True)
def steady():
    scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
    testName = 'steady'
    ModelTester.testOutput(scenario, testName, True)
def jenkinsTests():
    try:
        isHPCC = PathFinder.isHPCCRun()

        # Run just the matching cases for now
        testNames = ['steady', 'open_base', 'open_counter', 'closed_base', 'closed_counter']
        for o in testNames:
            if o == 'steady':
                scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
            elif o == 'open_base':
                scenario = Scenario(ModelTester.test_params).currentPolicy().open()
            elif o == 'open_counter':
                scenario = Scenario(ModelTester.test_params).open()
            elif o == 'closed_base':
                scenario = Scenario(ModelTester.test_params).currentPolicy().closed()
            elif o == 'closed_counter':
                scenario = Scenario(ModelTester.test_params).closed()
            else:
                scenario = []

            typeDeviation = ModelTester.testOutput(scenario, o, 0)
            if typeDeviation != ModelTester.DEVIATION_NONE:
                if typeDeviation == ModelTester.DEVIATION_TINY and isHPCC:
                    continue
                else:
                    exit(1)

        # Test writing the 'series' interface with the last scenario
        # Requires that 'baseline' scenario exists
        PathFinder.setToTestingMode()
        print('TESTING OutputWriter.writeScenarios\n')
        ModelSolver.solve(scenario.baseline())
        OutputWriter.writeScenarios([scenario])
        PathFinder.setToDevelopmentMode()

        print('ALL TESTS PASSED.\n')
        exit(0)
    except Exception:
        # Catch Exception rather than using a bare except so the SystemExit
        # raised by exit(0) above is not swallowed and re-raised as a failure.
        exit(1)
def unanticipated_shock():
    # Make the baseline scenario and "non-shock" version
    t = ModelTester.test_params

    # Baseline scenario is not shocked
    s_baseline = Scenario(t).currentPolicy().baseline()

    # Make "non-shock" shock baseline
    t = s_baseline.getParams()
    t['PolicyShockYear'] = t['TransitionFirstYear'] + ModelTester.policyShockShift
    s_next = Scenario(t)

    # Get baseline Market, Dynamic
    ModelSolver.removeCached(s_baseline)    # Clear cached scenario
    tagged_dir = ModelSolver.solve(s_baseline)
    baseline_dir = PathFinder.getCacheDir(s_baseline)
    with open(os.path.join(baseline_dir, 'market.pkl'), 'rb') as handle:
        baseMarket = pickle.load(handle)
    with open(os.path.join(baseline_dir, 'dynamics.pkl'), 'rb') as handle:
        baseDynamic = pickle.load(handle)

    # Get shocked Market, Dynamic
    ModelSolver.removeCached(s_next)        # Clear cached scenario
    tagged_dir = ModelSolver.solve(s_next)
    x_dir = PathFinder.getCacheDir(s_next)
    with open(os.path.join(x_dir, 'market.pkl'), 'rb') as handle:
        xMarket = pickle.load(handle)
    with open(os.path.join(x_dir, 'dynamics.pkl'), 'rb') as handle:
        xDynamic = pickle.load(handle)

    # Compare baseline and shocked path
    print('\n')

    def do_check(baseD, xD, dName):
        passed = 1
        for p in baseD.keys():
            valuename = p
            if not isinstance(baseD[valuename], numbers.Number) or ('_next' in valuename):
                continue

            # Check for within percent tolerance, also check within numerical
            # deviation (this is in case of division by zero or close to zero)
            # TBD: Standardize deviations and tolerances
            percentDeviation = abs((xD[valuename] - baseD[valuename]) / baseD[valuename])
            absoluteDeviation = abs(baseD[valuename] - xD[valuename])
            if not np.all(np.array(percentDeviation) < 1e-4):
                if not np.all(np.array(absoluteDeviation) < 1e-13):
                    m1 = 'Max percent dev = %f' % np.max(percentDeviation)
                    m2 = 'Max abs dev = %0.14f' % np.max(absoluteDeviation)
                    print('%s.%s outside tolerance;\t\t %s; %s \n'
                          % (dName, valuename, m1, m2))
                    passed = 0

        return passed

    # Run both checks so each set of diagnostics is printed, then combine
    passed_market = do_check(baseMarket, xMarket, 'Market')
    passed_dynamic = do_check(baseDynamic, xDynamic, 'Dynamic')
    passed = passed_market and passed_dynamic

    if passed:
        print('All values within convergence tolerances.\n')

    return passed
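# NOTE (illustrative sketch): the two-tier check in do_check exists because a percent
# deviation is meaningless when the baseline value sits at or near zero. The numbers
# below are made up to show the absolute-deviation fallback at work.
base, shocked = 1e-16, 3e-16
percent_dev = abs((shocked - base) / base)    # 2.0 -- fails the 1e-4 percent test
absolute_dev = abs(base - shocked)            # 2e-16 -- passes the 1e-13 fallback
print((percent_dev < 1e-4) or (absolute_dev < 1e-13))   # True: treated as within tolerance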
def closed_counter():
    scenario = Scenario(ModelTester.test_params).closed()
    testName = 'closed_counter'
    ModelTester.testOutput(scenario, testName, True)
def closed_base():
    scenario = Scenario(ModelTester.test_params).currentPolicy().closed()
    testName = 'closed_base'
    ModelTester.testOutput(scenario, testName, True)
# Test ParamGenerator.labinc_discretization

from modelTesterModule import ModelTester
from scenarioModule import Scenario
from pathFinderModule import PathFinder
from paramGeneratorModule import ParamGenerator
from inputReaderModule import InputReader
from socialSecurityModule import SocialSecurity
from initialGuessModule import InitialGuess

import pandas as pd
import numpy as np
import os
import pickle

t = ModelTester.test_params
scenario = Scenario(t)
scenario = scenario.currentPolicy().steady()
s = ParamGenerator.labinc_discretization(scenario)

'''
# Test ParamGenerator.social_security

from modelTesterModule import ModelTester
from scenarioModule import Scenario
from pathFinderModule import PathFinder
from paramGeneratorModule import ParamGenerator
from inputReaderModule import InputReader
from socialSecurityModule import SocialSecurity
from initialGuessModule import InitialGuess

import pandas as pd