def solve(workListFileName, index=None):

    # Get name of worklist from name of file
    workListName = os.path.split(workListFileName)[1]
    workListName = os.path.splitext(workListName)[0]

    scenarios = DOGEController.getScenarios(workListFileName)

    # If just running one, collapse list to one
    isSingleItem = False
    numScenarios = len(scenarios)
    if index is not None:
        isSingleItem = True
        numScenarios = 1

    for i in range(numScenarios):
        idx = i
        if isSingleItem:
            idx = index

        print('DOGEController.solve: Solving worklist item %d \n' % idx)

        # Tag this solution to avoid collisions with other model runs
        callerTag = '%s%u' % (workListName, idx)
        taggedDir = ModelSolver.solve(scenarios[idx], callerTag)
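
# Illustrative usage sketch for solve(). The worklist path and extension are
# hypothetical -- the real file format is whatever DOGEController.getScenarios
# expects.
def _example_solve_worklist():
    solve('worklists/example.json')           # solve every scenario in the list
    solve('worklists/example.json', index=2)  # or just worklist item 2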
def export(self, outputName=None):

    # If no outputName, create one from Scenario
    if outputName is None:
        outputName = self.Description

    if not self.isSolved():
        from modelSolverModule import ModelSolver
        ModelSolver.solve(self)

    from pathFinderModule import PathFinder
    cacheDir = PathFinder.getCacheDir(self)
    outDir = PathFinder(self).getNamedOutputPath(outputName)

    print('Exporting scenario to %s \n' % outDir)
    if os.path.exists(outDir):
        shutil.rmtree(outDir)

    # cacheDir is a directory, so copy it recursively;
    # shutil.copyfile only handles single files
    shutil.copytree(cacheDir, outDir)
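
# Minimal usage sketch for export(); assumes export is a Scenario method and
# reuses the repo's test parameters. The output name is arbitrary.
def _example_export():
    s = Scenario(ModelTester.test_params).currentPolicy().steady()
    s.export('steady_test_run')  # solves first if not already cached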
def jenkinsTests():

    try:
        isHPCC = PathFinder.isHPCCRun()

        # Run just the matching cases for now
        testNames = ['steady', 'open_base', 'open_counter',
                     'closed_base', 'closed_counter']
        for o in testNames:

            if o == 'steady':
                scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
            elif o == 'open_base':
                scenario = Scenario(ModelTester.test_params).currentPolicy().open()
            elif o == 'open_counter':
                scenario = Scenario(ModelTester.test_params).open()
            elif o == 'closed_base':
                scenario = Scenario(ModelTester.test_params).currentPolicy().closed()
            elif o == 'closed_counter':
                scenario = Scenario(ModelTester.test_params).closed()
            else:
                scenario = []  # unreachable for the names above

            typeDeviation = ModelTester.testOutput(scenario, o, 0)

            if typeDeviation != ModelTester.DEVIATION_NONE:
                if typeDeviation == ModelTester.DEVIATION_TINY and isHPCC:
                    continue
                else:
                    exit(1)

        # Test writing the 'series' interface with the last scenario
        # Requires that 'baseline' scenario exists
        PathFinder.setToTestingMode()
        print('TESTING OutputWriter.writeScenarios\n')
        ModelSolver.solve(scenario.baseline())
        OutputWriter.writeScenarios([scenario])
        PathFinder.setToDevelopmentMode()

        print('ALL TESTS PASSED.\n')
        exit(0)
    except Exception:
        # Catch Exception rather than using a bare except: a bare except would
        # also swallow the SystemExit raised by exit(0) above, reporting
        # failure even when all tests pass.
        import traceback
        traceback.print_exc()
        exit(1)
def report_baseline_moments():

    outputfilename = os.path.join(PathFinder.getSourceDir(), 'BaselineMoments.txt')
    f = open(outputfilename, 'w+')

    f.write('-------------BASELINE MOMENTS-------------\r\n')
    f.write('%s \r\n' % str(datetime.datetime.now()))

    # load the matrix and get inverter function
    (_, f_invert) = ParamGenerator.invert()

    for labelas in np.arange(0.25, 1.0, 0.25):
        for savelas in np.arange(0.25, 1.0, 0.25):
            target = {'labelas': labelas, 'savelas': savelas}
            f.write('\r\nBASELINE labor elas = %0.2f savings elas = %0.2f \r\n'
                    % (labelas, savelas))
            inverse = f_invert(target)

            scenario = Scenario({
                'economy': 'steady',
                'beta': inverse['beta'],
                'gamma': inverse['gamma'],
                'sigma': inverse['sigma'],
                'modelunit_dollar': inverse['modelunit_dollar'],
                'bequest_phi_1': 0
            })

            save_dir = ModelSolver.solve(scenario)

            targets = ModelCalibrator.moment_targets
            targets = np.vstack((targets, ['labelas', labelas, 'Labor elasticity']))
            targets = np.vstack((targets, ['savelas', savelas, 'Savings elasticity']))
            outstr = ModelCalibrator.report_moments(save_dir, targets)
            f.write('%s \r\n' % outstr)
            f.write('-------------------------------------\r\n')

    f.write(' ==== DONE ===== \r\n')
    f.close()
def calibrate_dollar(gridpoint):

    # Set target = $gdp/adult
    #     from Alex $79.8k for 2016
    # REM: In moment_targets,
    #      col 1 = varname, col 2 = value, col 3 = description
    target_outperHH_index = np.where(
        ModelCalibrator.moment_targets[:, 0] == 'outperHH')[0]
    target_outperHH = np.array(
        [ModelCalibrator.moment_targets[target_outperHH_index, 1]])

    # Set initial modelunit_dollar.
    # In the future, we could apply a heuristic better initial guess.
    modelunit_dollar = 4.0e-05

    tolerance = 0.01    # as ratio
    err_size = 1
    iter_num = 1
    iter_max = 8        # iterations for modelunit_dollar

    while err_size > tolerance and iter_num <= iter_max:

        # Create Scenario to run
        scenario = Scenario({
            'economy': 'steady',
            'beta': gridpoint.beta,
            'gamma': gridpoint.gamma,
            'sigma': gridpoint.sigma,
            'modelunit_dollar': modelunit_dollar,
            'bequest_phi_1': 0
        })
        save_dir = ModelSolver.solve(scenario)

        # find target -- $gdp/pop
        with open(os.path.join(save_dir, 'paramsTargets.pkl'), 'rb') as handle:
            s_paramsTargets = pickle.load(handle)
        run_outperHH = s_paramsTargets['outperHH']

        err_size = abs(run_outperHH / target_outperHH - 1)
        print('...MODELUNIT_DOLLAR iteration %u error=%f\n ' % (iter_num, err_size))

        # package up answer
        targets = {
            'savelas': s_paramsTargets['savelas'],
            'labelas': s_paramsTargets['labelas'],
            'captoout': s_paramsTargets['captoout'],
            'outperHH': run_outperHH
        }

        # Update by percent shift, reduced a bit as the number of iterations
        # increases. This approach slows the update rate in case of slow
        # convergence -- we're usually bouncing around then.
        exp_reduce = max(0.5, 1.0 - iter_num * 0.07)
        modelunit_dollar = modelunit_dollar * ((run_outperHH / target_outperHH) ** exp_reduce)

        # Find if converged
        # This only needs to be done after the loop, but
        # we're about to wipe out the run's files.
        with open(os.path.join(save_dir, 'dynamics.pkl'), 'rb') as handle:
            s_dynamics = pickle.load(handle)
        is_converged = s_dynamics['is_converged']

        # Delete save directory along with parent directories
        shutil.rmtree(os.path.join(save_dir, '..', '..'))

        iter_num = iter_num + 1

    # Keep the modelunit_dollar actually used in the last successful run
    modelunit_dollar = scenario.modelunit_dollar

    # Check solution condition.
    # Stable solution identified as:
    #   1. Robust solver convergence rate
    #   2. modelunit_dollar convergence
    is_solved = is_converged and (err_size <= tolerance)
    if iter_num > iter_max:
        print('...MODELUNIT_DOLLAR -- max iterations (%u) reached.\n' % iter_max)

    return (targets, modelunit_dollar, is_solved)
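
# The update above is a damped fixed-point iteration: modelunit_dollar is
# scaled by (run/target)**exp_reduce, with the damping exponent shrinking from
# 0.93 toward 0.5 as iterations pass. A self-contained toy sketch of the same
# scheme, assuming a hypothetical model in which dollar output per household
# is inversely proportional to the model unit (k is a made-up constant):
def _toy_unit_calibration(target=79800.0, unit=4.0e-05, k=4.0):
    err = float('inf')
    for iter_num in range(1, 9):                      # iter_max = 8, as above
        run = k / unit                                # stand-in for the solved outperHH
        err = abs(run / target - 1)
        if err <= 0.01:                               # tolerance = 0.01, as above
            break
        exp_reduce = max(0.5, 1.0 - iter_num * 0.07)  # damp harder in later iterations
        unit = unit * (run / target) ** exp_reduce
    return unit, err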
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 09:58:42 2019

@author: Azanca
"""

# Run Solver with test parameters

from scenarioModule import Scenario
from modelTesterModule import ModelTester
from modelSolverModule import ModelSolver

t = ModelTester.test_params
s = Scenario(t)

ModelSolver.solve(s)
def testOutput(scenario, testName, isInteractive):

    # Set to testing environment
    PathFinder.setToTestingMode()

    # Clear the old results and solve
    ModelSolver.removeCached(scenario)
    taggedDir = ModelSolver.solve(scenario)
    cacheDir = PathFinder.getCacheDir(scenario)

    # Set to development environment
    # TBD: Set back to original environment?
    PathFinder.setToDevelopmentMode()

    # testSet depends on type of scenario
    if scenario.isSteady():
        setNames = ['market', 'dynamics']
    elif scenario.isCurrentPolicy():
        setNames = ['market', 'dynamics']
    else:
        setNames = ['market', 'dynamics', 'statics']

    # Load target values
    targetfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'ModelTester.pkl')
    with open(targetfile, 'rb') as handle:
        s = pickle.load(handle)
    target = s.target

    # Initialize match flag
    typeDeviation = ModelTester.DEVIATION_NONE

    # Define function to flag issues
    # NOTE: Relies on severity of deviation to be increasing
    def flag(message, deviation):
        # typeDeviation lives in the enclosing function's scope,
        # so nonlocal (not global) is required to update it
        nonlocal typeDeviation
        print('\t%-15s%-20s%s\n' % (setname, valuename, message))
        if deviation > typeDeviation:
            typeDeviation = deviation

    print('\n[Test results]\n')

    output = {testName: {}}

    for setname in setNames:

        # Extract output and target values by set
        with open(os.path.join(cacheDir, '%s.pkl' % setname), 'rb') as handle:
            output[testName][setname] = pickle.load(handle)

        outputset = output[testName][setname]
        targetset = target[testName][setname]

        # Iterate over target values
        for valuename in targetset.keys():

            if valuename not in outputset.keys():
                # Flag missing value
                flag('Not found', ModelTester.DEVIATION_FATAL)
                continue

            if isinstance(outputset[valuename], dict):
                # Skip checking of structs -- it is currently just
                # priceindex which does not need to be checked
                print('\tSkipping %s because it is a struct.\n' % valuename)
                continue

            if np.any(np.isnan(outputset[valuename][:])):
                # Flag NaN value
                flag('NaN value', ModelTester.DEVIATION_FATAL)
                continue

            if outputset[valuename].shape != targetset[valuename].shape:
                # Flag for size mismatch
                flag('Size mismatch', ModelTester.DEVIATION_FATAL)
                continue

            # Classify deviation
            # NOTE: (deviation * 100) must be parenthesized -- the original
            # '%' formatting bound tighter than '*', repeating the string
            deviation = ModelTester.calculateDeviation(outputset[valuename][:],
                                                       targetset[valuename][:])
            if deviation > 0:
                if deviation < 1e-6:
                    flag('TINY : %06.16f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_TINY)
                elif deviation < 1e-4:
                    flag('SMALL: %06.16f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_SMALL)
                else:
                    flag('LARGE: %06.4f%% deviation' % (deviation * 100),
                         ModelTester.DEVIATION_FATAL)

        # Identify new values, if any
        for valuename in outputset.keys():
            if valuename not in targetset.keys():
                flag('New', ModelTester.DEVIATION_FATAL)

    # Check for match
    if typeDeviation == ModelTester.DEVIATION_NONE:
        print('\tTarget matched.\n\n')
    else:
        if not isInteractive:
            print('\tTarget not matched.\n\n')
            return typeDeviation

        # Query user for target update
        ans = input('\n\tUpdate test target with new values? Y/[N]: ')
        if ans == 'Y':
            # The targets file stores an object whose .target attribute holds
            # the dictionary (see the load above), so re-dump that object
            target[testName] = output[testName]
            s.target = target
            with open(targetfile, 'wb') as f:
                pickle.dump(s, f)
            print('\tTarget updated.\n\n')
        else:
            print('\tTarget retained.\n\n')

    return typeDeviation
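
# Usage sketch: run one regression check non-interactively, mirroring the
# calls made in jenkinsTests above. Assumes 'steady' is a key in the stored
# ModelTester.pkl targets.
def _example_testOutput():
    scenario = Scenario(ModelTester.test_params).currentPolicy().steady()
    deviation = ModelTester.testOutput(scenario, 'steady', False)
    assert deviation == ModelTester.DEVIATION_NONE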
def unanticipated_shock():

    # Make the baseline scenario and "non-shock" version
    t = ModelTester.test_params

    # baseline scenario is not shocked
    s_baseline = Scenario(t).currentPolicy().baseline()

    # Make "non-shock" shock baseline
    t = s_baseline.getParams()
    t.PolicyShockYear = t.TransitionFirstYear + ModelTester.policyShockShift
    s_next = Scenario(t)

    # Get baseline Market, Dynamic
    ModelSolver.removeCached(s_baseline)   # Clear cached Scenario
    tagged_dir = ModelSolver.solve(s_baseline)
    baseline_dir = PathFinder.getCacheDir(s_baseline)
    with open(os.path.join(baseline_dir, 'market.pkl'), 'rb') as handle:
        baseMarket = pickle.load(handle)
    with open(os.path.join(baseline_dir, 'dynamics.pkl'), 'rb') as handle:
        baseDynamic = pickle.load(handle)

    # Get shocked Market, Dynamic
    ModelSolver.removeCached(s_next)       # Clear cached scenario
    tagged_dir = ModelSolver.solve(s_next)
    x_dir = PathFinder.getCacheDir(s_next)
    with open(os.path.join(x_dir, 'market.pkl'), 'rb') as handle:
        xMarket = pickle.load(handle)
    with open(os.path.join(x_dir, 'dynamics.pkl'), 'rb') as handle:
        xDynamic = pickle.load(handle)

    # Compare baseline and shocked path
    print('\n')

    def do_check(baseD, xD, dName):
        passed = 1
        for valuename in baseD.keys():
            if (not isinstance(baseD[valuename], numbers.Number)
                    or ('_next' in valuename)):
                continue

            # Check for within percent tolerance, also check within numerical
            # deviation (this is in case div by zero or close to zero)
            # TBD: Standardize deviations and tolerances
            percentDeviation = abs((xD[valuename] - baseD[valuename]) / baseD[valuename])
            absoluteDeviation = abs(baseD[valuename] - xD[valuename])

            if not np.all(np.array(percentDeviation) < 1e-4):
                if not np.all(np.array(absoluteDeviation) < 1e-13):
                    # Build the messages as strings (print() returns None,
                    # so it cannot be assigned and reused)
                    m1 = 'Max percent dev = %f' % np.max(percentDeviation)
                    m2 = 'Max abs dev = %0.14f' % np.max(absoluteDeviation)
                    print('%s.%s outside tolerance;\t\t %s; %s \n'
                          % (dName, valuename, m1, m2))
                    passed = 0

        return passed

    # Run both checks before combining so each one reports its deviations
    passedMarket = do_check(baseMarket, xMarket, 'Market')
    passedDynamic = do_check(baseDynamic, xDynamic, 'Dynamic')
    passed = passedMarket and passedDynamic

    if passed:
        print('All values within convergence tolerances.\n')

    return passed