def run(
    self,
    resources,
    write_datasets_to_cache_at_end_of_year=True,
    log_file_name="run_model_system.log",
    cleanup_datasets=True,
):
    """Run the simulation for every year in resources['years'].

    Entries in resources: (entries with no defaults are required)
        models - a list containing names of models to be run. Each name must
            correspond to the name of the module/class of that model.
            Default(object): None
        years - a tuple (start year, end year)
        debuglevel - an integer. The higher the more output will be printed.
            Default: 0
        expression_library - a dictionary. The keys in the dictionary are
            pairs (dataset_name, variable_name) and the values are the
            corresponding expressions. The model system needs to set the
            expression library (if it isn't None) in DatasetFactory for
            DatasetFactory to know about variables defined as expressions
            in the xml expression library. Default: None

    This method is called both to start up the simulation for all years,
    and also for each year when running with one process per year. In the
    latter case, 'years' consists of just (current_year, current_year)
    rather than the real start and end years for the simulation.
    """
    if not isinstance(resources, Resources):
        raise TypeError, "Argument 'resources' must be of type 'Resources'."
    # Configure the global logger from the optional 'log' entry.
    logger_settings = resources.get("log", {"tags": [], "verbosity_level": 3})
    logger.set_tags(logger_settings.get("tags", []))
    logger.set_verbosity_level(logger_settings.get("verbosity_level", 3))
    # Fresh simulation state for this run; start time defaults to year 0.
    self.simulation_state = SimulationState()
    self.simulation_state.set_low_memory_run(resources.get("low_memory_mode", False))
    self.simulation_state.set_start_time(resources.get("base_year", 0))
    self.run_year_namespace = {}
    if resources.get("cache_directory", None) is not None:
        self.simulation_state.set_cache_directory(resources["cache_directory"])
    if "expression_library" in resources:
        # Make xml-defined expression variables visible to VariableFactory.
        VariableFactory().set_expression_library(resources["expression_library"])
    if resources.get("sample_input", False):
        self.update_config_for_multiple_runs(resources)
    cache_directory = self.simulation_state.get_cache_directory()
    log_file = os.path.join(cache_directory, log_file_name)
    logger.enable_file_logging(log_file, verbose=False)
    try:
        logger.log_status("Cache Directory set to: " + cache_directory)
        with logger.block("Start simulation run"):
            models = resources.get("models", [])
            # Optional per-year model list overriding the default 'models'.
            models_in_years = resources.get("models_in_year", {})
            resources.check_obligatory_keys(["years"])
            years = resources["years"]
            if (not isinstance(years, tuple)) and (not isinstance(years, list)):
                raise TypeError, "Entry 'years' in resources must be a tuple."
            if len(years) < 2:
                print years
                raise StandardError, "Entry 'years' in resources must be of length at least 2."
            start_year = years[0]
            end_year = years[-1]
            debuglevel = resources.get("debuglevel", 0)
            # Seed the random number generator once for the whole run.
            seed_values = resources.get("seed", NO_SEED)
            logger.log_status("random seed = %s" % str(seed_values))
            seed(seed_values)
            for year in range(start_year, end_year + 1):
                with logger.block("Starting simulation for year " + str(year)):
                    self.simulation_state.set_current_time(year)
                    # Drop all cached datasets so each year starts clean.
                    SessionConfiguration().get_dataset_pool().remove_all_datasets()
                    # _run_year manages its own logging; suspend file logging
                    # here and restore it afterwards even on failure.
                    logger.disable_file_logging(log_file)
                    try:
                        if models_in_years.get(year, None) is not None:
                            models_to_run = models_in_years[year]
                        else:
                            models_to_run = models
                        self._run_year(
                            year=year,
                            models=models_to_run,
                            simulation_state=self.simulation_state,
                            debuglevel=debuglevel,
                            resources=resources,
                            write_datasets_to_cache_at_end_of_year=write_datasets_to_cache_at_end_of_year,
                            cleanup_datasets=cleanup_datasets,
                        )
                    finally:
                        logger.enable_file_logging(log_file, verbose=False)
                    # Garbage-collect between years to cap memory growth.
                    # NOTE(review): placement relative to the inner finally is
                    # ambiguous in the mangled source — confirm against VCS.
                    collect()
    finally:
        logger.disable_file_logging(log_file)
def run(self,
        resources,
        write_datasets_to_cache_at_end_of_year=True,
        log_file_name='run_model_system.log',
        cleanup_datasets=True):
    """Run the simulation for every year in resources['years'].

    NOTE(review): this is a token-identical redefinition of the `run`
    defined earlier in this module (only quoting/formatting differs);
    being defined later, this copy shadows the earlier one. One of the
    two should probably be removed — confirm against version control.

    Entries in resources: (entries with no defaults are required)
        models - a list containing names of models to be run. Each name must
            correspond to the name of the module/class of that model.
            Default(object): None
        years - a tuple (start year, end year)
        debuglevel - an integer. The higher the more output will be printed.
            Default: 0
        expression_library - a dictionary. The keys in the dictionary are
            pairs (dataset_name, variable_name) and the values are the
            corresponding expressions. The model system needs to set the
            expression library (if it isn't None) in DatasetFactory for
            DatasetFactory to know about variables defined as expressions
            in the xml expression library. Default: None

    This method is called both to start up the simulation for all years,
    and also for each year when running with one process per year. In the
    latter case, 'years' consists of just (current_year, current_year)
    rather than the real start and end years for the simulation.
    """
    if not isinstance(resources, Resources):
        raise TypeError, "Argument 'resources' must be of type 'Resources'."
    # Configure the global logger from the optional 'log' entry.
    logger_settings = resources.get("log", {"tags": [], "verbosity_level": 3})
    logger.set_tags(logger_settings.get("tags", []))
    logger.set_verbosity_level(logger_settings.get("verbosity_level", 3))
    # Fresh simulation state for this run; start time defaults to year 0.
    self.simulation_state = SimulationState()
    self.simulation_state.set_low_memory_run(resources.get("low_memory_mode", False))
    self.simulation_state.set_start_time(resources.get("base_year", 0))
    self.run_year_namespace = {}
    if resources.get('cache_directory', None) is not None:
        self.simulation_state.set_cache_directory(resources['cache_directory'])
    if 'expression_library' in resources:
        # Make xml-defined expression variables visible to VariableFactory.
        VariableFactory().set_expression_library(resources['expression_library'])
    if resources.get('sample_input', False):
        self.update_config_for_multiple_runs(resources)
    cache_directory = self.simulation_state.get_cache_directory()
    log_file = os.path.join(cache_directory, log_file_name)
    logger.enable_file_logging(log_file, verbose=False)
    try:
        logger.log_status("Cache Directory set to: " + cache_directory)
        with logger.block('Start simulation run'):
            models = resources.get("models", [])
            # Optional per-year model list overriding the default 'models'.
            models_in_years = resources.get("models_in_year", {})
            resources.check_obligatory_keys(["years"])
            years = resources["years"]
            if (not isinstance(years, tuple)) and (not isinstance(years, list)):
                raise TypeError, "Entry 'years' in resources must be a tuple."
            if len(years) < 2:
                print years
                raise StandardError, "Entry 'years' in resources must be of length at least 2."
            start_year = years[0]
            end_year = years[-1]
            debuglevel = resources.get("debuglevel", 0)
            # Seed the random number generator once for the whole run.
            seed_values = resources.get('seed', NO_SEED)
            logger.log_status("random seed = %s" % str(seed_values))
            seed(seed_values)
            for year in range(start_year, end_year + 1):
                with logger.block("Starting simulation for year " + str(year)):
                    self.simulation_state.set_current_time(year)
                    # Drop all cached datasets so each year starts clean.
                    SessionConfiguration().get_dataset_pool().remove_all_datasets()
                    # _run_year manages its own logging; suspend file logging
                    # here and restore it afterwards even on failure.
                    logger.disable_file_logging(log_file)
                    try:
                        if models_in_years.get(year, None) is not None:
                            models_to_run = models_in_years[year]
                        else:
                            models_to_run = models
                        self._run_year(
                            year=year,
                            models=models_to_run,
                            simulation_state=self.simulation_state,
                            debuglevel=debuglevel,
                            resources=resources,
                            write_datasets_to_cache_at_end_of_year=write_datasets_to_cache_at_end_of_year,
                            cleanup_datasets=cleanup_datasets)
                    finally:
                        logger.enable_file_logging(log_file, verbose=False)
                    # Garbage-collect between years to cap memory growth.
                    # NOTE(review): placement relative to the inner finally is
                    # ambiguous in the mangled source — confirm against VCS.
                    collect()
    finally:
        logger.disable_file_logging(log_file)
def optimize(bform, prices, costdiscount, dataset_pool):
    """Find the profit-maximizing development program for one proposal.

    Runs SLSQP over the building-form parameter vector, with inequality
    constraints chosen by building type, minimizing _objfunc2 (which
    returns the negated, scaled NPV).

    Parameters:
        bform - building-form object carrying btype and the per-type
            bound (constraint) functions; its absorption/vacancy
            attributes are populated here from the proposal component.
        prices, costdiscount - passed through to the objective function.
        dataset_pool - pool providing the 'proposal_component' dataset.

    Returns (params, value):
        ([], -1.0)   if development is throttled by high vacancy,
        (x, -1)      if the optimizer failed to converge,
        (x, npv)     otherwise, with npv rescaled back to currency units.
    """
    btype = bform.btype
    # Inequality-constraint function for this building type.
    if btype == 1:
        ieqcons = bform.sfoneoff_bounds
    elif btype == 2:
        ieqcons = bform.sfbuilder_bounds
    elif btype in [3, 4, 5, 6]:
        # mf-rental and mf-condo variants share one bound set
        # (was two identical elif branches; merged).
        ieqcons = bform.mf_bounds
    elif btype in COMMERCIALTYPES_D:
        ieqcons = bform.commercial_bounds
    else:
        assert 0

    # Size of the free-parameter vector depends on building type.
    if btype in [4, 6]:
        nf = 5
    elif btype in COMMERCIALTYPES_D:
        nf = 1
    else:
        nf = 4
    x0 = array([0 for i in range(nf)])

    # Push absorption/vacancy inputs from the proposal component onto bform
    # so the objective function can read them.
    proposal_comp = dataset_pool['proposal_component']
    proposal_comp['sales_absorption_ratio'] = copy.copy(proposal_comp['sales_absorption'])
    bform.sales_absorption = proposal_comp['sales_absorption']
    bform.rent_absorption = proposal_comp['rent_absorption']
    bform.leases_absorption = proposal_comp['leases_absorption']
    bform.sales_vacancy_rates = proposal_comp['sales_vacancy_rates']
    bform.vacancy_rates = proposal_comp['vacancy_rates']

    bldg_type = devmdltypes[btype - 1]
    # Throttle development when annualized vacancy > 10%.
    # Residential vacancy = min(ownership vacancy, rental vacancy).
    max_residential_vacancy = min(
        proposal_comp['sales_vacancy_rates'][0],
        proposal_comp['vacancy_rates'][0]) * 12
    non_residential_vacancy = proposal_comp['vacancy_rates'][4] * 12
    if bldg_type in residential_building_types:
        if max_residential_vacancy > .1:
            return [], -1.0
    elif non_residential_vacancy > .1:
        return [], -1.0

    logger.set_verbosity_level(0)
    r = list(fmin_slsqp(_objfunc2, x0, f_ieqcons=ieqcons, iprint=0,
                        full_output=1, epsilon=1,
                        args=[bform, btype, prices, costdiscount, dataset_pool],
                        iter=150, acc=.01))
    logger.log_status("type r: %s" % (type(r)))
    logger.log_status("len r: %s" % (len(r)))
    logger.log_status("r0: %s" % (r[0]))
    logger.log_status("r1: %s" % (r[1]))
    # r = [x, fx, its, imode, smode]; imode != 0 means SLSQP did not
    # converge.  (Was written with the deprecated Python 2 '<>' operator.)
    if r[3] != 0:
        return r[0], -1
    # Round the solution and re-evaluate the objective at the rounded point.
    r[0] = numpy.round(r[0], decimals=1)
    r[1] = _objfunc2(r[0], bform, btype, prices, costdiscount, dataset_pool)
    # Undo the objective's scaling: _objfunc2 returns -npv/100000.
    r[1] *= -1 * 100000
    return r[0], r[1]
def _objfunc2(params, btype, saveexcel=0, excelprefix=None):
    """Objective function: negated, scaled NPV of a development program.

    Writes the candidate parameter vector into the proforma spreadsheet
    (module-global `sp`, via `set_value`), reads the resulting revenue
    cells back, rebuilds `proforma_inputs`, and evaluates the `npv`
    variable through the Opus variable tester.

    NOTE(review): `optimize` calls `_objfunc2` with six positional
    arguments (params, bform, btype, prices, costdiscount, dataset_pool),
    which does not match this 4-parameter signature — presumably another
    `_objfunc2` variant exists elsewhere in this file; confirm which one
    is live.  `saveexcel`/`excelprefix` are unused here.

    Returns -npv/100000.0 (a float), so that minimizing it maximizes NPV.
    """
    global proforma_inputs
    # `e` is passed to set_value but never bound to anything real here —
    # presumably an (unused) Excel handle; verify against set_value's API.
    e = None
    if DEBUG: print "PARAMS", params
    # Write the candidate parameters into the type-specific spreadsheet
    # cells (column K of the 'Bldg Form' sheet; row base varies by type).
    if btype == 1:  # single family one-off
        assert len(params) == 4
        for i in range(4):
            set_value(e, sp, 'Bldg Form', 'K%d' % (63 + i), params[i])
    elif btype == 2:  # single family builder
        assert len(params) == 4
        for i in range(4):
            set_value(e, sp, 'Bldg Form', 'K%d' % (58 + i), params[i])
    elif btype == 3:  # mf-rental
        assert len(params) == 4
        for i in range(4):
            set_value(e, sp, 'Bldg Form', 'K%d' % (68 + i), params[i])
    elif btype == 5:  # mf-condo
        assert len(params) == 4
        for i in range(4):
            set_value(e, sp, 'Bldg Form', 'K%d' % (73 + i), params[i])
    elif btype == 6:  # mxd-condo
        assert len(params) == 5
        for i in range(4):
            set_value(e, sp, 'Bldg Form', 'K%d' % (73 + i), params[i])
        set_value(e, sp, 'Bldg Form', 'K78', params[4])
    elif btype in COMMERCIALTYPES_D:  # commercial types
        assert len(params) == 1
        set_value(e, sp, 'Bldg Form', 'K%d' % (COMMERCIALTYPES_D[btype]), params[0] * SQFTFACTOR)
    # NOTE(review): btype == 4 sets no cells above but is handled in the
    # revenue branch below — confirm that is intentional.
    d = proforma_inputs['proposal_component']
    logger.set_verbosity_level(0)
    # Reset revenue vectors, then read the relevant ones back from the
    # spreadsheet ('Proforma Inputs' sheet, column B), clamped at zero.
    d['sales_revenue'] = array([0, 0, 0, 0, 0])
    d['rent_revenue'] = array([0, 0, 0, 0, 0])
    d['leases_revenue'] = array([0, 0, 0, 0, 0])
    if btype in [1, 2]:  # single-family: sale revenue, rows B60-B63
        for i in range(4):
            d['sales_revenue'][i] = \
                max(sp.evaluate('Proforma Inputs!B%d' % (60 + i)), 0)
    elif btype in [5, 6]:  # condo: sale revenue, rows B48-B51
        for i in range(4):
            d['sales_revenue'][i] = \
                max(sp.evaluate('Proforma Inputs!B%d' % (48 + i)), 0)
    elif btype in [3, 4]:  # rental: rent revenue, rows B74-B77
        for i in range(4):
            d['rent_revenue'][i] = \
                max(sp.evaluate('Proforma Inputs!B%d' % (74 + i)), 0)
    else:  # commercial: lease revenue, row B92, last component only
        d['leases_revenue'][4] = \
            max(sp.evaluate('Proforma Inputs!B%d' % (92)), 0)
    # Hard-coded absorption assumptions; sales absorption is 20% of revenue.
    d['sales_absorption'] = .2 * d['sales_revenue']
    d['rent_absorption'] = array([8, 4, 4, 8, 8])
    d['leases_absorption'] = array([1, 1, 1, 1, 6])
    proforma_inputs['proposal']['construction_cost'] = array(sp.evaluate('Proforma Inputs!B40'))
    if DEBUG: print "COST:", proforma_inputs['proposal']['construction_cost']
    if DEBUG: print "SALES REVENUE", d['sales_revenue']
    if DEBUG: print d['rent_revenue']
    if DEBUG: print d['leases_revenue']
    if DEBUG: print d['sales_absorption']
    if DEBUG: print d['rent_absorption']
    if DEBUG: print d['leases_absorption']
    # Evaluate the 'npv' proforma variable against the assembled inputs.
    from opus_core.tests.utils import variable_tester
    po = ['urbansim_parcel', 'urbansim']
    v = variable_tester.VariableTester('proforma.py', po, proforma_inputs)
    npv = v._get_attribute('npv')
    #print npv, "\n\n"
    # Negate and scale so the minimizer maximizes NPV.
    return -1 * npv / 100000.0