def run_model2():
    """Re-run the regional employment transition model on a fresh copy of
    the fixture jobs table and return the job counts per sector and
    building type.

    Closure: reads ``self`` and ``ect_set`` from the enclosing test scope.
    """
    scratch = StorageFactory().get_storage('dict_storage')
    table = 'jobs_set'
    scratch.write_table(table_name=table, table_data=self.jobs_data)
    fresh_jobs = JobDataset(in_storage=scratch, in_table_name=table)

    RegionalEmploymentTransitionModel().run(
        year=2000,
        job_set=fresh_jobs,
        control_totals=ect_set,
        job_building_types=self.job_building_types)

    # check that the distribution of building type is the same before and
    # after subtracting jobs
    names = ["is_in_employment_sector_%d_%s" % (sector, kind)
             for kind in ("industrial", "commercial", "governmental")
             for sector in (1, 2)]
    fresh_jobs.compute_variables(
        ["urbansim.job." + name for name in names],
        resources=Resources({"job_building_type": self.job_building_types}))
    return array([fresh_jobs.get_attribute(name).sum() for name in names])
def test_distribute_unplaced_jobs_model(self):
    """Exercise DistributeUnplacedJobsModel with and without an agent filter.

    Places 1750 unplaced jobs of sector 15.
    # gridcell   has                     expected about
    # 1          4000 sector 15 jobs     5000 sector 15 jobs
    #            1000 sector 1 jobs      1000 sector 1 jobs
    # 2          2000 sector 15 jobs     2500 sector 15 jobs
    #            1000 sector 1 jobs      1000 sector 1 jobs
    # 3          1000 sector 15 jobs     1250 sector 15 jobs
    #            1000 sector 1 jobs      1000 sector 1 jobs
    # unplaced   1750 sector 15 jobs     0
    """
    # create jobs
    storage = StorageFactory().get_storage("dict_storage")
    job_data = {
        "job_id": arange(11750) + 1,
        "sector_id": array(7000 * [15] + 3000 * [1] + 1750 * [15]),
        "grid_id": array(4000 * [1] + 2000 * [2] + 1000 * [3] +
                         1000 * [1] + 1000 * [2] + 1000 * [3] + 1750 * [-1]),
    }
    jobs_table_name = "jobs"
    storage.write_table(table_name=jobs_table_name, table_data=job_data)
    jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

    # create gridcells
    storage = StorageFactory().get_storage("dict_storage")
    building_types_table_name = "building_types"
    storage.write_table(table_name=building_types_table_name,
                        table_data={"grid_id": arange(3) + 1})
    gridcells = GridcellDataset(in_storage=storage,
                                in_table_name=building_types_table_name)

    # run model
    model = DistributeUnplacedJobsModel(debuglevel=4)
    model.run(gridcells, jobs)

    # get results
    # no jobs are unplaced
    result1 = where(jobs.get_attribute("grid_id") < 0)[0]
    self.assertEqual(result1.size, 0)
    # the first 10000 jobs kept their locations
    result2 = jobs.get_attribute_by_index("grid_id", arange(10000))
    # logger.log_status(result2)
    self.assertEqual(ma.allclose(result2, job_data["grid_id"][0:10000], rtol=0), True)

    # run model with filter
    # unplace first 500 jobs of sector 15
    jobs.modify_attribute(name="grid_id", data=zeros(500), index=arange(500))
    # unplace first 500 jobs of sector 1
    # (bug fix: index must contain exactly 500 entries to match the
    # 500-element data array; arange(7000, 7501) supplied 501 indices)
    jobs.modify_attribute(name="grid_id", data=zeros(500), index=arange(7000, 7500))
    # place only unplaced jobs of sector 1
    model.run(gridcells, jobs, agents_filter="job.sector_id == 1")
    # 500 jobs of sector 15 should be unplaced
    result3 = where(jobs.get_attribute("grid_id") <= 0)[0]
    self.assertEqual(result3.size, 500)
    # jobs of sector 1 are placed again
    result4 = jobs.get_attribute_by_index("grid_id", arange(7000, 7500))
    self.assertEqual((result4 <= 0).sum(), 0)
def test_my_inputs(self):
    """Compute the variable under test for ten jobs against a four-row
    job_building_types lookup and compare with the expected boolean mask."""
    storage = StorageFactory().get_storage("dict_storage")

    bt_table = "job_building_types"
    storage.write_table(
        table_name=bt_table,
        table_data={"id": array([1, 2, 3, 4]),
                    "home_based": array([0, 1, 0, 1])})

    jobs_table = "jobs"
    storage.write_table(
        table_name=jobs_table,
        table_data={"job_id": arange(10) + 1,
                    "building_type": array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1])})

    building_types = JobBuildingTypeDataset(in_storage=storage,
                                            in_table_name=bt_table)
    jobs = JobDataset(in_storage=storage, in_table_name=jobs_table)

    jobs.compute_variables(
        self.variable_name,
        resources=Resources({"job_building_type": building_types}))
    actual = jobs.get_attribute(self.variable_name)

    # expected flag per job, given the lookup table above
    expected = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0], dtype="bool8")
    self.assert_(ma.allequal(actual, expected),
                 "Error in " + self.variable_name)
def test_my_inputs(self):
    """Compute the variable under test for ten jobs, using a lookup table
    where types 1 and 3 carry home_based == 1, and compare with the
    expected values."""
    storage = StorageFactory().get_storage('dict_storage')

    storage.write_table(
        table_name='job_building_types',
        table_data={'id': array([1, 2, 3, 4]),
                    'home_based': array([1, 0, 1, 0])})
    storage.write_table(
        table_name='jobs',
        table_data={'job_id': arange(10) + 1,
                    'building_type': array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1])})

    lookup = JobBuildingTypeDataset(in_storage=storage,
                                    in_table_name='job_building_types')
    jobs = JobDataset(in_storage=storage, in_table_name='jobs')

    jobs.compute_variables(self.variable_name,
                           resources=Resources({'job_building_type': lookup}))

    values = jobs.get_attribute(self.variable_name)
    should_be = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0])
    self.assert_(ma.allequal(values, should_be),
                 'Error in ' + self.variable_name)
def test_my_inputs(self):
    """Check the computed job variable against hand-computed expectations
    for a small fixture of ten jobs and four building types."""
    storage = StorageFactory().get_storage('dict_storage')

    # lookup table: building types 1 and 3 are flagged home_based
    storage.write_table(
        table_name='job_building_types',
        table_data={'id': array([1, 2, 3, 4]),
                    'home_based': array([1, 0, 1, 0])})
    job_building_types = JobBuildingTypeDataset(
        in_storage=storage, in_table_name='job_building_types')

    # ten jobs spread over the four building types
    storage.write_table(
        table_name='jobs',
        table_data={'job_id': arange(10) + 1,
                    'building_type': array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1])})
    jobs = JobDataset(in_storage=storage, in_table_name='jobs')

    jobs.compute_variables(
        self.variable_name,
        resources=Resources({'job_building_type': job_building_types}))

    computed = jobs.get_attribute(self.variable_name)
    expected = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0])
    self.assert_(ma.allequal(computed, expected),
                 'Error in ' + self.variable_name)
def test_same_distribution_after_job_addition(self):
    """Add 1,750 new jobs of sector 1 without specifying a distribution across
    gridcells (so it is assumed equal). Test that the total number of jobs in
    each sector after the addition matches the totals specified in
    annual_employment_control_totals.
    NOTE: unplaced jobs are indicated by grid_id <= 0
    """
    storage = StorageFactory().get_storage('dict_storage')

    jobs_table = 'jobs_set'
    storage.write_table(table_name=jobs_table, table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_table)

    # overwrite the fixture control totals with the values for this scenario
    totals_data = self.annual_employment_control_totals_data
    totals_data["total_non_home_based_employment"] = array([5750, 1400, 4000, 1600])
    totals_table = 'ect_set'
    storage.write_table(table_name=totals_table, table_data=totals_data)
    ect_set = ControlTotalDataset(in_storage=storage,
                                  in_table_name=totals_table,
                                  what="employment")

    # run model
    RegionalEmploymentTransitionModel().run(
        year=2000, job_set=jobs_set, control_totals=ect_set,
        job_building_types=self.job_building_types)

    # check that there are indeed 14750 total jobs after running the model
    areas = jobs_set.get_attribute("large_area_id")
    results = array([where(areas == area)[0].size for area in (1, 2)])
    should_be = [8150, 7600]
    self.assertEqual(ma.allequal(should_be, results), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))

    # check that total #jobs within each sector are close to the control totals
    results = self.get_count_all_sectors_and_areas(jobs_set)
    should_be = [5750, 1400, 1000, 4000, 1600, 2000]
    self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))
def test_unplaced_jobs_after_job_addition(self):
    """The initial jobs table is now adjusted to include 2000 unplaced jobs.
    Add 1,750 new jobs and ensure that the number of unplaced jobs after the
    addition is exactly 3,750 because this model is not responsible for
    placing jobs, only for creating them.
    """
    # extra unplaced jobs appended to the fixture before the model runs
    add_jobs_data = {
        "job_id": arange(13001, 15001),
        "grid_id": array(2000 * [0]),
        "sector_id": array(2000 * [1]),
        "building_type": array(2000 * [Constants._industrial_code]),
    }
    totals_data = self.annual_employment_control_totals_data
    totals_data["total_non_home_based_employment"] = array([10750, 3000])

    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')
    storage.write_table(table_name='ect_set', table_data=totals_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set',
                                  what="employment")
    jobs_set.add_elements(add_jobs_data)

    # run model with input databases
    EmploymentTransitionModel().run(
        year=2000, job_set=jobs_set, control_totals=ect_set,
        job_building_types=self.job_building_types)

    # there should be 16750 jobs in total after running the model
    self.assertEqual(ma.allequal([16750], jobs_set.size()), True, "Error")

    # unplaced jobs = new jobs created + jobs unplaced before the run
    unplaced_count = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
    self.assertEqual(ma.allclose(unplaced_count, [3750.0], rtol=0.00001), True)
def test_unplaced_jobs_after_job_addition(self):
    """The initial jobs table is now adjusted to include 2000 unplaced jobs.
    Add 1,750 new jobs and ensure that the number of unplaced jobs after the
    addition is exactly 3,750 because this model is not responsible for
    placing jobs, only for creating them.
    """
    storage = StorageFactory().get_storage('dict_storage')

    # base jobs table from the fixture
    storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
    job_dataset = JobDataset(in_storage=storage, in_table_name='jobs_set')

    # control totals asking for 10750 + 3000 non-home-based jobs
    control_data = self.annual_employment_control_totals_data
    control_data["total_non_home_based_employment"] = array([10750, 3000])
    storage.write_table(table_name='ect_set', table_data=control_data)
    control_dataset = ControlTotalDataset(in_storage=storage,
                                          in_table_name='ect_set',
                                          what="employment")

    # append 2000 unplaced industrial sector-1 jobs
    job_dataset.add_elements({
        "job_id": arange(13001, 15001),
        "grid_id": array(2000 * [0]),
        "sector_id": array(2000 * [1]),
        "building_type": array(2000 * [Constants._industrial_code]),
    })

    # run model with input databases
    model = EmploymentTransitionModel()
    model.run(year=2000, job_set=job_dataset, control_totals=control_dataset,
              job_building_types=self.job_building_types)

    # 14750 fixture jobs + 2000 added = 16750 expected in total
    self.assertEqual(ma.allequal([16750], job_dataset.size()), True, "Error")

    # number of unplaced jobs = 1750 newly created + 2000 unplaced up front
    grid_ids = job_dataset.get_attribute("grid_id")
    self.assertEqual(
        ma.allclose(where(grid_ids <= 0)[0].size, [3750.0], rtol=0.00001),
        True)
def test_same_distribution_after_job_addition(self):
    """Add 1,750 new jobs of sector 1 without specifying a distribution across
    gridcells (so it is assumed equal). Test that the total number of jobs in
    each sector after the addition matches the totals specified in
    annual_employment_control_totals.
    NOTE: unplaced jobs are indicated by grid_id <= 0
    """
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')

    # control totals for this scenario
    control_data = self.annual_employment_control_totals_data
    control_data["total_non_home_based_employment"] = array([5750, 1400, 4000, 1600])
    storage.write_table(table_name='ect_set', table_data=control_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set',
                                  what="employment")

    # run model
    model = RegionalEmploymentTransitionModel()
    model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
              job_building_types=self.job_building_types)

    # check that there are indeed 14750 total jobs after running the model
    areas = jobs_set.get_attribute("large_area_id")
    results = array([0, 0])
    for position, area_id in enumerate((1, 2)):
        results[position] = where(areas == area_id)[0].size
    should_be = [8150, 7600]
    self.assertEqual(ma.allequal(should_be, results), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))

    # check that total #jobs within each sector are close to the control totals
    results = self.get_count_all_sectors_and_areas(jobs_set)
    should_be = [5750, 1400, 1000, 4000, 1600, 2000]
    self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))
def run_model2():
    """Run RegionalEmploymentTransitionModel on a fresh jobs table built from
    the fixture data and return per-sector/building-type job counts.

    Relies on ``self`` and ``ect_set`` from the enclosing scope.
    """
    scratch = StorageFactory().get_storage('dict_storage')
    scratch.write_table(table_name='jobs_set', table_data=self.jobs_data)
    job_data = JobDataset(in_storage=scratch, in_table_name='jobs_set')

    RegionalEmploymentTransitionModel().run(
        year=2000, job_set=job_data, control_totals=ect_set,
        job_building_types=self.job_building_types)

    # check that the distribution of building type is the same before and
    # after subtracting jobs
    attrs = ("is_in_employment_sector_1_industrial",
             "is_in_employment_sector_2_industrial",
             "is_in_employment_sector_1_commercial",
             "is_in_employment_sector_2_commercial",
             "is_in_employment_sector_1_governmental",
             "is_in_employment_sector_2_governmental")
    job_data.compute_variables(
        ["urbansim.job." + attr for attr in attrs],
        resources=Resources({"job_building_type": self.job_building_types}))
    return array([job_data.get_attribute(attr).sum() for attr in attrs])
def run(self, in_storage, out_storage, business_table="business", jobs_table="jobs", control_totals_table=None):
    """Unroll a business (establishments) table into one row per job and
    write the resulting jobs table to ``out_storage``.

    Each business contributes ``self.number_of_jobs_attr`` job rows that
    inherit its sector, geography, building/parcel ids and (per-employee or
    total) sqft. Optionally trims jobs per (zone, sector) down to
    ``control_totals_table``, preferring to remove jobs without a parcel,
    then without a building, then any.

    Parameters:
        in_storage / out_storage -- opus storage objects to read from / write to.
        business_table -- name of the input establishments table.
        jobs_table -- name of the output jobs table.
        control_totals_table -- optional table with zone_id/sector_id/jobs caps.
    """
    logger.log_status("Unrolling %s table." % business_table)
    # get attributes from the establisments table
    business_dataset = BusinessDataset(in_storage=in_storage, in_table_name=business_table)
    business_sizes = business_dataset.get_attribute(self.number_of_jobs_attr).astype("int32")
    sectors = business_dataset.get_attribute("sector_id")
    tazes = business_dataset.get_attribute(self.geography_id_attr).astype("int32")
    # optional attributes default to empty arrays / None when the column is absent
    building_ids = array([], dtype='int32')
    if "building_id" in business_dataset.get_primary_attribute_names():
        building_ids = business_dataset.get_attribute("building_id")
    parcel_ids = array([], dtype='int32')
    if "parcel_id" in business_dataset.get_primary_attribute_names():
        parcel_ids = business_dataset.get_attribute("parcel_id")
    home_based = array([], dtype='int16')
    if "home_based" in business_dataset.get_primary_attribute_names():
        home_based = business_dataset.get_attribute("home_based")
    building_sqft = business_dataset.get_attribute(self.sqft_attr)
    building_sqft[building_sqft <= 0] = 0  # clamp negative sqft to zero
    join_flags = None
    if "join_flag" in business_dataset.get_primary_attribute_names():
        join_flags = business_dataset.get_attribute("join_flag")
    impute_sqft_flag = None
    if "impute_building_sqft_flag" in business_dataset.get_primary_attribute_names():
        impute_sqft_flag = business_dataset.get_attribute("impute_building_sqft_flag")

    # inititalize jobs attributes (one entry per job, -1 = not set)
    total_size = business_sizes.sum()
    jobs_data = {}
    jobs_data["sector_id"] = resize(array([-1], dtype=sectors.dtype), total_size)
    jobs_data["building_id"] = resize(array([-1], dtype=building_ids.dtype), total_size)
    jobs_data["parcel_id"] = resize(array([-1], dtype=parcel_ids.dtype), total_size)
    jobs_data[self.geography_id_attr] = resize(array([-1], dtype=tazes.dtype), total_size)
    jobs_data["building_type"] = resize(array([-1], dtype=home_based.dtype), total_size)
    jobs_data["sqft"] = resize(array([], dtype=building_sqft.dtype), total_size)
    if join_flags is not None:
        jobs_data["join_flag"] = resize(array([], dtype=join_flags.dtype), total_size)
    if impute_sqft_flag is not None:
        jobs_data["impute_building_sqft_flag"] = resize(array([], dtype=impute_sqft_flag.dtype), total_size)

    # indices[i] is the end (exclusive) of business i's slice of the job arrays
    indices = cumsum(business_sizes)
    # iterate over establishments. For each business create the corresponding number of jobs by filling the corresponding part
    # of the arrays
    start_index = 0
    for i in range(business_dataset.size()):
        end_index = indices[i]
        jobs_data["sector_id"][start_index:end_index] = sectors[i]
        if building_ids.size > 0:
            jobs_data["building_id"][start_index:end_index] = building_ids[i]
        if parcel_ids.size > 0:
            jobs_data["parcel_id"][start_index:end_index] = parcel_ids[i]
        jobs_data[self.geography_id_attr][start_index:end_index] = tazes[i]
        if home_based.size > 0:
            jobs_data["building_type"][start_index:end_index] = home_based[i]
        if self.compute_sqft_per_job:
            # 90% of the building sqft divided evenly among the business' jobs
            jobs_data["sqft"][start_index:end_index] = round((building_sqft[i] - building_sqft[i] / 10.0) / float(business_sizes[i]))  # sqft per employee
        else:
            jobs_data["sqft"][start_index:end_index] = building_sqft[i]
        if join_flags is not None:
            jobs_data["join_flag"][start_index:end_index] = join_flags[i]
        if impute_sqft_flag is not None:
            jobs_data["impute_building_sqft_flag"][start_index:end_index] = impute_sqft_flag[i]
        start_index = end_index

    jobs_data["job_id"] = arange(total_size) + 1
    if self.compute_sqft_per_job:
        # bound per-employee sqft into [minimum_sqft, maximum_sqft] (zeros stay zero)
        jobs_data["sqft"] = clip(jobs_data["sqft"], 0, self.maximum_sqft)
        jobs_data["sqft"][logical_and(jobs_data["sqft"] > 0, jobs_data["sqft"] < self.minimum_sqft)] = self.minimum_sqft

    # correct missing job_building_types
    wmissing_bt = where(jobs_data["building_type"] <= 0)[0]
    if wmissing_bt.size > 0:
        jobs_data["building_type"][wmissing_bt] = 2  # assign non-homebased type for now. It can be re-classified in the assign_bldgs_to_jobs... script

    # create jobs table and write it out
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name="jobs", table_data=jobs_data)
    job_dataset = JobDataset(in_storage=storage)
    if self.unplace_jobs_with_non_existing_buildings:
        self.do_unplace_jobs_with_non_existing_buildings(job_dataset, out_storage)

    # Match to control totals (only eliminate jobs if control totals are smaller than the actual number of jobs).
    if control_totals_table is not None:
        logger.log_status("Matching to control totals.")
        control_totals = ControlTotalDataset(what='employment', id_name=['zone_id', 'sector_id'], in_table_name=control_totals_table, in_storage=in_storage)
        control_totals.load_dataset(attributes=['zone_id', 'sector_id', 'jobs'])
        zones_sectors = control_totals.get_id_attribute()
        njobs = control_totals.get_attribute('jobs')
        remove = array([], dtype='int32')
        for i in range(zones_sectors.shape[0]):
            zone, sector = zones_sectors[i, :]
            in_sector = job_dataset.get_attribute("sector_id") == sector
            in_zone_in_sector = logical_and(in_sector, job_dataset.get_attribute("zone_id") == zone)
            if in_zone_in_sector.sum() <= njobs[i]:
                continue  # already at or under the control total
            to_be_removed = in_zone_in_sector.sum() - njobs[i]
            this_removal = 0
            not_considered = ones(job_dataset.size(), dtype='bool8')
            for unit in ['parcel_id', 'building_id', None]:  # first consider jobs without parcel id, then without building_id, then all
                if unit is not None:
                    wnunit = job_dataset.get_attribute(unit) <= 0
                    eligible = logical_and(not_considered, logical_and(in_zone_in_sector, wnunit))
                    not_considered[where(wnunit)] = False
                else:
                    eligible = logical_and(not_considered, in_zone_in_sector)
                eligible_sum = eligible.sum()
                if eligible_sum > 0:
                    where_eligible = where(eligible)[0]
                    if eligible_sum <= to_be_removed - this_removal:
                        # not enough eligible jobs: remove all of them
                        draw = arange(eligible_sum)
                    else:
                        # more than enough: sample the remainder without replacement
                        draw = sample_noreplace(where_eligible, to_be_removed - this_removal, eligible_sum)
                    remove = concatenate((remove, where_eligible[draw]))
                    this_removal += draw.size
                    if this_removal >= to_be_removed:
                        break
        job_dataset.remove_elements(remove)
        logger.log_status("%s jobs removed." % remove.size)

    logger.log_status("Write jobs table.")
    job_dataset.write_dataset(out_table_name=jobs_table, out_storage=out_storage)
    logger.log_status("Created %s jobs." % job_dataset.size())
def test_agents_placed_in_appropriate_types(self):
    """Create 1000 unplaced industrial jobs and 1 commercial job. Allocate 50
    commercial gridcells with enough space for 10 commercial jobs per
    gridcell. After running the EmploymentLocationChoiceModel for the
    commercial group, the 1 commercial job should be placed, but the 1000
    industrial jobs should remain unplaced.
    """
    storage = StorageFactory().get_storage('dict_storage')

    storage.write_table(table_name='job_building_types',
                        table_data={
                            'id': array([2, 1]),
                            'name': array(['commercial', 'industrial'])
                        })
    job_building_types = JobBuildingTypeDataset(in_storage=storage,
                                                in_table_name='job_building_types')

    storage.write_table(table_name='jobs',
                        table_data={
                            'job_id': arange(1001) + 1,
                            'grid_id': array([0] * 1001),
                            'building_type': array([1] * 1000 + [2])
                        })
    jobs = JobDataset(in_storage=storage, in_table_name='jobs')

    storage.write_table(table_name='gridcells',
                        table_data={
                            'grid_id': arange(50) + 1,
                            'commercial_sqft': array([1000] * 50),
                            'commercial_sqft_per_job': array([100] * 50)
                        })
    gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')

    coefficients = Coefficients(names=("dummy",), values=(0.1,))
    specification = EquationSpecification(variables=("gridcell.commercial_sqft",),
                                          coefficients=("dummy",))
    compute_resources = Resources({"job": jobs,
                                   "job_building_type": job_building_types})

    # bug fix: where() returns a tuple of index arrays; take element [0] so
    # DatasetSubset receives an index array, as done everywhere else in the file
    agents_index = where(jobs.get_attribute("grid_id") == 0)[0]
    unplace_jobs = DatasetSubset(jobs, agents_index)
    # restrict the model run to the unplaced commercial jobs
    agents_index = where(unplace_jobs.get_attribute("building_type") == 2)[0]

    # counts before the model run (also primes the variable cache)
    gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
                                resources=compute_resources)
    commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
    gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
                                resources=compute_resources)
    industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

    model_group = ModelGroup(job_building_types, "name")
    elcm = EmploymentLocationChoiceModel(
        ModelGroupMember(model_group, "commercial"),
        location_set=gridcells,
        agents_grouping_attribute="job.building_type",
        choices="opus_core.random_choices_from_index",
        sample_size_locations=30)
    elcm.run(specification, coefficients, agent_set=jobs,
             agents_index=agents_index, debuglevel=1)

    # recompute counts after the model run
    gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
                                resources=compute_resources)
    commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
    gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
                                resources=compute_resources)
    industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

    self.assertEqual(commercial_jobs.sum() == 1, True,
                     "Error, there should only be a total of 1 commercial job")
    self.assertEqual(industrial_jobs.sum() == 0, True,
                     "Error, there should be no industrial jobs because there's no space for them")
def test_same_distribution_after_job_addition(self):
    """Add 1,750 new jobs of sector 1 without specifying a distribution across
    gridcells (so it is assumed equal). Test that the total number of jobs in
    each sector after the addition matches the totals specified in
    annual_employment_control_totals, and that exactly 1,750 jobs end up
    unplaced (this model only creates jobs, never places them).
    NOTE: unplaced jobs are indicated by grid_id <= 0
    """
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')

    control_data = self.annual_employment_control_totals_data
    control_data["total_non_home_based_employment"] = array([8750, 3000])
    storage.write_table(table_name='ect_set', table_data=control_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set',
                                  what="employment")

    # run model
    model = EmploymentTransitionModel()
    model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
              job_building_types=self.job_building_types)

    # check that there are indeed 14750 total jobs after running the model
    self.assertEqual(ma.allequal([14750], jobs_set.size()), True, "Error")

    # check that total #jobs within each sector are close to the control totals
    sector_counts = self.get_count_all_sectors(jobs_set)
    self.assertEqual(
        ma.allclose(sector_counts, [8750.0, 3000, 3000], rtol=0.00001), True)

    # check that the number of unplaced jobs is the number of new jobs created (1750)
    unplaced = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
    self.assertEqual(ma.allclose(unplaced, [1750.0], rtol=0.00001), True)

    # test distribution of building type
    def run_model():
        # fresh copy of the fixture so each stochastic repetition is independent
        scratch = StorageFactory().get_storage('dict_storage')
        scratch.write_table(table_name='jobs_set', table_data=self.jobs_data)
        fresh_jobs = JobDataset(in_storage=scratch, in_table_name='jobs_set')
        EmploymentTransitionModel().run(
            year=2000, job_set=fresh_jobs, control_totals=ect_set,
            job_building_types=self.job_building_types)
        # check that the distribution of building type is the same before and
        # after subtracting jobs
        names = ["is_in_employment_sector_%d_%s" % (sector, kind)
                 for kind in ("industrial", "commercial", "governmental")
                 for sector in (1, 2)]
        fresh_jobs.compute_variables(
            ["urbansim.job." + name for name in names],
            resources=Resources({"job_building_type": self.job_building_types}))
        return array([fresh_jobs.get_attribute(name).sum() for name in names])

    expected_results = array([3500.0 / 7000.0 * 8750.0, 900,
                              3500.0 / 7000.0 * 8750.0, 1800, 0, 300])
    # print expected_results
    self.run_stochastic_test(__file__, run_model, expected_results, 10)

    # check data types
    self.assertEqual(jobs_set.get_attribute("sector_id").dtype, int32,
                     "Error in data type of the new job set. Should be: int32, is: %s" %
                     str(jobs_set.get_attribute("sector_id").dtype))
    self.assertEqual(jobs_set.get_attribute("building_type").dtype, int8,
                     "Error in data type of the new job set. Should be: int8, is: %s" %
                     str(jobs_set.get_attribute("building_type").dtype))
def test_same_distribution_after_job_subtraction(self):
    """Removes 1,750 sector_1 jobs, without specifying the distribution across
    gridcells (so it is assumed equal). Test that the distribution (in %) of
    sector 1 jobs across gridcells before and after the subtraction are
    relatively equal.
    """
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')

    storage.write_table(table_name='ect_set',
                        table_data=self.annual_employment_control_totals_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set',
                                  what="employment")

    model = RegionalEmploymentTransitionModel()
    model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
              job_building_types=self.job_building_types)

    # check the totals in regions
    areas = jobs_set.get_attribute("large_area_id")
    results = array([where(areas == area_id)[0].size for area_id in (1, 2)])
    should_be = [4250, 7000]
    self.assertEqual(ma.allequal(should_be, results), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))

    def run_model():
        # re-run the model on a fresh copy of the fixture and report the
        # per-sector/area counts of the remaining jobs
        scratch = StorageFactory().get_storage('dict_storage')
        scratch.write_table(table_name='jobs_set', table_data=self.jobs_data)
        fresh_jobs = JobDataset(in_storage=scratch, in_table_name='jobs_set')
        RegionalEmploymentTransitionModel().run(
            year=2000, job_set=fresh_jobs, control_totals=ect_set,
            job_building_types=self.job_building_types)
        # check that the distribution of jobs is the same before and after
        # subtracting jobs
        return self.get_count_all_sectors_and_areas(fresh_jobs)

    expected_results = array([2250.0, 1000, 1000, 3000, 2000.0, 2000])
    self.run_stochastic_test(__file__, run_model, expected_results, 10)

    def run_model2():
        # same re-run, but report counts per sector and building type
        scratch = StorageFactory().get_storage('dict_storage')
        scratch.write_table(table_name='jobs_set', table_data=self.jobs_data)
        fresh_jobs = JobDataset(in_storage=scratch, in_table_name='jobs_set')
        RegionalEmploymentTransitionModel().run(
            year=2000, job_set=fresh_jobs, control_totals=ect_set,
            job_building_types=self.job_building_types)
        # check that the distribution of building type is the same before
        # and after subtracting jobs
        names = ["is_in_employment_sector_%d_%s" % (sector, kind)
                 for kind in ("industrial", "commercial", "governmental")
                 for sector in (1, 2)]
        fresh_jobs.compute_variables(
            ["urbansim.job." + name for name in names],
            resources=Resources({"job_building_type": self.job_building_types}))
        return array([fresh_jobs.get_attribute(name).sum() for name in names])

    expected_results = array([3500.0 / 7000.0 * 5250.0, 900,
                              3500.0 / 7000.0 * 5250.0, 1800, 0, 300])
    self.run_stochastic_test(__file__, run_model2, expected_results, 20)
def run(self, in_storage, out_storage, business_table="business", jobs_table="jobs", control_totals_table=None):
    """Unroll the business (establishments) table into an individual-jobs table.

    Each business record with N employees becomes N job records carrying the
    business' sector, geography and (when present) building/parcel/home-based
    attributes. If ``control_totals_table`` is given, jobs are randomly removed
    from zone/sector cells that exceed the control totals (jobs are never
    added). The resulting table is written to ``out_storage`` as ``jobs_table``.

    :param in_storage: storage holding the business table (and the control
        totals table, if used)
    :param out_storage: storage the resulting jobs table is written to
    :param business_table: name of the input business table
    :param jobs_table: name of the output jobs table
    :param control_totals_table: optional name of a zone/sector employment
        control totals table; when None, no matching is done
    """
    logger.log_status("Unrolling %s table." % business_table)
    # Get attributes from the establishments table. Optional attributes
    # (building_id, parcel_id, home_based, join_flag, impute flag) are only
    # read when the table actually carries them.
    business_dataset = BusinessDataset(in_storage=in_storage, in_table_name=business_table)
    business_sizes = business_dataset.get_attribute(self.number_of_jobs_attr).astype("int32")
    sectors = business_dataset.get_attribute("sector_id")
    tazes = business_dataset.get_attribute(self.geography_id_attr).astype("int32")
    building_ids = array([], dtype='int32')
    if "building_id" in business_dataset.get_primary_attribute_names():
        building_ids = business_dataset.get_attribute("building_id")
    parcel_ids = array([], dtype='int32')
    if "parcel_id" in business_dataset.get_primary_attribute_names():
        parcel_ids = business_dataset.get_attribute("parcel_id")
    home_based = array([], dtype='int16')
    if "home_based" in business_dataset.get_primary_attribute_names():
        home_based = business_dataset.get_attribute("home_based")
    building_sqft = business_dataset.get_attribute(self.sqft_attr)
    # Negative sqft values are treated as missing and zeroed out.
    building_sqft[building_sqft <= 0] = 0
    join_flags = None
    if "join_flag" in business_dataset.get_primary_attribute_names():
        join_flags = business_dataset.get_attribute("join_flag")
    impute_sqft_flag = None
    if "impute_building_sqft_flag" in business_dataset.get_primary_attribute_names():
        impute_sqft_flag = business_dataset.get_attribute("impute_building_sqft_flag")
    # Initialize jobs attributes: one flat array per attribute, sized to the
    # total number of jobs, pre-filled with -1 (or zeros for sqft).
    total_size = business_sizes.sum()
    jobs_data = {}
    jobs_data["sector_id"] = resize(array([-1], dtype=sectors.dtype), total_size)
    jobs_data["building_id"] = resize(array([-1], dtype=building_ids.dtype), total_size)
    jobs_data["parcel_id"] = resize(array([-1], dtype=parcel_ids.dtype), total_size)
    jobs_data[self.geography_id_attr] = resize(array([-1], dtype=tazes.dtype), total_size)
    jobs_data["building_type"] = resize(array([-1], dtype=home_based.dtype), total_size)
    jobs_data["sqft"] = resize(array([], dtype=building_sqft.dtype), total_size)
    if join_flags is not None:
        jobs_data["join_flag"] = resize(array([], dtype=join_flags.dtype), total_size)
    if impute_sqft_flag is not None:
        jobs_data["impute_building_sqft_flag"] = resize(array([], dtype=impute_sqft_flag.dtype), total_size)
    # indices[i] is the end (exclusive) of business i's slice in the flat arrays.
    indices = cumsum(business_sizes)
    # Iterate over establishments. For each business create the corresponding
    # number of jobs by filling the corresponding part of the arrays.
    start_index=0
    for i in range(business_dataset.size()):
        end_index = indices[i]
        jobs_data["sector_id"][start_index:end_index] = sectors[i]
        if building_ids.size > 0:
            jobs_data["building_id"][start_index:end_index] = building_ids[i]
        if parcel_ids.size > 0:
            jobs_data["parcel_id"][start_index:end_index] = parcel_ids[i]
        jobs_data[self.geography_id_attr][start_index:end_index] = tazes[i]
        if home_based.size > 0:
            jobs_data["building_type"][start_index:end_index] = home_based[i]
        if self.compute_sqft_per_job:
            # Sqft per employee: 90% of the building sqft divided evenly among
            # the business' jobs.
            # NOTE(review): divides by business_sizes[i]; a zero-employee
            # business would raise ZeroDivisionError - TODO confirm sizes > 0.
            jobs_data["sqft"][start_index:end_index] = round((building_sqft[i]-building_sqft[i]/10.0)/float(business_sizes[i])) # sqft per employee
        else:
            jobs_data["sqft"][start_index:end_index] = building_sqft[i]
        if join_flags is not None:
            jobs_data["join_flag"][start_index:end_index] = join_flags[i]
        if impute_sqft_flag is not None:
            jobs_data["impute_building_sqft_flag"][start_index:end_index] = impute_sqft_flag[i]
        start_index = end_index
    jobs_data["job_id"] = arange(total_size)+1
    if self.compute_sqft_per_job:
        # Clamp the per-job sqft into [minimum_sqft, maximum_sqft]; zeros stay zero.
        jobs_data["sqft"] = clip(jobs_data["sqft"], 0, self.maximum_sqft)
        jobs_data["sqft"][logical_and(jobs_data["sqft"]>0, jobs_data["sqft"]<self.minimum_sqft)] = self.minimum_sqft
    # Correct missing job_building_types.
    wmissing_bt = where(jobs_data["building_type"]<=0)[0]
    if wmissing_bt.size > 0:
        jobs_data["building_type"][wmissing_bt] = 2 # assign non-homebased type for now. It can be re-classified in the assign_bldgs_to_jobs... script
    # Create jobs table and write it out.
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(
        table_name="jobs",
        table_data=jobs_data
        )
    job_dataset = JobDataset(in_storage=storage)
    if self.unplace_jobs_with_non_existing_buildings:
        self.do_unplace_jobs_with_non_existing_buildings(job_dataset, out_storage)
    # Match to control totals (only eliminate jobs if control totals are
    # smaller than the actual number of jobs).
    if control_totals_table is not None:
        logger.log_status("Matching to control totals.")
        control_totals = ControlTotalDataset(what='employment', id_name=['zone_id', 'sector_id'], in_table_name=control_totals_table, in_storage=in_storage)
        control_totals.load_dataset(attributes=['zone_id', 'sector_id', 'jobs'])
        zones_sectors = control_totals.get_id_attribute()
        njobs = control_totals.get_attribute('jobs')
        remove = array([], dtype='int32')
        for i in range(zones_sectors.shape[0]):
            zone, sector = zones_sectors[i,:]
            in_sector = job_dataset.get_attribute("sector_id") == sector
            in_zone_in_sector = logical_and(in_sector, job_dataset.get_attribute("zone_id") == zone)
            if in_zone_in_sector.sum() <= njobs[i]:
                # This zone/sector cell is at or under its control total - nothing to remove.
                continue
            to_be_removed = in_zone_in_sector.sum() - njobs[i]
            this_removal = 0
            # True for jobs not yet examined by an earlier pass of the unit loop.
            not_considered = ones(job_dataset.size(), dtype='bool8')
            for unit in ['parcel_id', 'building_id', None]: # first consider jobs without parcel id, then without building_id, then all
                if unit is not None:
                    # Jobs lacking this location unit are the preferred removal candidates.
                    wnunit = job_dataset.get_attribute(unit) <= 0
                    eligible = logical_and(not_considered, logical_and(in_zone_in_sector, wnunit))
                    not_considered[where(wnunit)] = False
                else:
                    eligible = logical_and(not_considered, in_zone_in_sector)
                eligible_sum = eligible.sum()
                if eligible_sum > 0:
                    where_eligible = where(eligible)[0]
                    if eligible_sum <= to_be_removed-this_removal:
                        # Not enough candidates - take them all.
                        draw = arange(eligible_sum)
                    else:
                        # More candidates than needed - sample without replacement.
                        draw = sample_noreplace(where_eligible, to_be_removed-this_removal, eligible_sum)
                    remove = concatenate((remove, where_eligible[draw]))
                    this_removal += draw.size
                    if this_removal >= to_be_removed:
                        break
        job_dataset.remove_elements(remove)
        logger.log_status("%s jobs removed." % remove.size)
    logger.log_status("Write jobs table.")
    job_dataset.write_dataset(out_table_name=jobs_table, out_storage=out_storage)
    logger.log_status("Created %s jobs." % job_dataset.size())
def test_same_distribution_after_job_subtraction(self):
    """Removes 1,750 sector_1 jobs, without specifying the distribution across
    gridcells (so it is assumed equal).

    Test that the distribution (in %) of sector 1 jobs across gridcells before
    and after the subtraction are relatively equal.
    """
    storage = StorageFactory().get_storage('dict_storage')
    jobs_table = 'jobs_set'
    storage.write_table(table_name=jobs_table, table_data=self.jobs_data)
    jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_table)

    totals_table = 'ect_set'
    storage.write_table(table_name=totals_table,
                        table_data=self.annual_employment_control_totals_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name=totals_table,
                                  what="employment")

    RegionalEmploymentTransitionModel().run(
        year=2000, job_set=jobs_set, control_totals=ect_set,
        job_building_types=self.job_building_types)

    # Totals per large area must match the regional control totals.
    areas = jobs_set.get_attribute("large_area_id")
    results = array([0, 0])
    for iarea, area_id in enumerate([1, 2]):
        results[iarea] = where(areas == area_id)[0].size
    should_be = [4250, 7000]
    self.assertEqual(ma.allequal(should_be, results), True,
                     "Error, should_be: %s, but result: %s" % (should_be, results))

    def run_model():
        # Build a fresh job set and rerun the model; hand back the counts per
        # sector/area for the stochastic comparison against expected_results.
        local_storage = StorageFactory().get_storage('dict_storage')
        local_storage.write_table(table_name=jobs_table, table_data=self.jobs_data)
        rebuilt_jobs = JobDataset(in_storage=local_storage, in_table_name=jobs_table)
        RegionalEmploymentTransitionModel().run(
            year=2000, job_set=rebuilt_jobs, control_totals=ect_set,
            job_building_types=self.job_building_types)
        return self.get_count_all_sectors_and_areas(rebuilt_jobs)

    self.run_stochastic_test(__file__, run_model,
                             array([2250.0, 1000, 1000, 3000, 2000.0, 2000]), 10)

    def run_model2():
        # Fresh rebuild/rerun; returns the counts per sector/building type so
        # the building-type distribution is checked as well.
        local_storage = StorageFactory().get_storage('dict_storage')
        local_storage.write_table(table_name=jobs_table, table_data=self.jobs_data)
        rebuilt_jobs = JobDataset(in_storage=local_storage, in_table_name=jobs_table)
        RegionalEmploymentTransitionModel().run(
            year=2000, job_set=rebuilt_jobs, control_totals=ect_set,
            job_building_types=self.job_building_types)
        attributes = ["is_in_employment_sector_1_industrial",
                      "is_in_employment_sector_2_industrial",
                      "is_in_employment_sector_1_commercial",
                      "is_in_employment_sector_2_commercial",
                      "is_in_employment_sector_1_governmental",
                      "is_in_employment_sector_2_governmental"]
        rebuilt_jobs.compute_variables(
            ["urbansim.job." + name for name in attributes],
            resources=Resources({"job_building_type": self.job_building_types}))
        return array([rebuilt_jobs.get_attribute(name).sum() for name in attributes])

    self.run_stochastic_test(
        __file__, run_model2,
        array([3500.0 / 7000.0 * 5250.0, 900,
               3500.0 / 7000.0 * 5250.0, 1800, 0, 300]), 20)
def test_agents_placed_in_appropriate_types(self):
    """Create 1000 unplaced industrial jobs and 1 commercial job. Allocate 50
    commercial gridcells with enough space for 10 commercial jobs per gridcell.
    After running the EmploymentLocationChoiceModel, the 1 commercial job
    should be placed, but the 1000 industrial jobs should remain unplaced.
    """
    storage = StorageFactory().get_storage('dict_storage')

    storage.write_table(table_name='job_building_types',
                        table_data={
                            'id': array([2, 1]),
                            'name': array(['commercial', 'industrial'])
                        })
    job_building_types = JobBuildingTypeDataset(in_storage=storage,
                                                in_table_name='job_building_types')

    # All 1001 jobs start unplaced (grid_id == 0); the last one is commercial.
    storage.write_table(table_name='jobs',
                        table_data={
                            'job_id': arange(1001) + 1,
                            'grid_id': array([0] * 1001),
                            'building_type': array([1] * 1000 + [2])
                        })
    jobs = JobDataset(in_storage=storage, in_table_name='jobs')

    # 50 gridcells, each with room for 10 commercial jobs (1000 sqft / 100 sqft-per-job).
    storage.write_table(table_name='gridcells',
                        table_data={
                            'grid_id': arange(50) + 1,
                            'commercial_sqft': array([1000] * 50),
                            'commercial_sqft_per_job': array([100] * 50)
                        })
    gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')

    coefficients = Coefficients(names=("dummy", ), values=(0.1, ))
    specification = EquationSpecification(
        variables=("gridcell.commercial_sqft", ), coefficients=("dummy", ))

    compute_resources = Resources({
        "job": jobs,
        "job_building_type": job_building_types
    })
    # BUG FIX: where() returns a tuple of arrays; take element [0] so the index
    # is a plain array, consistent with every other where(...)[0] in this file.
    agents_index = where(jobs.get_attribute("grid_id") == 0)[0]
    unplace_jobs = DatasetSubset(jobs, agents_index)
    # Restrict the model to the single unplaced commercial job.
    agents_index = where(unplace_jobs.get_attribute("building_type") == 2)[0]

    # Job counts per gridcell before the model run (also primes the caches).
    gridcells.compute_variables(
        ["urbansim.gridcell.number_of_commercial_jobs"],
        resources=compute_resources)
    commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
    gridcells.compute_variables(
        ["urbansim.gridcell.number_of_industrial_jobs"],
        resources=compute_resources)
    industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

    model_group = ModelGroup(job_building_types, "name")
    elcm = EmploymentLocationChoiceModel(
        ModelGroupMember(model_group, "commercial"),
        location_set=gridcells,
        agents_grouping_attribute="job.building_type",
        choices="opus_core.random_choices_from_index",
        sample_size_locations=30)
    elcm.run(specification,
             coefficients,
             agent_set=jobs,
             agents_index=agents_index,
             debuglevel=1)

    # Recompute the counts after the run.
    gridcells.compute_variables(
        ["urbansim.gridcell.number_of_commercial_jobs"],
        resources=compute_resources)
    commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")
    gridcells.compute_variables(
        ["urbansim.gridcell.number_of_industrial_jobs"],
        resources=compute_resources)
    industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

    self.assertEqual(
        commercial_jobs.sum() == 1, True,
        "Error, there should only be a total of 1 commercial job")
    self.assertEqual(
        industrial_jobs.sum() == 0, True,
        "Error, there should be no industrial jobs because there's no space for them"
    )
def test_controlling_sector(self):
    """Run the transition model for three consecutive years, each controlled by
    per-sector employment totals, and verify the job counts match the controls.
    """
    annual_employment_control_totals_data = {
        "year": array([2000, 2000, 2000, 2001, 2001, 2001, 2002, 2002, 2002]),
        "sector_id": array([1, 2, 3, 1, 2, 3, 1, 2, 3]),
        "number_of_jobs": array([25013, 21513, 18227,    # 2000
                                 10055, 15003, 17999,    # 2001
                                 15678, 14001, 20432])   # 2002
    }
    # 5000 jobs in each of the three sectors, all placed on gridcell 1.
    jobs_data = {
        "job_id": arange(15000) + 1,
        "grid_id": array(15000 * [1]),
        "sector_id": array(5000 * [1] + 5000 * [2] + 5000 * [3])
    }

    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='job_set', table_data=jobs_data)
    job_set = JobDataset(in_storage=storage, in_table_name='job_set')
    storage.write_table(table_name='ect_set',
                        table_data=annual_employment_control_totals_data)
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set',
                                  what='', id_name=[])

    model = TransitionModel(job_set, control_total_dataset=ect_set)

    def check_against_controls(offset):
        # Compare the current job set against the three control rows starting
        # at `offset` (one row per sector for a single year).
        results = job_set.size()
        should_be = [(ect_set.get_attribute("number_of_jobs")[offset:offset + 3]).sum()]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1), True,
                         "Error, should_be: %s, but result: %s" % (should_be, results))
        results = zeros(3, dtype=int32)
        for i in range(3):
            results[i] = (job_set.get_attribute('sector_id') ==
                          ect_set.get_attribute("sector_id")[i + offset]).sum()
        should_be = ect_set.get_attribute("number_of_jobs")[offset:offset + 3]
        self.assertEqual(ma.allclose(results, should_be, rtol=1e-6), True,
                         "Error, should_be: %s, but result: %s" % (should_be, results))

    # Year 2000: grow every sector to its control total.
    model.run(year=2000, target_attribute_name="number_of_jobs",
              reset_dataset_attribute_value={'grid_id': -1})
    check_against_controls(0)

    # Year 2001: this run should shrink the sectors down to the new totals.
    model.run(year=2001, target_attribute_name="number_of_jobs",
              reset_dataset_attribute_value={'grid_id': -1})
    check_against_controls(3)

    # Year 2002: mixed growth and shrinkage across sectors.
    model.run(year=2002, target_attribute_name="number_of_jobs",
              reset_dataset_attribute_value={'grid_id': -1})
    check_against_controls(6)
def test_same_distribution_after_job_addition(self):
    """Add 1,750 new jobs of sector 1 without specifying a distribution across
    gridcells (so it is assumed equal).

    Test that the total number of jobs in each sector after the addition
    matches the totals specified in annual_employment_control_totals. Ensure
    that the number of unplaced jobs after the addition is exactly 1,750,
    because this model is not responsible for placing jobs, only for creating
    them. NOTE: unplaced jobs are indicated by grid_id <= 0.
    """
    storage = StorageFactory().get_storage('dict_storage')

    jobs_set_table_name = 'jobs_set'
    storage.write_table(
        table_name=jobs_set_table_name,
        table_data=self.jobs_data,
    )
    jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

    # BUG FIX: shallow-copy the shared fixture before adding the extra column,
    # so this test does not mutate self.annual_employment_control_totals_data
    # for sibling tests that reuse the same dict.
    annual_employment_control_totals_data = dict(self.annual_employment_control_totals_data)
    annual_employment_control_totals_data["total_non_home_based_employment"] = array([8750, 3000])

    ect_set_table_name = 'ect_set'
    storage.write_table(
        table_name=ect_set_table_name,
        table_data=annual_employment_control_totals_data,
    )
    ect_set = ControlTotalDataset(in_storage=storage, in_table_name=ect_set_table_name,
                                  what="employment")

    # run model
    model = EmploymentTransitionModel()
    model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
              job_building_types=self.job_building_types)

    # check that there are indeed 14750 total jobs after running the model
    results = jobs_set.size()
    should_be = [14750]
    self.assertEqual(ma.allequal(should_be, results), True, "Error")

    # check that total #jobs within each sector are close to what was set in the control_totals
    results = self.get_count_all_sectors(jobs_set)
    should_be = [8750.0, 3000, 3000]
    self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

    # check that the number of unplaced jobs is the number of new jobs created (1750)
    results = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
    should_be = [1750.0]
    self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

    # test distribution of building type
    def run_model():
        # Rebuild the job set and rerun the model; return the counts per
        # sector/building type for the stochastic distribution comparison.
        storage = StorageFactory().get_storage('dict_storage')
        jobs_set_table_name = 'jobs_set'
        storage.write_table(
            table_name=jobs_set_table_name,
            table_data=self.jobs_data
        )
        jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)
        model = EmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
                  job_building_types=self.job_building_types)
        # check that the distribution of building type is the same before and
        # after adding jobs
        jobs_set.compute_variables(
            ["urbansim.job.is_in_employment_sector_1_industrial",
             "urbansim.job.is_in_employment_sector_2_industrial",
             "urbansim.job.is_in_employment_sector_1_commercial",
             "urbansim.job.is_in_employment_sector_2_commercial",
             "urbansim.job.is_in_employment_sector_1_governmental",
             "urbansim.job.is_in_employment_sector_2_governmental"],
            resources=Resources({"job_building_type": self.job_building_types}))
        result = array([
            jobs_set.get_attribute("is_in_employment_sector_1_industrial").sum(),
            jobs_set.get_attribute("is_in_employment_sector_2_industrial").sum(),
            jobs_set.get_attribute("is_in_employment_sector_1_commercial").sum(),
            jobs_set.get_attribute("is_in_employment_sector_2_commercial").sum(),
            jobs_set.get_attribute("is_in_employment_sector_1_governmental").sum(),
            jobs_set.get_attribute("is_in_employment_sector_2_governmental").sum()
        ])
        return result

    expected_results = array([3500.0/7000.0*8750.0, 900,
                              3500.0/7000.0*8750.0, 1800, 0, 300])
    self.run_stochastic_test(__file__, run_model, expected_results, 10)

    # check data types of the newly created jobs
    self.assertEqual(jobs_set.get_attribute("sector_id").dtype, int32,
                     "Error in data type of the new job set. Should be: int32, is: %s"
                     % str(jobs_set.get_attribute("sector_id").dtype))
    self.assertEqual(jobs_set.get_attribute("building_type").dtype, int8,
                     "Error in data type of the new job set. Should be: int8, is: %s"
                     % str(jobs_set.get_attribute("building_type").dtype))
def test_distribute_unplaced_jobs_model(self):
    """Places 1750 unplaced jobs of sector 15.

    gridcell      has                       expected about
    1        4000 sector 15 jobs         5000 sector 15 jobs
             1000 sector 1 jobs          1000 sector 1 jobs
    2        2000 sector 15 jobs         2500 sector 15 jobs
             1000 sector 1 jobs          1000 sector 1 jobs
    3        1000 sector 15 jobs         1250 sector 15 jobs
             1000 sector 1 jobs          1000 sector 1 jobs
    unplaced 1750 sector 15 jobs         0
    """
    # create jobs: indices 0..6999 placed sector 15, 7000..9999 placed sector 1,
    # 10000..11749 unplaced (grid_id == -1) sector 15
    storage = StorageFactory().get_storage('dict_storage')
    job_data = {
        "job_id": arange(11750) + 1,
        "sector_id": array(7000 * [15] + 3000 * [1] + 1750 * [15]),
        "grid_id": array(4000 * [1] + 2000 * [2] + 1000 * [3]
                         + 1000 * [1] + 1000 * [2] + 1000 * [3] + 1750 * [-1])
    }
    jobs_table_name = 'jobs'
    storage.write_table(table_name=jobs_table_name, table_data=job_data)
    jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

    # create gridcells
    storage = StorageFactory().get_storage('dict_storage')
    building_types_table_name = 'building_types'
    storage.write_table(
        table_name=building_types_table_name,
        table_data={"grid_id": arange(3) + 1}
    )
    gridcells = GridcellDataset(in_storage=storage, in_table_name=building_types_table_name)

    # run model
    model = DistributeUnplacedJobsModel(debuglevel=4)
    model.run(gridcells, jobs)

    # get results
    # no jobs are unplaced
    result1 = where(jobs.get_attribute("grid_id") < 0)[0]
    self.assertEqual(result1.size, 0)
    # the first 10000 jobs kept their locations
    result2 = jobs.get_attribute_by_index("grid_id", arange(10000))
    self.assertEqual(ma.allclose(result2, job_data["grid_id"][0:10000], rtol=0), True)

    # run model with filter
    # unplace first 500 jobs of sector 15
    jobs.modify_attribute(name='grid_id', data=zeros(500), index=arange(500))
    # unplace first 500 jobs of sector 1 (sector 1 starts at index 7000)
    # BUG FIX: the index must have the same length as the data;
    # arange(7000, 7501) has 501 elements for only 500 data values.
    jobs.modify_attribute(name='grid_id', data=zeros(500), index=arange(7000, 7500))
    # place only unplaced jobs of sector 1
    model.run(gridcells, jobs, agents_filter='job.sector_id == 1')

    # the 500 unplaced jobs of sector 15 should stay unplaced
    result3 = where(jobs.get_attribute("grid_id") <= 0)[0]
    self.assertEqual(result3.size, 500)
    # jobs of sector 1 are placed again
    result4 = jobs.get_attribute_by_index("grid_id", arange(7000, 7500))
    self.assertEqual((result4 <= 0).sum(), 0)