Example #1
0
    def test_my_inputs(self):
        """Compute the variable under test on a small synthetic job table
        and compare against hand-computed expected values."""
        storage = StorageFactory().get_storage('dict_storage')

        # Lookup table mapping each building type id to its home_based flag.
        storage.write_table(
            table_name='job_building_types',
            table_data={'id': array([1, 2, 3, 4]),
                        'home_based': array([1, 0, 1, 0])})

        # Ten jobs distributed over the four building types.
        storage.write_table(
            table_name='jobs',
            table_data={'job_id': arange(10) + 1,
                        'building_type': array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1])})

        job_building_types = JobBuildingTypeDataset(
            in_storage=storage, in_table_name='job_building_types')
        jobs = JobDataset(in_storage=storage, in_table_name='jobs')

        jobs.compute_variables(
            self.variable_name,
            resources=Resources({'job_building_type': job_building_types}))
        computed = jobs.get_attribute(self.variable_name)

        expected = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0])
        self.assert_(ma.allequal(computed, expected),
                     'Error in ' + self.variable_name)
    def test_my_inputs(self):
        """Exercise the variable on a four-type lookup table and ten jobs."""
        storage = StorageFactory().get_storage('dict_storage')

        # Building-type lookup: id -> home_based indicator.
        types_table = 'job_building_types'
        types_data = {
            'id': array([1, 2, 3, 4]),
            'home_based': array([1, 0, 1, 0]),
        }
        storage.write_table(table_name=types_table, table_data=types_data)

        # Jobs referencing the building types above.
        jobs_table = 'jobs'
        jobs_data = {
            'job_id': arange(10) + 1,
            'building_type': array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1]),
        }
        storage.write_table(table_name=jobs_table, table_data=jobs_data)

        job_building_types = JobBuildingTypeDataset(in_storage=storage,
                                                    in_table_name=types_table)
        jobs = JobDataset(in_storage=storage, in_table_name=jobs_table)

        resources = Resources({'job_building_type': job_building_types})
        jobs.compute_variables(self.variable_name, resources=resources)
        values = jobs.get_attribute(self.variable_name)

        should_be = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0])
        self.assert_(ma.allequal(values, should_be),
                     'Error in ' + self.variable_name)
    def test_my_inputs(self):
        """Check the computed variable against a boolean expected array."""
        storage = StorageFactory().get_storage("dict_storage")

        # Lookup table: building type id -> home_based flag.
        storage.write_table(
            table_name="job_building_types",
            table_data={
                "id": array([1, 2, 3, 4]),
                "home_based": array([0, 1, 0, 1]),
            },
        )

        # Ten jobs spread across the four building types.
        storage.write_table(
            table_name="jobs",
            table_data={
                "job_id": arange(10) + 1,
                "building_type": array([3, 3, 2, 2, 4, 2, 1, 3, 4, 1]),
            },
        )

        job_building_types = JobBuildingTypeDataset(
            in_storage=storage, in_table_name="job_building_types")
        jobs = JobDataset(in_storage=storage, in_table_name="jobs")

        jobs.compute_variables(
            self.variable_name,
            resources=Resources({"job_building_type": job_building_types}))
        values = jobs.get_attribute(self.variable_name)

        # Expected result as booleans, matching the variable's dtype.
        should_be = array([0, 0, 1, 1, 1, 1, 0, 0, 1, 0], dtype="bool8")
        self.assert_(ma.allequal(values, should_be),
                     "Error in " + self.variable_name)
Example #4
0
    def test_run_model_with_known_buildings(self):
        """Run EmploymentEventsModel for year 2006 on hand-built buildings
        and employment-event tables and verify the resulting per-sector job
        counts per building.  Each event targets either a whole parcel
        (building_id == -1) or a concrete building (parcel_id == -1).
        """
        storage = self.storage
        # Seven buildings on parcels 1-3; buildings 4 and 5 are residential.
        storage.write_table(
            table_name = 'buildings',
            table_data = {
                  'building_id':    array([1,2,3,4,5,6,7]),
                  'parcel_id':      array([1,1,2,2,2,3,3]),
                  'is_residential': array([0,0,0,1,1,0,0])
                          }
                            )

        # Five events scheduled for 2006.  NOTE(review): judging by the
        # expected totals below, negative job counts remove jobs and
        # replace_non_home_based_jobs == 1 replaces the existing total
        # rather than adding -- confirm against EmploymentEventsModel.
        storage.write_table(
            table_name = 'employment_events',
            table_data = {
           'parcel_id':                     array([2,       2,    -1,     -1,   1]),
           'building_id':                   array([-1,     -1,     6,      7,  -1]),
           'scheduled_year':                array([2006, 2006,   2006,    2006, 2006]),
           'number_of_non_home_based_jobs': array([3500, 500,    -100,     0,   100]),
           'number_of_home_based_jobs':     array([0,     20,       0,    10,    0]),
           'sector_id':                     array([1,     2,       15,     2,    1]),
           'replace_non_home_based_jobs':   array([0,     0,        0,     1,    0])
                          }
                            )

        # change in 2006
        ############
#        parcel/sector       1              2              3
#            1            +100nhb         +3500nhb        --
#            2            =0nhb           +500nhb/+20hb  +10hb
#            15             --             --           -100nhb

        dataset_pool = DatasetPool(storage=storage, package_order=['urbansim_parcel', 'urbansim'])
        job_set = JobDataset(in_storage=storage)
        # Pre-existing jobs: 6000 in building 1, 4000 in building 3,
        # 3000 in building 6.
        job_set.modify_attribute('building_id', array(6000*[1] + 4000*[3] + 3000*[6]))
        dataset_pool.add_datasets_if_not_included({'job':job_set})
        model = EmploymentEventsModel(dataset_pool=dataset_pool)

        model.run(dataset_pool.get_dataset('employment_event'), job_set, current_year=2006)
        buildings = dataset_pool.get_dataset('building')
        # Per-building job counts by sector after the events were applied.
        jobs_in_sec_1 = buildings.compute_variables(['urbansim_parcel.building.number_of_jobs_of_sector_1'],
                                                    dataset_pool=dataset_pool)
        jobs_in_sec_2 = buildings.compute_variables(['urbansim_parcel.building.number_of_jobs_of_sector_2'],
                                                    dataset_pool=dataset_pool)
        jobs_in_sec_15 = buildings.compute_variables(['urbansim_parcel.building.number_of_jobs_of_sector_15'],
                                                    dataset_pool=dataset_pool)

        self.assertEqual(jobs_in_sec_1[0:2].sum()==4100, True) # parcel 1
        self.assertEqual(jobs_in_sec_1[2] == 5500, True) # parcel 2 non-residential
        self.assertEqual(jobs_in_sec_1[5:7].sum()==1000, True) # parcel 3
        self.assertEqual(jobs_in_sec_2[0:2].sum()==1000, True) # parcel 1
        self.assertEqual(jobs_in_sec_2[2] == 1500, True) # parcel 2 non-residential
        self.assertEqual(jobs_in_sec_2[3:5].sum() == 20, True) # parcel 2 residential
        self.assertEqual(jobs_in_sec_2[5]==100, True) # parcel 3, building 6
        self.assertEqual(jobs_in_sec_2[6]==10, True) # parcel 3, building 7
        self.assertEqual(jobs_in_sec_15[0:2].sum()==1000, True) # parcel 1 (buildings 1-2)
        self.assertEqual(jobs_in_sec_15[2] == 1000, True) # parcel 2 non-residential (building 3)
        self.assertEqual(jobs_in_sec_15[5]==900, True) # parcel 3, building 6
Example #5
0
    def test_same_distribution_after_job_addition(self):
        """Add 1,750 sector-1 jobs with no gridcell distribution specified
        (so it is assumed equal) and check that the per-sector totals match
        annual_employment_control_totals.  The model only creates jobs, it
        never places them, so new jobs stay unplaced (grid_id <= 0).
        """
        storage = StorageFactory().get_storage('dict_storage')

        # Base jobs table from the fixture.
        storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
        jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')

        # Raise the non-home-based control totals so jobs must be added.
        ect_data = self.annual_employment_control_totals_data
        ect_data["total_non_home_based_employment"] = array(
            [5750, 1400, 4000, 1600])

        storage.write_table(table_name='ect_set', table_data=ect_data)
        ect_set = ControlTotalDataset(in_storage=storage,
                                      in_table_name='ect_set',
                                      what="employment")

        # Run the regional transition model.
        RegionalEmploymentTransitionModel().run(
            year=2000,
            job_set=jobs_set,
            control_totals=ect_set,
            job_building_types=self.job_building_types)

        # Job totals per large area after the run.
        areas = jobs_set.get_attribute("large_area_id")
        results = array([where(areas == area)[0].size for area in [1, 2]])
        should_be = [8150, 7600]
        self.assertEqual(
            ma.allequal(should_be, results), True,
            "Error, should_be: %s, but result: %s" % (should_be, results))

        # Per-sector-and-area counts should be close to the control totals.
        results = self.get_count_all_sectors_and_areas(jobs_set)
        should_be = [5750, 1400, 1000, 4000, 1600, 2000]
        self.assertEqual(
            ma.allclose(results, should_be, rtol=0.00001), True,
            "Error, should_be: %s, but result: %s" % (should_be, results))
    def test_distribute_unplaced_jobs_model(self):
        """DistributeUnplacedJobsModel should place every unplaced job
        (grid_id <= 0) while leaving placed jobs untouched.

        # Places 1750 jobs of sector 15
        # gridcell       has              expected about
        # 1         4000 sector 15 jobs   5000 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # 2         2000 sector 15 jobs   2500 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # 3         1000 sector 15 jobs   1250 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # unplaced  1750 sector 15 jobs   0
        """
        # create jobs
        storage = StorageFactory().get_storage("dict_storage")

        # Indices 0..6999 are sector 15 (placed), 7000..9999 are sector 1
        # (placed), 10000..11749 are sector 15 with grid_id == -1 (unplaced).
        job_data = {
            "job_id": arange(11750) + 1,
            "sector_id": array(7000 * [15] + 3000 * [1] + 1750 * [15]),
            "grid_id": array(4000 * [1] + 2000 * [2] + 1000 * [3] + 1000 * [1] + 1000 * [2] + 1000 * [3] + 1750 * [-1]),
        }

        jobs_table_name = "jobs"
        storage.write_table(table_name=jobs_table_name, table_data=job_data)

        jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

        storage = StorageFactory().get_storage("dict_storage")

        building_types_table_name = "building_types"
        storage.write_table(table_name=building_types_table_name, table_data={"grid_id": arange(3) + 1})

        gridcells = GridcellDataset(in_storage=storage, in_table_name=building_types_table_name)

        # run model
        model = DistributeUnplacedJobsModel(debuglevel=4)
        model.run(gridcells, jobs)
        # get results

        # no jobs are unplaced
        result1 = where(jobs.get_attribute("grid_id") < 0)[0]
        self.assertEqual(result1.size, 0)
        # the first 10000 jobs kept their locations
        result2 = jobs.get_attribute_by_index("grid_id", arange(10000))
        self.assertEqual(ma.allclose(result2, job_data["grid_id"][0:10000], rtol=0), True)

        # run model with filter
        # unplace first 500 jobs of sector 15
        jobs.modify_attribute(name="grid_id", data=zeros(500), index=arange(500))
        # unplace first 500 jobs of sector 1 (sector-1 jobs start at index 7000).
        # BUGFIX: the index range was arange(7000, 7501) -- 501 indices for
        # only 500 data values; both lengths must be 500.
        jobs.modify_attribute(name="grid_id", data=zeros(500), index=arange(7000, 7500))
        # place only unplaced jobs of sector 1
        model.run(gridcells, jobs, agents_filter="job.sector_id == 1")
        # 500 jobs of sector 15 should be unplaced
        result3 = where(jobs.get_attribute("grid_id") <= 0)[0]
        self.assertEqual(result3.size, 500)
        # jobs of sector 1 are placed
        result4 = jobs.get_attribute_by_index("grid_id", arange(7000, 7501))
        self.assertEqual((result4 <= 0).sum(), 0)
    def test_unplaced_jobs_after_job_addition(self):
        """The initial jobs table is adjusted to include 2000 unplaced jobs;
        after the model adds 1,750 more there must be exactly 3,750 unplaced
        jobs, because this model only creates jobs, it never places them.
        """
        # 2000 extra unplaced (grid_id == 0) industrial sector-1 jobs.
        add_jobs_data = {
            "job_id": arange(13001, 15001),
            "grid_id": array(2000 * [0]),
            "sector_id": array(2000 * [1]),
            "building_type": array(2000 * [Constants._industrial_code]),
        }
        # Raise the control totals so 1,750 jobs have to be created.
        ect_data = self.annual_employment_control_totals_data
        ect_data["total_non_home_based_employment"] = array([10750, 3000])

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='jobs_set', table_data=self.jobs_data)
        jobs_set = JobDataset(in_storage=storage, in_table_name='jobs_set')

        storage.write_table(table_name='ect_set', table_data=ect_data)
        ect_set = ControlTotalDataset(in_storage=storage,
                                      in_table_name='ect_set',
                                      what="employment")

        jobs_set.add_elements(add_jobs_data)

        # Run the model on the combined job set.
        model = EmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set,
                  job_building_types=self.job_building_types)

        # 16750 jobs in total after the run.
        self.assertEqual(ma.allequal([16750], jobs_set.size()), True, "Error")

        # Unplaced = 2000 pre-existing + 1750 newly created.
        unplaced = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
        self.assertEqual(ma.allclose(unplaced, [3750.0], rtol=0.00001), True)
Example #8
0
        def run_model2():
            """Re-run the transition model on a fresh copy of the fixture
            jobs and return the job counts per sector and building type
            (ect_set and self are taken from the enclosing scope)."""
            storage = StorageFactory().get_storage('dict_storage')

            storage.write_table(table_name='jobs_set',
                                table_data=self.jobs_data)
            jobs_set = JobDataset(in_storage=storage,
                                  in_table_name='jobs_set')

            RegionalEmploymentTransitionModel().run(
                year=2000,
                job_set=jobs_set,
                control_totals=ect_set,
                job_building_types=self.job_building_types)

            # The building-type distribution should be unchanged by the
            # subtraction; collect the six indicator sums in a fixed order.
            names = [
                "urbansim.job.is_in_employment_sector_%d_%s" % (sector, btype)
                for btype in ("industrial", "commercial", "governmental")
                for sector in (1, 2)
            ]
            jobs_set.compute_variables(
                names,
                resources=Resources(
                    {"job_building_type": self.job_building_types}))
            return array([jobs_set.get_attribute(name.split('.')[-1]).sum()
                          for name in names])
    def test_same_distribution_after_job_addition(self):
        """Add 1,750 new jobs of sector 1 without specifying a distribution across gridcells (so it is assumed equal)
        Test that the total number of jobs in each sector after the addition matches the totals specified
        in annual_employment_control_totals.
        Ensure that the number of unplaced jobs after the addition is exactly 1,750 because this model
        is not responsible for placing jobs, only for creating them.
        NOTE: unplaced jobs are indicated by grid_id <= 0
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(
            table_name = jobs_set_table_name,
            table_data = self.jobs_data,
            )
        jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

        # Raise the non-home-based control totals so the model must add jobs.
        annual_employment_control_totals_data = self.annual_employment_control_totals_data
        annual_employment_control_totals_data["total_non_home_based_employment"] = array([5750, 1400, 4000, 1600])

        ect_set_table_name = 'ect_set'
        storage.write_table(
            table_name = ect_set_table_name,
            table_data = annual_employment_control_totals_data,
            )
        ect_set = ControlTotalDataset(in_storage=storage, in_table_name=ect_set_table_name, what="employment")

        # run model
        model = RegionalEmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)

        # check the job totals per large area (8150 + 7600 = 15750 total)
        areas = jobs_set.get_attribute("large_area_id")
        results = array([0,0])
        for iarea in [0,1]:
            results[iarea] = where(areas == [1,2][iarea])[0].size
        should_be = [8150, 7600]
        self.assertEqual(ma.allequal(should_be, results), True, "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that total #jobs within each sector are close to what was set in the control_totals
        results = self.get_count_all_sectors_and_areas(jobs_set)
        should_be = [5750, 1400, 1000, 4000, 1600, 2000]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True,
                         "Error, should_be: %s, but result: %s" % (should_be, results))
        def run_model2():
            """Re-run the regional employment transition model on a fresh
            copy of the fixture jobs and return the per-sector/building-type
            job counts (ect_set and self come from the enclosing scope)."""
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name = jobs_set_table_name,
                table_data = self.jobs_data,
                )

            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = RegionalEmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables(["urbansim.job.is_in_employment_sector_1_industrial",
                                        "urbansim.job.is_in_employment_sector_2_industrial",
                                        "urbansim.job.is_in_employment_sector_1_commercial",
                                        "urbansim.job.is_in_employment_sector_2_commercial",
                                        "urbansim.job.is_in_employment_sector_1_governmental",
                                        "urbansim.job.is_in_employment_sector_2_governmental"],
                                        resources = Resources({"job_building_type":self.job_building_types}))
            # One indicator sum per (sector, building type) pair, in the
            # same order as the computed variables above.
            result = array([jobs_set.get_attribute("is_in_employment_sector_1_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_governmental").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_governmental").sum()
                            ])
            return result
Example #11
0
    def test_scaling_jobs_model(self):
        """ScalingJobsModel should place the 1750 unplaced sector-15 jobs
        proportionally to the existing sector-15 distribution.

        # Places 1750 jobs of sector 15
        # gridcell       has              expected about
        # 1         4000 sector 15 jobs   5000 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # 2         2000 sector 15 jobs   2500 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # 3         1000 sector 15 jobs   1250 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # unplaced  1750 sector 15 jobs   0
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_table_name = 'building_types'
        # 11750 jobs: indices 0..9999 are placed; indices 10000..11749 are
        # the unplaced sector-15 jobs (grid_id == -1).
        storage.write_table(
            table_name=jobs_table_name,
            table_data={
                "job_id":
                arange(11750) + 1,
                "sector_id":
                array(7000 * [15] + 3000 * [1] + 1750 * [15]),
                "grid_id":
                array(4000 * [1] + 2000 * [2] + 1000 * [3] + 1000 * [1] +
                      1000 * [2] + 1000 * [3] + 1750 * [-1])
            })
        jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

        gridcells_table_name = 'gridcells'
        storage.write_table(table_name=gridcells_table_name,
                            table_data={"grid_id": arange(3) + 1})
        gridcells = GridcellDataset(in_storage=storage,
                                    in_table_name=gridcells_table_name)

        # run model on all 1750 unplaced jobs.
        # BUGFIX: agents_index previously was arange(10001, 11750), which
        # covers only 1749 jobs and skips the first unplaced job at index
        # 10000 -- the unplaced block is indices 10000..11749.
        model = ScalingJobsModel(debuglevel=4)
        model.run(gridcells, jobs, agents_index=arange(10000, 11750))
        # get results
        gridcells.compute_variables([
            "urbansim.gridcell.number_of_jobs_of_sector_15",
            "urbansim.gridcell.number_of_jobs_of_sector_1"
        ],
                                    resources=Resources({"job": jobs}))
        # sector 1 jobs should be exactly the same
        result1 = gridcells.get_attribute("number_of_jobs_of_sector_1")
        self.assertEqual(
            ma.allclose(result1, array([1000, 1000, 1000]), rtol=0), True)
        # the distribution of sector 15 jobs should keep its shape, with
        # higher means
        result2 = gridcells.get_attribute("number_of_jobs_of_sector_15")
        self.assertEqual(
            ma.allclose(result2, array([5000, 2500, 1250]), rtol=0.05), True)
 def __init__(self):
     """Read the JobDataset from the SQL database configured in
     ``settings`` and write it back out as flt storage."""
     in_storage = StorageFactory().get_storage(
         'sql_storage',
         hostname=settings.get_db_host_name(),
         username=settings.get_db_user_name(),
         password=settings.get_db_password(),
         database_name=settings.db)
     # nchunks=5: presumably reads the jobs table in 5 chunks -- verify
     # against the JobDataset implementation.
     jobs = JobDataset(in_storage=in_storage, nchunks=5)
     print "Read and Write JobDataset."
     out_storage = StorageFactory().build_storage_for_dataset(
         type='flt_storage', storage_location=settings.dir)
     # Write the dataset under settings.dir into table settings.jobsubdir.
     ReadWriteADataset(jobs,
                       out_storage=out_storage,
                       out_table_name=settings.jobsubdir)
    def test_unplaced_jobs_after_job_addition(self):
        """The initial jobs table is now adjusted to include 2000 unplaced jobs.
        Add 1,750 new jobs and ensure that the number of unplaced jobs after the addition
        is exactly 3,750 because this model is not responsible for placing jobs, only for creating them.
        """
        # create and populate jobs table for model input
        # 2000 additional unplaced (grid_id == 0) industrial sector-1 jobs.
        add_jobs_data = {
            "job_id": arange(13001, 15001),
            "grid_id": array(2000 * [0]),
            "sector_id": array(2000 * [1]),
            "building_type": array(2000 * [Constants._industrial_code])
        }
        # Raise the non-home-based control totals so 1,750 jobs get created.
        annual_employment_control_totals_data = self.annual_employment_control_totals_data
        annual_employment_control_totals_data[
            "total_non_home_based_employment"] = array([10750, 3000])

        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(table_name=jobs_set_table_name,
                            table_data=self.jobs_data)
        jobs_set = JobDataset(in_storage=storage,
                              in_table_name=jobs_set_table_name)

        ect_set_table_name = 'ect_set'
        storage.write_table(
            table_name=ect_set_table_name,
            table_data=annual_employment_control_totals_data,
        )
        ect_set = ControlTotalDataset(in_storage=storage,
                                      in_table_name=ect_set_table_name,
                                      what="employment")

        jobs_set.add_elements(add_jobs_data)

        # run model with input databases
        model = EmploymentTransitionModel()
        model.run(year=2000,
                  job_set=jobs_set,
                  control_totals=ect_set,
                  job_building_types=self.job_building_types)

        #check that there are indeed 16750 total jobs after running the model
        results = jobs_set.size()
        should_be = [16750]
        self.assertEqual(ma.allequal(should_be, results), True, "Error")

        #check that the number of unplaced jobs is the number of new jobs created + number of unplaced jobs before running model
        results = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
        should_be = [3750.0]

        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)
Example #14
0
        def run_model():
            """Run the transition model on a fresh copy of the fixture jobs
            and return the per-sector/area counts (ect_set and self come
            from the enclosing scope)."""
            # Fresh storage so each draw starts from unmodified fixture data.
            storage = StorageFactory().get_storage('dict_storage')
            storage.write_table(table_name='jobs_set',
                                table_data=self.jobs_data)
            jobs_set = JobDataset(in_storage=storage,
                                  in_table_name='jobs_set')

            RegionalEmploymentTransitionModel().run(
                year=2000,
                job_set=jobs_set,
                control_totals=ect_set,
                job_building_types=self.job_building_types)
            # Distribution of jobs across sectors/areas after the run.
            return self.get_count_all_sectors_and_areas(jobs_set)
    def run(self, nbs, jobs, data_objects, dbcon, variables=(), coefficients=(), submodels=(), \
            debuglevel=0):
        """Estimate an employment location choice model.

        nbs          -- neighborhood dataset used as the location choice set
        jobs         -- job dataset (not used directly in this body)
        data_objects -- Resources handed through to the estimation
        dbcon        -- open DB connection; 'jobs_for_estimation' is read from it
        variables, coefficients, submodels -- equation specification pieces
        """
        t1 = time()
        l = len(variables)  # NOTE(review): unused local
        print variables, coefficients, submodels
        specification = EquationSpecification(variables=variables,
                                              coefficients=coefficients,
                                              submodels=submodels)

        #        storage = StorageFactory().get_storage(type='mysql_storage', storage_location=dbcon)

        #        specification = EquationSpecification(storage=storage)
        #        specification.load(place="employment_non_home_based_location_choice_model_specification")
        #        coefficients = Coefficients(storage=storage)
        #        coefficients.load(place="employment_commercial_location_choice_model_coefficients")

        elcm = EmploymentLocationChoiceModelCreator().get_model(
            location_set=nbs, sample_size_locations=10
        )  # choice set size (includes current location)

        ## output database for estimation results
        # NOTE(review): 'con' is opened but never used or closed below --
        # only the commented-out save_object calls referenced it.
        con = OpusDatabase(hostname=DB_settings.db_host_name,
                           username=DB_settings.db_user_name,
                           password=DB_settings.db_password,
                           database_name=This_Settings.outputdb)

        # Estimation agents are read from the 'jobs_for_estimation' table.
        estimation_set = JobDataset(in_storage=StorageFactory().get_storage(
            'sql_storage', storage_location=dbcon),
                                    in_place="jobs_for_estimation")

        result = elcm.estimate(specification,
                               agent_set=estimation_set,
                               data_objects=data_objects,
                               debuglevel=debuglevel)

        #save estimation results
        #        save_object(specification, 'employment_location_choice_model_specification', type='mysql_storage', base=con)
        #        save_object(result[0], 'employment_location_choice_model_coefficients', type='mysql_storage', base=con)

        print "Simulation done. " + str(time() - t1) + " s"
 def __init__(self,
              variables=(),
              coefficients=(),
              submodels=(),
              debuglevel=0):
     """Load neighborhoods, jobs and households, run the Paris employment
     simulation with the given specification pieces, then close the DB
     connection."""
     # neighborhoods, jobs and households are loaded from disk ('flt_storage'), other objects from mysql. Change it as you need.
     self.nbs = NeighborhoodDataset(in_storage=StorageFactory().get_storage('flt_storage', storage_location=This_Settings.dir),
             in_place=This_Settings.nbsubdir, \
             out_storage=StorageFactory().get_storage('flt_storage', storage_location = This_Settings.outputdir),
             debuglevel=debuglevel)
     self.jobs = JobDataset(in_base=This_Settings.dir,
                            in_storage=StorageFactory().get_storage(
                                'flt_storage',
                                storage_location=This_Settings.dir),
                            out_storage=StorageFactory().get_storage(
                                'flt_storage',
                                storage_location=This_Settings.outputdir),
                            in_place=This_Settings.jobsubdir,
                            debuglevel=debuglevel)
     # MySQL connection used by the simulation for non-disk objects.
     Con = OpusDatabase(hostname=DB_settings.db_host_name,
                        username=DB_settings.db_user_name,
                        password=DB_settings.db_password,
                        database_name=This_Settings.db)
     self.hhs = HouseholdDataset(
         in_base=This_Settings.dir,
         in_storage=StorageFactory().get_storage(
             'flt_storage', storage_location=This_Settings.dir),
         out_storage=StorageFactory().get_storage(
             'flt_storage', storage_location=This_Settings.outputdir),
         in_place=This_Settings.hhsubdir,
         debuglevel=debuglevel)
     self.resources = Resources({"household": self.hhs, "job": self.jobs})
     #simulate
     Paris_simulation().run(nbs=self.nbs, jobs=self.jobs, \
             data_objects=self.resources,\
             dbcon=Con, variables=variables, coefficients=coefficients, submodels=submodels, debuglevel=debuglevel)
     Con.close_connection()
    def test_same_distribution_after_job_subtraction(self):
        """Removes 1,750 sector_1 jobs, without specifying the distribution across gridcells (so it is assumed equal)
        Test that the distribution (in %) of sector 1 jobs across gridcells before and after the subtraction are
        relatively equal.

        Uses RegionalEmploymentTransitionModel and checks counts per large
        area, then (stochastically) per (sector, area) and per building type.

        NOTE(review): another method with this exact name appears later in the
        file; inside a single TestCase class the later definition would shadow
        this one so it would never run -- confirm and rename one of the two.
        """
        # In-memory storage holding the test fixtures.
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(table_name = jobs_set_table_name, table_data = self.jobs_data)
        jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

        # Control totals give the transition model its target employment counts.
        ect_set_table_name = 'ect_set'
        storage.write_table(table_name = ect_set_table_name, table_data = self.annual_employment_control_totals_data)
        ect_set = ControlTotalDataset(in_storage=storage, in_table_name=ect_set_table_name, what="employment")

        model = RegionalEmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)

        # check the totals in regions
        areas = jobs_set.get_attribute("large_area_id")
        results = array([0,0])
        for iarea in [0,1]:
            # Count jobs remaining in large areas 1 and 2 respectively.
            results[iarea] = where(areas == [1,2][iarea])[0].size
        should_be = [4250, 7000]
        self.assertEqual(ma.allequal(should_be, results), True, "Error, should_be: %s, but result: %s" % (should_be, results))

        def run_model():
            # One stochastic trial: rebuild a fresh jobs set, rerun the model,
            # and return per-(sector, area) counts.  Closes over ect_set above.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name = jobs_set_table_name,
                table_data = self.jobs_data,
                )

            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = RegionalEmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of jobs is the same before and after subtracting jobs
            results = self.get_count_all_sectors_and_areas(jobs_set)
            return results

        expected_results = array([2250.0, 1000, 1000, 3000, 2000.0, 2000])

        # Statistical comparison of repeated trials against expected counts.
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        def run_model2():
            # Second stochastic trial: verify the distribution over job
            # building types (industrial/commercial/governmental per sector).
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name = jobs_set_table_name,
                table_data = self.jobs_data,
                )

            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = RegionalEmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables(["urbansim.job.is_in_employment_sector_1_industrial",
                                        "urbansim.job.is_in_employment_sector_2_industrial",
                                        "urbansim.job.is_in_employment_sector_1_commercial",
                                        "urbansim.job.is_in_employment_sector_2_commercial",
                                        "urbansim.job.is_in_employment_sector_1_governmental",
                                        "urbansim.job.is_in_employment_sector_2_governmental"],
                                        resources = Resources({"job_building_type":self.job_building_types}))
            result = array([jobs_set.get_attribute("is_in_employment_sector_1_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_governmental").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_governmental").sum()
                            ])
            return result
        expected_results = array([3500.0/7000.0*5250.0, 900, 3500.0/7000.0*5250.0, 1800, 0, 300])
        self.run_stochastic_test(__file__, run_model2, expected_results, 20)
    def test_same_distribution_after_job_subtraction(self):
        """Remove 1,750 sector_1 jobs with no explicit gridcell distribution
        (an even spread is assumed) and verify that the percentage
        distribution of sector 1 jobs across gridcells is roughly unchanged.
        """
        dict_store = StorageFactory().get_storage('dict_storage')

        dict_store.write_table(table_name='jobs_set', table_data=self.jobs_data)
        job_dataset = JobDataset(in_storage=dict_store, in_table_name='jobs_set')

        dict_store.write_table(
            table_name='ect_set',
            table_data=self.annual_employment_control_totals_data)
        control_totals = ControlTotalDataset(in_storage=dict_store,
                                             in_table_name='ect_set',
                                             what="employment")

        # Unplace the first half of the jobs (grid_id := 0).
        half = int(job_dataset.size() / 2)
        job_dataset.modify_attribute(name="grid_id",
                                     data=zeros(half),
                                     index=arange(half))

        # Run the transition model on the prepared datasets.
        model = EmploymentTransitionModel()
        model.run(year=2000,
                  job_set=job_dataset,
                  control_totals=control_totals,
                  job_building_types=self.job_building_types)
        self.assertEqual(ma.allequal([11250], job_dataset.size()), True,
                         "Error")

        # The six computed indicator variables and their stored short names.
        type_variables = [
            "urbansim.job.is_in_employment_sector_1_industrial",
            "urbansim.job.is_in_employment_sector_2_industrial",
            "urbansim.job.is_in_employment_sector_1_commercial",
            "urbansim.job.is_in_employment_sector_2_commercial",
            "urbansim.job.is_in_employment_sector_1_governmental",
            "urbansim.job.is_in_employment_sector_2_governmental",
        ]

        def fresh_jobs_after_run():
            # Rebuild a pristine jobs set and rerun the model on it; each
            # stochastic trial starts from the same fixture data.
            store = StorageFactory().get_storage('dict_storage')
            store.write_table(table_name='jobs_set', table_data=self.jobs_data)
            jobs = JobDataset(in_storage=store, in_table_name='jobs_set')
            EmploymentTransitionModel().run(
                year=2000,
                job_set=jobs,
                control_totals=control_totals,
                job_building_types=self.job_building_types)
            return jobs

        def run_model():
            # Distribution of jobs over (sector, gridcell) after the run.
            return self.get_count_all_sectors_and_gridcells(
                fresh_jobs_after_run())

        expected_results = array([
            4000.0 / 7000.0 * 5250.0, 1000, 1000, 2000.0 / 7000.0 * 5250.0,
            1000, 1000, 1000.0 / 7000.0 * 5250.0, 1000, 1000
        ])

        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        def run_model2():
            # Distribution of jobs over (sector, building type) after the run.
            jobs = fresh_jobs_after_run()
            jobs.compute_variables(
                type_variables,
                resources=Resources(
                    {"job_building_type": self.job_building_types}))
            return array([
                jobs.get_attribute(name.split('.')[-1]).sum()
                for name in type_variables
            ])

        expected_results = array([
            3500.0 / 7000.0 * 5250.0, 900, 3500.0 / 7000.0 * 5250.0, 1800, 0,
            300
        ])
        self.run_stochastic_test(__file__, run_model2, expected_results, 20)
    def test_same_distribution_after_job_addition(self):
        """Add 1,750 new jobs of sector 1 without specifying a distribution across gridcells (so it is assumed equal)
        Test that the total number of jobs in each sector after the addition matches the totals specified
        in annual_employment_control_totals.
        Ensure that the number of unplaced jobs after the addition is exactly 1,750 because this model
        is not responsible for placing jobs, only for creating them.
        NOTE: unplaced jobs are indicated by grid_id <= 0
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(
            table_name=jobs_set_table_name,
            table_data=self.jobs_data,
        )
        jobs_set = JobDataset(in_storage=storage,
                              in_table_name=jobs_set_table_name)

        # BUG FIX: copy the shared fixture dict before overriding a column.
        # The original aliased self.annual_employment_control_totals_data and
        # mutated it in place, leaking the 8750/3000 override into any other
        # test that reads the same fixture.  A shallow copy suffices because
        # we rebind the key to a new array rather than mutating the old one.
        annual_employment_control_totals_data = dict(
            self.annual_employment_control_totals_data)
        annual_employment_control_totals_data[
            "total_non_home_based_employment"] = array([8750, 3000])

        ect_set_table_name = 'ect_set'
        storage.write_table(
            table_name=ect_set_table_name,
            table_data=annual_employment_control_totals_data,
        )
        ect_set = ControlTotalDataset(in_storage=storage,
                                      in_table_name=ect_set_table_name,
                                      what="employment")

        # run model
        model = EmploymentTransitionModel()
        model.run(year=2000,
                  job_set=jobs_set,
                  control_totals=ect_set,
                  job_building_types=self.job_building_types)

        #check that there are indeed 14750 total jobs after running the model
        results = jobs_set.size()
        should_be = [14750]
        self.assertEqual(ma.allequal(should_be, results), True, "Error")

        #check that total #jobs within each sector are close to what was set in the control_totals
        results = self.get_count_all_sectors(jobs_set)
        should_be = [8750.0, 3000, 3000]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

        #check that the number of unplaced jobs is the number of new jobs created (1750)
        results = where(jobs_set.get_attribute("grid_id") <= 0)[0].size
        should_be = [1750.0]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

        # test distribution of building type
        def run_model():
            # One stochastic trial: rebuild a fresh jobs set, rerun the model,
            # and tally jobs per (sector, building type).  Closes over ect_set.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(table_name=jobs_set_table_name,
                                table_data=self.jobs_data)
            jobs_set = JobDataset(in_storage=storage,
                                  in_table_name=jobs_set_table_name)

            model = EmploymentTransitionModel()
            model.run(year=2000,
                      job_set=jobs_set,
                      control_totals=ect_set,
                      job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables([
                "urbansim.job.is_in_employment_sector_1_industrial",
                "urbansim.job.is_in_employment_sector_2_industrial",
                "urbansim.job.is_in_employment_sector_1_commercial",
                "urbansim.job.is_in_employment_sector_2_commercial",
                "urbansim.job.is_in_employment_sector_1_governmental",
                "urbansim.job.is_in_employment_sector_2_governmental"
            ],
                                       resources=Resources({
                                           "job_building_type":
                                           self.job_building_types
                                       }))
            result = array([
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_industrial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_industrial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_commercial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_commercial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_governmental").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_governmental").sum()
            ])
            return result

        expected_results = array([
            3500.0 / 7000.0 * 8750.0, 900, 3500.0 / 7000.0 * 8750.0, 1800, 0,
            300
        ])
        #print expected_results
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        # check data types
        # Newly created jobs must inherit the dtypes of the original columns.
        self.assertEqual(
            jobs_set.get_attribute("sector_id").dtype, int32,
            "Error in data type of the new job set. Should be: int32, is: %s" %
            str(jobs_set.get_attribute("sector_id").dtype))
        self.assertEqual(
            jobs_set.get_attribute("building_type").dtype, int8,
            "Error in data type of the new job set. Should be: int8, is: %s" %
            str(jobs_set.get_attribute("building_type").dtype))
# Пример #20 (Example #20, rating: 0) — scraped-corpus delimiter, not executable code
    def test_distribute_unplaced_jobs_model(self):
        # Places 1750 jobs of sector 15
        # gridcell       has              expected about
        # 1         4000 sector 15 jobs   5000 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs 
        # 2         2000 sector 15 jobs   2500 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # 3         1000 sector 15 jobs   1250 sector 15 jobs
        #           1000 sector 1 jobs    1000 sector 1 jobs
        # unplaced  1750 sector 15 jobs   0
        
        # create jobs
        
        storage = StorageFactory().get_storage('dict_storage')

        job_data = {
            "job_id": arange(11750)+1,
            "sector_id": array(7000*[15]+3000*[1]+1750*[15]),
            "grid_id":array(4000*[1]+2000*[2]+1000*[3]+1000*[1]+1000*[2]+1000*[3]+1750*[-1])
            }
        
        jobs_table_name = 'jobs'        
        storage.write_table(table_name=jobs_table_name, table_data=job_data)
        
        jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)
        
        storage = StorageFactory().get_storage('dict_storage')

        building_types_table_name = 'building_types'        
        storage.write_table(
            table_name=building_types_table_name,
            table_data={
                "grid_id":arange(3)+1
                }
            )

        gridcells = GridcellDataset(in_storage=storage, in_table_name=building_types_table_name)

        # run model
        model = DistributeUnplacedJobsModel(debuglevel=4)
        model.run(gridcells, jobs)
        # get results

        # no jobs are unplaced
        result1 = where(jobs.get_attribute("grid_id")<0)[0]
        self.assertEqual(result1.size, 0)
        # the first 10000jobs kept their locations
        result2 = jobs.get_attribute_by_index("grid_id", arange(10000))
#            logger.log_status(result2)
        self.assertEqual(ma.allclose(result2, job_data["grid_id"][0:10000], rtol=0), True)
        
        # run model with filter
        # unplace first 500 jobs of sector 15
        jobs.modify_attribute(name='grid_id', data=zeros(500), index=arange(500))
        # unplace first 500 jobs of sector 1
        jobs.modify_attribute(name='grid_id', data=zeros(500), index=arange(7000, 7501))
        # place only unplaced jobs of sector 1
        model.run(gridcells, jobs, agents_filter='job.sector_id == 1')
        # 500 jobs of sector 15 should be unplaced
        result3 = where(jobs.get_attribute("grid_id")<=0)[0]
        self.assertEqual(result3.size, 500)
        # jobs of sector 1 are placed
        result4 = jobs.get_attribute_by_index("grid_id", arange(7000, 7501))
        self.assertEqual((result4 <= 0).sum(), 0)
    def test_agents_placed_in_appropriate_types(self):
        """Create 1000 unplaced industrial jobs and 1 commercial job. Allocate 50 commercial
        gridcells with enough space for 10 commercial jobs per gridcell. After running the
        EmploymentLocationChoiceModel, the 1 commercial job should be placed,
        but the 1000 industrial jobs should remain unplaced
        (the model is run for the commercial group only).
        """
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='job_building_types',
            table_data = {
                'id':array([2,1]),
                'name': array(['commercial', 'industrial'])
                }
            )
        job_building_types = JobBuildingTypeDataset(in_storage=storage, in_table_name='job_building_types')

        # All 1001 jobs start unplaced (grid_id == 0); the last one is
        # commercial (building_type 2), the rest industrial (1).
        storage.write_table(table_name='jobs',
            table_data = {
                'job_id': arange(1001)+1,
                'grid_id': array([0]*1001),
                'building_type': array([1]*1000 + [2])
                }
            )
        jobs = JobDataset(in_storage=storage, in_table_name='jobs')

        # 50 gridcells, each with commercial capacity for 10 jobs
        # (1000 sqft / 100 sqft-per-job).
        storage.write_table(table_name='gridcells',
            table_data = {
                'grid_id': arange(50)+1,
                'commercial_sqft': array([1000]*50),
                'commercial_sqft_per_job': array([100]*50)
                }
            )
        gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')

        # Trivial one-coefficient specification: utility proportional to sqft.
        coefficients = Coefficients(names=("dummy",), values=(0.1,))
        specification = EquationSpecification(variables=("gridcell.commercial_sqft",), coefficients=("dummy",))

        compute_resources = Resources({"job":jobs, "job_building_type": job_building_types})
        # BUG FIX: numpy.where returns a tuple of index arrays; take [0] to get
        # the flat index array (as the sibling line below already does) before
        # handing it to DatasetSubset.
        agents_index = where(jobs.get_attribute("grid_id") == 0)[0]
        unplace_jobs = DatasetSubset(jobs, agents_index)
        # Restrict the model to the unplaced *commercial* jobs only.
        agents_index = where(unplace_jobs.get_attribute("building_type") == 2)[0]
        # Pre-run counts (recomputed after the run; kept for cache warm-up /
        # debugging parity with the original test).
        gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
                                    resources=compute_resources)
        commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")

        gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
                                    resources=compute_resources)
        industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")
        # Run the location-choice model for the "commercial" group member only.
        model_group = ModelGroup(job_building_types, "name")
        elcm = EmploymentLocationChoiceModel(ModelGroupMember(model_group,"commercial"), location_set=gridcells,
               agents_grouping_attribute = "job.building_type",
               choices = "opus_core.random_choices_from_index", sample_size_locations = 30)
        elcm.run(specification, coefficients, agent_set = jobs, agents_index=agents_index, debuglevel=1)

        # Recompute counts after the run and verify placement.
        gridcells.compute_variables(["urbansim.gridcell.number_of_commercial_jobs"],
                                    resources=compute_resources)
        commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")

        gridcells.compute_variables(["urbansim.gridcell.number_of_industrial_jobs"],
                                    resources=compute_resources)
        industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

        self.assertEqual(commercial_jobs.sum() == 1,
                         True, "Error, there should only be a total of 1 commercial job")
        self.assertEqual(industrial_jobs.sum() == 0,
                         True, "Error, there should be no industrial jobs because there's no space for them")
# Пример #22 (Example #22, rating: 0) — scraped-corpus delimiter, not executable code
    location_set=zones_psrc,
    sampler="opus_core.samplers.weighted_sampler",
    sample_size_locations=10,
    choices="opus_core.random_choices_from_index",
    compute_capacity_flag=False,
    estimate_config=Resources({"weights_for_estimation_string": None}))

specification = EquationSpecification(
    variables=("urbansim.zone.number_of_jobs", "urbansim.household.income"),
    coefficients=("NOJ", "INC"))

agents_psrc.compute_variables(["urbansim.household.zone_id"],
                              resources=Resources({"gridcell":
                                                   locations_psrc}))
jobs_psrc = JobDataset(in_storage=StorageFactory().get_storage(
    'flt_storage', storage_location="/home/hana/urbansim/data/GPSRC"),
                       in_table_name="jobs")
coef, result = hlcm_psrc_zones.estimate(specification,
                                        agents_psrc,
                                        agents_index=idx,
                                        estimate_config=Resources(
                                            {"estimation_size_agents": 0.01}),
                                        data_objects={
                                            "job": jobs_psrc,
                                            "gridcell": locations_psrc
                                        },
                                        debuglevel=4)

result = hlcm_psrc_zones.run(specification,
                             coef,
                             agents_psrc,
    def test_same_distribution_after_job_addition(self):
        """Add 1,750 new jobs of sector 1 without specifying a distribution across gridcells (so it is assumed equal)
        Test that the total number of jobs in each sector after the addition matches the totals specified
        in annual_employment_control_totals.
        Ensure that the number of unplaced jobs after the addition is exactly 1,750 because this model
        is not responsible for placing jobs, only for creating them.
        NOTE: unplaced jobs are indicated by grid_id <= 0
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(
            table_name=jobs_set_table_name,
            table_data=self.jobs_data,
            )
        jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

        # BUG FIX: copy the shared fixture dict before overriding a column.
        # The original aliased self.annual_employment_control_totals_data and
        # mutated it in place, leaking the 8750/3000 override into any other
        # test that reads the same fixture.  A shallow copy suffices because
        # we rebind the key to a new array rather than mutating the old one.
        annual_employment_control_totals_data = dict(self.annual_employment_control_totals_data)
        annual_employment_control_totals_data["total_non_home_based_employment"] = array([8750, 3000])

        ect_set_table_name = 'ect_set'
        storage.write_table(
            table_name=ect_set_table_name,
            table_data=annual_employment_control_totals_data,
            )
        ect_set = ControlTotalDataset(in_storage=storage, in_table_name=ect_set_table_name, what="employment")

        # run model
        model = EmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)

        #check that there are indeed 14750 total jobs after running the model
        results = jobs_set.size()
        should_be = [14750]
        self.assertEqual(ma.allequal(should_be, results), True, "Error")

        #check that total #jobs within each sector are close to what was set in the control_totals
        results = self.get_count_all_sectors(jobs_set)
        should_be = [8750.0, 3000, 3000]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

        #check that the number of unplaced jobs is the number of new jobs created (1750)
        results = where(jobs_set.get_attribute("grid_id")<=0)[0].size
        should_be = [1750.0]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

        # test distribution of building type
        def run_model():
            # One stochastic trial: rebuild a fresh jobs set, rerun the model,
            # and tally jobs per (sector, building type).  Closes over ect_set.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name=jobs_set_table_name,
                table_data=self.jobs_data
                )
            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = EmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables(["urbansim.job.is_in_employment_sector_1_industrial",
                                        "urbansim.job.is_in_employment_sector_2_industrial",
                                        "urbansim.job.is_in_employment_sector_1_commercial",
                                        "urbansim.job.is_in_employment_sector_2_commercial",
                                        "urbansim.job.is_in_employment_sector_1_governmental",
                                        "urbansim.job.is_in_employment_sector_2_governmental"],
                                        resources = Resources({"job_building_type":self.job_building_types}))
            result = array([jobs_set.get_attribute("is_in_employment_sector_1_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_governmental").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_governmental").sum()
                            ])
            return result
        expected_results = array([3500.0/7000.0*8750.0, 900, 3500.0/7000.0*8750.0, 1800, 0, 300])
        #print expected_results
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        # check data types
        # Newly created jobs must inherit the dtypes of the original columns.
        self.assertEqual(jobs_set.get_attribute("sector_id").dtype, int32,
             "Error in data type of the new job set. Should be: int32, is: %s" % str(jobs_set.get_attribute("sector_id").dtype))
        self.assertEqual(jobs_set.get_attribute("building_type").dtype, int8,
             "Error in data type of the new job set. Should be: int8, is: %s" % str(jobs_set.get_attribute("building_type").dtype))
    def test_same_distribution_after_job_subtraction(self):
        """Removes 1,750 sector_1 jobs, without specifying the distribution across gridcells (so it is assumed equal)
        Test that the distribution (in %) of sector 1 jobs across gridcells before and after the subtraction are
        relatively equal.

        Uses EmploymentTransitionModel; first half of the jobs is unplaced
        before the run, then per-(sector, gridcell) counts and building-type
        counts are checked stochastically.

        NOTE(review): this method name also appears earlier in the file; in a
        single TestCase class this later definition shadows the earlier one --
        confirm and rename if both are meant to run.
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(table_name=jobs_set_table_name, table_data=self.jobs_data)
        jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

        # Control totals give the transition model its target employment counts.
        ect_set_table_name = 'ect_set'
        storage.write_table(table_name=ect_set_table_name, table_data=self.annual_employment_control_totals_data)
        ect_set = ControlTotalDataset(in_storage=storage, in_table_name=ect_set_table_name, what="employment")

        # unplace some jobs
        # (grid_id := 0 marks a job as having no location; first half of the set)
        jobs_set.modify_attribute(name="grid_id", data=zeros(int(jobs_set.size()/2)), index=arange(int(jobs_set.size()/2)))
        #run model with input Datasets

        model = EmploymentTransitionModel()
        model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
        results = jobs_set.size()
        should_be = [11250]
        self.assertEqual(ma.allequal(should_be, results), True, "Error")

        def run_model():
            # One stochastic trial: rebuild a fresh jobs set, rerun the model,
            # and return per-(sector, gridcell) counts.  Closes over ect_set.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name=jobs_set_table_name,
                table_data=self.jobs_data,
                )

            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = EmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of jobs is the same before and after subtracting jobs
            results = self.get_count_all_sectors_and_gridcells(jobs_set)
            return results

        expected_results = array([4000.0/7000.0*5250.0, 1000, 1000, 2000.0/7000.0*5250.0, 1000,
                                  1000, 1000.0/7000.0*5250.0, 1000, 1000])

        # Statistical comparison of repeated trials against expected counts.
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        def run_model2():
            # Second stochastic trial: verify the distribution over job
            # building types (industrial/commercial/governmental per sector).
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name=jobs_set_table_name,
                table_data=self.jobs_data,
                )

            jobs_set = JobDataset(in_storage=storage, in_table_name=jobs_set_table_name)

            model = EmploymentTransitionModel()
            model.run(year=2000, job_set=jobs_set, control_totals=ect_set, job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables(["urbansim.job.is_in_employment_sector_1_industrial",
                                        "urbansim.job.is_in_employment_sector_2_industrial",
                                        "urbansim.job.is_in_employment_sector_1_commercial",
                                        "urbansim.job.is_in_employment_sector_2_commercial",
                                        "urbansim.job.is_in_employment_sector_1_governmental",
                                        "urbansim.job.is_in_employment_sector_2_governmental"],
                                        resources = Resources({"job_building_type":self.job_building_types}))
            result = array([jobs_set.get_attribute("is_in_employment_sector_1_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_industrial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_commercial").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_1_governmental").sum(),
                            jobs_set.get_attribute("is_in_employment_sector_2_governmental").sum()
                            ])
            return result
        expected_results = array([3500.0/7000.0*5250.0, 900, 3500.0/7000.0*5250.0, 1800, 0, 300])
        self.run_stochastic_test(__file__, run_model2, expected_results, 20)
    def test_erm_correct_distribution_of_jobs_relocate(self):
        """Relocation model picks 50% of sector-2 jobs plus all unplaced
        jobs, and no placed sector-1 job.

        Fixture layout (grid_id -1 means unplaced):
        # gridcell       has              expected after relocation
        # 1         100 sector 1 jobs    100 sector 1 jobs
        #           400 sector 2 jobs    about 200 sector 2 jobs
        # 2         100 sector 1 jobs    100 sector 1 jobs
        #           200 sector 2 jobs    about 100 sector 2 jobs
        # 3         100 sector 1 jobs    100 sector 1 jobs
        #           100 sector 2 jobs    about 50 sector 2 jobs
        # unplaced   10 sector 1 jobs
        #            10 sector 2 jobs
        """
        storage = StorageFactory().get_storage("dict_storage")

        # create jobs: 300 placed sector-1, 700 placed sector-2, 20 unplaced
        job_grid_ids = array(100 * [1] + 100 * [2] + 100 * [3] + 400 * [1] + 200 * [2] + 100 * [3] + 20 * [-1])

        storage.write_table(
            table_name="jobs",
            table_data={
                "job_id": arange(1020) + 1,
                "sector_id": array(300 * [1] + 700 * [2] + 10 * [1] + 10 * [2]),
                "grid_id": job_grid_ids,
            },
        )
        jobs = JobDataset(in_storage=storage, in_table_name="jobs")

        # create gridcells
        storage.write_table(table_name="gridcells", table_data={"grid_id": arange(3) + 1})
        gridcells = GridcellDataset(in_storage=storage, in_table_name="gridcells")

        # create rate set with rate 0 for jobs of sector 1 and 0.5 for jobs of sector 2
        storage.write_table(
            table_name="rates", table_data={"sector_id": array([1, 2]), "job_relocation_probability": array([0, 0.5])}
        )
        rates = JobRelocationRateDataset(in_storage=storage, in_table_name="rates")

        # run model
        model = EmploymentRelocationModelCreator().get_model(debuglevel=0)
        hrm_resources = Resources({"annual_job_relocation_rate": rates})

        # get results from one run
        movers_indices = model.run(jobs, resources=hrm_resources)
        jobs.compute_variables(["urbansim.job.is_in_employment_sector_1"])

        # unplace chosen jobs
        compute_resources = Resources({"job": jobs, "urbansim_constant": {"industrial_code": 1, "commercial_code": 2}})
        jobs.set_values_of_one_attribute(attribute="grid_id", values=-1, index=movers_indices)
        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_jobs_of_sector_1", "urbansim.gridcell.number_of_jobs_of_sector_2"],
            resources=compute_resources,
        )

        # only the 10 unplaced sector-1 jobs should be selected
        # (rate for placed sector-1 jobs is 0, and only 10 sector-1 jobs are unplaced)
        result1 = jobs.get_attribute_by_index("is_in_employment_sector_1", movers_indices).astype(int8).sum()
        self.assertEqual(result1, 10)

        # number of sector 1 jobs should not change
        result2 = gridcells.get_attribute("number_of_jobs_of_sector_1")
        self.assertTrue(ma.allclose(result2, array([100, 100, 100]), rtol=0))

        def run_model():
            # restore original placements, rerun relocation, unplace movers
            jobs.modify_attribute(name="grid_id", data=job_grid_ids)
            indices = model.run(jobs, resources=hrm_resources)
            jobs.modify_attribute(name="grid_id", data=-1, index=indices)
            gridcells.compute_variables(["urbansim.gridcell.number_of_jobs_of_sector_2"], resources=compute_resources)
            return gridcells.get_attribute("number_of_jobs_of_sector_2")

        # distribution of sector 2 jobs should be the same; the means are halves of the original values
        should_be = array([200, 100, 50])
        self.run_stochastic_test(__file__, run_model, should_be, 10)
    def setUp(self):
        """Build an in-memory fixture: 100 gridcells, 2000 buildings,
        10000 households and 2500 commercial jobs, plus building-type
        lookup tables, all registered in a dataset pool.
        """
        ### TODO: do not redefine these constants.
        self.comc = 1
        self.indc = 3
        self.govc = 2
        self.sfhc = 4
        self.mfhc = 5

        storage = StorageFactory().get_storage('dict_storage')

        n_cells = 100
        # 100 gridcells: 100 sqft required per commercial/industrial job,
        # $500,000 improvement value for each of the three use types.
        storage.write_table(
            table_name='gridcells',
            table_data={
                "grid_id": arange(1, n_cells + 1),
                "commercial_sqft_per_job": array(n_cells * [100]),
                "industrial_sqft_per_job": array(n_cells * [100]),
                "single_family_improvement_value": array(n_cells * [500000]),
                "commercial_improvement_value": array(n_cells * [500000]),
                "industrial_improvement_value": array(n_cells * [500000]),
                },
            )
        self.gridcells = GridcellDataset(in_storage=storage, in_table_name='gridcells')

        # 2000 buildings spread evenly across the gridcells: 1000 single-family
        # homes (20 residential units each) followed by 500 commercial and 500
        # industrial buildings (2000 sqft each), all built in 1940.
        storage.write_table(
            table_name='buildings',
            table_data={
                "building_id": arange(1, 2000 + 1),
                "grid_id": array(20 * range(1, n_cells + 1), dtype=int32),
                "building_type_id": array(
                    1000 * [self.sfhc] + 500 * [self.comc] + 500 * [self.indc],
                    dtype=int8),
                "sqft": array(1000 * [0] + 1000 * [2000], dtype=int32),
                "residential_units": array(1000 * [20] + 1000 * [0], dtype=int32),
                "improvement_value": array(2000 * [50], dtype=float32),
                "year_built": array(2000 * [1940], dtype=int32),
                },
            )
        self.buildings = BuildingDataset(in_storage=storage, in_table_name='buildings')

        # 10000 households, 100 per gridcell; this initially leaves 100 vacant
        # residential units in each gridcell.
        storage.write_table(
            table_name='households',
            table_data={
                "household_id": arange(1, 10000 + 1),
                "grid_id": array(100 * range(1, n_cells + 1), dtype=int32),
                },
            )
        self.households = HouseholdDataset(in_storage=storage, in_table_name='households')

        type_ids = [self.govc, self.comc, self.indc, self.sfhc, self.mfhc]
        type_names = ["governmental", "commercial", "industrial", "single_family", "multiple_family"]
        # Lookup table mapping building types to the units they are measured in.
        storage.write_table(
            table_name='building_types',
            table_data={
                "building_type_id": array(type_ids, dtype=int8),
                "name": array(type_names),
                "units": array(["governmental_sqft", "commercial_sqft", "industrial_sqft",
                                "residential_units", "residential_units"]),
                "is_residential": array([0, 0, 0, 1, 1], dtype='?'),
                },
            )
        self.building_types = BuildingTypeDataset(in_storage=storage, in_table_name='building_types')

        # Parallel job-building-type lookup keyed by 'id'.
        storage.write_table(
            table_name='job_building_types',
            table_data={
                "id": array(type_ids, dtype=int8),
                "name": array(type_names),
                },
            )
        self.job_building_types = JobBuildingTypeDataset(in_storage=storage, in_table_name='job_building_types')

        # 2500 commercial sector-1 jobs, 25 per gridcell.
        storage.write_table(
            table_name='jobs',
            table_data={
                "job_id": arange(1, 2500 + 1),
                "grid_id": array(25 * range(1, n_cells + 1), dtype=int32),
                "sector_id": array(2500 * [1], dtype=int32),
                "building_type": array(2500 * [self.comc], dtype=int8),
                },
            )
        self.jobs = JobDataset(in_storage=storage, in_table_name='jobs')

        self.dataset_pool = DatasetPool()
        self.dataset_pool.add_datasets_if_not_included({
            "household": self.households,
            "job": self.jobs,
            "building": self.buildings,
            "building_type": self.building_types,
            "job_building_type": self.job_building_types,
            })

        self.building_categories = {'commercial': array([1000, 5000]),
                                    'industrial': array([500, 800, 1000])}
 def run(self, in_storage, out_storage, business_table="business", jobs_table="jobs", control_totals_table=None):
     """Unroll a business (establishments) table into an individual-jobs table.

     For each establishment, creates as many job records as the
     establishment's job count, copying sector, geography and building
     attributes onto every job.  Optionally trims the result to per-(zone,
     sector) employment control totals (jobs are only removed, never added)
     before writing the jobs table to out_storage.
     """
     logger.log_status("Unrolling %s table." % business_table)
     # get attributes from the establishments table
     business_dataset = BusinessDataset(in_storage=in_storage, in_table_name=business_table)
     business_sizes = business_dataset.get_attribute(self.number_of_jobs_attr).astype("int32")
     sectors = business_dataset.get_attribute("sector_id")
     tazes = business_dataset.get_attribute(self.geography_id_attr).astype("int32")
     # optional per-business attributes; an empty array means "not present"
     building_ids = array([], dtype='int32')
     if "building_id" in business_dataset.get_primary_attribute_names():
         building_ids = business_dataset.get_attribute("building_id")
     parcel_ids = array([], dtype='int32')
     if "parcel_id" in business_dataset.get_primary_attribute_names():
         parcel_ids = business_dataset.get_attribute("parcel_id")
     home_based = array([], dtype='int16')
     if "home_based" in business_dataset.get_primary_attribute_names():
         home_based = business_dataset.get_attribute("home_based")
     building_sqft = business_dataset.get_attribute(self.sqft_attr)
     # zero out negative sqft values (values already 0 are unaffected)
     building_sqft[building_sqft <= 0] = 0
     join_flags = None
     if "join_flag" in business_dataset.get_primary_attribute_names():
         join_flags = business_dataset.get_attribute("join_flag")
     impute_sqft_flag = None
     if "impute_building_sqft_flag" in business_dataset.get_primary_attribute_names():
         impute_sqft_flag = business_dataset.get_attribute("impute_building_sqft_flag")

     # initialize jobs attributes; -1 marks "not yet filled in"
     total_size = business_sizes.sum()
     jobs_data = {}
     jobs_data["sector_id"] = resize(array([-1], dtype=sectors.dtype), total_size)
     jobs_data["building_id"] = resize(array([-1], dtype=building_ids.dtype), total_size)
     jobs_data["parcel_id"] = resize(array([-1], dtype=parcel_ids.dtype), total_size)
     jobs_data[self.geography_id_attr] = resize(array([-1], dtype=tazes.dtype), total_size)
     jobs_data["building_type"] = resize(array([-1], dtype=home_based.dtype), total_size)
     # NOTE(review): resizing an *empty* array (here and below) is
     # numpy-version sensitive -- verify it yields a zero-filled array
     jobs_data["sqft"] = resize(array([], dtype=building_sqft.dtype), total_size)
     if join_flags is not None:
         jobs_data["join_flag"] = resize(array([], dtype=join_flags.dtype), total_size)
     if impute_sqft_flag is not None:
         jobs_data["impute_building_sqft_flag"] = resize(array([], dtype=impute_sqft_flag.dtype), total_size)

     # cumulative job counts give each business its slice [start:end) of the job arrays
     indices = cumsum(business_sizes)
     # iterate over establishments. For each business create the corresponding number of jobs by filling the corresponding part 
     # of the arrays
     start_index=0
     for i in range(business_dataset.size()):
         end_index = indices[i]
         jobs_data["sector_id"][start_index:end_index] = sectors[i]
         if building_ids.size > 0:
             jobs_data["building_id"][start_index:end_index] = building_ids[i]
         if parcel_ids.size > 0:
             jobs_data["parcel_id"][start_index:end_index] = parcel_ids[i]
         jobs_data[self.geography_id_attr][start_index:end_index] = tazes[i]
         if home_based.size > 0:
             jobs_data["building_type"][start_index:end_index] = home_based[i]
         if self.compute_sqft_per_job:
             # 90% of the building sqft divided evenly among the business's jobs
             jobs_data["sqft"][start_index:end_index] = round((building_sqft[i]-building_sqft[i]/10.0)/float(business_sizes[i])) # sqft per employee
         else:
             jobs_data["sqft"][start_index:end_index] = building_sqft[i]
         if join_flags is not None:
             jobs_data["join_flag"][start_index:end_index] = join_flags[i]
         if impute_sqft_flag is not None:
             jobs_data["impute_building_sqft_flag"][start_index:end_index]  = impute_sqft_flag[i]
         start_index = end_index

     jobs_data["job_id"] = arange(total_size)+1
     if self.compute_sqft_per_job:
         # clip positive per-job sqft into [minimum_sqft, maximum_sqft]; zeros stay zero
         jobs_data["sqft"] = clip(jobs_data["sqft"], 0, self.maximum_sqft)
         jobs_data["sqft"][logical_and(jobs_data["sqft"]>0, jobs_data["sqft"]<self.minimum_sqft)] = self.minimum_sqft

     # correct missing job_building_types
     wmissing_bt = where(jobs_data["building_type"]<=0)[0]
     if wmissing_bt.size > 0:
         jobs_data["building_type"][wmissing_bt] = 2 # assign non-homebased type for now. It can be re-classified in the assign_bldgs_to_jobs... script

     # create jobs table and write it out
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
             table_name="jobs",
             table_data=jobs_data
             )
     job_dataset = JobDataset(in_storage=storage)
     if self.unplace_jobs_with_non_existing_buildings:
         self.do_unplace_jobs_with_non_existing_buildings(job_dataset, out_storage)

     # Match to control totals (only eliminate jobs if control totals are smaller than the actual number of jobs). 
     if control_totals_table is not None:
         logger.log_status("Matching to control totals.")
         control_totals = ControlTotalDataset(what='employment', id_name=['zone_id', 'sector_id'], 
                                              in_table_name=control_totals_table, in_storage=in_storage)
         control_totals.load_dataset(attributes=['zone_id', 'sector_id', 'jobs'])
         zones_sectors = control_totals.get_id_attribute()
         njobs = control_totals.get_attribute('jobs')
         remove = array([], dtype='int32')
         for i in range(zones_sectors.shape[0]):
             zone, sector = zones_sectors[i,:]
             in_sector = job_dataset.get_attribute("sector_id") == sector
             in_zone_in_sector = logical_and(in_sector, job_dataset.get_attribute("zone_id") == zone)
             if in_zone_in_sector.sum() <= njobs[i]:
                 continue
             # more jobs than the control total: select the surplus for removal
             to_be_removed = in_zone_in_sector.sum() - njobs[i]
             this_removal = 0
             not_considered = ones(job_dataset.size(), dtype='bool8')
             for unit in ['parcel_id', 'building_id', None]: # first consider jobs without parcel id, then without building_id, then all
                 if unit is not None:
                     wnunit = job_dataset.get_attribute(unit) <= 0
                     eligible = logical_and(not_considered, logical_and(in_zone_in_sector, wnunit))
                     not_considered[where(wnunit)] = False
                 else:
                     eligible = logical_and(not_considered, in_zone_in_sector)
                 eligible_sum = eligible.sum()
                 if eligible_sum > 0:
                     where_eligible = where(eligible)[0]
                     if eligible_sum <= to_be_removed-this_removal:
                         # remove every eligible job in this tier
                         draw = arange(eligible_sum)
                     else:
                         # randomly sample just enough jobs to hit the target
                         draw = sample_noreplace(where_eligible, to_be_removed-this_removal, eligible_sum)
                     remove = concatenate((remove, where_eligible[draw]))
                     this_removal += draw.size
                     if this_removal >= to_be_removed:
                         break

         job_dataset.remove_elements(remove)
         logger.log_status("%s jobs removed." % remove.size)


     logger.log_status("Write jobs table.")
     job_dataset.write_dataset(out_table_name=jobs_table, out_storage=out_storage)
     logger.log_status("Created %s jobs." % job_dataset.size())
# Example #28
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable.

        Wraps every entry of data_dictionary whose key names a known
        dataset (self.datasets) into the corresponding Dataset object
        backed by a dict storage, passes all other entries through
        unchanged, then adds a 'dataset' entry -- an interaction dataset
        when `dataset` names one, otherwise the plain dataset of that
        name -- plus debugging flags, and returns the Resources object.
        """
        # Dataset classes for all plain dataset keys.  'gridcell' is absent
        # on purpose: it needs extra attributes and is built by
        # _create_gridcell_dataset below.
        dataset_classes = {
            "household": HouseholdDataset,
            "development_project": DevelopmentProjectDataset,
            "development_event": DevelopmentEventDataset,
            "neighborhood": NeighborhoodDataset,
            "job": JobDataset,
            "zone": ZoneDataset,
            "travel_data": TravelDataDataset,
            "faz": FazDataset,
            "fazdistrict": FazdistrictDataset,
            "race": RaceDataset,
            "county": CountyDataset,
            "large_area": LargeAreaDataset,
            "development_group": DevelopmentGroupDataset,
            "employment_sector_group": EmploymentSectorGroupDataset,
            "plan_type_group": PlanTypeGroupDataset,
            "building": BuildingDataset,
        }

        resources = Resources()
        for key in data_dictionary.keys():
            if key not in self.datasets:
                # not a dataset key: pass the value through unchanged
                resources.merge({key: data_dictionary[key]})
                continue

            data = data_dictionary[key]
            storage = StorageFactory().get_storage('dict_storage')

            # Add a 1..n id array when the table lacks its id column
            # (composite ids, given as a list, are left alone).
            # NOTE(review): relies on dict.keys() being indexable (Python 2).
            if self.id_names[key] not in data_dictionary[key].keys(
            ) and not isinstance(self.id_names[key], list):
                data[self.id_names[key]] = arange(
                    1,
                    len(data_dictionary[key][data_dictionary[key].keys()
                                             [0]]) + 1)  # add id array

            storage.write_table(table_name='data', table_data=data)

            if key == "gridcell":
                resources.merge({key: self._create_gridcell_dataset(storage, data)})
            elif key in dataset_classes:
                resources.merge({
                    key: dataset_classes[key](in_storage=storage, in_table_name='data')
                })

        if dataset in self.interactions:
            # Interaction datasets combine two already-created datasets.
            interaction_classes = {
                "household_x_gridcell": (HouseholdXGridcellDataset, "household", "gridcell"),
                "job_x_gridcell": (JobXGridcellDataset, "job", "gridcell"),
                "household_x_zone": (HouseholdXZoneDataset, "household", "zone"),
                "household_x_neighborhood": (HouseholdXNeighborhoodDataset, "household", "neighborhood"),
                "development_project_x_gridcell": (DevelopmentProjectXGridcellDataset, "development_project", "gridcell"),
            }
            if dataset in interaction_classes:
                interaction_class, first, second = interaction_classes[dataset]
                resources.merge({
                    "dataset": interaction_class(dataset1=resources[first],
                                                 dataset2=resources[second])
                })
        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables": '*', "debug": 4})
        return resources

    def _create_gridcell_dataset(self, storage, data):
        """Build a GridcellDataset from `storage`, adding relative_x and
        relative_y coordinates (laid out on the smallest enclosing square
        grid) when the input `data` does not already provide them."""
        gc = GridcellDataset(in_storage=storage, in_table_name='data')
        gc.get_id_attribute()
        n = int(ceil(sqrt(gc.size())))
        if "relative_x" not in data.keys():
            x = (indices((n, n)) + 1)[1].ravel()
            gc.add_attribute(x[0:gc.size()], "relative_x", metadata=1)
        if "relative_y" not in data.keys():
            y = (indices((n, n)) + 1)[0].ravel()
            gc.add_attribute(y[0:gc.size()], "relative_y", metadata=1)
        return gc
    def test_controlling_sector(self):
        """Controls for one marginal characteristic, namely sector_id.

        Runs the TransitionModel for years 2000-2002 against annual
        employment control totals and checks, for each year, that the
        total number of jobs and the per-sector job counts match the
        targets.  (The original docstring referred to households and
        age_of_head by mistake.)
        """
        annual_employment_control_totals_data = {
            "year": array([2000, 2000, 2000, 2001, 2001, 2001, 2002, 2002, 2002]),
            "sector_id": array([1, 2, 3, 1, 2, 3, 1, 2, 3]),
            "number_of_jobs": array([25013, 21513, 18227,   # 2000
                                     10055, 15003, 17999,   # 2001
                                     15678, 14001, 20432])  # 2002
            }

        # 15000 initial jobs, 5000 in each of the three sectors
        jobs_data = {
            "job_id": arange(15000) + 1,
            "grid_id": array(15000 * [1]),
            "sector_id": array(5000 * [1] + 5000 * [2] + 5000 * [3])
            }
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='job_set', table_data=jobs_data)
        job_set = JobDataset(in_storage=storage, in_table_name='job_set')

        storage.write_table(table_name='ect_set', table_data=annual_employment_control_totals_data)
        ect_set = ControlTotalDataset(in_storage=storage, in_table_name='ect_set', what='',
                                      id_name=[])

        model = TransitionModel(job_set, control_total_dataset=ect_set)

        n_sectors = 3
        for year, offset in ((2000, 0), (2001, 3), (2002, 6)):
            # each run may add and/or remove jobs to meet that year's totals
            model.run(year=year, target_attribute_name="number_of_jobs",
                      reset_dataset_attribute_value={'grid_id': -1})
            target = ect_set.get_attribute("number_of_jobs")[offset:offset + n_sectors]

            # total number of jobs should match the summed control totals
            results = job_set.size()
            should_be = [target.sum()]
            self.assertEqual(ma.allclose(should_be, results, rtol=1e-1),
                             True, "Error, should_be: %s, but result: %s" % (should_be, results))

            # per-sector counts should match the control totals exactly
            results = zeros(n_sectors, dtype=int32)
            for i in range(0, n_sectors):
                results[i] = (job_set.get_attribute('sector_id') ==
                              ect_set.get_attribute("sector_id")[offset + i]).sum()
            self.assertEqual(ma.allclose(results, target, rtol=1e-6),
                             True, "Error, should_be: %s, but result: %s" % (target, results))
# Example #30
    def test_agents_placed_in_appropriate_types(self):
        """Create 1000 unplaced industrial jobs and 1 unplaced commercial job.
        Allocate 50 commercial gridcells, each with enough space for 10
        commercial jobs.  After running the EmploymentLocationChoiceModel
        for the commercial submodel, the 1 commercial job should be placed,
        but the 1000 industrial jobs should remain unplaced.
        (The original docstring said "100 industrial jobs"; the fixture
        creates 1000.)
        """
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='job_building_types',
                            table_data={
                                'id': array([2, 1]),
                                'name': array(['commercial', 'industrial'])
                            })
        job_building_types = JobBuildingTypeDataset(
            in_storage=storage, in_table_name='job_building_types')

        # 1000 industrial jobs (building_type 1) and one commercial job
        # (building_type 2), all unplaced (grid_id == 0)
        storage.write_table(table_name='jobs',
                            table_data={
                                'job_id': arange(1001) + 1,
                                'grid_id': array([0] * 1001),
                                'building_type': array([1] * 1000 + [2])
                            })
        jobs = JobDataset(in_storage=storage, in_table_name='jobs')

        # 50 gridcells, 1000 commercial sqft each at 100 sqft/job
        # -> room for 10 commercial jobs per gridcell
        storage.write_table(table_name='gridcells',
                            table_data={
                                'grid_id': arange(50) + 1,
                                'commercial_sqft': array([1000] * 50),
                                'commercial_sqft_per_job': array([100] * 50)
                            })
        gridcells = GridcellDataset(in_storage=storage,
                                    in_table_name='gridcells')

        coefficients = Coefficients(names=("dummy", ), values=(0.1, ))
        specification = EquationSpecification(
            variables=("gridcell.commercial_sqft", ), coefficients=("dummy", ))

        compute_resources = Resources({
            "job": jobs,
            "job_building_type": job_building_types
        })
        # NOTE(review): where() returns a tuple here (no [0]) while the
        # call below extracts [0]; DatasetSubset apparently accepts the
        # tuple form -- verify before changing.
        agents_index = where(jobs.get_attribute("grid_id") == 0)
        unplace_jobs = DatasetSubset(jobs, agents_index)
        # restrict the model run to the commercial jobs among the unplaced
        agents_index = where(
            unplace_jobs.get_attribute("building_type") == 2)[0]
        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_commercial_jobs"],
            resources=compute_resources)
        commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")

        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_industrial_jobs"],
            resources=compute_resources)
        industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")
        model_group = ModelGroup(job_building_types, "name")
        elcm = EmploymentLocationChoiceModel(
            ModelGroupMember(model_group, "commercial"),
            location_set=gridcells,
            agents_grouping_attribute="job.building_type",
            choices="opus_core.random_choices_from_index",
            sample_size_locations=30)
        elcm.run(specification,
                 coefficients,
                 agent_set=jobs,
                 agents_index=agents_index,
                 debuglevel=1)

        # recompute the per-gridcell job counts after the model run
        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_commercial_jobs"],
            resources=compute_resources)
        commercial_jobs = gridcells.get_attribute("number_of_commercial_jobs")

        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_industrial_jobs"],
            resources=compute_resources)
        industrial_jobs = gridcells.get_attribute("number_of_industrial_jobs")

        self.assertEqual(
            commercial_jobs.sum(), 1,
            "Error, there should only be a total of 1 commercial job")
        self.assertEqual(
            industrial_jobs.sum(), 0,
            "Error, there should be no industrial jobs because there's no space for them"
        )
    def run(self,
            in_storage,
            out_storage,
            business_table="business",
            jobs_table="jobs",
            control_totals_table=None):
        """Unroll an establishments (business) table into one row per job.

        Every business with N employees (read from ``self.number_of_jobs_attr``)
        produces N job records that inherit the business's sector_id, geography
        id, parcel/building ids, home-based flag and a share of its building
        sqft.  The resulting jobs table is optionally trimmed down to match
        employment control totals and is then written to ``out_storage``.

        in_storage -- storage holding ``business_table`` (and the control
            totals table, if one is given).
        out_storage -- storage that the resulting ``jobs_table`` is written to.
        business_table -- name of the input establishments table.
        jobs_table -- name of the output jobs table.
        control_totals_table -- optional table keyed by (zone_id, sector_id)
            with a 'jobs' total; where the unrolled jobs exceed a total the
            surplus is removed (jobs are never added to meet a total).
        """
        logger.log_status("Unrolling %s table." % business_table)
        # get attributes from the establishments table
        business_dataset = BusinessDataset(in_storage=in_storage,
                                           in_table_name=business_table)
        business_sizes = business_dataset.get_attribute(
            self.number_of_jobs_attr).astype("int32")
        sectors = business_dataset.get_attribute("sector_id")
        tazes = business_dataset.get_attribute(
            self.geography_id_attr).astype("int32")
        # The following attributes are optional on the input table; an empty
        # array (or None for the flags) means "not present".
        building_ids = array([], dtype='int32')
        if "building_id" in business_dataset.get_primary_attribute_names():
            building_ids = business_dataset.get_attribute("building_id")
        parcel_ids = array([], dtype='int32')
        if "parcel_id" in business_dataset.get_primary_attribute_names():
            parcel_ids = business_dataset.get_attribute("parcel_id")
        home_based = array([], dtype='int16')
        if "home_based" in business_dataset.get_primary_attribute_names():
            home_based = business_dataset.get_attribute("home_based")
        building_sqft = business_dataset.get_attribute(self.sqft_attr)
        # Clamp negative sqft to zero (values that are already 0 are unchanged).
        building_sqft[building_sqft <= 0] = 0
        join_flags = None
        if "join_flag" in business_dataset.get_primary_attribute_names():
            join_flags = business_dataset.get_attribute("join_flag")
        impute_sqft_flag = None
        if "impute_building_sqft_flag" in business_dataset.get_primary_attribute_names(
        ):
            impute_sqft_flag = business_dataset.get_attribute(
                "impute_building_sqft_flag")

        # initialize jobs attributes: one entry per job, filled with -1
        # (or 0 for sqft and the flags, via resize of an empty array)
        total_size = business_sizes.sum()
        jobs_data = {}
        jobs_data["sector_id"] = resize(array([-1], dtype=sectors.dtype),
                                        total_size)
        jobs_data["building_id"] = resize(
            array([-1], dtype=building_ids.dtype), total_size)
        jobs_data["parcel_id"] = resize(array([-1], dtype=parcel_ids.dtype),
                                        total_size)
        jobs_data[self.geography_id_attr] = resize(
            array([-1], dtype=tazes.dtype), total_size)
        jobs_data["building_type"] = resize(
            array([-1], dtype=home_based.dtype), total_size)
        jobs_data["sqft"] = resize(array([], dtype=building_sqft.dtype),
                                   total_size)
        if join_flags is not None:
            jobs_data["join_flag"] = resize(array([], dtype=join_flags.dtype),
                                            total_size)
        if impute_sqft_flag is not None:
            jobs_data["impute_building_sqft_flag"] = resize(
                array([], dtype=impute_sqft_flag.dtype), total_size)

        # indices[i] is the end (exclusive) of business i's slice in the job arrays
        indices = cumsum(business_sizes)
        # iterate over establishments. For each business create the corresponding number of jobs by filling the corresponding part
        # of the arrays
        start_index = 0
        for i in range(business_dataset.size()):
            end_index = indices[i]
            jobs_data["sector_id"][start_index:end_index] = sectors[i]
            if building_ids.size > 0:
                jobs_data["building_id"][start_index:end_index] = building_ids[
                    i]
            if parcel_ids.size > 0:
                jobs_data["parcel_id"][start_index:end_index] = parcel_ids[i]
            jobs_data[self.geography_id_attr][start_index:end_index] = tazes[i]
            if home_based.size > 0:
                jobs_data["building_type"][start_index:end_index] = home_based[
                    i]
            if self.compute_sqft_per_job:
                # 90% of the building sqft split evenly among the business's jobs
                jobs_data["sqft"][start_index:end_index] = round(
                    (building_sqft[i] - building_sqft[i] / 10.0) /
                    float(business_sizes[i]))  # sqft per employee
            else:
                jobs_data["sqft"][start_index:end_index] = building_sqft[i]
            if join_flags is not None:
                jobs_data["join_flag"][start_index:end_index] = join_flags[i]
            if impute_sqft_flag is not None:
                jobs_data["impute_building_sqft_flag"][
                    start_index:end_index] = impute_sqft_flag[i]
            start_index = end_index

        jobs_data["job_id"] = arange(total_size) + 1  # 1-based job ids
        if self.compute_sqft_per_job:
            # clip per-job sqft into [minimum_sqft, maximum_sqft]; zeros stay zero
            jobs_data["sqft"] = clip(jobs_data["sqft"], 0, self.maximum_sqft)
            jobs_data["sqft"][logical_and(
                jobs_data["sqft"] > 0,
                jobs_data["sqft"] < self.minimum_sqft)] = self.minimum_sqft

        # correct missing job_building_types
        wmissing_bt = where(jobs_data["building_type"] <= 0)[0]
        if wmissing_bt.size > 0:
            jobs_data["building_type"][
                wmissing_bt] = 2  # assign non-homebased type for now. It can be re-classified in the assign_bldgs_to_jobs... script

        # create jobs table and write it out
        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(table_name="jobs", table_data=jobs_data)
        job_dataset = JobDataset(in_storage=storage)
        if self.unplace_jobs_with_non_existing_buildings:
            self.do_unplace_jobs_with_non_existing_buildings(
                job_dataset, out_storage)

        # Match to control totals (only eliminate jobs if control totals are smaller than the actual number of jobs).
        if control_totals_table is not None:
            logger.log_status("Matching to control totals.")
            control_totals = ControlTotalDataset(
                what='employment',
                id_name=['zone_id', 'sector_id'],
                in_table_name=control_totals_table,
                in_storage=in_storage)
            control_totals.load_dataset(
                attributes=['zone_id', 'sector_id', 'jobs'])
            zones_sectors = control_totals.get_id_attribute()
            njobs = control_totals.get_attribute('jobs')
            remove = array([], dtype='int32')
            for i in range(zones_sectors.shape[0]):
                zone, sector = zones_sectors[i, :]
                in_sector = job_dataset.get_attribute("sector_id") == sector
                in_zone_in_sector = logical_and(
                    in_sector,
                    job_dataset.get_attribute("zone_id") == zone)
                if in_zone_in_sector.sum() <= njobs[i]:
                    continue
                # More jobs than the control total: sample the surplus for removal.
                to_be_removed = in_zone_in_sector.sum() - njobs[i]
                this_removal = 0
                not_considered = ones(job_dataset.size(), dtype='bool8')
                for unit in [
                        'parcel_id', 'building_id', None
                ]:  # first consider jobs without parcel id, then without building_id, then all
                    if unit is not None:
                        wnunit = job_dataset.get_attribute(unit) <= 0
                        eligible = logical_and(
                            not_considered,
                            logical_and(in_zone_in_sector, wnunit))
                        # mark these jobs as already considered so a later,
                        # broader pass does not draw them a second time
                        not_considered[where(wnunit)] = False
                    else:
                        eligible = logical_and(not_considered,
                                               in_zone_in_sector)
                    eligible_sum = eligible.sum()
                    if eligible_sum > 0:
                        where_eligible = where(eligible)[0]
                        if eligible_sum <= to_be_removed - this_removal:
                            # need all of them -- no sampling required
                            draw = arange(eligible_sum)
                        else:
                            draw = sample_noreplace(
                                where_eligible, to_be_removed - this_removal,
                                eligible_sum)
                        remove = concatenate((remove, where_eligible[draw]))
                        this_removal += draw.size
                        if this_removal >= to_be_removed:
                            break

            job_dataset.remove_elements(remove)
            logger.log_status("%s jobs removed." % remove.size)

        logger.log_status("Write jobs table.")
        job_dataset.write_dataset(out_table_name=jobs_table,
                                  out_storage=out_storage)
        logger.log_status("Created %s jobs." % job_dataset.size())
        
class RunAssignBldgsToJobs(Model):
    """Model wrapper: assigns buildings to jobs, then rebuilds the
    building_sqft_per_job dataset and swaps it into the dataset pool."""

    model_name = 'Assigning Buildings To Jobs'

    def run(self, job_dataset, dataset_pool):
        """Run the building assignment for ``job_dataset`` and refresh the
        derived building_sqft_per_job dataset in ``dataset_pool``."""
        AssignBuildingsToJobs().run(job_dataset, dataset_pool)
        sqft_per_job = CreateBuildingSqftPerJobDataset()._do_run(dataset_pool)
        dataset_pool.replace_dataset('building_sqft_per_job', sqft_per_job)
        
if __name__ == '__main__':
    # Uncomment the right instorage and outstorage.
    # input/output_database_name is used only if MysqlStorage is uncommented.
    # input/output_cache is used only if FltStorage is uncommented.
    input_database_name = "psrc_2005_parcel_baseyear_change_20070613"
    output_database_name = "psrc_2005_data_workspace_hana"
    # Alternative cache locations -- uncomment the pair you need.
    # (Previously both pairs were assigned unconditionally, so the first pair
    # was silently dead code that shadowed the intent of the comment above.)
    #input_cache =  "/Users/hana/urbansim_cache/psrc/data_preparation/cache/2000"
    #output_cache = "/Users/hana/urbansim_cache/psrc/data_preparation/stepIV"
    input_cache =  "/workspace/work/psrc/unroll_jobs/unroll_jobs_from_establishments_cache"
    output_cache = "/workspace/work/psrc/unroll_jobs/output"
    #instorage = MysqlStorage().get(input_database_name)
    #outstorage = MysqlStorage().get(output_database_name)
    instorage = FltStorage().get(input_cache)
    outstorage = FltStorage().get(output_cache)
    pool_storage = instorage
    # Load the unrolled jobs and a dataset pool backed by the input cache.
    job_dataset = JobDataset(in_storage=instorage, in_table_name = "jobs")
    dataset_pool = DatasetPool(package_order=['urbansim_parcel', 'urbansim'],
                                   storage=pool_storage)
    # Fixed seed so the (random) building assignment is reproducible.
    seed(1)
    AssignBuildingsToJobs().run(job_dataset, dataset_pool, out_storage=outstorage)
    CreateBuildingSqftPerJobDataset().run(in_storage=outstorage, out_storage=outstorage)
Example #33
0
    def test_same_distribution_after_job_subtraction(self):
        """Removes 1,750 sector_1 jobs, without specifying the distribution
        across gridcells (so it is assumed equal).

        Test that the distribution (in %) of sector 1 jobs across gridcells
        before and after the subtraction are relatively equal, and that the
        building-type mix within each sector is preserved.
        """
        # self.jobs_data, self.annual_employment_control_totals_data and
        # self.job_building_types are fixtures set up on the test class
        # (outside this method) -- presumably 12,000 jobs over two large
        # areas; verify against the setUp if totals below change.
        storage = StorageFactory().get_storage('dict_storage')

        jobs_set_table_name = 'jobs_set'
        storage.write_table(table_name=jobs_set_table_name,
                            table_data=self.jobs_data)
        jobs_set = JobDataset(in_storage=storage,
                              in_table_name=jobs_set_table_name)

        ect_set_table_name = 'ect_set'
        storage.write_table(
            table_name=ect_set_table_name,
            table_data=self.annual_employment_control_totals_data)
        ect_set = ControlTotalDataset(in_storage=storage,
                                      in_table_name=ect_set_table_name,
                                      what="employment")

        model = RegionalEmploymentTransitionModel()
        model.run(year=2000,
                  job_set=jobs_set,
                  control_totals=ect_set,
                  job_building_types=self.job_building_types)

        # check the totals in regions
        areas = jobs_set.get_attribute("large_area_id")
        results = array([0, 0])
        for iarea in [0, 1]:
            # count jobs in large_area_id 1 and 2 respectively
            results[iarea] = where(areas == [1, 2][iarea])[0].size
        should_be = [4250, 7000]
        self.assertEqual(
            ma.allequal(should_be, results), True,
            "Error, should_be: %s, but result: %s" % (should_be, results))

        def run_model():
            # One stochastic replicate: rebuild the job set from the fixture
            # data, run the model, and return per-sector/area counts.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name=jobs_set_table_name,
                table_data=self.jobs_data,
            )

            jobs_set = JobDataset(in_storage=storage,
                                  in_table_name=jobs_set_table_name)

            model = RegionalEmploymentTransitionModel()
            model.run(year=2000,
                      job_set=jobs_set,
                      control_totals=ect_set,
                      job_building_types=self.job_building_types)
            # check that the distribution of jobs is the same before and after subtracting jobs
            results = self.get_count_all_sectors_and_areas(jobs_set)
            return results

        expected_results = array([2250.0, 1000, 1000, 3000, 2000.0, 2000])

        # statistical comparison over repeated runs (10 replications)
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

        def run_model2():
            # Second replicate type: same model run, but aggregate by
            # sector x building type instead of sector x area.
            storage = StorageFactory().get_storage('dict_storage')

            jobs_set_table_name = 'jobs_set'
            storage.write_table(
                table_name=jobs_set_table_name,
                table_data=self.jobs_data,
            )

            jobs_set = JobDataset(in_storage=storage,
                                  in_table_name=jobs_set_table_name)

            model = RegionalEmploymentTransitionModel()
            model.run(year=2000,
                      job_set=jobs_set,
                      control_totals=ect_set,
                      job_building_types=self.job_building_types)
            # check that the distribution of building type is the same before and after subtracting jobs
            jobs_set.compute_variables([
                "urbansim.job.is_in_employment_sector_1_industrial",
                "urbansim.job.is_in_employment_sector_2_industrial",
                "urbansim.job.is_in_employment_sector_1_commercial",
                "urbansim.job.is_in_employment_sector_2_commercial",
                "urbansim.job.is_in_employment_sector_1_governmental",
                "urbansim.job.is_in_employment_sector_2_governmental"
            ],
                                       resources=Resources({
                                           "job_building_type":
                                           self.job_building_types
                                       }))
            result = array([
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_industrial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_industrial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_commercial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_commercial").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_1_governmental").sum(),
                jobs_set.get_attribute(
                    "is_in_employment_sector_2_governmental").sum()
            ])
            return result

        # sector 1 keeps its industrial/commercial split (3500:3500 of 7000)
        # scaled to the post-subtraction total of 5250; sector 2 is unchanged
        expected_results = array([
            3500.0 / 7000.0 * 5250.0, 900, 3500.0 / 7000.0 * 5250.0, 1800, 0,
            300
        ])
        self.run_stochastic_test(__file__, run_model2, expected_results, 20)
Example #34
0
    def test_run_model_with_known_buildings(self):
        storage = self.storage
        storage.write_table(table_name='buildings',
                            table_data={
                                'building_id': array([1, 2, 3, 4, 5, 6, 7]),
                                'parcel_id': array([1, 1, 2, 2, 2, 3, 3]),
                                'is_residential': array([0, 0, 0, 1, 1, 0, 0])
                            })

        storage.write_table(table_name='employment_events',
                            table_data={
                                'parcel_id':
                                array([2, 2, -1, -1, 1]),
                                'building_id':
                                array([-1, -1, 6, 7, -1]),
                                'scheduled_year':
                                array([2006, 2006, 2006, 2006, 2006]),
                                'number_of_non_home_based_jobs':
                                array([3500, 500, -100, 0, 100]),
                                'number_of_home_based_jobs':
                                array([0, 20, 0, 10, 0]),
                                'sector_id':
                                array([1, 2, 15, 2, 1]),
                                'replace_non_home_based_jobs':
                                array([0, 0, 0, 1, 0])
                            })

        # change in 2006
        ############
        #        parcel/sector       1              2              3
        #            1            +100nhb         +3500nhb        --
        #            2            =0nhb           +500nhb/+20hb  +10hb
        #            15             --             --           -100nhb

        dataset_pool = DatasetPool(
            storage=storage, package_order=['urbansim_parcel', 'urbansim'])
        job_set = JobDataset(in_storage=storage)
        job_set.modify_attribute('building_id',
                                 array(6000 * [1] + 4000 * [3] + 3000 * [6]))
        dataset_pool.add_datasets_if_not_included({'job': job_set})
        model = EmploymentEventsModel(dataset_pool=dataset_pool)

        model.run(dataset_pool.get_dataset('employment_event'),
                  job_set,
                  current_year=2006)
        buildings = dataset_pool.get_dataset('building')
        jobs_in_sec_1 = buildings.compute_variables(
            ['urbansim_parcel.building.number_of_jobs_of_sector_1'],
            dataset_pool=dataset_pool)
        jobs_in_sec_2 = buildings.compute_variables(
            ['urbansim_parcel.building.number_of_jobs_of_sector_2'],
            dataset_pool=dataset_pool)
        jobs_in_sec_15 = buildings.compute_variables(
            ['urbansim_parcel.building.number_of_jobs_of_sector_15'],
            dataset_pool=dataset_pool)

        self.assertEqual(jobs_in_sec_1[0:2].sum() == 4100, True)  # parcel 1
        self.assertEqual(jobs_in_sec_1[2] == 5500,
                         True)  # parcel 2 non-residential
        self.assertEqual(jobs_in_sec_1[5:7].sum() == 1000, True)  # parcel 3
        self.assertEqual(jobs_in_sec_2[0:2].sum() == 1000, True)  # parcel 1
        self.assertEqual(jobs_in_sec_2[2] == 1500,
                         True)  # parcel 2 non-residential
        self.assertEqual(jobs_in_sec_2[3:5].sum() == 20,
                         True)  # parcel 2 residential
        self.assertEqual(jobs_in_sec_2[5] == 100, True)  # parcel 3, building 6
        self.assertEqual(jobs_in_sec_2[6] == 10, True)  # parcel 3, building 7
        self.assertEqual(jobs_in_sec_15[0:2].sum() == 1000,
                         True)  # parcel 2 non-residential
        self.assertEqual(jobs_in_sec_15[2] == 1000,
                         True)  # parcel 2 residential
        self.assertEqual(jobs_in_sec_15[5] == 900,
                         True)  # parcel 3, building 6