Example #1
0
    def test_scaling_households_model(self):
        """Scale 30 unplaced households into 2 buildings and check the split."""
        storage = StorageFactory().get_storage('dict_storage')

        # 100 households: 10 in building 1, 60 in building 2, 30 unplaced (-1).
        storage.write_table(
            table_name='households',
            table_data={
                "household_id": arange(100) + 1,
                "building_id": array(10 * [1] + 60 * [2] + 30 * [-1]),
                })
        households = HouseholdDataset(in_storage=storage, in_table_name='households')

        # Two buildings with ids 1 and 2.
        storage.write_table(
            table_name='buildings',
            table_data={"building_id": arange(2) + 1})
        buildings = BuildingDataset(in_storage=storage, in_table_name='buildings')

        # Place the last 30 households (indices 70..99).
        model = ScalingAgentsModel(debuglevel=4)
        model.run(buildings, households, agents_index=arange(70, 100))

        # Every household should now reside in a valid building.
        self.assertEqual((households['building_id'] > 0).all(), True)

        buildings.compute_variables(
            ["urbansim_parcel.building.number_of_households"],
            resources=Resources({"household": households}))
        counts = buildings["number_of_households"]
        self.assertEqual(counts.sum(), 100)

        # Increments over the initial occupancy of [10, 60].
        increments = counts - array([10, 60])
        # Building 2 starts with six times the occupants of building 1, so it
        # should absorb at least twice as many of the newcomers.
        self.assertEqual(increments[1] > 2 * increments[0], True)
Example #2
0
    def test_scaling_households_model_with_weights(self):
        """With weights, heavier-weighted buildings receive more scaled agents."""
        storage = StorageFactory().get_storage('dict_storage')

        # 100 households: 10 in building 1, 50 in building 2, 10 in building 3,
        # and 30 unplaced (-1).
        storage.write_table(
            table_name='households',
            table_data={
                "household_id": arange(100) + 1,
                "building_id": array(10 * [1] + 50 * [2] + 10 * [3] + 30 * [-1]),
                })
        households = HouseholdDataset(in_storage=storage, in_table_name='households')

        # Three buildings with ids 1..3.
        storage.write_table(
            table_name='buildings',
            table_data={"building_id": arange(3) + 1})
        buildings = BuildingDataset(in_storage=storage, in_table_name='buildings')

        # Building 1 gets ten times the weight of building 2; building 3 gets none.
        model = ScalingAgentsModel(
            weights="10*(building.building_id == 1) + (building.building_id == 2)",
            debuglevel=4)
        model.run(buildings, households, agents_index=arange(70, 100))

        # Every household should now reside in a valid building.
        self.assertEqual((households['building_id'] > 0).all(), True)

        buildings.compute_variables(
            ["urbansim_parcel.building.number_of_households"],
            resources=Resources({"household": households}))
        counts = buildings["number_of_households"]
        self.assertEqual(counts.sum(), 100)

        # Increments over the initial occupancy of [10, 50, 10].
        increments = counts - array([10, 50, 10])
        self.assertEqual(increments[2], 0)  # zero weight => no new households
        # Building 1 carries the larger weight, so it should gain more than 2.
        self.assertEqual(increments[1] < increments[0], True)
    def test_my_inputs(self):
        """The variable should flag buildings whose type name is 'commercial'."""
        storage = StorageFactory().get_storage('dict_storage')

        # Two building types; only id 2 is named 'commercial'.
        storage.write_table(
            table_name='building_types',
            table_data={
                'building_type_id': array([0, 2]),
                'name': array(['foo', 'commercial']),
                })

        # Buildings 1 and 3 are of the commercial type, building 2 is not.
        storage.write_table(
            table_name='buildings',
            table_data={
                'building_id': array([1, 2, 3]),
                'building_type_id': array([2, 0, 2]),
                })

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name='building_types')
        buildings = BuildingDataset(in_storage=storage, in_table_name='buildings')

        buildings.compute_variables(self.variable_name,
                                    resources=Resources({'building_type': building_types}))

        actual = buildings.get_attribute(self.variable_name)
        expected = array([1, 0, 1])

        self.assert_(ma.allequal(actual, expected),
            'Error in ' + self.variable_name)
Example #4
0
    def test_my_inputs(self):
        """Commercial-type buildings map to 1, all other buildings to 0."""
        storage = StorageFactory().get_storage('dict_storage')

        types_table = 'building_types'
        storage.write_table(
            table_name=types_table,
            table_data={'building_type_id': array([0, 2]),
                        'name': array(['foo', 'commercial'])})

        bldg_table = 'buildings'
        storage.write_table(
            table_name=bldg_table,
            table_data={'building_id': array([1, 2, 3]),
                        'building_type_id': array([2, 0, 2])})

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name=types_table)
        buildings = BuildingDataset(in_storage=storage, in_table_name=bldg_table)

        buildings.compute_variables(self.variable_name,
                                    resources=Resources({'building_type': building_types}))

        values = buildings.get_attribute(self.variable_name)
        # Buildings 1 and 3 have the commercial type id (2).
        should_be = array([1, 0, 1])

        self.assert_(ma.allequal(values, should_be),
            'Error in ' + self.variable_name)
 def do_unplace_jobs_with_non_existing_buildings(self, jobs, in_storage):
     """Unplace (building_id = -1) jobs whose building_id has no match in the
     buildings table; parcel_id and zone_id are left untouched."""
     buildings = BuildingDataset(in_storage=in_storage)
     id_attr = buildings.get_id_name()[0]
     job_buildings = jobs.get_attribute(id_attr)
     # Only jobs with a positive building id claim to be placed at all.
     placed_idx = where(job_buildings > 0)[0]
     # try_get_id_index returns a negative index for ids not in the dataset.
     match_idx = buildings.try_get_id_index(job_buildings[placed_idx])
     logger.log_status("%s jobs have non-existing locations and are unplaced from buildings (parcel_id and zone_id are not affected)." % where(match_idx < 0)[0].size)
     jobs.modify_attribute(name="building_id", data=-1,
                           index=placed_idx[match_idx < 0])
    def test_my_inputs(self):
        """Variable picks the quantity named by each building type's 'units' column."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='building_types',
            table_data={
                'building_type_id': array([1, 2]),
                'name': array(['residential', 'commercial']),
                'units': array(['residential_units', 'commercial_sqft']),
                })

        storage.write_table(
            table_name='buildings',
            table_data={
                'building_id': arange(6) + 1,
                'building_type_id': array([1, 2, 1, 2, 1, 1]),
                'sqft': array([100, 350, 1000, 0, 430, 95]),
                'residential_units': array([300, 0, 100, 0, 1000, 600]),
                })

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name='building_types')
        buildings = BuildingDataset(in_storage=storage, in_table_name='buildings')

        buildings.compute_variables(self.variable_name,
                                    resources=Resources({'building_type': building_types}))

        # Residential buildings report residential_units; commercial ones sqft.
        should_be = array([300, 350, 100, 0, 1000, 600])
        values = buildings.get_attribute(self.variable_name)

        self.assert_(ma.allequal(values, should_be),
            'Error in ' + self.variable_name)
 def do_unplace_jobs_with_non_existing_buildings(self, jobs, in_storage):
     """Reset building_id to -1 for jobs referencing buildings that do not
     exist; parcel_id and zone_id are not modified."""
     buildings = BuildingDataset(in_storage=in_storage)
     building_ids = jobs.get_attribute(buildings.get_id_name()[0])
     # Candidate jobs: those that currently point at some building (> 0).
     candidates = where(building_ids > 0)[0]
     # Negative entries mark ids absent from the buildings dataset.
     lookup = buildings.try_get_id_index(building_ids[candidates])
     missing = lookup < 0
     logger.log_status(
         "%s jobs have non-existing locations and are unplaced from buildings (parcel_id and zone_id are not affected)."
         % where(lookup < 0)[0].size)
     jobs.modify_attribute(name="building_id", data=-1, index=candidates[missing])
Example #8
0
    def test_my_inputs(self):
        """Category variables bin each building's size (units or sqft) per type."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='building_types',
            table_data={
                'building_type_id': array([1, 2]),
                'name': array(['residential', 'commercial']),
                'units': array(['residential_units', 'commercial_sqft']),
                })

        storage.write_table(
            table_name='buildings',
            table_data={
                'building_id': arange(7) + 1,
                'building_type_id': array([1, 2, 1, 2, 1, 1, 2]),
                'sqft': array([100, 350, 1000, 0, 430, 95, 750]),
                'residential_units': array([300, 0, 100, 0, 1300, 600, 10]),
                })

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name='building_types')
        # Breakpoints defining the size categories for each building kind.
        buildings = BuildingDataset(
            in_storage=storage, in_table_name='buildings',
            resources=Resources({
                'building_categories': {
                    'residential': array([200, 500, 1200]),
                    'commercial': array([200, 500]),
                    }
                }))

        variable_names = ['%s_%s' % (self.variable_name_prefix, type)
                          for type in ['commercial', 'residential']]
        buildings.compute_variables(variable_names,
                                    resources=Resources({'building_type': building_types}))

        expected_residential = array([2, 0, 1, 0, 4, 3, 0])
        expected_commercial = array([0, 2, 0, 1, 0, 0, 3])
        actual_commercial = buildings.get_attribute(variable_names[0])
        actual_residential = buildings.get_attribute(variable_names[1])

        self.assert_(ma.allequal(actual_commercial, expected_commercial),
            'Error in ' + variable_names[0])
        self.assert_(ma.allequal(actual_residential, expected_residential),
            'Error in ' + variable_names[1])
Example #9
0
    def test_my_inputs(self):
        """Buildings whose building_type_name is 'commercial' map to 1, others to 0."""
        storage = StorageFactory().get_storage("dict_storage")

        types_table = "building_types"
        storage.write_table(
            table_name=types_table,
            table_data={"building_type_id": array([0, 2]),
                        "building_type_name": array(["foo", "commercial"])})

        bldgs_table = "buildings"
        storage.write_table(
            table_name=bldgs_table,
            table_data={"building_id": array([1, 2, 3]),
                        "building_type_id": array([2, 0, 2])})

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name=types_table)
        buildings = BuildingDataset(in_storage=storage, in_table_name=bldgs_table)

        buildings.compute_variables(self.variable_name, resources=Resources({"building_type": building_types}))

        actual = buildings.get_attribute(self.variable_name)
        # Buildings 1 and 3 carry the commercial type id (2).
        expected = array([1, 0, 1])

        self.assert_(ma.allequal(actual, expected), "Error in " + self.variable_name)
Example #10
0
    def test_my_inputs(self):
        """Size-category variables bin residential units and commercial sqft."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='building_types',
            table_data={
                'building_type_id': array([1, 2]),
                'name': array(['residential', 'commercial']),
                'units': array(['residential_units', 'commercial_sqft']),
                })

        storage.write_table(
            table_name='buildings',
            table_data={
                'building_id': arange(7) + 1,
                'building_type_id': array([1, 2, 1, 2, 1, 1, 2]),
                'sqft': array([100, 350, 1000, 0, 430, 95, 750]),
                'residential_units': array([300, 0, 100, 0, 1300, 600, 10]),
                })

        building_types = BuildingTypeDataset(in_storage=storage,
                                             in_table_name='building_types')
        # Per-kind breakpoints that define the size categories.
        buildings = BuildingDataset(in_storage=storage,
                                    in_table_name='buildings',
                                    resources=Resources({
                                        'building_categories': {
                                            'residential': array([200, 500, 1200]),
                                            'commercial': array([200, 500]),
                                            }
                                        }))

        variable_names = []
        for type in ['commercial', 'residential']:
            variable_names.append('%s_%s' % (self.variable_name_prefix, type))
        buildings.compute_variables(variable_names,
                                    resources=Resources({'building_type': building_types}))

        should_be_residential = array([2, 0, 1, 0, 4, 3, 0])
        should_be_commercial = array([0, 2, 0, 1, 0, 0, 3])
        values_commercial = buildings.get_attribute(variable_names[0])
        values_residential = buildings.get_attribute(variable_names[1])

        self.assert_(ma.allequal(values_commercial, should_be_commercial),
                     'Error in ' + variable_names[0])
        self.assert_(ma.allequal(values_residential, should_be_residential),
                     'Error in ' + variable_names[1])
Example #11
0
    def run(self):
        """Run the real-estate transition model zone by zone.

        For each parcel zone: copy that zone's buildings into a temporary
        dataset, let RealEstateTransitionModel sample additional buildings to
        meet target vacancy rates, append the sampled buildings to the global
        building set, and then assign each new building to a parcel in the
        zone that still has zoning capacity for its type.  New buildings that
        could not be placed on any parcel are removed again at the end of the
        zone iteration.
        """
        dataset_pool = SessionConfiguration().get_dataset_pool()
        current_year = SimulationState().get_current_time()
        building_set = dataset_pool.get_dataset('building')
        parcel_set = dataset_pool.get_dataset('parcel')
        proposal_set = dataset_pool.get_dataset('proposed_development_event')
        # NOTE(review): `parcels` and the proposed_* arrays below are only
        # referenced by the commented-out logging inside the loop.
        parcels = proposal_set.get_attribute('parcel_id')
        amount = proposal_set.get_attribute('amount')
        year = proposal_set.get_attribute('year')
        # Only proposals scheduled for the simulated year are considered.
        proposal_in_current_year = (year==current_year)
        building_type_id = proposal_set.get_attribute('building_type_id')
        # Proposed units split by type id (1, 2, 3 here; presumably
        # single-family / townhome / multi-family per the variable names --
        # TODO confirm against the building_types table).
        proposed_sf_units = amount * (building_type_id == 1)*proposal_in_current_year
        proposed_townhome_units = amount * (building_type_id == 2)*proposal_in_current_year
        proposed_mf_units = amount * (building_type_id == 3)*proposal_in_current_year
        proposal_zone = proposal_in_current_year * proposal_set.compute_variables('_proposal_zone = proposed_development_event.disaggregate(parcel.zone_id)')
        parcel_zone = parcel_set.get_attribute('zone_id')
        zone_ids = unique(parcel_zone)
        in_zone = None
        for zone_id in zone_ids:
            # proposals_in_zone = where(proposal_zone==zone_id)[0]
            # logger.log_status("proposals_in_zone %s: %s" % (zone_id,proposals_in_zone.size))
            # if proposals_in_zone.size > 0:
                # logger.log_status("sf units in zone %s: %s" % (zone_id,proposed_sf_units[proposals_in_zone].sum()))
                # logger.log_status("townhome units in zone %s: %s" % (zone_id,proposed_townhome_units[proposals_in_zone].sum()))
                # logger.log_status("mf units in zone %s: %s" % (zone_id,proposed_mf_units[proposals_in_zone].sum()))
                ######Need to come back and look at the treatment of proposed projects.  But this won't affect calibration/validation, so proceed for now.
            # Drop computed attributes from the previous zone iteration so the
            # zone-dependent expressions below are re-evaluated, not cached.
            if in_zone is not None:
                building_set.delete_computed_attributes()
                parcel_set.delete_computed_attributes()
            in_zone = building_set.compute_variables('_in_zone = building.disaggregate(parcel.zone_id)==%s'% (zone_id))
            idx_in_zone = where(in_zone)[0]
            # Remember the highest existing id so buildings added later in this
            # iteration can be identified by id > max_building_id.
            max_building_id = (building_set.get_attribute('building_id')).max()
            # Copy this zone's buildings (all known attributes) into an
            # in-memory dataset for the transition model.
            attribs = building_set.get_known_attribute_names()
            table_data={}
            for name in attribs:
                table_data["%s"%(name)]=building_set.get_attribute("%s" %(name))[idx_in_zone]
            storage = StorageFactory().get_storage('dict_storage')
            table_name = 'buildings_zone'
            storage.write_table(
                table_name=table_name,
                table_data=table_data
                )
            buildings_zone = BuildingDataset(in_storage=storage, in_table_name=table_name)
            if buildings_zone.size() > 0:
                # NOTE(review): 'target_vancy_dataset' spelling matches the
                # call site expectation -- confirm against the model's keyword
                # before "fixing" it.
                dptm = RealEstateTransitionModel(target_vancy_dataset=dataset_pool.get_dataset('target_vacancy'))
                # Sample buildings from the full building set (filtered to the
                # recent vintages per type in sample_filter) until the zone's
                # target vacancy is met.  Sampled buildings start unplaced
                # (parcel_id reset to -1).
                results, index = dptm.run(realestate_dataset = buildings_zone,
                                   year = current_year,
                                   occupied_spaces_variable = 'occupied_spaces',
                                   total_spaces_variable = 'total_spaces',
                                   target_attribute_name = 'target_vacancy_rate',
                                   sample_from_dataset = dataset_pool.get_dataset('building'),
                                   dataset_pool=dataset_pool,
                                   append_to_realestate_dataset = False,
                                   reset_attribute_value={'parcel_id':-1},
                                   sample_filter="(building.building_type_id==1)*(building.year_built>1989) +  (building.building_type_id==3)*(building.year_built>1979) +  (building.building_type_id==2)*(building.year_built>1989)+ (building.building_type_id>3)*(building.building_type_id<12)"
                                   )
                #This is where, now that we know the demand for sqft, I'd want to insert the appropriate amount of permitted/proposed projects.
                if results is not None:
                    results.modify_attribute('year_built', array(index.size*[current_year]))
                    # Append the sampled buildings to the global building set,
                    # keeping only attributes it already knows about.
                    attribs2 = results.get_known_attribute_names()
                    table_data2={}
                    for name in attribs2:
                        if name in attribs:
                            table_data2["%s"%(name)]=results.get_attribute("%s" %(name))
                    building_set.add_elements(data=table_data2, require_all_attributes=False, change_ids_if_not_unique=True)

                    # Classify the freshly added buildings (id > max_building_id)
                    # by type for placement below.
                    index_new_sf_units = where(logical_and(building_set['building_id']>max_building_id, building_set['building_type_id']==1))[0]
                    index_new_mf_units = where(logical_and(building_set['building_id']>max_building_id, (building_set['building_type_id']==2)+(building_set['building_type_id']==3)))[0]
                    index_new_industrial_units = where(logical_and(building_set['building_id']>max_building_id, (building_set['building_type_id']==6)+(building_set['building_type_id']==7)))[0]
                    index_new_commercial_units = where(logical_and(building_set['building_id']>max_building_id, (building_set['building_type_id']==9)+(building_set['building_type_id']==8)))[0]
                    if index_new_sf_units.size > 0:
                        # Place each new single-family building on a random
                        # parcel that still has lot capacity under sf zoning.
                        for building in index_new_sf_units:
                            has_available_sf_capacity = parcel_set.compute_variables('_has_available_sf_capacity = (parcel.zone_id==%s) * parcel.disaggregate(zoning.allow_sf==1) * (((safe_array_divide(parcel.parcel_sqft,parcel.disaggregate(zoning.min_lot_size)).round().astype(int32)) - (parcel.number_of_agents(building)))>0)'%(zone_id))
                            idx_has_available_sf_capacity = where(has_available_sf_capacity)[0]
                            if idx_has_available_sf_capacity.size < 1:
                                logger.log_status("No more single-family capacity remaining in this zone")
                                break
                            parcel_ids_available_sf_capacity=(parcel_set.get_attribute('parcel_id'))[idx_has_available_sf_capacity]
                            shuffle(parcel_ids_available_sf_capacity)
                            parcel_id_to_assign = parcel_ids_available_sf_capacity[:1]
                            building_set.modify_attribute('parcel_id', parcel_id_to_assign, building)
                    if index_new_commercial_units.size > 0:
                        # Place each new commercial building on a random parcel
                        # with enough remaining non-residential sqft capacity.
                        for building in index_new_commercial_units:
                            available_commercial_capacity = parcel_set.compute_variables('_available_commercial_capacity = (parcel.zone_id==%s) * parcel.disaggregate(zoning.allow_comm==1) * clip_to_zero((((parcel.parcel_sqft)*parcel.disaggregate(zoning.max_far)).round().astype(int32)-(parcel.aggregate(1000*building.residential_units))) - (parcel.aggregate(building.non_residential_sqft)))'%(zone_id))
                            ####update the capacity calcs to account for sqft per unit of hotel/resort units
                            building_sqft = building_set['non_residential_sqft'][building]
                            idx_building_would_fit = where(available_commercial_capacity>=building_sqft)[0]
                            if idx_building_would_fit.size < 1:
                                logger.log_status("No more commercial capacity remaining in this zone")
                                break
                            parcel_ids_with_enough_capacity = (parcel_set.get_attribute('parcel_id'))[idx_building_would_fit]
                            shuffle(parcel_ids_with_enough_capacity) #replace with code involving random/uniform/cumprob/searchsorted etc...  I think it would be faster
                            parcel_id_to_assign = parcel_ids_with_enough_capacity[:1]
                            building_set.modify_attribute('parcel_id', parcel_id_to_assign, building)
                    if index_new_industrial_units.size > 0:
                        # Same placement scheme for industrial buildings.
                        for building in index_new_industrial_units:
                            available_industrial_capacity = parcel_set.compute_variables('_available_industrial_capacity = (parcel.zone_id==%s) * parcel.disaggregate(zoning.allow_indust==1) * clip_to_zero((((parcel.parcel_sqft)*parcel.disaggregate(zoning.max_far)).round().astype(int32)-(parcel.aggregate(1000*building.residential_units))) - (parcel.aggregate(building.non_residential_sqft)))'%(zone_id))
                            building_sqft = building_set['non_residential_sqft'][building]
                            idx_building_would_fit = where(available_industrial_capacity>=building_sqft)[0]
                            if idx_building_would_fit.size < 1:
                                logger.log_status("No more industrial capacity remaining in this zone")
                                break
                            parcel_ids_with_enough_capacity = (parcel_set.get_attribute('parcel_id'))[idx_building_would_fit]
                            shuffle(parcel_ids_with_enough_capacity) 
                            parcel_id_to_assign = parcel_ids_with_enough_capacity[:1]
                            building_set.modify_attribute('parcel_id', parcel_id_to_assign, building)
                    if index_new_mf_units.size > 0:
                        # Multi-family placement compares residential units
                        # against remaining unit capacity.
                        for building in index_new_mf_units:
                            available_mf_capacity = parcel_set.compute_variables('_available_mf_capacity = (parcel.zone_id==%s) * parcel.disaggregate(zoning.allow_mf==1) * clip_to_zero(((((parcel.parcel_sqft)*parcel.disaggregate(zoning.max_far)).round().astype(int32)) - (parcel.aggregate(building.non_residential_sqft)))/1000 - (parcel.aggregate(building.residential_units)))'%(zone_id))
                            building_resunits = building_set['residential_units'][building]
                            idx_building_would_fit = where(available_mf_capacity>=building_resunits)[0]
                            if idx_building_would_fit.size < 1:
                                logger.log_status("No more multifamily capacity remaining in this zone")
                                break
                            parcel_ids_with_enough_capacity = (parcel_set.get_attribute('parcel_id'))[idx_building_would_fit] 
                            shuffle(parcel_ids_with_enough_capacity)
                            parcel_id_to_assign = parcel_ids_with_enough_capacity[:1]
                            building_set.modify_attribute('parcel_id', parcel_id_to_assign, building)
                    # Any new building still without a parcel (parcel_id <= 0)
                    # could not be placed and is removed again.
                    index_unplaced_buildings = where(logical_and(building_set['building_id']>max_building_id, building_set['parcel_id']<=0))[0]
                    logger.log_status("Number of unplaced buildings to be removed from zone%s: %s" % (zone_id,index_unplaced_buildings.size))
                    building_set.remove_elements(index_unplaced_buildings)
    def setUp( self ):
        """Build the shared in-memory fixture:

        - 100 gridcells with 100 sqft/job for commercial and industrial space
          and $500,000 improvement values;
        - 2000 buildings spread evenly over the gridcells: 1000 single-family
          buildings with 20 residential units each, 500 commercial and 500
          industrial buildings with 2000 sqft each, all built in 1940;
        - 10000 households (100 per gridcell) and 2500 commercial jobs
          (25 per gridcell);
        - building-type and job-building-type lookup tables;
        - a DatasetPool exposing all datasets, plus size-category breakpoints
          in self.building_categories.
        """

        ### TODO: do not redefine these constants.
        # Building-type ids; names are declared in the building_types table below.
        self.comc = 1  # commercial
        self.indc = 3  # industrial
        self.govc = 2  # governmental
        self.sfhc = 4  # single_family
        self.mfhc = 5  # multiple_family

        storage = StorageFactory().get_storage('dict_storage')

        gridcells_table_name = 'gridcells'
#            create 100 gridcells, each with 200 residential units and space for 100 commercial jobs,
#            100 industrial jobs, and residential, industrial, and commercial value at $500,000 each
        storage.write_table(
            table_name=gridcells_table_name,
            table_data={
                "grid_id": arange( 1, 100+1 ),
                "commercial_sqft_per_job":array( 100*[100] ),
                "industrial_sqft_per_job":array( 100*[100] ),
                "single_family_improvement_value":array( 100*[500000] ),
                "commercial_improvement_value":array( 100*[500000] ),
                "industrial_improvement_value":array( 100*[500000] )
                }
            )
        self.gridcells = GridcellDataset(in_storage=storage, in_table_name=gridcells_table_name)

        buildings_table_name = 'buildings'
#            2000 buildings (1000 with 20 residential units each, 500 with 20 commercial job and 500 with 20 industrial job each)
        storage.write_table(
            table_name=buildings_table_name,
            table_data={
                "building_id":arange( 1, 2000+1 ), # 2000 buildings
                "grid_id":array( 20*range( 1, 100+1 ), dtype=int32 ), # spread evenly across 100 gridcells
                "building_type_id":array(1000*[self.sfhc] +
                                         500*[self.comc] +
                                         500*[self.indc], dtype=int8),
                # 2000 sqft per non-residential building = 20 jobs at 100 sqft/job.
                "sqft": array(1000*[0] +
                              500*[2000] +
                              500*[2000], dtype=int32),
                "residential_units": array(1000*[20] +
                                           500* [0] +
                                           500* [0], dtype=int32),
                "improvement_value": array(1000*[50] +
                                           500* [50] +
                                           500* [50], dtype=float32),
                "year_built": array(1000*[1940] +
                                    500* [1940] +
                                    500* [1940], dtype=int32)
                }
            )
        self.buildings = BuildingDataset(in_storage=storage, in_table_name=buildings_table_name)

        households_table_name = 'households'
#            create 10000 households, 100 in each of the 100 gridcells.
#            there will initially be 100 vacant residential units in each gridcell then.
        storage.write_table(
            table_name=households_table_name,
            table_data={
                "household_id":arange( 1, 10000+1 ),
                "grid_id":array( 100*range( 1, 100+1 ), dtype=int32 )
                }
            )
        self.households = HouseholdDataset(in_storage=storage, in_table_name=households_table_name)

        building_types_table_name = 'building_types'
        storage.write_table(
            table_name=building_types_table_name,
            table_data={
                "building_type_id":array([self.govc,self.comc,self.indc, self.sfhc, self.mfhc], dtype=int8),
                "name": array(["governmental", "commercial", "industrial", "single_family","multiple_family"]),
                "units": array(["governmental_sqft", "commercial_sqft", "industrial_sqft", "residential_units", "residential_units"]),
                "is_residential": array([0,0,0,1,1], dtype='?')
                }
            )
        self.building_types = BuildingTypeDataset(in_storage=storage, in_table_name=building_types_table_name)

        job_building_types_table_name = 'job_building_types'
        storage.write_table(
            table_name=job_building_types_table_name,
            table_data={
                "id":array([self.govc,self.comc,self.indc, self.sfhc, self.mfhc], dtype=int8),
                "name": array(["governmental", "commercial", "industrial", "single_family","multiple_family"])
                }
            )
        self.job_building_types = JobBuildingTypeDataset(in_storage=storage, in_table_name=job_building_types_table_name)

        jobs_table_name = 'jobs'
#            create 2500 commercial jobs and distribute them equally across the 100 gridcells,
#            25 commercial buildings/gridcell
        storage.write_table(
            table_name=jobs_table_name,
            table_data={
                "job_id":arange( 1, 2500+1 ),
                "grid_id":array( 25*range( 1, 100+1 ), dtype=int32 ),
                "sector_id":array( 2500*[1], dtype=int32 ),
                "building_type":array(2500*[self.comc], dtype=int8)
                }
            )
        self.jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

        # Pool the datasets so computed variables can resolve cross-references.
        self.dataset_pool = DatasetPool()
        self.dataset_pool.add_datasets_if_not_included({
                                            "household":self.households,
                                            "job":self.jobs,
                                            "building":self.buildings,
                                            "building_type": self.building_types,
                                            "job_building_type": self.job_building_types})

        # Size-category breakpoints (sqft) used by category-variable tests.
        self.building_categories = {'commercial': array([1000,5000]),
                                    'industrial': array([500,800,1000])}
class BTMTests(StochasticTestCase):

    def setUp( self ):
        """here, we simulate 50 residential units
        and 5000 commercial, industrial, and governmental sqft added to each of the gridcells in previous years.
        """

        ### TODO: do not redefine these constants.
        self.comc = 1
        self.indc = 3
        self.govc = 2
        self.sfhc = 4
        self.mfhc = 5

        storage = StorageFactory().get_storage('dict_storage')

        gridcells_table_name = 'gridcells'
#            create 100 gridcells, each with 200 residential units and space for 100 commercial jobs,
#            100 industrial jobs, and residential, industrial, and commercial value at $500,000 each
        storage.write_table(
            table_name=gridcells_table_name,
            table_data={
                "grid_id": arange( 1, 100+1 ),
                "commercial_sqft_per_job":array( 100*[100] ),
                "industrial_sqft_per_job":array( 100*[100] ),
                "single_family_improvement_value":array( 100*[500000] ),
                "commercial_improvement_value":array( 100*[500000] ),
                "industrial_improvement_value":array( 100*[500000] )
                }
            )
        self.gridcells = GridcellDataset(in_storage=storage, in_table_name=gridcells_table_name)

        buildings_table_name = 'buildings'
#            2000 buildings (1000 with 20 residential units each, 500 with 20 commercial job and 500 with 20 industrial job each)
        storage.write_table(
            table_name=buildings_table_name,
            table_data={
                "building_id":arange( 1, 2000+1 ), # 2000 buildings
                "grid_id":array( 20*range( 1, 100+1 ), dtype=int32 ), # spread evenly across 100 gridcells
                "building_type_id":array(1000*[self.sfhc] +
                                         500*[self.comc] +
                                         500*[self.indc], dtype=int8),
                "sqft": array(1000*[0] +
                              500*[2000] +
                              500*[2000], dtype=int32),
                "residential_units": array(1000*[20] +
                                           500* [0] +
                                           500* [0], dtype=int32),
                "improvement_value": array(1000*[50] +
                                           500* [50] +
                                           500* [50], dtype=float32),
                "year_built": array(1000*[1940] +
                                    500* [1940] +
                                    500* [1940], dtype=int32)
                }
            )
        self.buildings = BuildingDataset(in_storage=storage, in_table_name=buildings_table_name)

        households_table_name = 'households'
#            create 10000 households, 100 in each of the 100 gridcells.
#            there will initially be 100 vacant residential units in each gridcell then.
        storage.write_table(
            table_name=households_table_name,
            table_data={
                "household_id":arange( 1, 10000+1 ),
                "grid_id":array( 100*range( 1, 100+1 ), dtype=int32 )
                }
            )
        self.households = HouseholdDataset(in_storage=storage, in_table_name=households_table_name)

        building_types_table_name = 'building_types'
        storage.write_table(
            table_name=building_types_table_name,
            table_data={
                "building_type_id":array([self.govc,self.comc,self.indc, self.sfhc, self.mfhc], dtype=int8),
                "name": array(["governmental", "commercial", "industrial", "single_family","multiple_family"]),
                "units": array(["governmental_sqft", "commercial_sqft", "industrial_sqft", "residential_units", "residential_units"]),
                "is_residential": array([0,0,0,1,1], dtype='?')
                }
            )
        self.building_types = BuildingTypeDataset(in_storage=storage, in_table_name=building_types_table_name)

        job_building_types_table_name = 'job_building_types'
        storage.write_table(
            table_name=job_building_types_table_name,
            table_data={
                "id":array([self.govc,self.comc,self.indc, self.sfhc, self.mfhc], dtype=int8),
                "name": array(["governmental", "commercial", "industrial", "single_family","multiple_family"])
                }
            )
        self.job_building_types = JobBuildingTypeDataset(in_storage=storage, in_table_name=job_building_types_table_name)

        jobs_table_name = 'jobs'
#            create 2500 commercial jobs and distribute them equally across the 100 gridcells,
#            25 commercial buildings/gridcell
        storage.write_table(
            table_name=jobs_table_name,
            table_data={
                "job_id":arange( 1, 2500+1 ),
                "grid_id":array( 25*range( 1, 100+1 ), dtype=int32 ),
                "sector_id":array( 2500*[1], dtype=int32 ),
                "building_type":array(2500*[self.comc], dtype=int8)
                }
            )
        self.jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

        self.dataset_pool = DatasetPool()
        self.dataset_pool.add_datasets_if_not_included({
                                            "household":self.households,
                                            "job":self.jobs,
                                            "building":self.buildings,
                                            "building_type": self.building_types,
                                            "job_building_type": self.job_building_types})

        self.building_categories = {'commercial': array([1000,5000]),
                                    'industrial': array([500,800,1000])}

    def test_no_development_with_zero_target_vacancy( self ):
        """If the target vacany ratest are 0%, then no development should occur and thus,
        the building set should remain unchanged (should have the same size).
        """

        """specify that the target vacancies for the year 2000 should be 0% for
        commercial building type."""
        storage = StorageFactory().get_storage('dict_storage')

        target_vacancies_table_name = 'target_vacancies'
        storage.write_table(
            table_name=target_vacancies_table_name,
            table_data={
                "year":array( [2000] ),
                "target_total_commercial_vacancy":array( [0.0] )
                }
            )
        target_vacancies = TargetVacancyDataset(in_storage=storage, in_table_name=target_vacancies_table_name)

        nbuildings = self.buildings.size()
        btm = BuildingTransitionModel()
        results = btm.run(self.buildings, self.building_types,
                           target_vacancies,
                           2000,
                           self.gridcells, building_categories=self.building_categories,
                           dataset_pool=self.dataset_pool)

        self.assertEqual( results, 0, "No buildings should've been added/developed" )
        self.assertEqual( nbuildings, self.buildings.size(), "No buildings should've been added/developed" )

    def test_development_with_nonzero_target_vacancy( self ):
        """Test basic cases, where current single family vacancy = 50%, target single family vacancy is 75%,
        current commercial vacancy is 75%, and target commercial vacancy is 50%.
        Single family development projects should occur, and none for commercial"""

        storage = StorageFactory().get_storage('dict_storage')

        target_vacancies_table_name = 'target_vacancies'
        storage.write_table(
            table_name=target_vacancies_table_name,
            table_data={
                "year":array( [2001], dtype=int32 ),
                "target_total_single_family_vacancy":array( [0.75] ),
                "target_total_commercial_vacancy":array( [0.50] )
                }
            )
        target_vacancies = TargetVacancyDataset(in_storage=storage, in_table_name=target_vacancies_table_name)

        resunits_before, commercial_before, industrial_before, tmp1, tmp2, tmp3 = self.get_residential_commercial_industrial_units(self.buildings)

        btm = BuildingTransitionModel()
        results = btm.run(self.buildings, self.building_types,
                           target_vacancies,
                           2001,
                           self.gridcells, building_categories=self.building_categories,
                           dataset_pool=self.dataset_pool )

        """20000 residential units should've been added because current ratio of
        10000 unoccupied / 20000 total = 0.5, and target residential vacancy rate
        is 0.75. add 20000 to numerator and denominator, and 30000 / 40000 = 0.75"""
        resunits_after, commercial_after, industrial_after, tmp1, tmp2, tmp3 = self.get_residential_commercial_industrial_units(self.buildings)

        self.assertEqual( resunits_after-resunits_before, 20000,
                         """Exactly 20000 residential units should've been added/developed.
                         Instead, got %s""" % ( resunits_after-resunits_before, ) )

        """Anytime the target vacancy rate is less than the current vacancy rate,
        no new development should occur."""
        self.assertEqual( commercial_before - commercial_after, 0,
                         "No commercial units should've been added/developed." )

        self.assertEqual( industrial_before-industrial_after, 0,
                         "No industrial units should've been added/developed." )

        """Check categories"""
        self.assertEqual(ma.allequal(self.buildings.get_categories("commercial"), self.building_categories["commercial"]), True,
                         "Error in creating categories for commercial buildings.")
        self.assertEqual(ma.allequal(self.buildings.get_categories("industrial"), self.building_categories["industrial"]), True,
                         "Error in creating categories for industrial buildings.")

    def test_development_with_99_percent_target_vacancy( self ):
        """Not too different from the basic case above, just trying the other extreme.
        Notice that a 100% target vacancy rate doesn't really make sense and is not possible unless
        the current vacancy rate is also 100% (also not feasible)."""

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='target_vacancies',
            table_data={
                'year':array([2001], dtype=int32),
                'target_total_single_family_vacancy':array([0.99]),
                'target_total_commercial_vacancy':array([0.99]),
                'target_total_industrial_vacancy':array([0.99])
                },
            )
        target_vacancies = TargetVacancyDataset(in_storage=storage, in_table_name='target_vacancies')

        resunits_before, commercial_before, industrial_before, tmp1, tmp2, tmp3 = self.get_residential_commercial_industrial_units(self.buildings)

        btm = BuildingTransitionModel()
        results = btm.run(self.buildings, self.building_types,
                           target_vacancies,
                           2001,
                           self.gridcells, building_categories=self.building_categories,
                           dataset_pool=self.dataset_pool)

        """20000 residential units should've been added because current ratio of
        10000 unoccupied / 20000 total = 0.5, and target residential vacancy rate
        is 0.75. add 20000 to numerator and denominator, and 30000 / 40000 = 0.75"""
        resunits_after, commercial_after, industrial_after, tmp1, tmp2, tmp3 = self.get_residential_commercial_industrial_units(self.buildings)

        """
        .01 = 10000 / (20000 + x)

        x = (10000 - (.01*20000))/.01
        """
        residential_units_developed = (10000 - (.01*20000))/.01
        max_difference = 50
        self.assert_(self.is_close(resunits_after - resunits_before, residential_units_developed, max_difference),
                         """Approximately %s residential units should've been added/developed.
                         Instead, got %s""" % (residential_units_developed, resunits_after - resunits_before))

        """
        2500 commercial jobs * 100 occupied square feet per commercial job is
        250,000 commercial square feet occupied

        250,000 / (1,000,000 + x) = .01

        which converts into:
        x = (250,000 - .01*1,000,000)/.01

        x = 24,000,000
        """
        commercial_sqft_developed = (250000 - (.01*1000000))/.01
        max_difference = 5000
        self.assert_(self.is_close(commercial_after - commercial_before, commercial_sqft_developed, max_difference),
                         """Approximately %s commercial sqft should've been added/developed.
                         Instead, got %s""" % (commercial_sqft_developed, commercial_after - commercial_before))

        self.assertEqual(industrial_before - industrial_after, 0,
                         "No industrial units should've been added/developed.")

    def get_residential_commercial_industrial_units(self, buildings):
        resunits = buildings.get_attribute("residential_units").sum()
        buildings.compute_variables([
                  "urbansim.building.is_building_type_commercial", "urbansim.building.is_building_type_industrial",
                  "urbansim.building.is_building_type_single_family"],
                                         dataset_pool=self.dataset_pool)
        commercial = (buildings.get_attribute("sqft")*buildings.get_attribute("is_building_type_commercial")).sum()
        industrial = (buildings.get_attribute("sqft")*buildings.get_attribute("is_building_type_industrial")).sum()
        return (resunits, commercial, industrial,
                buildings.get_attribute("is_building_type_single_family").sum(),
                buildings.get_attribute("is_building_type_commercial").sum(),
                buildings.get_attribute("is_building_type_industrial").sum())

    def is_close(self, first_value, second_value, max_difference):
        return abs(first_value - second_value) <= max_difference
    def test_unrolling(self):
        from urbansim.datasets.gridcell_dataset import GridcellDataset
        from urbansim.datasets.building_dataset import BuildingDataset
        from urbansim.datasets.building_type_dataset import BuildingTypeDataset
        from opus_core.datasets.dataset_pool import DatasetPool
        from numpy import arange
        
        storage = StorageFactory().get_storage('dict_storage')

        gridcells_table_name = 'gridcells'        
        storage.write_table(
            table_name = gridcells_table_name,
            table_data = {
                'grid_id':array([1,2,3]),
                'commercial_sqft':array([50,50,50]),
                'industrial_sqft':array([100,100,100]),
                'governmental_sqft':array([0,0,0]),
                'residential_units':array([10,0,0]),
                'commercial_improvement_value':array([0,0,0]),
                'industrial_improvement_value':array([0,0,0]),
                'governmental_improvement_value':array([0,0,0]),
                'residential_improvement_value':array([0,0,0]),
                },
            )

        building_table_name = 'buildings'        
        storage.write_table(
            table_name = building_table_name,
            table_data = {
                'building_id': arange(6)+1, 
                'year_built':array([1999,1999,1998,1998,1998,1999]),
                'grid_id':array([1,3,2,3,1,1]),
                'sqft':array([10,20,30,40,0,20]),
                'residential_units':array([0,0,0,0,5,0]),
                'improvement_value':array([0,0,0,0,0,0]),
                'building_type_id': array([1,2,1,2,3,1])
                },
            )
        building_types_table_name = 'building_types'        
        storage.write_table(
            table_name = building_types_table_name,
            table_data = {
                    'building_type_id':array([1,2,3,4]), 
                    'name': array(['industrial', 'commercial', 'residential', 'governmental'])
                    }
            )

        building_types = BuildingTypeDataset(in_storage=storage, in_table_name=building_types_table_name)
        gridcells = GridcellDataset(in_storage=storage, in_table_name=gridcells_table_name)
        buildings = BuildingDataset(in_storage=storage, in_table_name=building_table_name)
        dataset_pool = DatasetPool()
        dataset_pool._add_dataset(building_types.get_dataset_name(), building_types)
        
        roller = RollbackGridcellsFromBuildings()
        
        roller.unroll_gridcells_for_one_year(gridcells, buildings, 2000, dataset_pool)
        self.assert_(ma.allequal(gridcells.get_attribute('commercial_sqft'),
                              array([50,50,50])))
        self.assert_(ma.allequal(gridcells.get_attribute('industrial_sqft'),
                              array([100,100,100])))
        self.assert_(ma.allequal(gridcells.get_attribute('residential_units'),
                              array([10, 0, 0])))
        
        roller.unroll_gridcells_for_one_year(gridcells, buildings, 1999, dataset_pool)
        self.assert_(ma.allequal(gridcells.get_attribute('commercial_sqft'),
                              array([50,50,30])),
                     'Unexpected results: expected %s; received %s' % 
                     (array([50,50,30]), gridcells.get_attribute('commercial_sqft')))
        self.assert_(ma.allequal(gridcells.get_attribute('industrial_sqft'),
                              array([70,100,100])))
        
        roller.unroll_gridcells_for_one_year(gridcells, buildings, 1998, dataset_pool)
        self.assert_(ma.allequal(gridcells.get_attribute('commercial_sqft'),
                              array([50,50,0])))
        self.assert_(ma.allequal(gridcells.get_attribute('industrial_sqft'),
                              array([70,70,100])))
        self.assert_(ma.allequal(gridcells.get_attribute('residential_units'),
                              array([5, 0, 0])))
Example #15
0
    def get_resources(self, data_dictionary, dataset):
        """Create a Resources object for computing a variable.

        Entries of data_dictionary whose key names a known dataset (i.e. is
        in self.datasets) are written to an in-memory storage and wrapped in
        the corresponding dataset class; every other entry is merged into
        the resources unchanged.  'dataset' names the dataset -- or, if
        listed in self.interactions, the interaction set -- the variable is
        computed on; it is merged under the key "dataset".
        """
        # Dataset classes needing no setup beyond construction from
        # storage.  'gridcell' is handled separately below because it also
        # gets relative_x/relative_y coordinate attributes.
        dataset_classes = {
            "household": HouseholdDataset,
            "development_project": DevelopmentProjectDataset,
            "development_event": DevelopmentEventDataset,
            "neighborhood": NeighborhoodDataset,
            "job": JobDataset,
            "zone": ZoneDataset,
            "travel_data": TravelDataDataset,
            "faz": FazDataset,
            "fazdistrict": FazdistrictDataset,
            "race": RaceDataset,
            "county": CountyDataset,
            "large_area": LargeAreaDataset,
            "development_group": DevelopmentGroupDataset,
            "employment_sector_group": EmploymentSectorGroupDataset,
            "plan_type_group": PlanTypeGroupDataset,
            "building": BuildingDataset,
            }
        # Interaction sets: class plus the names of the two component
        # datasets, which must already be present in the resources.
        interaction_classes = {
            "household_x_gridcell":
                (HouseholdXGridcellDataset, "household", "gridcell"),
            "job_x_gridcell":
                (JobXGridcellDataset, "job", "gridcell"),
            "household_x_zone":
                (HouseholdXZoneDataset, "household", "zone"),
            "household_x_neighborhood":
                (HouseholdXNeighborhoodDataset, "household", "neighborhood"),
            "development_project_x_gridcell":
                (DevelopmentProjectXGridcellDataset, "development_project", "gridcell"),
            }

        resources = Resources()

        for key in data_dictionary.keys():
            if key not in self.datasets:
                # Not a dataset: pass the value through unchanged.
                resources.merge({key: data_dictionary[key]})
                continue

            data = data_dictionary[key]
            storage = StorageFactory().get_storage('dict_storage')

            # If the dataset has a single id column that the caller did not
            # supply, synthesize a 1-based id array.  Check isinstance
            # first: a list id-name is unhashable, and testing it for dict
            # membership would raise TypeError on Python 3.
            if not isinstance(self.id_names[key], list) \
                    and self.id_names[key] not in data:
                some_column = list(data.keys())[0]
                data[self.id_names[key]] = arange(1, len(data[some_column]) + 1)  # add id array

            storage.write_table(table_name='data', table_data=data)

            if key == "gridcell":
                gc = GridcellDataset(in_storage=storage,
                                     in_table_name='data')
                # Lay the gridcells out on the smallest n x n grid that
                # holds them all, so each gets 1-based relative_x/relative_y
                # coordinates unless the caller supplied them.
                gc.get_id_attribute()
                n = int(ceil(sqrt(gc.size())))
                if "relative_x" not in data:
                    x = (indices((n, n)) + 1)[1].ravel()
                    gc.add_attribute(x[0:gc.size()],
                                     "relative_x",
                                     metadata=1)
                if "relative_y" not in data:
                    y = (indices((n, n)) + 1)[0].ravel()
                    gc.add_attribute(y[0:gc.size()],
                                     "relative_y",
                                     metadata=1)
                resources.merge({key: gc})
            elif key in dataset_classes:
                resources.merge({
                    key: dataset_classes[key](in_storage=storage,
                                              in_table_name='data')})

        if dataset in self.interactions:
            # NOTE: as in the original code, an interaction name that is in
            # self.interactions but not recognized here leaves "dataset"
            # unset.
            if dataset in interaction_classes:
                interaction_class, name1, name2 = interaction_classes[dataset]
                resources.merge({
                    "dataset": interaction_class(dataset1=resources[name1],
                                                 dataset2=resources[name2])})
        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables": '*', "debug": 4})
        return resources