Code example #1
    def __init__(self, 
            resources=None, 
            in_storage=None,
            out_storage=None, 
            in_table_name=None, 
            out_table_name=None, 
            attributes=None, 
            id_name=None, 
            nchunks=None, 
            debuglevel=0
            ):
        try:
            debug = SessionConfiguration().get('debuglevel', 0)
        except Exception:
            debug = 0
        debug = DebugPrinter(debug)
        if debuglevel > debug.flag:
            debug.flag = debuglevel
            
        debug.print_debug("Creating object %s.%s" % (self.__class__.__module__, self.__class__.__name__), 2)
        
        resources = ResourceFactory().get_resources_for_dataset(
            self.dataset_name, 
            resources = resources, 
            in_storage = in_storage,
            in_table_name_pair = (in_table_name,self.in_table_name_default), 
            attributes_pair = (attributes,self.attributes_default),
            out_storage = out_storage,
            out_table_name_pair = (out_table_name, self.out_table_name_default), 
            id_name_pair = (id_name, self.id_name_default), 
            nchunks_pair = (nchunks,self.nchunks_default), 
            debug_pair = (debug,None),
            )

        CoreDataset.__init__(self,resources = resources)
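
For orientation, a minimal sketch (assumed names) of how a dataset built on a constructor like this is typically instantiated; MyDataset stands in for any subclass that defines dataset_name and the *_default class attributes used above, and the dict_storage table layout is an assumption:

    from numpy import array
    from opus_core.storage_factory import StorageFactory

    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='my_datasets',
                        table_data={'my_dataset_id': array([1, 2, 3])})
    dataset = MyDataset(in_storage=storage, in_table_name='my_datasets')  # hypothetical subclass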
Code example #2
    def run(self):

        dataset_pool = SessionConfiguration().get_dataset_pool()

        estabs = dataset_pool.get_dataset('establishment')

        extant = estabs.compute_variables("establishment.disappeared==0")

        index_extant = np.where(extant==1)[0]

        logger.log_status("num extant establishments: %s" % index_extant.size)

        unplaced = estabs.compute_variables("(establishment.building_id==-1)*(establishment.disappeared==0)")
        index_unplaced = np.where(unplaced==1)[0]

        logger.log_status("num unplaced establishments: %s" % index_unplaced.size)

        employment = estabs.get_attribute('employment')

        logger.log_status("avg employees in extant establishments: %s" % np.average(employment[index_extant]))

        logger.log_status("avg employees in unplaced establishments: %s" % np.average(employment[index_unplaced]))
       
       
       
Code example #3
    def run(self, year=None, years_to_run=[]):
        if year not in years_to_run:
            return

        logger.log_status("Finding buildings with employees exceeding amount allowed by square feet...")
        dataset_pool = SessionConfiguration().get_dataset_pool()
        building = dataset_pool.get_dataset('building')
        establishment = dataset_pool.get_dataset('establishment')
        building_sqft_per_employee = dataset_pool.get_dataset('building_sqft_per_employee')
        employees = establishment.get_attribute('employees')
        building_type_ids = establishment.compute_variables('establishment.disaggregate(building.building_type_id)')
        building_sqft = building.get_attribute('building_sqft')
        sqft_required_per_employee = establishment.compute_variables('_sqft_required_per_employee=establishment.disaggregate(building.disaggregate(building_sqft_per_employee.building_sqft_per_employee))')
        #establishment.add_primary_attribute(name='building_type_id', data=building_type_ids)
        #establishment.add_primary_attribute(name='sqft_required_per_employee', data=sqft_required_per_employee)
        required_sqft_in_building = building.compute_variables('_required_sqft_in_building = building.aggregate(establishment.employees*_sqft_required_per_employee)')
        #sqft_required_total = employees*sqft_required_per_employee
        overassigned = building.compute_variables('_overassigned = _required_sqft_in_building > building.building_sqft')
        idx_overassigned = where(overassigned)[0]
        num_overassigned = len(idx_overassigned)
        logger.log_status("Found %d overassigned buildings" % num_overassigned)
        new_building_sqft = required_sqft_in_building[idx_overassigned]
        building.modify_attribute('building_sqft', new_building_sqft, idx_overassigned)
        overassigned = building.compute_variables('_overassigned = _required_sqft_in_building > building.building_sqft')
        idx_overassigned = where(overassigned)[0]
        num_overassigned = len(idx_overassigned)
        logger.log_status("%d overassigned buildings remain" % num_overassigned)
Code example #4
    def _compute_variable_for_prior_year(self, dataset, full_name, time, resources=None):
        """Create a new dataset for this variable, compute the variable, and then return
        the values for this variable."""
        calling_dataset_pool = SessionConfiguration().get_dataset_pool()
        calling_time = SimulationState().get_current_time()
        SimulationState().set_current_time(time)
        try:
            # Get an empty dataset pool with same search paths.
            my_dataset_pool = DatasetPool(
                package_order=calling_dataset_pool.get_package_order(),
                storage=AttributeCache())

            ds = dataset.empty_dataset_like_me(in_storage=AttributeCache())

            # Don't pass any datasets via resources, since they may be from a different time.
            my_resources = Resources(resources)
            # Iterate over a copy of the keys so deleting entries is safe.
            for key in list(my_resources):
                if isinstance(key, Dataset):
                    del my_resources[key]

            ds.compute_variables(full_name, my_dataset_pool, resources=my_resources)
            values = ds.get_attribute(full_name)
            return values
        finally:
            SimulationState().set_current_time(calling_time)
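
The try/finally above saves and restores SimulationState's global current time around the computation. The same idea as a context manager (a hypothetical helper, not part of Opus):

    from contextlib import contextmanager

    @contextmanager
    def simulation_time(time):
        # temporarily switch the global simulation clock, restoring it on exit
        saved = SimulationState().get_current_time()
        SimulationState().set_current_time(time)
        try:
            yield
        finally:
            SimulationState().set_current_time(saved)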
Code example #5
    def run(self, config, year, *args, **kwargs):
        """This is the main entry point.  It gets the appropriate configuration info from the 
        travel_model_configuration part of this config, and then copies the specified 
        UrbanSim data into files for travel mdel to read.  
        """
        cache_directory = config['cache_directory']
        simulation_state = SimulationState()
        simulation_state.set_cache_directory(cache_directory)
        simulation_state.set_current_time(year)
        attribute_cache = AttributeCache()
        dataset_pool = SessionConfiguration(new_instance=True,
                                            package_order=config['dataset_pool_configuration'].package_order,
                                            in_storage=attribute_cache).get_dataset_pool()

        #cache_storage = AttributeCache().get_flt_storage_for_year(year_for_base_year_cache)
        #datasets = DatasetFactory().create_datasets_from_flt(config.get('datasets_to_preload',{}),
        #                                                     "urbansim",
        #                                                     additional_arguments={'in_storage': attribute_cache})
        zone_set = dataset_pool.get_dataset('travel_zone')
        self.prepare_for_run(config['travel_model_configuration'], year)
        self.create_travel_model_input_file(config=config, 
                                            year=year, 
                                            zone_set=zone_set, 
                                            datasets=dataset_pool,
                                            *args, **kwargs)
Code example #6
    def _compute_variable_for_prior_year(self, dataset, full_name, time, resources=None):
        """Create a new dataset for this variable, compute the variable, and then return
        the values for this variable."""
        calling_dataset_pool = SessionConfiguration().get_dataset_pool()
        calling_time = SimulationState().get_current_time()
        SimulationState().set_current_time(time)
        # Do not flush any variables when computing dependencies for a lag variable.
        prior_flush_state = SimulationState().get_flush_datasets()
        SimulationState().set_flush_datasets(False)
        try:
            # Get an empty dataset pool with same search paths.
            my_dataset_pool = DatasetPool(
                package_order=calling_dataset_pool.get_package_order(), storage=AttributeCache()
            )

            try:
                ds = dataset.empty_dataset_like_me(in_storage=AttributeCache())
            except FileNotFoundError:
                ## necessary when a dataset is not cached but created on the fly, e.g. submarket
                ds = my_dataset_pool.get_dataset(dataset.dataset_name)

            # Don't pass any datasets via resources, since they may be from a different time.
            my_resources = Resources(resources)
            # Iterate over a copy of the keys so deleting entries is safe.
            for key in list(my_resources):
                if isinstance(key, Dataset):
                    del my_resources[key]

            ds.compute_variables(full_name, my_dataset_pool, resources=my_resources)
            values = ds.get_attribute(full_name)
            return values
        finally:
            SimulationState().set_current_time(calling_time)
            SimulationState().set_flush_datasets(prior_flush_state)
Code example #7
    def __init__(self, name_of_dataset_to_merge, in_table_name, attribute_cache, years_to_merge, *args, **kwargs):
        """Create a dataset that contains this many years of data from this dataset.
        
        Years are from current year backwards, inclusive.
        """
        self.name_of_dataset_to_merge = name_of_dataset_to_merge
        self.years_to_merge = years_to_merge
        
        self._validate_primary_attributes_same_for_all_years(name_of_dataset_to_merge, in_table_name, attribute_cache, years_to_merge)
        
        # Add 'year' to id_names.
        dataset_for_current_year = SessionConfiguration().get_dataset_from_pool(
            self.name_of_dataset_to_merge)
        id_names = dataset_for_current_year.get_id_name() + ['year']
        self.base_id_name = dataset_for_current_year.get_id_name()
        
        # Masquerade as a dataset of the right type (important for computing the right variables).
        dataset_name = dataset_for_current_year.get_dataset_name()
        
        AbstractDataset.__init__(self,
                                 id_name=id_names,
                                 in_table_name=in_table_name,
                                 dataset_name=dataset_name,
                                 *args, **kwargs)

        coord_system = dataset_for_current_year.get_coordinate_system()
        if coord_system is not None:
            self._coordinate_system = coord_system
Code example #8
    def test_create_tripgen_travel_model_input_files(self):
        in_storage = StorageFactory().get_storage(
            'sql_storage',
            storage_location = self.database)
        sc = SessionConfiguration(new_instance=True,
                                  package_order = ['urbansim', 'psrc'],
                                  in_storage=in_storage)
        dataset_pool = sc.get_dataset_pool()

        TravelModelInputFileWriter().run(self.tempdir_path, 2000, dataset_pool)

        logger.log_status('tazdata path: ', self.tempdir_path)
        # expected values - data format: {income_group: [[zone_id, job_zone_id], ...]}
        expected_tazdata = {1: [[1,1], [1,2]],
                            2: [[2,2]],
                            3: [],
                            4: [[2,2]]
                            }
        # get real data from file
        real_tazdata = {1:[], 2:[], 3:[], 4:[]}
        # income groups 1 to 4
        for i in [1,2,3,4]:
            tazdata_file = open(os.path.join(self.tempdir_path, 'tripgen', 'inputtg', 'tazdata.mf9%s' % i), 'r')
            for a_line in tazdata_file.readlines():
                if a_line[0].isspace():
                    numbers = a_line.split()
                    zone_id = int(numbers[0])
                    job_zone_id = int(numbers[1])
                    real_tazdata[i].append([zone_id, job_zone_id])

        for group in expected_tazdata.keys():
            self.assertEqual(real_tazdata[group], expected_tazdata[group],
                             "income group %d, columns did not match up." % group)
Code example #9
    def __init__(self, config):
        ss = SimulationState(new_instance=True)
        ss.set_current_time(config['base_year'])
        ss.set_cache_directory(config['cache_directory'])

        SessionConfiguration(new_instance=True,
                             package_order=config['dataset_pool_configuration'].package_order,
                             in_storage=AttributeCache())
        #if not os.path.exists(config['cache_directory']):  ## if cache exists, it will automatically skip
        cacher = CreateBaseyearCache()
        cache_dir = cacher.run(config)

        if 'estimation_database_configuration' in config:
            db_server = DatabaseServer(config['estimation_database_configuration'])
            db = db_server.get_database(config['estimation_database_configuration'].database_name)
            out_storage = StorageFactory().get_storage(
                'sql_storage', 
                storage_location = db)
        else:
            output_cache = os.path.join(config['cache_directory'], str(config['base_year']+1))
            out_storage = StorageFactory().get_storage('flt_storage', storage_location=output_cache)

        dataset_pool = SessionConfiguration().get_dataset_pool()
        households = dataset_pool.get_dataset("household")
        buildings = dataset_pool.get_dataset("building")
        zones = dataset_pool.get_dataset("zone")
        zone_ids = zones.get_id_attribute()
        capacity_attribute_name = "residential_units"  #_of_use_id_%s" % id
        capacity_variable_name = "%s=sanfrancisco.zone.aggregate_%s_from_building" % \
                                 (capacity_attribute_name, capacity_attribute_name)
        buildings.compute_variables("sanfrancisco.building.zone_id", dataset_pool=dataset_pool)
        zones.compute_variables(capacity_variable_name, dataset_pool=dataset_pool)

        building_zone_id = buildings.get_attribute('zone_id')
        
#        is_household_unplace = datasets['household'].get_attribute("building_id") <= 0
        is_household_unplaced = 1 #all households are unplaced
        household_building_id = zeros(households.size(), dtype='int32')-1 #datasets['household'].get_attribute("building_id")
        
        for zone_id in zone_ids:
            capacity = zones.get_attribute_by_id(capacity_attribute_name, zone_id)
            is_household_in_this_zone = (households.get_attribute('zone_id') == zone_id)
            is_unplaced_household_in_this_zone = is_household_in_this_zone * is_household_unplaced
            is_building_in_this_zone = (building_zone_id == zone_id)
#            if not is_household_in_this_zone.sum() <= capacity:
            if capacity == 0 or is_household_in_this_zone.sum() == 0:
                print "WARNING: skipping zone %s (%s households, %s residential units)" % (zone_id, is_household_in_this_zone.sum(), capacity)
                continue
                        
            prob = buildings.get_attribute(capacity_attribute_name) * is_building_in_this_zone / array(capacity, dtype=float64)

            r = random(sum(is_unplaced_household_in_this_zone))
            prob_cumsum = ncumsum(prob)
            index_to_bldg = searchsorted(prob_cumsum, r)

            household_building_id[where(is_unplaced_household_in_this_zone)] = buildings.get_attribute_by_index('building_id', index_to_bldg)

#        import pdb;pdb.set_trace()
        households.set_values_of_one_attribute('building_id', household_building_id)
        households.write_dataset(out_table_name='households', out_storage=out_storage)
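
The assignment loop above picks buildings by inverse-transform sampling: unit counts are normalized to per-zone probabilities, and uniform draws are mapped to buildings via searchsorted on the cumulative sum. The same technique in isolation (np.cumsum standing in for the ncumsum used above):

    import numpy as np

    prob = np.array([0.2, 0.5, 0.3])              # per-bin selection probabilities
    r = np.random.random(4)                       # uniform draws in [0, 1)
    chosen = np.searchsorted(np.cumsum(prob), r)  # bin index for each draw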
Code example #10
    def run(self, year=None, years_to_run=[]):
        if year not in years_to_run:
            return

        logger.log_status("Finding buildings with over-assigned households...")
        dataset_pool = SessionConfiguration().get_dataset_pool()
        building = dataset_pool.get_dataset('building')
        assigned_households = building.compute_variables('_assigned_hh = building.number_of_agents(household)')
        building_type_ids = building.get_attribute('building_type_id')
        overassigned = building.compute_variables('_overassigned = _assigned_hh > building.residential_units')
        idx_overassigned = where(overassigned)[0]
        num_overassigned = len(idx_overassigned)
        logger.log_status("Found %d overassigned buildings" % num_overassigned)
        new_res_units = building.get_attribute('_assigned_hh')[idx_overassigned]
        building.modify_attribute('residential_units', new_res_units, idx_overassigned)

        # make over-assigned single-family buildings (types 1 and 2) into type 3
        idx_sf_overassigned = where(logical_and(overassigned, logical_or(building_type_ids == 1,
                                                                         building_type_ids == 2)))[0]
        new_building_type_ids = ones(idx_sf_overassigned.size, dtype="i4")*3
        building.modify_attribute('building_type_id', new_building_type_ids, idx_sf_overassigned)

        # recalculate overassignment to see how we did
        overassigned = building.compute_variables('_overassigned = _assigned_hh > building.residential_units')
        idx_overassigned = where(overassigned)[0]
        num_overassigned = len(idx_overassigned)
        logger.log_status("%d overassigned remain" % num_overassigned)
Code example #11
 def _do_flush_dependent_variables_if_required(self):
     try:
         if not SessionConfiguration().get('flush_variables', False):
             return
     except Exception:
         return
     from opus_core.datasets.interaction_dataset import InteractionDataset
     dataset = self.get_dataset()
     dependencies = self.get_current_dependencies()
     my_dataset_name = dataset.get_dataset_name()
     for iattr in range(len(dependencies)): # iterate over dependent variables
         dep_item = dependencies[iattr][0]
         if isinstance(dep_item, str):
             depvar_name = VariableName(dep_item)
         else:
             depvar_name = dep_item.get_variable_name() # dep_item should be an instance of AttributeBox
         dataset_name = depvar_name.get_dataset_name()
         if dataset_name == my_dataset_name:
             ds = dataset
         else:
             ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
             #ds = dataset_pool.get_dataset('dataset_name')
         if not isinstance(ds, InteractionDataset):
             short_name = depvar_name.get_alias()
             if short_name not in ds.get_id_name():   
                 ds.flush_attribute(depvar_name)
Code example #12
 def run(self):
 
     dataset_pool = SessionConfiguration().get_dataset_pool()
     
     z_scen0 = dataset_pool.get_dataset('zones_baseline')
     
     tcd = z_scen0['tcd']
     tco = z_scen0['tco']
     vpd = z_scen0['vpd']
     vpo = z_scen0['vpo']
     
     zones = dataset_pool.get_dataset('zone')
     
     zones.modify_attribute('tcd', tcd)
     zones.modify_attribute('tco', tco)
     zones.modify_attribute('vpd', vpd)
     zones.modify_attribute('vpo', vpo)
     
     z_scen0.delete_one_attribute('tcd')
     z_scen0.delete_one_attribute('tco')
     z_scen0.delete_one_attribute('vpd')
     z_scen0.delete_one_attribute('vpo')
     
     
     
Code example #13
    def __init__(self, config):
        if 'estimation_database_configuration' in config:
            db_server = DatabaseServer(config['estimation_database_configuration'])
            db = db_server.get_database(config['estimation_database_configuration'].database_name)
        
            out_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=db)
        else:
            out_storage = StorageFactory().get_storage(type='flt_storage',
                storage_location=os.path.join(config['cache_directory'], str(config['base_year']+1)))

        simulation_state = SimulationState()
        simulation_state.set_cache_directory(config['cache_directory'])
        simulation_state.set_current_time(config['base_year'])
        attribute_cache = AttributeCache()
        
        SessionConfiguration(new_instance=True,
                             package_order=config['dataset_pool_configuration'].package_order,
                             in_storage=attribute_cache)
        
        if not os.path.exists(os.path.join(config['cache_directory'], str(config['base_year']))):
            #raise RuntimeError, "datasets uncached; run prepare_estimation_data.py first"
            CacheScenarioDatabase().run(config, unroll_gridcells=False)

        for dataset_name in config['datasets_to_preload']:
            SessionConfiguration().get_dataset_from_pool(dataset_name)

        households = SessionConfiguration().get_dataset_from_pool("household")
        household_ids = households.get_id_attribute()
        workers = households.get_attribute("workers")
        
        hh_ids = []
        member_ids = []
        is_worker = []
        job_ids = []

        for i in range(households.size()):  
            if workers[i] > 0:
                hh_ids += [household_ids[i]] * workers[i]
                member_ids += range(1, workers[i]+1)
                is_worker += [1] * workers[i]
                job_ids += [-1] * workers[i]

        in_storage = StorageFactory().get_storage('dict_storage')
        
        persons_table_name = 'persons'
        in_storage.write_table(
                table_name=persons_table_name,
                table_data={
                    'person_id':arange(len(hh_ids))+1,
                    'household_id':array(hh_ids),
                    'member_id':array(member_ids),
                    'is_worker':array(is_worker),                    
                    'job_id':array(job_ids),
                    },
            )

        persons = PersonDataset(in_storage=in_storage, in_table_name=persons_table_name)
        persons.write_dataset(out_storage=out_storage, out_table_name=persons_table_name)
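
A worked example of the expansion loop above: a household with household_id 7 and workers == 2 contributes two person rows, so the accumulators end up as

    # hh_ids     -> [7, 7]
    # member_ids -> [1, 2]
    # is_worker  -> [1, 1]
    # job_ids    -> [-1, -1]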
Code example #14
 def run(self, year, cache_directory=None):
     """The class is initialized with the appropriate configuration info from the 
     travel_model_configuration part of this config, and then copies the specified 
     UrbanSim data into files for daysim to read.
     The variables/expressions to export are defined in the node travel_model_configuration/urbansim_to_tm_variable_mapping
     of the configuration file.
     """
     if cache_directory is None:
         cache_directory = self.config['cache_directory']
     simulation_state = SimulationState()
     simulation_state.set_cache_directory(cache_directory)
     simulation_state.set_current_time(year)
     attribute_cache = AttributeCache()
     sc = SessionConfiguration(new_instance=True,
                               package_order=self.config['dataset_pool_configuration'].package_order,
                               in_storage=attribute_cache)
     dataset_pool = sc.get_dataset_pool()
     tm_config = self.config['travel_model_configuration']
     data_to_export = tm_config['urbansim_to_tm_variable_mapping']
     
     table_names = data_to_export.keys()
     variable_names = {}
     datasets = {}
     filenames = {}
     in_table_names = {}
     for table_name in table_names:
         filter = data_to_export[table_name].get('__filter__', None)
         if filter is not None:
             del data_to_export[table_name]['__filter__']
         out_table_name = data_to_export[table_name].get('__out_table_name__', None)
         if out_table_name is not None:
             del data_to_export[table_name]['__out_table_name__']
         else:
             out_table_name = table_name
         variables_to_export = map(lambda alias: "%s = %s" % (alias, data_to_export[table_name][alias]), data_to_export[table_name].keys())
         dataset_name = None            
         for var in variables_to_export:
             var_name = VariableName(var)
             if dataset_name is None:
                 dataset_name = var_name.get_dataset_name()
                 ds = dataset_pool.get_dataset(dataset_name)
                 
                 datasets[dataset_name] = ds
                 filenames[dataset_name] = out_table_name
                 in_table_names[dataset_name] = table_name
                 if dataset_name not in variable_names.keys():
                     variable_names[dataset_name] = []
             variable_names[dataset_name].append(var_name.get_alias())                
             ds.compute_variables([var_name], dataset_pool=dataset_pool)
         if filter is not None:
             filter_idx = where(ds.compute_variables(["__filter__ = %s" % filter], dataset_pool=dataset_pool)>0)[0]
             ds = DatasetSubset(ds, index = filter_idx)
             datasets[dataset_name] = ds
             
     return self._call_input_file_writer(year, datasets, in_table_names, filenames, variable_names, dataset_pool)
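
For reference, a sketch of the shape of the urbansim_to_tm_variable_mapping node this method consumes; the __filter__ and __out_table_name__ keys come from the code above, while every table and variable name here is a placeholder:

    urbansim_to_tm_variable_mapping = {
        'households': {
            '__filter__': 'household.persons > 0',   # optional row filter
            '__out_table_name__': 'hh_for_tm',       # optional output name
            'income': 'household.income',            # alias: expression
            'zone_id': 'household.disaggregate(building.zone_id)',
            },
        }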
Code example #15
    def run(self):
        """Runs the test model. 
        """

        dataset_pool = SessionConfiguration().get_dataset_pool()

        zone_set = dataset_pool.get_dataset('zone')
        start = time.time()
        zone_sqft_per_job = zone_set.compute_variables('_sqft_per_employee = safe_array_divide(zone.aggregate(building.non_residential_sqft,intermediates=[parcel]),zone.aggregate(establishment.employees,intermediates=[building,parcel]))')
        end = time.time()
        logger.log_status("%s" % (end - start))
Code example #16
 def run(self):
 
     dataset_pool = SessionConfiguration().get_dataset_pool()
     
     households = dataset_pool.get_dataset('household')
     
     buildings = dataset_pool.get_dataset('building')
     
     unplaced = households.compute_variables("household.building_id==-1")
     index_unplaced = np.where(unplaced==1)[0]
     
     model = ScalingAgentsModel()
     
     model.run(buildings, households, index_unplaced)
Code example #17
File: updater.py (project: psrc/urbansim)
    def run(self):
        """Keeps household building type id attribute consistent with residential_building_type_id. 
        """
        dataset_pool = SessionConfiguration().get_dataset_pool()

        household_set = dataset_pool.get_dataset("household")

        household_set.delete_one_attribute("county")

        county = household_set.compute_variables(
            "_county = household.disaggregate(parcel.county_id, intermediates=[building])"
        )

        household_set.add_primary_attribute(name="county", data=county)
Code example #18
 def run(self):
 
     dataset_pool = SessionConfiguration().get_dataset_pool()
     
     establishments = dataset_pool.get_dataset('establishment')
     
     buildings = dataset_pool.get_dataset('building')
     
     unplaced = establishments.compute_variables("establishment.building_id==-1")
     index_unplaced = np.where(unplaced==1)[0]
     
     model = ScalingAgentsModel()
     
     model.run(buildings, establishments, index_unplaced)
Code example #19
    def run(self):
        """Keeps household building type id attribute consistent with residential_building_type_id. 
        """
        dataset_pool = SessionConfiguration().get_dataset_pool()

        household_set = dataset_pool.get_dataset('household')

        household_res_type = household_set.get_attribute('residential_building_type_id')

        #index_update_building_type = where(household_res_type>0)[0]

        #household_set.modify_attribute('building_type_id', household_res_type[index_update_building_type], index_update_building_type)

        household_set.modify_attribute('building_type_id', household_res_type)
Code example #20
def setup_environment(cache_directory, year, package_order, additional_datasets={}):
    gc.collect()
    ss = SimulationState(new_instance=True)
    ss.set_cache_directory(cache_directory)
    ss.set_current_time(year)
    ac = AttributeCache()
    storage = ac.get_flt_storage_for_year(year)
    sc = SessionConfiguration(new_instance=True,
                         package_order=package_order,
                         in_storage=ac)
    logger.log_status("Setup environment for year %s. Use cache directory %s." % (year, storage.get_storage_location()))
    dp = sc.get_dataset_pool()
    for name, ds in additional_datasets.iteritems():
        dp.replace_dataset(name, ds)
    return dp
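
Hypothetical usage of setup_environment; the cache path and package order are placeholders:

    dp = setup_environment('/urbansim_cache/run_42', 2005, ['urbansim', 'opus_core'])
    households = dp.get_dataset('household')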
Code example #21
    def test_create_tripgen_travel_model_input_file(self):
        
        in_storage = StorageFactory().get_storage(
              'sql_storage',
              storage_location = self.database)

        sc = SessionConfiguration(new_instance=True,
                             package_order = ['urbansim', 'psrc'],
                             in_storage=in_storage)
        dataset_pool = sc.get_dataset_pool()
        #zone_set = dataset_pool.get_dataset('zone')
        #hh_set = dataset_pool.get_dataset('household')
        #job_set = dataset_pool.get_dataset('job')
        #taz_col_set = dataset_pool.get_dataset('constant_taz_column')
        
        TravelModelInputFileWriter().run(self.tempdir_path, 2000, dataset_pool)
        
        logger.log_status('tazdata path: ', self.tempdir_path)
        # expected values - data format: {zone:{column_value:value}}
        expected_tazdata = {1:{101: 19.9, 
                               102: 2., 103: 0., 104:1., 105:0.,
                               106: 3., 107:11., 109:1., 
                               110:0., 111:0., 112:0., 113:0., 114:0., 
                               115:0., 116:0., 117:0., 118:0., 119:0., 
                               120:2., 121:42., 122:0., 123:0., 124:11.}, 
                            2:{101: 29.9, 
                               102: 0., 103: 2., 104:1., 105:3.,
                               106: 1., 107:3., 109:0., 
                               110:0., 111:0., 112:0., 113:3., 114:0., 
                               115:0., 116:0., 117:0., 118:1., 119:1., 
                               120:0., 121:241., 122:0., 123:0., 124:3.}}
        
        # get real data from file
        real_tazdata = {1:{},2:{}}
        tazdata_file = open(os.path.join(self.tempdir_path, 'tripgen', 'inputtg', 'tazdata.ma2'), 'r')
        for a_line in tazdata_file.readlines():
            if a_line[0].isspace():
                numbers = a_line.replace(':', ' ').split() # data line format:  1   101:  15.5
                zone_id = int(numbers[0])
                column_var = int(numbers[1])
                value = float(numbers[2])
                if value != -1:
                    real_tazdata[zone_id][column_var] = value

        for zone in expected_tazdata.keys():
            for col_var in expected_tazdata[zone].keys():
                self.assertAlmostEqual(real_tazdata[zone][col_var], expected_tazdata[zone][col_var], 3,\
                                       "zone %d, column variable %d did not match up."%(zone, col_var))
Code example #22
 def _get_attribute_for_year(self, dataset_name, attribute_name, year):
     """Return the attribute values for this year."""
     calling_dataset_pool = SessionConfiguration().get_dataset_pool()
     calling_time = SimulationState().get_current_time()
     SimulationState().set_current_time(year)
     try:
         my_dataset_pool = DatasetPool(
             package_order=calling_dataset_pool.get_package_order(),
             storage=AttributeCache())
         dataset = my_dataset_pool.get_dataset(dataset_name)
         attribute_name = attribute_name.replace('DDDD',repr(year))
         dataset.compute_variables(attribute_name, my_dataset_pool)
         values = dataset.get_attribute(attribute_name)
         return values
     finally:
         SimulationState().set_current_time(calling_time)
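
The 'DDDD' token acts as a year placeholder in the configured attribute name; with a hypothetical variable name:

    attribute_name = 'psrc.zone.travel_time_to_cbd_DDDD'        # hypothetical
    attribute_name = attribute_name.replace('DDDD', repr(2005))
    # -> 'psrc.zone.travel_time_to_cbd_2005'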
Code example #23
    def _test_generate_results(self, indicator_name, dataset_name, expression, source):

        # grab the first base_year_data in results_manager/simulation_runs and
        # fetch the year for it
        base_year = self.project.find("results_manager/simulation_runs/run[@name='base_year_data']/end_year")
        if base_year is None:
            return False, "Project doesn't have any base year data to check against"

        start_year = int(base_year.text)
        result_generator = OpusResultGenerator(self.project)
        result_generator.set_data(
               source_data_name = 'base_year_data',
               indicator_name = indicator_name,
               dataset_name = dataset_name,
               years = [start_year,],
               indicator_definition = (expression, source))

        interface = IndicatorFrameworkInterface(self.project)
        src_data = interface.get_source_data(source_data_name = 'base_year_data', years = [start_year,])
        SimulationState().set_current_time(start_year)
        SimulationState().set_cache_directory(src_data.cache_directory)
        SessionConfiguration(
            new_instance = True,
            package_order = src_data.dataset_pool_configuration.package_order,
            in_storage = AttributeCache())


        dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
        if isinstance(dataset, InteractionDataset):
            # create a subset if it's an interaction dataset...
            dataset_arguments = {
                 'index1':numpy.random.randint(0,dataset.dataset1.size(), size=100),
                 'index2':numpy.random.randint(0,dataset.dataset2.size(), size=100)
            }
            SessionConfiguration().delete_datasets()
            dataset = SessionConfiguration().get_dataset_from_pool(dataset_name,
                                                                   dataset_arguments = dataset_arguments)

        try:
            dataset.compute_variables(names = [expression])
            return True, None
        except Exception, e:
            type, value, tb = sys.exc_info()
            stack_dump = ''.join(traceback.format_exception(type, value, tb))
            errors = "{}\n\n{}".format(e, stack_dump)
            return False, errors
Code example #24
 def prepare_for_run(self, control_total_dataset_name=None, control_total_table=None, control_total_storage=None):
     if (control_total_storage is None) or ((control_total_table is None) and (control_total_dataset_name is None)):
         dataset_pool = SessionConfiguration().get_dataset_pool()
         self.control_totals = dataset_pool.get_dataset( 'annual_%s_control_total' % self.dataset.get_dataset_name() )
         return self.control_totals
     
     if not control_total_dataset_name:
         control_total_dataset_name = DatasetFactory().dataset_name_for_table(control_total_table)
     
     self.control_totals = DatasetFactory().search_for_dataset(control_total_dataset_name,
                                                               package_order=SessionConfiguration().package_order,
                                                               arguments={'in_storage':control_total_storage, 
                                                                          'in_table_name':control_total_table,
                                                                          'id_name':[]
                                                                          }
                                                               )
     return self.control_totals
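
A sketch of driving prepare_for_run with an explicit storage instead of the session dataset pool; model stands for an instance of the class above, and the table columns are assumptions:

    from numpy import array
    from opus_core.storage_factory import StorageFactory

    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(
        table_name='annual_household_control_totals',
        table_data={'year': array([2005]),                       # assumed columns
                    'total_number_of_households': array([50000])})
    control_totals = model.prepare_for_run(
        control_total_table='annual_household_control_totals',
        control_total_storage=storage)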
Code example #25
    def run(self):
        """Runs the add dpa model. 
        """
        dataset_pool = SessionConfiguration().get_dataset_pool()

        job_set = dataset_pool.get_dataset('job')

        household_set = dataset_pool.get_dataset('household')

        submarket_set = dataset_pool.get_dataset('submarket')

        employment_submarket_set = dataset_pool.get_dataset('employment_submarket')

        building_set = dataset_pool.get_dataset('building')
        
        zone_set = dataset_pool.get_dataset('zone')

        employment_submarket_set.add_attribute(name='dpa_id', data=employment_submarket_set.compute_variables('employment_submarket.disaggregate(zone.dpa_id)'))

        submarket_set.add_attribute(name='dpa_id', data=submarket_set.compute_variables('submarket.disaggregate(zone.dpa_id)'))

        job_dpa = job_set.compute_variables('job.disaggregate(employment_submarket.dpa_id)')

        household_dpa = household_set.compute_variables('household.disaggregate(submarket.dpa_id)')
        
        household_zone = household_set.compute_variables('household.disaggregate(parcel.zone_id,intermediates=[building])')

        job_set.add_primary_attribute(name='dpa_id', data=job_dpa)

        household_set.add_primary_attribute(name='dpa_id', data=household_dpa)
        
        household_set.add_primary_attribute(name='household_zone', data=household_zone)
Code example #26
 def run(self, agent_set, agents_index=None):
     dataset_pool = SessionConfiguration().get_dataset_pool()
     building_set = dataset_pool.get_dataset('building')
     chosen_submarkets=(agent_set.get_attribute('submarket_id'))[agents_index]
     building_submarkets=(building_set.compute_variables('mrcog.building.submarket_id'))
     building_ids=(building_set.get_attribute('building_id'))
     unique_chosen_submarkets=unique(chosen_submarkets)
     for submarket in unique_chosen_submarkets:
         if submarket > 0:
             idx_hh_in_submarket = where(chosen_submarkets==submarket)[0]
             num_hh = idx_hh_in_submarket.size
             idx_building_in_submarket = where(building_submarkets==submarket)[0]
             num_buildings = idx_building_in_submarket.size
             if num_buildings>0:
                 building_ids_in_submarket = building_ids[idx_building_in_submarket]
                 sampler = np.random.randint(0, num_buildings, size=num_hh)
                 building_ids_to_assign = building_ids_in_submarket[sampler]
                 agent_set['building_id'][agents_index[idx_hh_in_submarket]] = building_ids_to_assign
Code example #27
 def prepare_for_run(self, scheduled_events_dataset_name=None, scheduled_events_table=None, scheduled_events_storage=None):
     if (scheduled_events_storage is None) or ((scheduled_events_table is None) and (scheduled_events_dataset_name is None)):
         ## this should not happen
         dataset_pool = SessionConfiguration().get_dataset_pool()
         self.scheduled_events = dataset_pool.get_dataset( 'scheduled_%s_events' % self.dataset.get_dataset_name() )
         return self.scheduled_events
     
     if not scheduled_events_dataset_name:
         scheduled_events_dataset_name = DatasetFactory().dataset_name_for_table(scheduled_events_table)
     
     self.scheduled_events = DatasetFactory().search_for_dataset(scheduled_events_dataset_name,
                                                               package_order=SessionConfiguration().package_order,
                                                               arguments={'in_storage':scheduled_events_storage, 
                                                                          'in_table_name':scheduled_events_table,
                                                                          'id_name':[]
                                                                          }
                                                               )
     return self.scheduled_events
Code example #28
    def run(self):
        """Runs the test model. 
        """

        dataset_pool = SessionConfiguration().get_dataset_pool()

        building_set = dataset_pool.get_dataset('building')

        res_unit_price = building_set.compute_variables('safe_array_divide(building.improvement_value*1.0, building.residential_units)')

        nonres_unit_price = building_set.compute_variables('safe_array_divide(building.improvement_value*1.0, building.non_residential_sqft)')

        building_set.add_primary_attribute(name='unit_price_residential', data=res_unit_price)

        building_set.add_primary_attribute(name='unit_price_non_residential', data=nonres_unit_price)

        cache_dir = SimulationState().get_cache_directory()
        logger.log_status("cache_dir %s" % cache_dir)

        attribute_cache = AttributeCache()
        logger.log_status("attribute_cache %s" % attribute_cache)
Code example #29
    def run(self, year):
        """This is the main entry point.  The class is initialized with the appropriate configuration info from the 
        travel_model_configuration part of this config, and then copies the specified 
        UrbanSim data into files for emme/2 to read.  
        If households and jobs do not have a primary attribute zone_id, the entry 'locations_to_disaggregate'
        in the travel_model_configuration should be a list of dataset names over which the zone_id 
        will be disaggregated, ordered from higher to lower aggregation level, e.g. ['parcel', 'building'].
        """
        cache_directory = self.config['cache_directory']
        simulation_state = SimulationState()
        simulation_state.set_cache_directory(cache_directory)
        simulation_state.set_current_time(year)
        attribute_cache = AttributeCache()
        sc = SessionConfiguration(new_instance=True,
                                  package_order=self.config['dataset_pool_configuration'].package_order,
                                  in_storage=attribute_cache)
        dataset_pool = sc.get_dataset_pool()

        hh_set = dataset_pool.get_dataset('household')
        zone_set = dataset_pool.get_dataset('zone')
        job_set = dataset_pool.get_dataset('job')
        locations_to_disaggregate = self.config['travel_model_configuration']['locations_to_disaggregate']
        len_locations_to_disaggregate = len(locations_to_disaggregate)
        if len_locations_to_disaggregate > 0:
            primary_location = locations_to_disaggregate[0]
            if len_locations_to_disaggregate > 1:
                intermediates_string = ", intermediates=["
                for i in range(1, len_locations_to_disaggregate):
                    intermediates_string = "%s%s, " % (intermediates_string, locations_to_disaggregate[i])
                intermediates_string = "%s]" % intermediates_string
            else:
                intermediates_string = ""
            hh_set.compute_variables(['%s = household.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],
                                                                                 primary_location, zone_set.get_id_name()[0],
                                                                                 intermediates_string)], 
                                      dataset_pool=dataset_pool)
            job_set.compute_variables(['%s = job.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],
                                                                            primary_location, zone_set.get_id_name()[0],
                                                                            intermediates_string)], 
                                       dataset_pool=dataset_pool)
        
        return self._call_input_file_writer(year, dataset_pool)
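
To make the string building above concrete: with locations_to_disaggregate = ['parcel', 'building'] and a zone id name of zone_id, the household expression generated above comes out as

    'zone_id = household.disaggregate(parcel.zone_id , intermediates=[building, ])'

(note the extra space and trailing comma produced by the templates).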
Code example #30
 def prepare_for_run(self, dataset_name=None, table_name=None, storage=None):
     if (storage is None) or ((table_name is None) and (dataset_name is None)):
         dataset_pool = SessionConfiguration().get_dataset_pool()
         dataset = dataset_pool.get_dataset( 'target_vacancy' )
         return dataset
     
     if not dataset_name:
         dataset_name = DatasetFactory().dataset_name_for_table(table_name)
     
     dataset = DatasetFactory().search_for_dataset(dataset_name,
                                                   package_order=SessionConfiguration().package_order,
                                                   arguments={'in_storage':storage, 
                                                              'in_table_name':table_name,
                                                              'id_name':[]
                                                              }
                                                   )
      if self.target_vacancy_dataset is None:
          self.target_vacancy_dataset = dataset
         
     return dataset
Code example #31
        info_file = file_name_pattern + "__info.txt"
        logger.log_status("Constrained Estimation with alternatives of full choice set for %s" % \
                          aggregate_lookup[options.aggregate_demand])
    else:
        logger.log_status("weight_string: " + options.weight_string)

    logger.log_status(
        "submarket defined by %s x %s " %
        (options.submarket_geography, options.submarket_attribute))

    estimator = HLCMEstimator(config=my_configuration,
                              save_estimation_results=False)
    estimator.simulation_state.set_current_time(2000)

    attribute_cache = AttributeCache()
    sc = SessionConfiguration()
    CLOSE = 0.005  #criterion for convergence
    sc.put_data({'CLOSE': CLOSE, 'info_file': info_file})

    seed(71)  # was: seed(71,110)

    ## relocate movers
    from urbansim.models.household_relocation_model_creator import HouseholdRelocationModelCreator
    hrm = HouseholdRelocationModelCreator().get_model(
        probabilities='urbansim.household_relocation_probabilities',
        location_id_name='building_id')
    hrm_resources = hrm.prepare_for_run(
        rate_storage=attribute_cache,
        rate_table='annual_relocation_rates_for_households',
        what='households')
    hrm_index = hrm.run(agent_set=sc.get_dataset_from_pool('household'),
Code example #32
    def estimate(self,
                 spec_var=None,
                 spec_py=None,
                 movers_index=None,
                 submodel_string="",
                 alt_sample_size=None,
                 sampler="opus_core.samplers.weighted_sampler",
                 weight_string="supply",
                 aggregate_demand=False,
                 submarket_definition=('zone', 'building_type_id'),
                 sample_size_from_each_stratum=50):
        """

        """

        t1 = time()
        SimulationState().set_current_time(2000)

        dataset_pool = SessionConfiguration().get_dataset_pool()

        buildings = dataset_pool.get_dataset("building")
        agent_set = dataset_pool.get_dataset('household')
        #buildings.load_dataset()

        submarket_geography = dataset_pool.get_dataset(submarket_definition[0])
        intermediates = '[]'
        if submarket_geography.dataset_name == 'zone':
            intermediates = '[parcel]'
        elif submarket_geography.dataset_name == 'faz':
            intermediates = '[zone, parcel]'
        elif submarket_geography.dataset_name == 'large_area':
            intermediates = '[faz, zone, parcel]'

        submarket_id_expression = 'building.disaggregate(%s.%s, intermediates=%s) * 100' % \
                                                (submarket_geography.dataset_name, submarket_geography.get_id_name()[0],
                                                 intermediates)
        submarket_variables = [
            '%s=numpy.ceil(submarket.submarket_id / 100)' %
            submarket_geography.get_id_name()[0]
        ]

        if submarket_definition[1] == 'residential_building_type_id':
            set_residential_building_types(
                dataset_pool.get_dataset("building_type"),
                dataset_pool.get_dataset("building"))
        if submarket_definition[1] != '':
            submarket_id_expression = submarket_id_expression + ' + building.%s' % submarket_definition[
                1]
            submarket_variables.append(submarket_definition[1] +
                                       '=submarket.submarket_id % 100')

        submarkets = define_submarket(
            buildings,
            submarket_id_expression,
            #"urbansim_parcel.building.zone_id*100 + building.residential_building_type_id",
            #"building.disaggregate(faz.large_area_id, intermediates=[zone, parcel]) * 100 + building.residential_building_type_id",
            compute_variables=submarket_variables + [
                "residential_units=submarket.aggregate(building.residential_units)",
                "number_of_buildings_with_non_zero_units=submarket.aggregate(building.residential_units > 0 )",
                "number_of_surveyed_households=submarket.aggregate(household.household_id > 5000000, intermediates=[building])",
            ],
            #filter = 'numpy.logical_and(submarket.number_of_surveyed_households > 0, submarket.residential_units>0)',
            #filter = 'submarket.supply > 0',
            #"psrc_parcel.building.large_area_id*100 + building.residential_building_type_id",
            #compute_variables=['residential_building_type_id=submarket.submarket_id % 100',
            #'large_area_id=numpy.ceil(submarket.submarket_id / 100)']
            #"psrc_parcel.building.large_area_id",
            #compute_variables=[#'residential_building_type_id=submarket.submarket_id % 100',
            #'large_area_id=numpy.ceil(submarket.submarket_id)']
        )

        dataset_pool.add_datasets_if_not_included({'submarket': submarkets})
        compute_lambda_and_supply(buildings, agent_set, movers_index,
                                  submarkets)

        submarket_filter = 'submarket.supply > 0'
        if submarket_filter is not None:
            from numpy import logical_not
            submarkets.remove_elements(index=where(
                logical_not(submarkets.compute_variables(submarket_filter)))
                                       [0])
            submarkets.touch_attribute(submarkets.get_id_name()[0])
            buildings.touch_attribute(submarkets.get_id_name()[0])

        if self.save_estimation_results:
            out_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=self.out_con)

        if spec_py is not None:
            reload(spec_py)
            spec_var = spec_py.specification

        if spec_var is not None:
            self.specification = load_specification_from_dictionary(spec_var)
        else:
            in_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=self.in_con)
            self.specification = EquationSpecification(in_storage=in_storage)
            self.specification.load(
                in_table_name="household_location_choice_model_specification")

        self.model_name = "household_location_choice_model"

        agent_set, agents_index_for_estimation = get_households_for_estimation(
            agent_set,
            AttributeCache(),
            "households_for_estimation",
            exclude_condition=
            "household.disaggregate(submarket.submarket_id, intermediates=[building])<=0",
        )
        agent_set.compute_variables(
            "submarket_id=household.disaggregate(building.submarket_id)")
        agent_sample_rate = agents_index_for_estimation.size / float(
            movers_index.size)
        dataset_pool.add_datasets_if_not_included(
            {'sample_rate': agent_sample_rate})

        if aggregate_demand:
            location_set = buildings
            aggregate_dataset = 'submarket'
            #weight_string = 'inv_submarket_supply = 1.0 / (building.disaggregate(submarket.number_of_agents(building))).astype(float32) * (building.disaggregate(submarket.submarket_id) > 0)'
            #weight_string = 'submarket_supply = (building.disaggregate(submarket.supply) > 0).astype(int32)'
            #weight_string = 'submarket_supply = building.disaggregate(submarket.supply) * (building.disaggregate(submarket.submarket_id) > 0).astype(float32)'
        else:
            location_set = submarkets
            aggregate_dataset = None
            #weight_string = 'supply'

        model = HouseholdLocationChoiceModelCreator().get_model(
            location_set=location_set,
            #location_set=submarkets,
            #filter = 'building.disaggregate(submarket.submarket_id) > 0',
            #filter = 'numpy.logical_and(submarket.number_of_surveyed_households > 0, submarket.residential_units>0)',
            #filter = 'building.disaggregate(numpy.logical_and(submarket.number_of_buildings_with_non_zero_units > 5000, submarket.number_of_surveyed_households > 0))',
            submodel_string=submodel_string,
            sampler=sampler,
            #estimation_size_agents = agent_sample_rate * 100/20,
            # proportion of the agent set that should be used for the estimation
            sample_size_locations=alt_sample_size,
            #sample_proportion_locations = 1.0/1000,
            # choice set size (includes current location)
            compute_capacity_flag=True,
            probabilities="opus_core.mnl_probabilities",
            choices="urbansim.lottery_choices",
            #run_config = Resources({"capacity_string":"supply"}),
            estimate_config=Resources({
                "capacity_string": "supply",
                "weights_for_estimation_string": weight_string,
                "aggregate_to_dataset": aggregate_dataset,
                "stratum": "building.disaggregate(submarket.submarket_id)",
                "sample_size_from_each_stratum": sample_size_from_each_stratum,
                #"index2":where(submarkets.compute_variables('submarket.number_of_surveyed_households > 0'))[0],
                #"sample_rate": 1.0/5000,
                #"sample_size_from_chosen_stratum": 0,
                "include_chosen_choice": True
            }))

        # was dataset_pool.add_datasets_if_not_included({'sample_rate':agent_sample_rate})
        self.result = model.estimate(
            self.specification,
            agent_set=agent_set,
            agents_index=agents_index_for_estimation,
            debuglevel=self.debuglevel,
            procedure="urbansim.constrain_estimation_bhhh_two_loops"
        )  #"urbansim.constrain_estimation_bhhh"

        #save estimation results
        if self.save_estimation_results:
            self.save_results(out_storage)

        logger.log_status("Estimation done. " + str(time() - t1) + " s")