Example 1
 def compute(self, dataset_pool):
     dataset = self.get_dataset()
     with logger.block("Compute sc_residential_sqm", verbose=True):
         residential_sqm = dataset.compute_variables(["sc_residential_sqm"], dataset_pool=dataset_pool)
     logger.log_note("residential_sqm in development event history: %s" % sum(residential_sqm))
     
     attr_names_matches = [re.match('sqm_sector([0-9]+)', n) for n in dataset.get_known_attribute_names()]
     sector_ids = sorted([int(m.group(1)) for m in attr_names_matches if m])
     
     sqm_sector_array = reshape(residential_sqm, (-1, 1))
     
     for sector_id in sector_ids:
         sqm_sector = dataset.compute_one_variable_with_unknown_package("sqm_sector%s" % sector_id, dataset_pool=dataset_pool)
         logger.log_note("sqm_sector%s in development event history: %s" % (sector_id, sum(sqm_sector)))
         sqm_sector_array = append(sqm_sector_array, reshape(sqm_sector, (-1, 1)), 1)
     
     sqm_sector_argmax = argmax(sqm_sector_array, 1)
     #logger.log_note("sqm_sector_argmax: %s" % sqm_sector_argmax)
     
     sector_id_array = array([0] + sector_ids)
     #logger.log_note("sector_id_array: %s" % sector_id_array)
     
     val = sector_id_array[sqm_sector_argmax]
     #logger.log_note("val: %s" % val)
     return val
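The lookup at the end of Example 1 is easy to miss: column 0 of sqm_sector_array holds the residential sqm, so an argmax of 0 means "residential dominates", while any other column index maps back to a real sector id through array([0] + sector_ids). Below is a minimal sketch with made-up numbers (only numpy is assumed, not the original dataset classes):

from numpy import array, reshape, append, argmax

# Hypothetical data: 3 development events and two sectors (ids 3 and 7).
residential_sqm = array([100, 0, 20])
sqm_by_sector = {3: array([40, 10, 80]), 7: array([5, 90, 30])}
sector_ids = sorted(sqm_by_sector)

# Column 0 holds residential sqm; one extra column is appended per sector.
sqm_sector_array = reshape(residential_sqm, (-1, 1))
for sector_id in sector_ids:
    sqm_sector_array = append(sqm_sector_array, reshape(sqm_by_sector[sector_id], (-1, 1)), 1)

# argmax over columns, then map the column index back to a sector id
# (index 0 stays 0, meaning the residential column won).
sector_id_array = array([0] + sector_ids)
print(sector_id_array[argmax(sqm_sector_array, 1)])  # -> [0 7 3]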
Example 2
 def finishedCallback(self, success):
     #        if success:
     #            logger.log_note("Success returned from results")
     #        else:
     #            logger.log_warning("Error returned from results")
     logger.log_note('Results finished.')
     self.emit(SIGNAL("runFinished(PyQt_PyObject)"), success)
Example 3
 def export_dataset(self, dataset_name, in_storage, out_storage, overwrite=True, out_dataset_name=None, nchunks = 1, **kwargs):
     if not overwrite and dataset_name in out_storage.get_table_names():
         logger.log_note('Dataset %s ignored because it already exists in OPUS' % dataset_name)
         return
     with logger.block('Exporting dataset %s' % dataset_name):
         if out_dataset_name is None:
             out_dataset_name = dataset_name
         cols_in_this_chunk = in_storage.ALL_COLUMNS
         if nchunks > 1:
             colnames = in_storage.get_column_names(dataset_name)
             chunk_size = int(ceil(len(colnames) / float(nchunks)))
         for chunk in range(nchunks):
             if nchunks > 1:
                 cols_in_this_chunk = colnames[int(chunk*chunk_size):int((chunk+1)*chunk_size)]
             with logger.block('Loading %s - chunk %s out of %s' % (dataset_name, chunk+1, nchunks)):
                 values_from_storage = in_storage.load_table(dataset_name, column_names=cols_in_this_chunk)
                 length = len(values_from_storage) and len(values_from_storage.values()[0])
                 if  length == 0:
                     logger.log_warning("Dataset %s ignored because it's empty" % dataset_name)
                     return
             with logger.block('Storing %s' % dataset_name):
                 if chunk > 0:
                     kwargs['mode'] = out_storage.APPEND
                 out_storage.write_table(out_dataset_name, values_from_storage, **kwargs)
         logger.log_note("Exported %s records for dataset %s" % (length, dataset_name))
Example 4
    def finishedCallback(self,success):
#        if success:
#            logger.log_note("Success returned from results")
#        else:
#            logger.log_warning("Error returned from results")
        logger.log_note('Results finished.')
        self.emit(SIGNAL("runFinished(PyQt_PyObject)"),success)
Example 5
    def run(self, config, year):
        """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
        is correctly set and the matsim tar-file was unpacked in OPUS_HOME, this should work out of the box.  There may possibly
        be problems with the Java version.
        """

        logger.start_block("Starting RunTravelModel.run(...)")

        # try: # tnicolai :for debugging
        #    import pydevd
        #    pydevd.settrace()
        # except: pass

        config_obj_v3 = MATSimConfigObjectV3(config, year)
        self.matsim_config_full = config_obj_v3.marschall()

        # check for test parameter
        tmc = config["travel_model_configuration"]
        if tmc["matsim4urbansim"].get("test_parameter") != None:
            self.test_parameter = tmc["matsim4urbansim"].get("test_parameter")
        # change to directory opus_matsim
        os.chdir(config_obj_v3.matsim4opus_path)

        # init cmd
        cmd = ""
        # calling travel model with cmd command
        if sys.platform.lower() == "win32":
            # reserve memory for java
            xmx = "-Xmx1500m"  # Windows can't reserve more than 1500m
            logger.log_note("Note that Java for Windows can't reserve more than 1500 MB of memory to run MATSim!!!")
            cmd = """java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s %(test_parameter)s""" % {
                "vmargs": xmx,
                "classpath": "jar/matsim.jar;jar/contrib/matsim4urbansim.jar",
                "javaclass": "org.matsim.contrib.matsim4urbansim.run.MATSim4UrbanSimZone",
                "matsim_config_file": self.matsim_config_full,
                "test_parameter": self.test_parameter,
            }
        else:
            # reserve memory for java
            xmx = "-Xmx4000m"
            cmd = """java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s %(test_parameter)s""" % {
                "vmargs": xmx,
                "classpath": "jar/matsim.jar:jar/contrib/matsim4urbansim.jar",
                "javaclass": "org.matsim.contrib.matsim4urbansim.run.MATSim4UrbanSimZone",
                "matsim_config_file": self.matsim_config_full,
                "test_parameter": self.test_parameter,
            }

        logger.log_status("Running command %s" % cmd)

        cmd_result = os.system(cmd)
        if cmd_result != 0:
            error_msg = "MATSim Run failed. Code returned by cmd was %d" % (cmd_result)
            logger.log_error(error_msg)
            logger.log_error("Note that paths in the matsim config files are relative to the matsim4opus root,")
            logger.log_error("which is one level 'down' from OPUS_HOME.")
            raise StandardError(error_msg)

        logger.end_block()
Example 6
 def get_index_of_my_agents(self, dataset, index, dataset_pool=None, resources=None):
     agents_grouping_attr = self.get_agents_grouping_attribute()
     if agents_grouping_attr is None:
         logger.log_warning("'agents_grouping_attribute' wasn't set. No agent selection was done.")
         logger.log_note("Use method 'set_agents_grouping_attribute' for agents selection.")
         return arange(index.size)
     dataset.compute_variables(agents_grouping_attr, dataset_pool=dataset_pool, resources=resources)
     code_values = dataset.get_attribute_by_index(agents_grouping_attr, index)
     return where(code_values == self.get_member_code())[0]
Example 7
 def __get_plans_file(self, common_matsim_part, entry):
     try:    # checks if sub config for matsim input plans file exists
         self.sub_config_exists = ( common_matsim_part[entry] != None)
     except: return ""
     if self.sub_config_exists:
         self.check_abolute_path( common_matsim_part[entry]  )    
         logger.log_note('Input plans file found (MATSim warm start enabled).') 
         return paths.get_opus_home_path( common_matsim_part[entry]  )
     else: 
         logger.log_note('No input plans file set in the "travel_model_configuration" of your current configuration file (MATSim warm start disabled).')
         return ""
Example 8
 def __get_plans_file(self, common_matsim_part, entry):
     try:
         self.sub_config_exists = ( common_matsim_part[entry] != None)
     except:
         logger.log_note('No input plans file in "travel_model_configuration" section found (i.e. MATSim warm/hot start is not active).') 
         return ""
     if self.sub_config_exists:
         self.check_abolute_path( common_matsim_part[entry]  )    
         logger.log_note('Input plans file found (MATSim warm start enabled).') 
         return paths.get_opus_home_path( common_matsim_part[entry]  )
     else:
         return ""
Example 9
    def run( self, vacancy_table, history_table, year, location_set, dataset_pool=None, resources=None ):
        self.dataset_pool=dataset_pool
        building_types = self.dataset_pool.get_dataset('building_type')
        target_vacancy_this_year = DatasetSubset(vacancy_table, index=where(vacancy_table.get_attribute("year")==year)[0])
        building_type_ids = target_vacancy_this_year.get_attribute('building_type_id')
        building_type_idx = building_types.get_id_index(building_type_ids)
        self.used_building_types = DatasetSubset(building_types, index=building_type_idx)
        project_types =  self.used_building_types.get_attribute('building_type_name')
        is_residential = self.used_building_types.get_attribute('is_residential')
        unit_names =  where(is_residential, 'residential_units', 'non_residential_sqft')
        specific_unit_names =  where(is_residential, 'residential_units', '_sqft')
        rates =  target_vacancy_this_year.get_attribute('target_total_vacancy')
        self.project_units = {}
        self.project_specific_units = {}
        target_rates = {}
        for i in range(self.used_building_types.size()):
            self.project_units[project_types[i]] = unit_names[i]
            if is_residential[i]:
                self.project_specific_units[project_types[i]] = specific_unit_names[i]
            else:
                self.project_specific_units[project_types[i]] = "%s%s" % (project_types[i], specific_unit_names[i])
            target_rates[building_type_ids[i]] = rates[i]
            
        self._compute_vacancy_and_total_units_variables(location_set, project_types, resources)
        self.pre_check( location_set, target_vacancy_this_year, project_types)
    
        projects = None
        for project_type_id, target_vacancy_rate in target_rates.iteritems():
            # determine current-year vacancy rates
            project_type = building_types.get_attribute_by_id('building_type_name', project_type_id)
            vacant_units_sum = location_set.get_attribute(self.variable_for_vacancy[project_type]).sum()
            units_sum = float( location_set.get_attribute(self.variable_for_total_units[project_type]).sum() )
            should_develop_units = int(round(max( 0, ( target_vacancy_rate * units_sum - vacant_units_sum ) /
                                         ( 1 - target_vacancy_rate ) )))
            logger.log_status(project_type + ": vacant units: %d, should be vacant: %f, sum units: %d"
                          % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note(("Will not build any " + project_type + " units, because the current vacancy of %d units\n"
                             + "is more than the %d units desired for the vacancy rate of %f.")
                            % (vacant_units_sum,
                               target_vacancy_rate * units_sum,
                               target_vacancy_rate))
            #create projects
            if should_develop_units > 0:
                this_project = self._create_projects(should_develop_units, project_type, project_type_id, history_table,
                                                               location_set, units_sum, resources)
                if projects is None:
                    projects = this_project
                else:
                    projects.join_by_rows(this_project, change_ids_if_not_unique=True)
        return projects
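The should_develop_units expression in Example 9 comes from solving target_rate = (vacant + n) / (total + n) for the number of new units n, which gives n = (target_rate * total - vacant) / (1 - target_rate). A quick check with made-up numbers:

# Hypothetical check: 1000 existing units, 50 vacant, 10% target vacancy.
# (50 + n) / (1000 + n) = 0.10  =>  n = (0.10 * 1000 - 50) / (1 - 0.10)
target_vacancy_rate, units_sum, vacant_units_sum = 0.10, 1000.0, 50.0
should_develop_units = int(round(max(0, (target_vacancy_rate * units_sum - vacant_units_sum)
                                        / (1 - target_vacancy_rate))))
print(should_develop_units)  # -> 56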
Example 10
def read_native_write_h5py(in_fnamel, out_fname, dataset_name, 
                           skiprows=1, delimiter=",", comments="#",
                          rename_and_fix_attrs=None):
    logger.log_note('Importing %s' % dataset_name)
    names, dtype = read_header(in_fnamel[0], rename_attrs=rename_and_fix_attrs)

    for i, in_fname in enumerate(in_fnamel):
        shape1 = determine_dims(in_fname)
        if i == 0:
            shape = shape1
        else:
            assert (shape[1] == shape1[1])
            shape = (shape[0] + shape1[0], shape[1])

    out_fh = h5py.File(out_fname)
    h5data = out_fh.create_dataset(dataset_name, shape=(shape[0],), dtype=dtype, 
                                   compression='gzip', compression_opts=5)
    
    GAP = 10000000
    drow = 0
    
    for i, in_fname in enumerate(in_fnamel):
        with open(in_fname, 'U') as fh:
            logger.log_note('Processing %s' % in_fname)
            for irow, row in enumerate(fh):
                if irow < skiprows: continue
                
                if irow % 1e4 == 0:
                    logger.log_note('Processed %d/%d rows' % (irow, shape[0]))
    
                row = row.split(comments)[0].strip()
                if row == '': continue
                vals = [int(val) for val in row.split(delimiter)]
                
                maxdelta = dict( (names.index(n), vals[names.index(n)]) for n in rename_and_fix_attrs.values())

                # Adjust those attributes in rename_and_fix_attrs
                # by the respective value of the first record
                if irow == skiprows:
                    delta = dict( (n, GAP * i - maxdelta[n]) for n in maxdelta.keys())
                    logger.log_note('Adjusting IDs: %s' % delta)
                for i, d in delta.iteritems():
                    vals[i] += d
                
                h5data[drow] = np.array([tuple(vals)], dtype=dtype)
                drow += 1
                
            logger.log_note('Processed %d rows in total' % (irow + 1))

    out_fh.close()
    return h5data
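The GAP arithmetic in Example 10 rebases the id attributes listed in rename_and_fix_attrs for each input file: the first data row of file i defines a delta of GAP * i minus its own value, so ids from file 0 start at 0, ids from file 1 start at 10000000, and records from different files cannot collide. A toy illustration of the same offset (hypothetical ids, no h5py involved):

GAP = 10000000
files = [[500, 501, 502], [731, 732]]   # id column of two hypothetical input files
for i, ids in enumerate(files):
    delta = GAP * i - ids[0]            # computed once, on the first data row
    print([v + delta for v in ids])
# -> [0, 1, 2] and [10000000, 10000001]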
Example 11
    def estimate(
        self,
        specification,
        agent_set,
        agents_index=None,
        procedure=None,
        data_objects=None,
        estimate_config=None,
        debuglevel=0,
    ):
        """ Computes capacity if required and calls the estimate method of ChoiceModel.
        See ChoiceModel.estimate for details on arguments.
        """
        if agents_index is None:
            agents_index = arange(agent_set.size())
        if agents_index.size <= 0:
            logger.log_status("Nothing to be done.")
            return (None, None)

        logger.log_note(
            "Using dataset pool: %s" % self.dataset_pool.get_package_order()
            if self.dataset_pool is not None
            else self.dataset_pool
        )

        if estimate_config == None:
            estimate_config = Resources()
        self.estimate_config = estimate_config.merge_with_defaults(self.estimate_config)
        if data_objects is not None:
            self.dataset_pool.add_datasets_if_not_included(data_objects)
        if self.location_id_string is not None:
            agent_set.compute_variables(self.location_id_string, dataset_pool=self.dataset_pool)

        self.capacity = None
        if self.estimate_config.get("compute_capacity_flag", False):
            capacity_string_for_estimation = self.estimate_config.get("capacity_string", None)
            self.capacity = self.determine_capacity(
                capacity_string=capacity_string_for_estimation, agent_set=agent_set, agents_index=agents_index
            )

        self.estimate_config.merge({"capacity": self.capacity})
        return ChoiceModel.estimate(
            self,
            specification,
            agent_set,
            agents_index,
            procedure,
            estimate_config=self.estimate_config,
            debuglevel=debuglevel,
        )
Example 12
    def compute(self,  dataset_pool):
        with logger.block('Analyzing sector'):
            sectors = dataset_pool.get_dataset("sector")
            name_equals_sector = sectors.get_attribute("name") == self.sector
            name_equals_sector_indexes = where(name_equals_sector)
            assert(len(name_equals_sector_indexes) == 1)
            name_equals_sector_index = name_equals_sector_indexes[0]
            sector_ids = sectors.get_attribute("sector_id")
            sector_id = sector_ids[name_equals_sector_index][0]


        with logger.block('Analyzing buildings'):
            buildings = self.get_dataset()
            sqm_our_sector = buildings.get_attribute("sqm_sector%s" % sector_id) #get column of observed jobs
            logger.log_note("sqm_sector%s: %s" % (sector_id, sum(sqm_our_sector)))
            
        return sqm_our_sector
Example 13
    def __init__(self,
                 config_path=None,
                 config_file_name=None,
                 xsd_file_name=None):
        print "Entering setup"

        logger.log_status(
            'Validation test of generated MATSim configuration via xsd...')

        # xml destination
        self.matsim_config_file = self.matsim_config_destination = os.path.join(
            os.environ['OPUS_HOME'], "opus_matsim", "matsim_config",
            "test_matsim_config.xml")

        # create xml config file (only possible if pyxb is installed) ...
        # from opus_matsim.sustain_city.tests.pyxb.create_MATSim_config import Create_MATSim_Config
        # config_creator = Create_MATSim_Config(config_path, config_file_name, self.matsim_config_file)
        # if not config_creator.build_xml_config():
        #    logger.log_error("Problems while creating MATSim config file...")
        #    sys.exit()

        # ... therefore we are copying a matsim config file
        logger.log_note(
            'An existing configuration file will be used for this test, since PyXB would need to be installed to create the MATSim configuration.'
        )
        # If the automatic generation of the MATSim configuration is desired, please enable "Create_MATSim_Config" and disable the "self.copy_matsim_config()" function in this test class.
        self.copy_matsim_config()

        self.config_path = config_path
        if self.config_path == None:
            self.config_path = pyxb_test.__path__[0]
        self.config_name = config_file_name
        if self.config_name == None:
            self.config_name = 'test_urbansim_config.xml'
        self.xsd_name = xsd_file_name
        if self.xsd_name == None:
            self.xsd_name = 'test_xsd.xsd'

        # get xsd location
        self.xsd_file = os.path.join(self.config_path, self.xsd_name)
        if not os.path.exists(self.xsd_file):
            sys.exit()

        print "Leaving setup"
Example 14
 def compute(self,  dataset_pool):
     buildings = self.get_dataset()
     results = zeros(buildings.size(), dtype=self._return_type)
     unit_names = unique(buildings["unit_name"])
     logger.log_note("Unit names: %s" % unit_names)        
     for unit_name in unit_names:
         #should not count parcel_sqft
         if unit_name == "parcel_sqft":
             logger.log_warning("occupied_spaces: Skipping unit name %s" % unit_name)
             continue
         if unit_name == '':
             logger.log_warning("occupied_spaces: Skipping empty unit name")
             continue
         
         vname = "occupied_" + unit_name
         self.add_and_solve_dependencies(["urbansim_parcel.building." + vname], dataset_pool)
         matched = buildings["unit_name"] == unit_name
         results[matched] = buildings[vname][matched].astype(self._return_type)
     return results
Example 15
 def export_dataset(self,
                    dataset_name,
                    in_storage,
                    out_storage,
                    overwrite=True,
                    out_dataset_name=None,
                    nchunks=1,
                    **kwargs):
     if not overwrite and dataset_name in out_storage.get_table_names():
         logger.log_note(
             'Dataset %s ignored because it already exists in OPUS' %
             dataset_name)
         return
     with logger.block('Exporting dataset %s' % dataset_name):
         if out_dataset_name is None:
             out_dataset_name = dataset_name
         cols_in_this_chunk = in_storage.ALL_COLUMNS
         if nchunks > 1:
             colnames = in_storage.get_column_names(dataset_name)
             chunk_size = int(ceil(len(colnames) / float(nchunks)))
         for chunk in range(nchunks):
             if nchunks > 1:
                 cols_in_this_chunk = colnames[int(chunk * chunk_size):int(
                     (chunk + 1) * chunk_size)]
             with logger.block('Loading %s - chunk %s out of %s' %
                               (dataset_name, chunk + 1, nchunks)):
                 values_from_storage = in_storage.load_table(
                     dataset_name, column_names=cols_in_this_chunk)
                 length = len(values_from_storage) and len(
                     values_from_storage.values()[0])
                 if length == 0:
                     logger.log_warning(
                         "Dataset %s ignored because it's empty" %
                         dataset_name)
                     return
             with logger.block('Storing %s' % dataset_name):
                 if chunk > 0:
                     kwargs['mode'] = out_storage.APPEND
                 out_storage.write_table(out_dataset_name,
                                         values_from_storage, **kwargs)
         logger.log_note("Exported %s records for dataset %s" %
                         (length, dataset_name))
Example 16
 def get_index_of_my_agents(self,
                            dataset,
                            index,
                            dataset_pool=None,
                            resources=None):
     agents_grouping_attr = self.get_agents_grouping_attribute()
     if agents_grouping_attr is None:
         logger.log_warning(
             "'agents_grouping_attribute' wasn't set. No agent selection was done."
         )
         logger.log_note(
             "Use method 'set_agents_grouping_attribute' for agents selection."
         )
         return arange(index.size)
     dataset.compute_variables(agents_grouping_attr,
                               dataset_pool=dataset_pool,
                               resources=resources)
     code_values = dataset.get_attribute_by_index(agents_grouping_attr,
                                                  index)
     return where(code_values == self.get_member_code())[0]
Example 17
    def compute(self,  dataset_pool):
        building = self.get_dataset()
        total = building.compute_one_variable_with_unknown_package("total_%s_job_space" % self.sector, dataset_pool=dataset_pool)
        logger.log_note("total: %s" % (repr(total)))
        occupied = building.compute_one_variable_with_unknown_package("occupied_%s_job_space" % self.sector, dataset_pool=dataset_pool)
        logger.log_note("occupied: %s" % (repr(occupied)))
        vacant = total - occupied
        logger.log_note("vacant: %s" % (repr(vacant)))

        # HACK
        vacant = clip(vacant, a_min=0, a_max=1e100)
        
        logger.log_note("vacant: %s" % (sum(vacant)))
        assert((vacant >= 0).all())
        assert(sum(vacant) > 0)
        return vacant
Example 18
    def __init__(self, config_path=None, config_file_name=None, xsd_file_name=None):
        print "Entering setup"
        
        logger.log_status('Validation test of generated MATSim configuration via xsd...')
        
        # xml destination
        self.matsim_config_file = self.matsim_config_destination = os.path.join( os.environ['OPUS_HOME'], "opus_matsim", "matsim_config", "test_matsim_config.xml")
        
        # create xml config file (only possible if pyxb is installed) ...
        # from opus_matsim.sustain_city.tests.pyxb.create_MATSim_config import Create_MATSim_Config
        # config_creator = Create_MATSim_Config(config_path, config_file_name, self.matsim_config_file)
        # if not config_creator.build_xml_config():
        #    logger.log_error("Problems while creating MATSim config file...")
        #    sys.exit()
        
        # ... therefore we are copying a matsim config file
        logger.log_note('An existing configuration file will be used for this test, since PyXB would need to be installed to create the MATSim configuration.')
        # If the automatic generation of the MATSim configuration is desired, please enable "Create_MATSim_Config" and disable the "self.copy_matsim_config()" function in this test class.
        self.copy_matsim_config()
        
        self.config_path = config_path
        if self.config_path == None:
            self.config_path = pyxb_test.__path__[0]
        self.config_name = config_file_name
        if self.config_name == None:
            self.config_name = 'test_urbansim_config.xml'
        self.xsd_name = xsd_file_name
        if self.xsd_name == None:
            self.xsd_name = 'test_xsd.xsd'

        # get xsd location
        self.xsd_file = os.path.join(self.config_path, self.xsd_name)
        if not os.path.exists( self.xsd_file ):
            sys.exit()
        
        print "Leaving setup"
Example 19
 def send_to_urbancanvas(self):
     '''
     Sends to UrbanCanvas for visualization.
     '''
     self._update_variable_from_fields()
     func = batch_check_data
     dummy, result, msgs = func([self.variable,], self.validator)[0]
     expression = dummy['definition']
     if dummy['dataset'] == 'parcel':
         from opus_core.storage_factory import StorageFactory
         from opus_core.datasets.dataset_pool import DatasetPool
         import os, sys
         base_year = self.validator.project.xml_config.get_estimation_configuration()['base_year']
         project_name = self.validator.project.name
         opus_data_path = self.validator.project.xml_config.get_opus_data_path()
         logger.log_note(base_year)
         logger.log_note(project_name)
         logger.log_note(opus_data_path)
         cache = os.path.join(opus_data_path,project_name,'base_year_data',str(base_year))
         logger.log_note(cache)
         storage = StorageFactory().get_storage('flt_storage',storage_location=cache)
         dataset_pool = DatasetPool(storage=storage, package_order=[project_name,'urbansim_parcel','urbansim','opus_core'])
         parcels = dataset_pool.get_dataset('parcel')
         parcel_ids = pd.Series(parcels.get_attribute('parcel_id'))
         values = pd.Series(parcels.compute_variables([expression],dataset_pool=dataset_pool).astype('float'))
         parcels = pd.DataFrame({"parcel_id":parcel_ids,"vl_values":values})
         parcels.set_index(keys='parcel_id',inplace=True)
         #parcels["vl_values"][parcels["vl_values"]==0] = np.nan
         parcels = parcels[parcels["vl_values"]>0]
         
         os.chdir(os.path.join(opus_data_path,project_name))
         
         np.savez('variable_library_indicator',parcel_id=parcels.vl_values.index.values.astype('int32'),values=parcels.vl_values.values.astype('int32'))
         
         ##############UNCOMMENT IF WEBSERVICE IS DESIRED
         # parcels.save('variable_library.pkl') ## I believe 'save' was just deprecated in pandas - it's now to_pickle or some such thing... change this later
         # web_service_path = os.path.join(os.getenv("OPUS_HOME"),'src',project_name,'scripts','web_service.py')
         # logger.log_note(web_service_path)
         # p = subprocess.Popen([sys.executable,web_service_path])
         # MessageBox.information(mainwindow = self, text = 'Click OK when done viewing in UrbanCanvas')
         # p.kill()
         
         MessageBox.information(mainwindow = self, text = 'Variable exported to the project data directory for viewing in UrbanCanvas')
         
     else:
         MessageBox.information(mainwindow = self, text = 'Not a parcel variable. Only parcel variables can be sent to UrbanCanvas')
Example 20
 def send_to_urbancanvas(self):
     '''
     Sends to UrbanCanvas for visualization.
     '''
     self._update_variable_from_fields()
     func = batch_check_data
     dummy, result, msgs = func([self.variable,], self.validator)[0]
     expression = dummy['definition']
     if dummy['dataset'] == 'parcel':
         from opus_core.storage_factory import StorageFactory
         from opus_core.datasets.dataset_pool import DatasetPool
         import os, sys
         base_year = self.validator.project.xml_config.get_estimation_configuration()['base_year']
         project_name = self.validator.project.name
         opus_data_path = self.validator.project.xml_config.get_opus_data_path()
         logger.log_note(base_year)
         logger.log_note(project_name)
         logger.log_note(opus_data_path)
         cache = os.path.join(opus_data_path,project_name,'base_year_data',str(base_year))
         logger.log_note(cache)
         storage = StorageFactory().get_storage('flt_storage',storage_location=cache)
         dataset_pool = DatasetPool(storage=storage, package_order=[project_name,'urbansim_parcel','urbansim','opus_core'])
         parcels = dataset_pool.get_dataset('parcel')
         parcel_ids = pd.Series(parcels.get_attribute('parcel_id'))
         values = pd.Series(parcels.compute_variables([expression],dataset_pool=dataset_pool).astype('float'))
         parcels = pd.DataFrame({"parcel_id":parcel_ids,"vl_values":values})
         #parcels.set_index(keys='parcel_id',inplace=True)
         #parcels["vl_values"][parcels["vl_values"]==0] = np.nan
         parcels = parcels[parcels["vl_values"]>0]
         
         os.chdir(os.path.join(opus_data_path,project_name))
         parcels.to_csv('variable_library_indicator.csv',index=False)
         #np.savez('variable_library_indicator',parcel_id=parcels.vl_values.index.values.astype('int32'),values=parcels.vl_values.values.astype('int32'))
         
         ##############UNCOMMENT IF WEBSERVICE IS DESIRED
         # parcels.save('variable_library.pkl') ## I believe 'save' was just deprecated in pandas - it's now to_pickle or some such thing... change this later
         # web_service_path = os.path.join(os.getenv("OPUS_HOME"),'src',project_name,'scripts','web_service.py')
         # logger.log_note(web_service_path)
         # p = subprocess.Popen([sys.executable,web_service_path])
         # MessageBox.information(mainwindow = self, text = 'Click OK when done viewing in UrbanCanvas')
         # p.kill()
         
         MessageBox.information(mainwindow = self, text = 'Variable exported to the project data directory for viewing in UrbanCanvas')
         
     else:
         MessageBox.information(mainwindow = self, text = 'Not a parcel variable. Only parcel variables can be sent to UrbanCanvas')
Example 21
        def write_table(self, table_name, table_data, overwrite_existing=True):
            """
            This method writes a dataset (table_data) to the specified table (table_name).
            Set overwrite_existing = True if the table should be overwritten.
            """

            # Replace dashes in the table name with underscores
            table_name = table_name.replace('-', '_')

            # Reset the workspace
            self.gp.Workspace = self._storage_location
            # Get full path to table
            full_table_location = self.get_full_table_location(table_name)
            if overwrite_existing:
                self.gp.OverwriteOutput= 1
                if self.table_exists(table_name):
                    logger.log_note('The table with the name "%s" already exists.' % (table_name))
                    logger.log_note('This table will be overwritten.')
                    self.gp.Delete(full_table_location)
            else:
                self.gp.OverwriteOutput = 0
                if self.table_exists(table_name):
                    logger.log_note('The table with the name "%s" already exists.' % (table_name))
                    logger.log_note('This table will not be overwritten.')
                    return None

            # Determine table type to write
            storage_location = self.get_storage_location()
            if storage_location.find('.sde') > -1:
                dbf = False
            elif storage_location.find('.gdb') > -1:
                dbf = False
            elif storage_location.find('.mdb') > -1:
                dbf = False
            else:
                dbf = True

            # Create table
            if dbf:
                if table_name.find('.dbf') == -1:
                    table_name = table_name + '.dbf'
                    self.gp.CreateTable(storage_location, table_name)
                else:
                    self.gp.CreateTable(storage_location, table_name)
            else:
                table_name = self.gp.ValidateTableName(table_name)
                self.gp.CreateTable(storage_location, table_name)

            # Get column names
            column_names = []
            for i in table_data:
                column_names.append(i)
            # Get shortened column names
            short_column_names = []
            if dbf:
                for i in column_names:
                    if len(i) <= 10:
                        short_column_names.append(i)
                    else:
                        short_name = self._get_shortened_column_name(i, 8)
                        short_column_names.append(short_name)
            else:
                for i in column_names:
                    if len(i) <= 31:
                        short_column_names.append(i)
                    else:
                        short_name = self._get_shortened_column_name(i, 29)
                        short_column_names.append(short_name)
            # Create column_names to short_column_names mapping
            column_names_mapping = dict(zip(column_names, short_column_names))
            # Get column types
            numpy_column_types = []
            for i in column_names:
                numpy_column_types.append(table_data[i].dtype.kind)
            # Get ESRI column types
            esri_column_types = []
            for i in numpy_column_types:
                esri_column_types.append(self._get_esri_type_from_numpy_dtype(i))

            full_table_location = self.get_full_table_location(table_name)

            # Add columns
            x = 0
            for i in short_column_names:
                self.gp.AddField(full_table_location, i, esri_column_types[x])
                x += 1
            # Delete automatically added field if table of type .dbf
            if dbf:
                self.gp.DeleteField(full_table_location, 'Field1')

            # Insert records
            #
            # Get an ESRI InsertCursor on the table
            rows = self.gp.InsertCursor(full_table_location)
            # Get the number_of_records to insert
            number_of_records = len(table_data[column_names[0]])
            # Do the inserts
            for i in range(0, number_of_records):
                # Get an ESRI NewRow object
                row = rows.NewRow()
                for column_name, column_value in table_data.iteritems():
                    # Check for string value, if yes, insert quotes
                    if column_value[i].dtype.kind == 'S':
                        if "\'" in column_value[i]:
                            column_value[i] = column_value[i].replace("'", "\'")
                            strng = '''"''' + column_value[i] + '''"'''
                            exec_stmt = """row.%s = %s""" % (column_names_mapping[column_name], strng)
                        elif '\"' in column_value[i]:
                            column_value[i] = column_value[i].replace('"', '\"')
                            strng = """'""" + column_value[i] + """'"""
                            exec_stmt = """row.%s = %s""" % (column_names_mapping[column_name], strng)
                        else:
                            strng = """'""" + column_value[i] + """'"""
                            exec_stmt = """row.%s = %s""" % (column_names_mapping[column_name], strng)
                    else:
                        exec_stmt = """row.%s = %s""" % (column_names_mapping[column_name], column_value[i])
                    # Execute the statement built above
                    exec exec_stmt
                # Insert the row
                rows.InsertRow(row)
Example 22
        dataset_name = 'zone',
        name = 'zone Indicators',
        output_type='dbf',
        attributes = attrs
        ) ]
                       
    IndicatorFactory().create_indicators(indicators = zonedbf_indicators,
                                         display_error_box = False,
                                         show_results = False)


if __name__ == '__main__':

    # takes 9.5 mins.  :p
    starttime = time()
    logger.log_note(strftime("%x %X", localtime(starttime)) + ": Starting")
    
    cache_directory=sys.argv[1]

    make_multiyear_workbook(cache_directory=cache_directory,
                             yearstart=2010,
                             yearend=2035)
    make_topsheet(cache_directory)
    make_zone_dbfs(cache_directory)

    endtime = time()
    logger.log_note(strftime("%x %X", localtime(endtime)) + " Completed. Total time: " + str((endtime-starttime)/60.0) + " mins")


#===============================================================================
#    for root, dirs, files in os.walk(cache_directory):
Example 23
    def load_table(self,
                   table_name,
                   column_names=Storage.ALL_COLUMNS,
                   lowercase=True):
        db = self._get_db()

        table = db.get_table(
            table_name)  #Table(table_name, db.metadata, autoload=True)

        available_column_names = self.get_column_names(table_name, lowercase)
        final_cols = self._select_columns(column_names, available_column_names)

        col_data = {}
        selectable_columns = []
        table_data = {}

        for column in table.columns:
            if lowercase:
                col_name = column.name.lower()
            else:
                col_name = column.name

            if col_name in final_cols:
                if isinstance(column.type, PGGeometry):
                    logger.log_warning(
                        'column %s ignored: Column_type not supported by Python'
                        % col_name)
                    continue
                col_type = self._get_numpy_dtype_from_sql_alchemy_type(
                    column.type)
                col_data[col_name] = (column, col_type)
                table_data[col_name] = []
                selectable_columns.append(column)

        if len(selectable_columns) == 0:
            return {}

        query = select(columns=selectable_columns)

        query_results = db.execute(query)

        while True:
            row = query_results.fetchone()
            if row is None: break
            for col_name, (column, col_type) in col_data.items():
                table_data[col_name].append(row[column])

        len_all = len(table_data.values()[0])

        problem_rows = set()
        problem_columns = {}
        remove_columns = {}
        for key, column in table_data.items():
            problem_rows_for_column = []
            for i in range(len_all):
                if column[i] is None:
                    problem_rows_for_column.append(i)
            num_problem_rows_for_column = len(problem_rows_for_column)
            # ignore column if it contains more than 50% NULL values
            if num_problem_rows_for_column * 2 > len(column):
                remove_columns[key] = num_problem_rows_for_column
            elif num_problem_rows_for_column > 0:
                problem_columns[key] = num_problem_rows_for_column
                problem_rows.update(problem_rows_for_column)

        len_rm = len(remove_columns)
        if len_rm > 0:
            logger.log_warning(
                '%s of %s columns ignored in %s '
                'due to NULL values in column(s) (with row count in parens) "%s)"'
                % (len_rm, len(table_data.keys()), table_name, '), "'.join(
                    '%s" (%s' % (k, remove_columns[k])
                    for k in sorted(list(remove_columns)))))

        len_pr = len(problem_rows)
        if len_pr > 0:
            rate_failed = float(len_pr) / len_all
            rate_succeeded = 1.0 - rate_failed
            percentage_succeeded = round(100.0 * rate_succeeded, 2)
            logger.log_warning(
                '%s of %s rows ignored in %s (%s%% successful) '
                'due to NULL values in column(s) (with row count in parens) "%s)'
                % (len_pr, len_all, table_name, percentage_succeeded,
                   '), "'.join('%s" (%s' % (k, problem_columns[k])
                               for k in sorted(list(problem_columns)))))

        if len_pr + len_rm == 0:
            logger.log_note('All rows and columns imported successfully')

        for col_name, (column, col_type) in col_data.items():
            if col_name in remove_columns:
                del table_data[col_name]
                continue

            try:
                clean_column_data = table_data[col_name]
                if len_pr > 0:
                    # select only those rows that can be loaded (as determined before)
                    clean_column_data = [
                        x for (r, x) in enumerate(clean_column_data)
                        if r not in problem_rows
                    ]

                # Unicode is currently NOT supported, but will be returned
                # by the database server.  To avoid import errors with non-ASCII
                # characters in Python 2.6 or earlier, encode as UTF-8
                # explicitly.  Proper Unicode support will require using the 'U'
                # column type -- a rather big change.
                #
                # See http://stackoverflow.com/a/7183618/946850 for details
                # on what will be observed if the following two lines
                # are omitted..
                if col_type.kind == 'S':
                    clean_column_data = [
                        x.encode('utf8') for x in clean_column_data
                    ]

                table_data[col_name] = array(clean_column_data, dtype=col_type)
            except:
                logger.log_error(
                    "Error occurred when exporting column %s; it may be caused by NULL values."
                    % col_name)
                raise

        self._dispose_db(db)
        return table_data
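The NULL handling in load_table (Example 23) is two-tier: a column with more than 50% NULL values is removed outright, while NULLs in the remaining columns cause those rows to be dropped across every surviving column. A minimal sketch of the same policy on made-up data, with no SQLAlchemy involved:

table_data = {
    'id':    [1, 2, 3, 4],
    'price': [10, None, 30, 40],        # 1 of 4 NULL: keep the column, drop row 1
    'notes': [None, None, None, 'x'],   # 3 of 4 NULL: drop the whole column
}
remove_columns = [k for k, col in table_data.items()
                  if sum(v is None for v in col) * 2 > len(col)]
problem_rows = {i for k, col in table_data.items() if k not in remove_columns
                for i, v in enumerate(col) if v is None}
clean = {k: [v for i, v in enumerate(col) if i not in problem_rows]
         for k, col in table_data.items() if k not in remove_columns}
print(remove_columns)  # -> ['notes']
print(clean)           # -> {'id': [1, 3, 4], 'price': [10, 30, 40]} (key order may vary)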
Example 24
    def on_pb_urbancanvas_clicked(self):

        run_name = self.current_run
        indicator_name = self.current_indicator
        indicator_dataset = self.current_indicator_dataset
        if indicator_dataset != 'parcel':
            MessageBox.information(
                mainwindow=self,
                text=
                'Not a parcel variable. Only parcel variables can be sent to UrbanCanvas'
            )
        else:
            start_year = int(self.current_year)
            end_year = start_year

            if run_name is None or indicator_name is None or start_year is None:
                return

            key = (run_name, indicator_name, start_year)

            self.pb_urbancanvas.setText('Sending to UrbanCanvas...')

            indicator_nodes = get_available_indicator_nodes(self.project)

            dataset = None
            for indicator_node in indicator_nodes:
                ind_dataset, name = get_variable_dataset_and_name(
                    indicator_node)
                if name == indicator_name and ind_dataset == indicator_dataset:
                    dataset = ind_dataset
                    break

            if dataset is None:
                raise Exception('Could not find dataset for indicator %s' %
                                indicator_name)

            table_params = {
                'name': None,
                'output_type': 'tab',
                'indicators': [indicator_name],
            }
            expression_library = self.project.xml_config.get_expression_library(
            )
            expression = expression_library[(dataset, name)]
            logger.log_note(expression)

            base_year = end_year
            project_name = self.project.name
            opus_data_path = self.project.xml_config.get_opus_data_path()
            logger.log_note(base_year)
            logger.log_note(project_name)
            logger.log_note(opus_data_path)
            interface = IndicatorFrameworkInterface(self.project)
            source_data = interface.get_source_data(source_data_name=run_name,
                                                    years=[
                                                        end_year,
                                                    ])
            cache = os.path.join(source_data.cache_directory, str(end_year))
            logger.log_note(cache)
            storage = StorageFactory().get_storage('flt_storage',
                                                   storage_location=cache)
            dataset_pool = DatasetPool(storage=storage,
                                       package_order=[
                                           project_name, 'urbansim_parcel',
                                           'urbansim', 'opus_core'
                                       ])
            parcels = dataset_pool.get_dataset('parcel')
            parcel_ids = pd.Series(parcels.get_attribute('parcel_id'))
            values = pd.Series(
                parcels.compute_variables(
                    [expression], dataset_pool=dataset_pool).astype('float'))
            parcels = pd.DataFrame({
                "parcel_id": parcel_ids,
                "vl_values": values
            })
            #parcels.set_index(keys='parcel_id',inplace=True)
            #parcels["vl_values"][parcels["vl_values"]==0] = np.nan
            parcels = parcels[parcels["vl_values"] > 0]

            os.chdir(os.path.join(opus_data_path, project_name))
            parcels.to_csv('results_browser_indicator.csv', index=False)
            #np.savez('results_browser_indicator',parcel_id=parcels.vl_values.index.values.astype('int32'),values=parcels.vl_values.values.astype('int32'))

            ##############UNCOMMENT IF WEBSERVICE IS DESIRED
            # parcels.save('variable_library.pkl') ## I believe 'save' was just deprecated in pandas - it's now to_pickle or some such thing... change this later
            # web_service_path = os.path.join(os.getenv("OPUS_HOME"),'src',project_name,'scripts','web_service.py')
            # logger.log_note(web_service_path)
            # p = subprocess.Popen([sys.executable,web_service_path])
            # MessageBox.information(mainwindow = self, text = 'Click OK when done viewing in UrbanCanvas')
            # p.kill()
            # self.pb_urbancanvas.setText('View in UrbanCanvas')

            MessageBox.information(
                mainwindow=self,
                text=
                'Variable exported to the project data directory for viewing in UrbanCanvas'
            )
            self.pb_urbancanvas.setText('View in UrbanCanvas')
Example 25
    def _run_finished(self, success):
        key = self.running_key
        self.running_key = None

        size = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.tabwidget_visualizations.setSizePolicy(size)

        name = '%s/%s/%s' % key
        new_tab = QTabWidget(self.tabwidget_visualizations)
        self.tabwidget_visualizations.addTab(new_tab, name)

        map_widget = None
        tab_widget = None
        for (visualization_type,
             visualizations) in self.batch_processor.get_visualizations():
            if len(visualizations) > 0:
                if visualization_type == 'mapnik_map':
                    viz = visualizations[0]
                    map_widget = ViewImageForm(viz, new_tab)
                    map_widget.setSizePolicy(size)
                elif visualization_type == 'mapnik_animated_map':
                    viz = visualizations[0]
                    map_widget = ViewAnimationForm(viz, new_tab)
                    map_widget.setSizePolicy(size)
                elif visualization_type == 'table_per_year':
                    viz = visualizations[0]
                    tab_widget = ViewTableForm(viz, new_tab)
                    tab_widget.setSizePolicy(size)
#            else:
#                map_widget = self.tabMap
#                tab_widget = self.tabTable

#        if not map_widget or not tab_widget: return

#        self.tabMap = map_widget
#        self.tabTable = tab_widget

        if tab_widget:
            new_tab.addTab(tab_widget, "Table")

        if map_widget:
            new_tab.addTab(map_widget, "Map")

        self.already_browsed[key] = (tab_widget, map_widget)

        #        self.lblViewIndicator.setText(QString(key[1]))
        #        self.lblViewRun.setText(QString(key[0]))
        #        self.lblViewYear.setText(QString(repr(key[2])))

        swap = self.queued_results is not None and self.queued_results[
            0] == 'swap'

        if self.queued_results is not None and not swap:
            self.running_key = self.queued_results[0]

            logger.log_note(
                'Generating queued results for %s on run %s for year %i' %
                self.running_key)
            self.batch_processor = self.queued_results[1]
            self.queued_results = None

            runThread = OpusGuiThread(parentThread=get_mainwindow_instance(),
                                      parentGuiElement=self,
                                      thread_object=self.batch_processor)

            # Use this signal from the thread if it is capable of producing its own status signal
            QObject.connect(runThread, SIGNAL("runFinished(PyQt_PyObject)"),
                            self._run_finished)
            QObject.connect(runThread, SIGNAL("runError(PyQt_PyObject)"),
                            self._run_error)
            runThread.start()
        else:
            #            if swap:
            #                (map_widget, tab_widget) = self.queued_results[1]
            #
            ##                self.swap_visualizations(map_widget, tab_widget)
            #                name = '%s/%s/%s'%key
            #        #        self.swap_visualizations(map_widget, tab_widget)
            #                self.add_visualization(map_widget = map_widget, tab_widget = tab_widget, name = name)

            self.queued_results = None

            self.generating_results = False
            self.pb_generate_results.setText('Results Generated')
Example 26
 def run(self, xsd_file=None, destination=None, binding_class_name=None, test_run=False):
     logger.start_block('Starting to update xml parser for UrbanSim ...')
     
     self.output_pyxb_package_name = None
     self.output_pyxb_package_file = None
     
     # location of xsd file
     if xsd_file == None:
         # download xsd from matsim.org
         xsd_location = self.get_xsd_from_matsim_org()
     else:
         xsd_location = xsd_file
     
     # name of output package, where the generated binding classes will be stored
     if binding_class_name == None:
         logger.log_note('Name for PyXB binding class is None! ')
         self.output_pyxb_package_name = 'pyxb_matsim_config_parser'
         logger.log_note('Setting default name for PyXB binding class: %s' %self.output_pyxb_package_name)
     else:
         self.output_pyxb_package_name = binding_class_name
     self.output_pyxb_package_file = self.output_pyxb_package_name + '.py'
     
     # path to the PyXB executables
     pyxb_gen = os.path.join( os.getenv('HOME'), 'bin', 'pyxbgen')
     # checking if PyXB is available
     if not os.path.exists( pyxb_gen ):
         raise StandardError('PyXB seems not to be installed on this machine.\nPlease download and install PyXB first. It is available on http://sourceforge.net/projects/pyxb/ (Accessed July 2010).')
     
     # print status information
     logger.log_status('Found PyXB executable: %s' % pyxb_gen)
     binding_class_destination = destination
     if binding_class_destination == None:
         logger.log_note('Destination for binding classes not given. Using default location...')
         binding_class_destination = pyxb_path.__path__[0]
     logger.log_status('Destination directory for PyXB binding classes: %s' % binding_class_destination)
     logger.log_status('XSD repository: %s' % xsd_location)
     logger.log_status('New pyxb xml binding class: %s' % self.output_pyxb_package_file)
     
     # checking if a previous binding class exists
     # get current directory
     binding_class = os.path.join(binding_class_destination, self.output_pyxb_package_file)
     if os.path.exists(binding_class):
         logger.log_status('Found a previous binding class')
         if test_run:
             os.remove( binding_class)
         else: # archiving previous pyxb parser versions
             archive_folder = os.path.join(binding_class_destination, 'xsd_archive')
             if not os.path.exists(archive_folder):
                 logger.log_status("Creating archive folder %s" % archive_folder)
                 os.mkdir(archive_folder)
             # create subfolder
             datetime = time.strftime("%Y_%m_%d-%H:%M:%S", time.gmtime())
             subfolder = os.path.join(archive_folder, datetime)
             os.mkdir(subfolder)
             destination = os.path.join(subfolder, self.output_pyxb_package_file)
             # moving previous binding class into archive
             logger.log_status("Moving previous binding class into archive: %s" %destination)
             shutil.move(binding_class, destination)
     
     #===========================================================================
     # EXAMPLE:
     # Generating xml binding classes manually.
     #
     # 1) Start a terminal and switch to the place where the xsd is stored. Here its "xsds".
     #
     # 2) Enter the following commandline:
     # /Users/thomas/bin/pyxbgen \
     # > -u Products.xsd -m pro1
     #
     # 3) The following output appears:
     # urn:uuid:4b416ad0-11a5-11df-a29e-001b63930ac1
     # Python for AbsentNamespace0 requires 1 modules
     # Saved binding source to ./pro1.py
     # thomas-nicolais-macbook-pro:xsds thomas$ 
     #
     # 4) The generated classes are ready to use.
     #===========================================================================
     
     # command line to generate xml binding classes as explained above
     cmd = 'cd %(binding_class_destination)s ; %(pyxbgen)s -u %(xsd_location)s -m %(output)s' % {
         'binding_class_destination': binding_class_destination,
         'pyxbgen': pyxb_gen,
         'xsd_location': xsd_location,
         'output': self.output_pyxb_package_name}
 
     logger.log_status('Executing command : %s' % cmd)
     # executing command line
     cmd_result = os.system(cmd)
     # checking if some error occurred
     if cmd_result != 0:
         raise StandardError('Executing command failed! Returncode = %i' % cmd_result)
     
     # At this point executing command line was successful
     # Now a UrbanSim header is added to the generated binding classes
     
     # read whole file
     f = open(binding_class, "r")
     # binding class will be extended by the UrbanSim header
     content = "# Opus/UrbanSim urban simulation software\n# Copyright (C) 2005-2009 University of Washington\n# See opus_core/LICENSE\n\n"
     line = f.readline()
     while line:
         content += line
         line = f.readline()
     f.close()
     
     # get current binding class and overwrite with the actual content containing the header
     binding_class = os.path.join(binding_class_destination, self.output_pyxb_package_file)
     print "Path to generated binding class: %s" % binding_class
     # open binding class to add the header
     f = open(binding_class, 'w')
     try:
         f.write(content)
     except Exception:
         logger.log_error("Error occured while adding the UrbanSim header to the binding class.")
     finally:
         f.close()
         
     
     logger.log_status('Successfully finished. Exiting program.')
     logger.end_block()
     return 1 # return code for test class (1 == ok)
Example 27
    def run( self, vacancy_table, frequency_table, template_table, year, location_set, resources=None ):
        self.pre_check( location_set, vacancy_table, [] )
        target_residential_vacancy_rate = vacancy_table.get_data_element_by_id( year ).target_total_residential_vacancy
        target_non_residential_vacancy_rate = vacancy_table.get_data_element_by_id( year ).target_total_non_residential_vacancy
        compute_resources = Resources(resources)
#        compute_resources.merge({"household":household_set, "job":job_set, "debug":self.debug})
        location_set.compute_variables( ["urbansim.gridcell.vacant_residential_units",
                                        "urbansim.gridcell.vacant_commercial_sqft",
                                        "urbansim.gridcell.vacant_industrial_sqft"],
                                        resources = compute_resources )

        # determine current-year vacancy rates
        vacant_resunits_sum = location_set.get_attribute( "vacant_residential_units" ).sum()
        resunits_sum = float( location_set.get_attribute( "residential_units" ).sum() )
        vacant_residential_rate = self.safe_divide(vacant_resunits_sum, resunits_sum)

        vacant_commercial_sqft_sum = location_set.get_attribute( "vacant_commercial_sqft" ).sum()
        commercial_sqft_sum =  float( location_set.get_attribute( "commercial_sqft" ).sum() )
        vacant_commercial_rate =  self.safe_divide(vacant_commercial_sqft_sum, commercial_sqft_sum)

        vacant_industrial_sqft_sum = location_set.get_attribute( "vacant_industrial_sqft" ).sum()
        industrial_sqft_sum = float( location_set.get_attribute( "industrial_sqft" ).sum() )
        vacant_industrial_rate =  self.safe_divide(vacant_industrial_sqft_sum, industrial_sqft_sum)

        logger.log_status("Res: vacant res units: %d, should be vacant: %f, sum res units: %d"
                          % (vacant_resunits_sum, target_residential_vacancy_rate * resunits_sum, resunits_sum))
        logger.log_status("Com: vacant sqft: %d, should be vacant: %f, sum sqft: %d"
                          % (vacant_commercial_sqft_sum, target_non_residential_vacancy_rate * commercial_sqft_sum,
                             commercial_sqft_sum))
        logger.log_status("Ind: vacant sqft: %d, should be vacant: %f, sum sqft: %d"
                          % (vacant_industrial_sqft_sum, target_non_residential_vacancy_rate * industrial_sqft_sum,
                             industrial_sqft_sum))

        should_develop_resunits = max( 0, ( target_residential_vacancy_rate * resunits_sum - vacant_resunits_sum ) /
                                         ( 1 - target_residential_vacancy_rate ) )
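        # The expression above solves (vacant + new) / (existing + new) = target for 'new':
        # building that many additional units raises the vacancy rate to the target.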
        if not should_develop_resunits:
            logger.log_note(("Will not build any residential units, because the current residential vacancy of %d units\n"
                             + "is more than the %d units desired for the vacancy rate of %f.")
                            % (vacant_resunits_sum,
                               target_residential_vacancy_rate * resunits_sum,
                               target_residential_vacancy_rate))
        should_develop_commercial = max( 0, ( target_non_residential_vacancy_rate * commercial_sqft_sum - vacant_commercial_sqft_sum ) /
                                           ( 1 - target_non_residential_vacancy_rate ) )
        if not should_develop_commercial:
            logger.log_note(("Will not build any commercial sqft, because the current commercial vacancy of %d sqft\n"
                             + "is more than the %d sqft desired for the vacancy rate of %f.")
                            % (vacant_commercial_sqft_sum,
                               target_non_residential_vacancy_rate * commercial_sqft_sum,
                               target_non_residential_vacancy_rate))
        should_develop_industrial = max( 0, ( target_non_residential_vacancy_rate * industrial_sqft_sum - vacant_industrial_sqft_sum ) /
                                           ( 1 - target_non_residential_vacancy_rate ) )
        if not should_develop_industrial:
            logger.log_note(("Will not build any industrial sqft, because the current industrial vacancy of %d sqft\n"
                             + "is more than the %d sqft desired for the vacancy rate of %f.")
                            % (vacant_industrial_sqft_sum,
                               target_non_residential_vacancy_rate * industrial_sqft_sum,
                               target_non_residential_vacancy_rate))

#        projects = {}
#        should_develop = {"residential":should_develop_resunits,
#                          "commercial":should_develop_commercial,
#                          "industrial":should_develop_industrial}

#        average_improvement_value = {}
#        average_improvement_value["residential"] = self.safe_divide(
#            location_set.get_attribute("residential_improvement_value" ).sum(), resunits_sum)
#        average_improvement_value["commercial"] = self.safe_divide(
#            location_set.get_attribute("commercial_improvement_value" ).sum(), commercial_sqft_sum)
#        average_improvement_value["industrial"] = self.safe_divide(
#            location_set.get_attribute("industrial_improvement_value" ).sum(), industrial_sqft_sum)

        #create projects

        development_type_ids = []
        units = []; com_sqfts=[]; ind_sqfts=[]; gov_sqfts=[];
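        # Sample development templates one at a time, weighted by their observed frequency,
        # until the residential, commercial and industrial targets are all met.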
        while should_develop_resunits > 0 or should_develop_commercial > 0 or should_develop_industrial > 0:
            n = 1   # sample n developments at a time
            sampled_ids = probsample_replace(frequency_table.get_attribute('development_type_id'),
                                             n,
                                             frequency_table.get_attribute('frequency').astype(float32)/frequency_table.get_attribute('frequency').sum())
            for id in sampled_ids:
                index = where(template_table.get_attribute('development_type_id') == id)[0]
                res_unit = template_table.get_attribute_by_index('residential_units', index)
                com_sqft = template_table.get_attribute_by_index('commercial_sqft', index)
                ind_sqft = template_table.get_attribute_by_index('industrial_sqft', index)
                gov_sqft = template_table.get_attribute_by_index('governmental_sqft', index)

                should_develop_resunits -= res_unit[0]
                should_develop_commercial -= com_sqft[0]
                should_develop_industrial -= ind_sqft[0]

                development_type_ids.append(id)
                units.append(res_unit)
                com_sqfts.append(com_sqft)
                ind_sqfts.append(ind_sqft)
                gov_sqfts.append(gov_sqft)

        sizes = len(development_type_ids)
        if sizes > 0:
            storage = StorageFactory().get_storage('dict_storage')

            developments_table_name = 'developments'
            storage.write_table(
                    table_name=developments_table_name,
                    table_data={
                        "landuse_development_id": arange( sizes ),
                        "grid_id": -1 * ones( ( sizes, ), dtype=int32),
                        "development_type_id": array(development_type_ids),
                        "residential_units":array(units),
                        "commercial_sqft":array(com_sqfts),
                        "industrial_sqft":array(ind_sqfts),
                        "governmental_sqft":array(gov_sqfts),
                        "improvement_value": zeros( ( sizes, ), dtype="int32"),
                        },
                )

            developments = LandUseDevelopmentDataset(
                in_storage = storage,
                in_table_name = developments_table_name,
                )

        else:
            developments = None

        return developments
Example n. 28
0
    def run( self, building_set,
             new_building_copy_attrs,
             building_type_table,
             building_type_classification_table,
             vacancy_table,
             history_table,
             year,
             location_set,
             resources=None ):
        building_classes = building_type_classification_table.get_attribute("name")
        unit_attributes = building_type_classification_table.get_attribute('units')
        building_id_name = building_set.get_id_name()[0]
        location_id_name = location_set.get_id_name()[0]
        calc_attributes = [building_id_name, location_id_name, "year_built"]
        new_buildings   = {}
        for attribute in new_building_copy_attrs:
            new_buildings[attribute] = array([], dtype=building_set.get_data_type(attribute))
        for attribute in calc_attributes:
            new_buildings[attribute] = array([], dtype=building_set.get_data_type(attribute))
            
        # for convenience, make a map of building_type_id => (building_type)class_id
        # these names are hard-wired elsewhere
        building_type_id_to_class_id = {}
        building_type_ids = building_type_table.get_attribute("building_type_id")
        for idx in range(building_type_table.size()):
            building_type_id_to_class_id[building_type_ids[idx]] = \
                building_type_table.get_attribute("class_id")[idx]
        logger.log_status("building_type_id_to_class_id = " + str(building_type_id_to_class_id))
        
        # and make a column of use classes for the history table
        history_type_classes = zeros( (history_table.size()), dtype=int8)
        history_types = history_table.get_attribute("building_type_id")
        for idx in range(history_table.size()):
            history_type_classes[idx] = building_type_id_to_class_id[history_types[idx]]
        logger.log_status("history_types=" + str(history_types))
        logger.log_status("history_type_classes=" + str(history_type_classes))

        max_id = building_set.get_id_attribute().max()
        new_building_id_start = max_id + 1
        new_building_id_end = max_id + 1
        building_set_size_orig = building_set.size()

        for itype in range(building_type_classification_table.size()): # iterate over building types
            building_class = building_classes[itype]
            building_class_id = building_type_classification_table.get_attribute("class_id")[itype]
            
            vacancy_attribute = 'target_total_%s_vacancy' % building_class.lower()
            if vacancy_attribute not in vacancy_table.get_known_attribute_names():
                logger.log_warning("No target vacancy for building class '%s' (e.g. no '%s' in target_vacancies). Transition model for this building class skipped." 
                                   % (building_class,vacancy_attribute)) 
                continue
            vacancy_table.get_attribute(vacancy_attribute)  # ensures that the attribute is loaded
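            # (note: getattr(vacancy_table.get_data_element_by_id(year), vacancy_attribute)
            # would fetch the same value without the eval below)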
            target_vacancy_rate = eval("vacancy_table.get_data_element_by_id( year ).%s" % vacancy_attribute)
            logger.log_status("Target vacancy rate for building_class %s is %f" % (building_class, target_vacancy_rate))

            compute_resources = Resources(resources)
            compute_resources.merge({"debug":self.debug})
            units_attribute         = unit_attributes[itype]
            occupied_sqft_attribute = 'occupied_sqft_of_typeclass_%s' % building_class.lower()
            total_sqft_attribute    = 'where(sanfrancisco.building.building_typeclass_name==\'%s\',sanfrancisco.building.building_sqft,0)' % building_class.lower()

            # determine current-year vacancy rates
            building_set.compute_variables(("sanfrancisco.building." + occupied_sqft_attribute,
                                            total_sqft_attribute), 
                                            resources = compute_resources)

            occupied_sqft_sum   = building_set.get_attribute(occupied_sqft_attribute).sum()
            total_sqft_sum      = float( building_set.get_attribute(total_sqft_attribute).sum() )
            occupancy_rate      = self.safe_divide(occupied_sqft_sum, total_sqft_sum)
            # cap it at 1.0
            if occupancy_rate > 1.0: occupancy_rate = 1.0
            vacancy_rate        = 1.0 - occupancy_rate
            vacant_sqft_sum     = vacancy_rate * total_sqft_sum

            should_develop_sqft = (target_vacancy_rate*total_sqft_sum) - vacant_sqft_sum
            logger.log_status("%s: vacancy rate: %4.3f   occupancy rate: %4.3f" 
                              % (building_class, vacancy_rate, occupancy_rate))
            logger.log_status("%s: vacant: %d, should be vacant: %f, sum units: %d"
                          % (building_class, vacant_sqft_sum, target_vacancy_rate*total_sqft_sum, total_sqft_sum))

            if should_develop_sqft <= 0:
                logger.log_note(("Will not build any %s units, because the current vacancy of %d sqft\n"
                             + "is more than the %d sqft desired for the vacancy rate of %f.")
                            % (building_class,
                               vacant_sqft_sum,
                               target_vacancy_rate*total_sqft_sum,
                               target_vacancy_rate))
                continue

            #create buildings

            # find sample set of qualifying buildings in the events history, 
            # e.g. where the building_type is in the correct class, and a positive 
            # number of units or sqft (or whatever) were present
            history_sqft = history_table.get_attribute('building_sqft')
            index_sampleset = where( (history_sqft > 0) & (history_type_classes==building_class_id))[0]

            # Ensure that there are some development projects to choose from.
            logger.log_status("should_develop_sqft=" + str(should_develop_sqft))
            if index_sampleset.shape[0] == 0:
                logger.log_warning("Cannot create new buildings for building use class %s; no buildings in the event history table from which to sample."
                                   % building_class) 
                continue
            
            history_sqft_sampleset = history_sqft[index_sampleset]            
            logger.log_status("history_sqft_sampleset = " + str(history_sqft_sampleset))

            mean_size = history_sqft_sampleset.mean()
            idx = array( [] ,dtype="int32")
            #TODO: should the 'int' in the following line be 'ceil'?
            num_of_projects_to_select = max( 10, int( should_develop_sqft / mean_size ) )
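            # Draw random projects from the sample set with replacement, keep only the leading
            # draws whose cumulative sqft stays within should_develop_sqft, and stop once the
            # drawn total reaches the target.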
            while True:
                idx = concatenate( ( idx, randint( 0, history_sqft_sampleset.size,
                                                   size=num_of_projects_to_select) ) )
                csum = history_sqft_sampleset[idx].cumsum()
                idx = idx[where( csum <= should_develop_sqft )]
                if csum[-1] >= should_develop_sqft:
                    break
            
            logger.log_status("idx = " + str(idx))

            nbuildings = idx.size
            if nbuildings == 0: continue

            new_building_id_end = new_building_id_start + nbuildings

            # copy_attributes
            for attribute in new_building_copy_attrs:
                attr_values = history_table.get_attribute(attribute)[index_sampleset[idx]]
                new_buildings[attribute] = concatenate((new_buildings[attribute], attr_values))
            
            # calc_attributes
            new_buildings[building_id_name] =concatenate((new_buildings[building_id_name], arange(new_building_id_start, new_building_id_end)))
            new_buildings[location_id_name] = concatenate((new_buildings[location_id_name], zeros(nbuildings)))
            new_buildings["year_built"] = concatenate((new_buildings["year_built"], year*ones(nbuildings)))
            logger.log_status("Creating %s sqft of %s %s buildings." % (history_sqft_sampleset[idx].sum(),
                                                                      nbuildings, building_class))
            new_building_id_start = new_building_id_end + 1
            logger.log_status(new_buildings)
        building_set.add_elements(new_buildings, require_all_attributes=False)

        difference = building_set.size() - building_set_size_orig
        index = arange(difference) + building_set_size_orig
        return index
Example n. 29
0
    def compute(self,  dataset_pool):
        with logger.block('Analyzing sectors'):
            sectors = dataset_pool.get_dataset("sector")
            name_equals_sector = sectors.get_attribute("name") == self.sector
            name_equals_sector_indexes = where(name_equals_sector)
            assert(len(name_equals_sector_indexes[0]) == 1)  # exactly one sector with this name
            name_equals_sector_index = name_equals_sector_indexes[0]
            sector_ids = sectors.get_attribute("sector_id")
            sector_id = sector_ids[name_equals_sector_index][0]
            sqft_per_jobs = sectors.get_attribute("sqm_per_job")
            sqft_per_job = sqft_per_jobs[name_equals_sector_index][0]
            logger.log_note("sqft_per_job: %s" % sqft_per_job)

        with logger.block('Analyzing jobs'):
            logger.log_note("sector_id: %s" % sector_id)
            jobs = dataset_pool.get_dataset("job")
            logger.log_note("jobs.size: %s" % jobs.size())
            buildings = self.get_dataset()
            logger.log_note("buildings.size: %s" % buildings.size())
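            # Each job in the target sector contributes sqft_per_job (jobs in other sectors are
            # masked out); the per-job areas are then summed per building and clipped to building_sqft.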
            job_sqft = ma.masked_where(jobs.get_attribute('sector_id') != sector_id, [sqft_per_job] * jobs.size(), 0)
            logger.log_note("job_sqft: %s" % repr(job_sqft))
            logger.log_note("job_sqft.sum(): %s" % (job_sqft.sum()))
            logger.log_note("job_sqft.sum() / sqft_per_job: %s" % (job_sqft.sum() / sqft_per_job))
            jobs_building_id = jobs.get_attribute('building_id')
            buildings_id = buildings.get_id_attribute()
            logger.log_note("building_id difference: %s" % (set(jobs_building_id) - set(buildings_id)))
            job_area_raw = buildings.sum_over_ids(jobs_building_id, job_sqft)
            logger.log_note("job_area_raw: %s" % repr(job_area_raw))
            logger.log_note("job_area_raw.sum(): %s" % (job_area_raw.sum()))
            logger.log_note("job_area_raw.sum() / sqft_per_job: %s" % (job_area_raw.sum() / sqft_per_job))
            job_area = clip(job_area_raw, 0,
                            buildings.get_attribute("building_sqft"))
            logger.log_note("job_area: %s" % repr(job_area))
            logger.log_note("job_area.sum(): %s" % (job_area.sum()))
            logger.log_note("job_area.sum() / sqft_per_job: %s" % (job_area.sum() / sqft_per_job))

        return job_area
    def run( self, building_set, building_types_table, vacancy_table, year, location_set,
            building_categories=None, dataset_pool=None, resources=None ):
        building_types = building_types_table.get_attribute("name")
        building_id_name = building_set.get_id_name()[0]
        location_id_name = location_set.get_id_name()[0]
        new_buildings = {building_id_name: array([], dtype=building_set.get_data_type(building_id_name)),
                         "building_type_id":array([], dtype=building_set.get_data_type("building_type_id", int8)),
                         "year_built": array([], dtype=building_set.get_data_type("year_built", int32)),
                         "sqft": array([], dtype=building_set.get_data_type("sqft", int32)),
                         "residential_units": array([], dtype=building_set.get_data_type("residential_units", int32)),
                         "improvement_value": array([], dtype= building_set.get_data_type("improvement_value", float32)),
                         "land_value": array([], dtype= building_set.get_data_type("land_value", float32)),
                         location_id_name: array([], dtype=building_set.get_data_type(location_id_name, int32))}
        max_id = building_set.get_id_attribute().max()
        buildings_set_size_orig = building_set.size()

        for itype in range(building_types_table.size()): # iterate over building types
            type = building_types[itype]
            type_code = building_types_table.get_id_attribute()[itype]
            is_residential = building_types_table.get_attribute("is_residential")[itype]
            vacancy_attribute = 'target_total_%s_vacancy' % type
            if vacancy_attribute not in vacancy_table.get_known_attribute_names():
                logger.log_warning("No target vacancy for building type '%s'. Transition model for this building type skipped." % type)
                continue
            vacancy_table.get_attribute(vacancy_attribute)  # ensures that the attribute is loaded
            target_vacancy_rate = eval("vacancy_table.get_data_element_by_id( year ).%s" % vacancy_attribute)

            compute_resources = Resources(resources)
            compute_resources.merge({"debug":self.debug})
            units_attribute = building_types_table.get_attribute('units')[itype]

            # determine current-year vacancy rates
            if is_residential:
                default_vacancy_variable = "urbansim.%s.vacant_%s_units_from_buildings" % (
                                                                   location_set.get_dataset_name(), type)
            else:
                default_vacancy_variable = "urbansim.%s.vacant_%s_sqft_from_buildings" % (
                                                                   location_set.get_dataset_name(), type)
            variable_for_vacancy = compute_resources.get(
                                    "%s_vacant_variable" % type, default_vacancy_variable)
            location_set.compute_variables([variable_for_vacancy, "urbansim.%s.buildings_%s_space" % (
                                                                      location_set.get_dataset_name(),type)],
                                        dataset_pool=dataset_pool, resources = compute_resources)

            vacant_units_sum = location_set.get_attribute(variable_for_vacancy).sum()
            units_sum = float( location_set.get_attribute("buildings_%s_space" % type).sum() )
            vacant_rate = self.safe_divide(vacant_units_sum, units_sum)

            should_develop_units = int(round(max( 0, ( target_vacancy_rate * units_sum - vacant_units_sum ) /
                                         ( 1 - target_vacancy_rate ) )))
            logger.log_status(type + ": vacant units: %d, should be vacant: %f, sum units: %d"
                          % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note(("Will not build any " + type + " units, because the current vacancy of %d units\n"
                             + "is more than the %d units desired for the vacancy rate of %f.")
                            % (vacant_units_sum,
                               target_vacancy_rate * units_sum,
                               target_vacancy_rate))
                continue

            improvement_value = building_set.compute_variables("urbansim.%s.%s_improvement_value" % (
                                                                     building_set.get_dataset_name(), type),
                                                                   dataset_pool=dataset_pool,
                                                                   resources=compute_resources)
            average_improvement_value = improvement_value.sum()/ units_sum

            #create buildings
            is_building_type = building_set.compute_variables("urbansim.building.is_building_type_%s" % type,
                                                              dataset_pool=dataset_pool,
                                                              resources=compute_resources)
            units_of_this_type = building_set.compute_variables(units_attribute, dataset_pool=dataset_pool,
                                           resources=compute_resources)
            units_of_this_type = units_of_this_type*is_building_type
            units_without_zeros_idx = where(units_of_this_type > 0)[0]
            history_values_without_zeros = units_of_this_type[units_without_zeros_idx]
            history_improvement_values_without_zeros = where(improvement_value[units_without_zeros_idx]>0,
                                                             improvement_value[units_without_zeros_idx],
                                                             average_improvement_value)
            mean_size = history_values_without_zeros.mean()
            idx = array( [], dtype="int32" )
            # Ensure that there are some development projects to choose from.
            num_of_projects_to_select = max( 10, int( should_develop_units / mean_size ) )
            while True:
                idx = concatenate( ( idx, randint( 0, history_values_without_zeros.size,
                                                   size=num_of_projects_to_select) ) )
                csum = history_values_without_zeros[idx].cumsum()
                idx = idx[where( csum <= should_develop_units )]
                if csum[-1] >= should_develop_units:
                    break
            nbuildings = idx.size
            new_buildings["building_type_id"] = concatenate((new_buildings["building_type_id"], type_code*ones(nbuildings)))
            new_buildings["year_built"] = concatenate((new_buildings["year_built"], year*ones(nbuildings)))
            new_max_id = max_id + nbuildings
            new_buildings[building_id_name]=concatenate((new_buildings[building_id_name], arange(max_id+1, new_max_id+1)))
            max_id = new_max_id
            new_buildings["improvement_value"] = concatenate((new_buildings["improvement_value"],
                                                              history_improvement_values_without_zeros[idx]))

            if is_residential:
                target_size_attribute = "residential_units"
                zero_attribute = "sqft"
            else:
                target_size_attribute = "sqft"
                zero_attribute = "residential_units"
            new_buildings[target_size_attribute] = concatenate((new_buildings[target_size_attribute], history_values_without_zeros[idx]))
            new_buildings[zero_attribute] = concatenate((new_buildings[zero_attribute], zeros(nbuildings)))
            new_buildings[location_id_name] = concatenate((new_buildings[location_id_name], zeros(nbuildings)))
            new_buildings["land_value"] = concatenate((new_buildings["land_value"], zeros(nbuildings)))
            logger.log_status("Creating %s %s of %s %s buildings." % (history_values_without_zeros[idx].sum(),
                                                                   target_size_attribute, nbuildings, type))

        building_set.add_elements(new_buildings, require_all_attributes=False)
        if building_categories: # should be a dictionary of categories for each building type
            building_set.resources['building_categories'] = building_categories
        # add submodel attribute
        category_variables = map(lambda type: "urbansim.%s.size_category_%s" % (building_set.get_dataset_name(), type),
                                           building_types)

        for category_var in category_variables:
            var = VariableName(category_var)
            if var.get_alias() in building_set.get_known_attribute_names():
                building_set.delete_one_attribute(var)
            building_set.compute_variables(var, dataset_pool=dataset_pool, resources = compute_resources)
            building_set.add_primary_attribute(building_set.get_attribute(var), var.get_alias())

        difference = building_set.size() - buildings_set_size_orig
        return difference
Example n. 31
0
    def run( self, building_set,
#             building_use_table,
             building_use_classification_table,
             vacancy_table,
             history_table,
             year,
             location_set,
             resources=None ):
        building_classes = building_use_classification_table.get_attribute("name")
        unit_attributes = building_use_classification_table.get_attribute('units')
        building_id_name = building_set.get_id_name()[0]
        location_id_name = location_set.get_id_name()[0]
        new_buildings = {building_id_name: array([], dtype='int32'),
                         "building_use_id":array([], dtype=int8),
                         "year_built": array([], dtype='int32'),
#                         "building_sqft": array([], dtype='int32'),
#                         "residential_units": array([], dtype='int32'),
                         "unit_price": array([], dtype= float32),
                         location_id_name: array([], dtype='int32')}
        for attribute in unit_attributes:
            new_buildings[attribute] = array([], dtype='int32')

        max_id = building_set.get_id_attribute().max()
        building_set_size_orig = building_set.size()

        for itype in range(building_use_classification_table.size()): # iterate over building types
            building_class = building_classes[itype]
#            type_code = building_types_table.get_id_attribute()[itype]
            vacancy_attribute = 'target_total_%s_vacancy' % building_class
            if vacancy_attribute not in vacancy_table.get_known_attribute_names():
                logger.log_warning("No target vacancy for building class '%s'. Transition model for this building class skipped." % building_class)
                continue
            vacancy_table.get_attribute(vacancy_attribute)  # ensures that the attribute is loaded
            target_vacancy_rate = eval("vacancy_table.get_data_element_by_id( year ).%s" % vacancy_attribute)

            compute_resources = Resources(resources)
            compute_resources.merge({"debug":self.debug})
            units_attribute = unit_attributes[itype]
            vacant_units_attribute = 'vacant_' + units_attribute

            # determine current-year vacancy rates
            building_set.compute_variables("urbansim_parcel.building." + vacant_units_attribute,
                                           resources = compute_resources)

            vacant_units_sum = building_set.get_attribute(vacant_units_attribute).sum()
            units_sum = float( building_set.get_attribute(units_attribute).sum() )
            vacant_rate = self.safe_divide(vacant_units_sum, units_sum)

            should_develop_units = max( 0, ( target_vacancy_rate * units_sum - vacant_units_sum ) /
                                         ( 1 - target_vacancy_rate ) )
            logger.log_status(building_class + ": vacant units: %d, should be vacant: %f, sum units: %d"
                          % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note(("Will not build any " + building_class + " units, because the current vacancy of %d units\n"
                             + "is more than the %d units desired for the vacancy rate of %f.")
                            % (vacant_units_sum,
                               target_vacancy_rate * units_sum,
                               target_vacancy_rate))
                continue

#            average_buildings_value = None
#            if (type+"_improvement_value") in location_set.get_known_attribute_names():
#                average_buildings_value = self.safe_divide(
#                    location_set.get_attribute(type+"_improvement_value" ).sum(), units_sum)

            #create buildings

            history_values = history_table.get_attribute(units_attribute)
            index_non_zeros_values = where( history_values > 0 )[0]
            history_values_without_zeros = history_values[index_non_zeros_values]
            history_type = history_table.get_attribute("building_use_id")
            history_type_without_zeros = history_type[index_non_zeros_values]
            history_price = history_table.get_attribute("unit_price")
            history_price_without_zeros = history_price[index_non_zeros_values]

            #TODO: what happens if history has only zeroes?
            mean_size = history_values_without_zeros.mean()
            idx = array( [], dtype="int32" )
            # Ensure that there are some development projects to choose from.
            #TODO: should the 'int' in the following line be 'ceil'?
            num_of_projects_to_select = max( 10, int( should_develop_units / mean_size ) )
            while True:
                idx = concatenate( ( idx, randint( 0, history_values_without_zeros.size,
                                                   size= num_of_projects_to_select ) ) )
                csum = history_values_without_zeros[idx].cumsum()
                idx = idx[where( csum <= should_develop_units )]
                if csum[-1] >= should_develop_units:
                    break

            nbuildings = idx.size

            for attribute in unit_attributes:

                #if attribute == units_attribute:
                    #new_unit_values = history_values_without_zeros[idx]
                #else:
                    #new_unit_values = zeros(nbuildings)
                # to accommodate mixed-use buildings, allow attributes other than units_attribute to be non-zero
                new_unit_values = history_table.get_attribute(attribute)[index_non_zeros_values[idx]]

                new_buildings[attribute] = concatenate((new_buildings[attribute], new_unit_values))

            new_max_id = max_id + nbuildings
            new_buildings[building_id_name]=concatenate((new_buildings[building_id_name], arange(max_id+1, new_max_id+1)))
            new_buildings["building_use_id"] = concatenate((new_buildings["building_use_id"], history_type_without_zeros[idx]))
            new_buildings["year_built"] = concatenate((new_buildings["year_built"], year*ones(nbuildings, dtype="int32")))
            new_buildings["unit_price"] = concatenate((new_buildings["unit_price"], history_price_without_zeros[idx]))
            new_buildings[location_id_name] = concatenate((new_buildings[location_id_name], zeros(nbuildings, dtype="int32")))
            logger.log_status("Creating %s %s of %s %s buildings." % (history_values_without_zeros[idx].sum(),
                                                                      units_attribute, nbuildings, building_class))

        building_set.add_elements(new_buildings, require_all_attributes=False)

        difference = building_set.size() - building_set_size_orig
        index = arange(difference) + building_set_size_orig
        return index
Example n. 32
0
    if len(sys.argv) < 2:
        usage()
    
    chunks = int(sys.argv[1])

    if len(sys.argv) != 3 + 2 * chunks:
        usage()
        
    in_fnames = {}
    in_fnames['household'] = sys.argv[2:(2+chunks)]
    in_fnames['person'] = sys.argv[(2+chunks):(2+2*chunks)]
    out_fname = sys.argv[2+2*chunks]

    try:
        os.unlink(out_fname)
        logger.log_note('Deleted file %s' % out_fname)
    except:
        pass

    rename_attrs = {'household': {'id':'household_id'},
                    'person':{'id':'person_id', 
                              'hh_id':'household_id'}
                    }

    for dataset_name in ('household', 'person'):
        ## read
        #data = read_csv_with_numpy(in_fname) 
        #data = read_csv_native(in_fname) 
        in_fnamel = in_fnames[dataset_name]
        data = read_native_write_h5py(in_fnamel, out_fname, dataset_name,
                                      rename_and_fix_attrs=rename_attrs[dataset_name])
    def _run_finished(self, success):
        key = self.running_key
        self.running_key = None

        size = QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
        self.tabwidget_visualizations.setSizePolicy(size)

        name = '%s/%s/%s'%key
        new_tab = QTabWidget(self.tabwidget_visualizations)
        self.tabwidget_visualizations.addTab(new_tab, name)

        map_widget = None
        tab_widget = None
        for (visualization_type, visualizations) in self.batch_processor.get_visualizations():
            if len(visualizations) > 0:
                if visualization_type == 'mapnik_map':
                    viz = visualizations[0]
                    map_widget = ViewImageForm(viz, new_tab)
                    map_widget.setSizePolicy(size)
                elif visualization_type == 'mapnik_animated_map':
                    viz = visualizations[0]
                    map_widget = ViewAnimationForm(viz, new_tab)
                    map_widget.setSizePolicy(size)
                elif visualization_type == 'table_per_year':
                    viz = visualizations[0]
                    tab_widget = ViewTableForm(viz, new_tab)
                    tab_widget.setSizePolicy(size)
#            else:
#                map_widget = self.tabMap
#                tab_widget = self.tabTable

#        if not map_widget or not tab_widget: return

#        self.tabMap = map_widget
#        self.tabTable = tab_widget

        if tab_widget:
            new_tab.addTab(tab_widget, "Table")

        if map_widget:
            new_tab.addTab(map_widget, "Map")

        self.already_browsed[key] = (tab_widget, map_widget)

#        self.lblViewIndicator.setText(QString(key[1]))
#        self.lblViewRun.setText(QString(key[0]))
#        self.lblViewYear.setText(QString(repr(key[2])))

        swap = self.queued_results is not None and self.queued_results[0] == 'swap'

        if self.queued_results is not None and not swap:
            self.running_key = self.queued_results[0]

            logger.log_note('Generating queued results for %s on run %s for year %i'%self.running_key)
            self.batch_processor = self.queued_results[1]
            self.queued_results = None

            runThread = OpusGuiThread(
                                  parentThread = get_mainwindow_instance(),
                                  parentGuiElement = self,
                                  thread_object = self.batch_processor)

            # Use this signal from the thread if it is capable of producing its own status signal
            QObject.connect(runThread, SIGNAL("runFinished(PyQt_PyObject)"), self._run_finished)
            QObject.connect(runThread, SIGNAL("runError(PyQt_PyObject)"), self._run_error)
            runThread.start()
        else:
#            if swap:
#                (map_widget, tab_widget) = self.queued_results[1]
#
##                self.swap_visualizations(map_widget, tab_widget)
#                name = '%s/%s/%s'%key
#        #        self.swap_visualizations(map_widget, tab_widget)
#                self.add_visualization(map_widget = map_widget, tab_widget = tab_widget, name = name)

            self.queued_results = None

            self.generating_results = False
            self.pb_generate_results.setText('Results Generated')
Example n. 34
0
    def run(self,
            xsd_file=None,
            destination=None,
            binding_class_name=None,
            test_run=False):
        logger.start_block('Starting to update xml parser for UrbanSim ...')

        self.output_pyxb_package_name = None
        self.output_pyxb_package_file = None

        # location of xsd file
        if xsd_file == None:
            # download xsd from matsim.org
            xsd_location = self.get_xsd_from_matsim_org()
        else:
            xsd_location = xsd_file

        # name of output package, where the generated binding classes will be stored
        if binding_class_name == None:
            logger.log_note('Name for PyXB binding class is None! ')
            self.output_pyxb_package_name = 'pyxb_matsim_config_parser'
            logger.log_note('Setting default name for PyXB binding class: %s' %
                            self.output_pyxb_package_name)
        else:
            self.output_pyxb_package_name = binding_class_name
        self.output_pyxb_package_file = self.output_pyxb_package_name + '.py'

        # path to the PyXB executables
        pyxb_gen = os.path.join(os.getenv('HOME'), 'bin', 'pyxbgen')
        # checking if PyXB is available
        if not os.path.exists(pyxb_gen):
            raise StandardError(
                'PyXB seems not to be installed on this machine.\nPlease download and install PyXB first. It is available on http://sourceforge.net/projects/pyxb/ (Accessed July 2010).'
            )

        # print status information
        logger.log_status('Found PyXB executable: %s' % pyxb_gen)
        binding_class_destination = destination
        if binding_class_destination == None:
            logger.log_note(
                'Destination for binding classes not given. Using default location...'
            )
            binding_class_destination = pyxb_path.__path__[0]
        logger.log_status(
            'Destination directory for PyXB binding classes: %s' %
            binding_class_destination)
        logger.log_status('XSD location: %s' % xsd_location)
        logger.log_status('New pyxb xml binding class: %s' %
                          self.output_pyxb_package_file)

        # checking if a previous binding class exists
        # build the path to the binding class in the destination directory
        binding_class = os.path.join(binding_class_destination,
                                     self.output_pyxb_package_file)
        if os.path.exists(binding_class):
            logger.log_status('Found a previous binding class')
            if test_run:
                os.remove(binding_class)
            else:  # archiving previous pyxb parser versions
                archive_folder = os.path.join(binding_class_destination,
                                              'xsd_archive')
                if not os.path.exists(archive_folder):
                    logger.log_status("Creating archive folder %s" %
                                      archive_folder)
                    os.mkdir(archive_folder)
                # create subfolder
                datetime = time.strftime("%Y_%m_%d-%H:%M:%S", time.gmtime())
                subfolder = os.path.join(archive_folder, datetime)
                os.mkdir(subfolder)
                destination = os.path.join(subfolder,
                                           self.output_pyxb_package_file)
                # moving previous binding class into archive
                logger.log_status(
                    "Moving previous binding class into archive: %s" %
                    destination)
                shutil.move(binding_class, destination)

        #===========================================================================
        # EXAMPLE:
        # Generating xml binding classes manually.
        #
        # 1) Start a terminal and switch to the place where the xsd is stored. Here it's "xsds".
        #
        # 2) Enter the following commandline:
        # /Users/thomas/bin/pyxbgen \
        # > -u Products.xsd -m pro1
        #
        # 3) The following output appears:
        # urn:uuid:4b416ad0-11a5-11df-a29e-001b63930ac1
        # Python for AbsentNamespace0 requires 1 modules
        # Saved binding source to ./pro1.py
        # thomas-nicolais-macbook-pro:xsds thomas$
        #
        # 4) The generated classes are ready to use.
        #===========================================================================

        # command line to generate xml binding classes as explained above
        cmd = 'cd %(binding_class_destination)s ; %(pyxbgen)s -u %(xsd_location)s -m %(output)s' % {
            'binding_class_destination': binding_class_destination,
            'pyxbgen': pyxb_gen,
            'xsd_location': xsd_location,
            'output': self.output_pyxb_package_name
        }

        logger.log_status('Executing command : %s' % cmd)
        # executing command line
        cmd_result = os.system(cmd)
        # checking if an error occurred
        if cmd_result != 0:
            raise StandardError('Executing command failed! Return code = %i' %
                                cmd_result)

        # At this point the command line execution was successful
        # Now a UrbanSim header is added to the generated binding classes

        # read whole file
        f = open(binding_class, "r")
        # binding class will be extended by the UrbanSim header
        content = "# Opus/UrbanSim urban simulation software\n# Copyright (C) 2005-2009 University of Washington\n# See opus_core/LICENSE\n\n"
        line = f.readline()
        while line:
            content += line
            line = f.readline()
        f.close()

        # re-resolve the binding class path and overwrite the file with the content that now includes the header
        binding_class = os.path.join(binding_class_destination,
                                     self.output_pyxb_package_file)
        print "Path to generated binding class: %s" % binding_class
        # open binding class to add the header
        f = open(binding_class, 'w')
        try:
            f.write(content)
        except Exception:
            logger.log_error(
                "An error occurred while adding the UrbanSim header to the binding class."
            )
        finally:
            f.close()

        logger.log_status('Successfully finished. Exiting program.')
        logger.end_block()
        return 1  # return code for test class (1 == ok)
Example n. 35
0
    def on_pb_urbancanvas_clicked(self):
        
        run_name = self.current_run
        indicator_name = self.current_indicator
        indicator_dataset = self.current_indicator_dataset
        if indicator_dataset != 'parcel':
            MessageBox.information(mainwindow = self, text = 'Not a parcel variable. Only parcel variables can be sent to UrbanCanvas')
        else:
            start_year = int(self.current_year)
            end_year = start_year

            if run_name is None or indicator_name is None or start_year is None:
                return

            key = (run_name, indicator_name, start_year)

            self.pb_urbancanvas.setText('Sending to UrbanCanvas...')

            indicator_nodes = get_available_indicator_nodes(self.project)

            dataset = None
            for indicator_node in indicator_nodes:
                ind_dataset, name = get_variable_dataset_and_name(indicator_node)
                if name == indicator_name and ind_dataset == indicator_dataset:
                    dataset = ind_dataset
                    break

            if dataset is None:
                raise Exception('Could not find dataset for indicator %s' % indicator_name)

            table_params = {
                'name': None,
                'output_type' : 'tab',
                'indicators' : [indicator_name],
            }
            expression_library = self.project.xml_config.get_expression_library()
            expression = expression_library[(dataset,name)]
            logger.log_note(expression)
            
            base_year = end_year
            project_name = self.project.name
            opus_data_path = self.project.xml_config.get_opus_data_path()
            logger.log_note(base_year)
            logger.log_note(project_name)
            logger.log_note(opus_data_path)
            interface = IndicatorFrameworkInterface(self.project)
            source_data = interface.get_source_data(
                                 source_data_name = run_name,
                                 years = [end_year,]
            )
            cache = os.path.join(source_data.cache_directory,str(end_year))
            logger.log_note(cache)
            storage = StorageFactory().get_storage('flt_storage',storage_location=cache)
            dataset_pool = DatasetPool(storage=storage, package_order=[project_name,'urbansim_parcel','urbansim','opus_core'])
            parcels = dataset_pool.get_dataset('parcel')
            parcel_ids = pd.Series(parcels.get_attribute('parcel_id'))
            values = pd.Series(parcels.compute_variables([expression],dataset_pool=dataset_pool).astype('float'))
            parcels = pd.DataFrame({"parcel_id":parcel_ids,"vl_values":values})
            #parcels.set_index(keys='parcel_id',inplace=True)
            #parcels["vl_values"][parcels["vl_values"]==0] = np.nan
            parcels = parcels[parcels["vl_values"]>0]
            
            os.chdir(os.path.join(opus_data_path,project_name))
            parcels.to_csv('results_browser_indicator.csv',index=False)
            #np.savez('results_browser_indicator',parcel_id=parcels.vl_values.index.values.astype('int32'),values=parcels.vl_values.values.astype('int32'))
            
            ##############UNCOMMENT IF WEBSERVICE IS DESIRED
            # parcels.save('variable_library.pkl') ## 'save' was deprecated in pandas; it's now to_pickle or similar... change this later
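            # (in current pandas the equivalent call would be parcels.to_pickle('variable_library.pkl'))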
            # web_service_path = os.path.join(os.getenv("OPUS_HOME"),'src',project_name,'scripts','web_service.py')
            # logger.log_note(web_service_path)
            # p = subprocess.Popen([sys.executable,web_service_path])
            # MessageBox.information(mainwindow = self, text = 'Click OK when done viewing in UrbanCanvas')
            # p.kill()
            # self.pb_urbancanvas.setText('View in UrbanCanvas')
            
            MessageBox.information(mainwindow = self, text = 'Variable exported to the project data directory for viewing in UrbanCanvas')
            self.pb_urbancanvas.setText('View in UrbanCanvas')
Example n. 36
0
    def __init__(self):
        config = AbstractUrbansimConfiguration()

        config_changes = {
            "project_name": "washtenaw",
            "description": "Region Pilot Baseline",
            "scenario_database_configuration": ScenarioDatabaseConfiguration(database_name="washtenaw_class"),
            "models": [
                "prescheduled_events",
                "events_coordinator",
                "residential_land_share_model",
                "land_price_model",
                "regional_development_project_transition_model",
                "residential_regional_development_project_location_choice_model",
                "commercial_regional_development_project_location_choice_model",
                "industrial_regional_development_project_location_choice_model",
                "development_event_transition_model",
                "events_coordinator",
                "residential_land_share_model",
                "jobs_event_model",
                #'households_event_model',
                "regional_household_transition_model",
                "regional_household_relocation_model",
                "regional_household_location_choice_model",
                "regional_employment_transition_model",
                "regional_employment_relocation_model",
                {"regional_employment_location_choice_model": {"group_members": ["_all_"]}},
                "regional_distribute_unplaced_jobs_model",
            ],
            "cache_directory": None,  ### TODO: Set this cache_directory to something useful.
            "creating_baseyear_cache_configuration": CreatingBaseyearCacheConfiguration(
                cache_directory_root="/urbansim_cache/washtenaw",
                cache_from_database=True,
                baseyear_cache=BaseyearCacheConfiguration(
                    existing_cache_to_copy="/urbansim_cache/washtenaw/cache_source"
                ),
                cache_scenario_database="urbansim.model_coordinators.cache_scenario_database",
                tables_to_cache=self.tables_to_cache,
                tables_to_cache_nchunks={"gridcells": 1},
                tables_to_copy_to_previous_years=self.tables_to_copy_to_previous_years,
            ),
            "datasets_to_preload": {
                "development_constraint": {},
                "development_event_history": {},
                "development_type": {},
                "gridcell": {"nchunks": 2},
                "household": {},
                "job": {},
                "job_building_type": {},
                "target_vacancy": {},
                "zone": {},
                "jobs_event": {},
                #'households_event': {},
            },
            "dataset_pool_configuration": DatasetPoolConfiguration(
                package_order=["washtenaw", "urbansim", "opus_core"]
            ),
            "base_year": 2005,
            "years": (2006, 2010),
        }
        config.merge(config_changes)
        self.merge(config)
        self.merge_with_controller()
        try:
            exec("from %s_local_config import my_configuration" % getuser())
            local_config = True
        except:
            logger.log_note("No user's settings found or an error occurred when loading.")
            local_config = False
        if local_config:
            self.merge(my_configuration)
    def run(self):
        """Write a dummy travel_data.csv instead of actually running MATSim."""
        try:
            import pydevd
            pydevd.settrace()
        except:
            pass
        logger.start_block("Starting RunDummyTravelModel.run(...)")

        print >> sys.stderr, "\nThis should also check if get_cache_data_into_matsim did something reasonable"

        logger.log_status('would normally run MATSim')

        #        if not (sys.path == None) and len(sys.path) > 0:
        #            module_path = sys.path[0]
        #            logger.log_note("project path: %s" % module_path)
        #
        #        in_file_name = os.path.join( module_path, "data", "travel_data_manipulated.csv" )
        #        logger.log_note("open file : %s" % in_file_name)
        #        file_in = open(in_file_name, 'r')
        out_file_name = os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                                     "tmp", "travel_data.csv")
        logger.log_note("open file : %s" % out_file_name)
        file_out = open(out_file_name, 'w')

        # cbd_zone = "129"

        file_out.write(
            "from_zone_id:i4,to_zone_id:i4,single_vehicle_to_work_travel_cost:f4\n"
        )
        file_out.write("1,1,0.0\n")
        file_out.write("1,102,999.9999999999999\n")
        file_out.write("1,109,999.9999999999999\n")
        file_out.write("1,126,999.9999999999999\n")
        file_out.write("1,128,999.9999999999999\n")
        file_out.write("1,134,999.9999999999999\n")
        file_out.write("1,139,999.9999999999999\n")
        file_out.write("1,140,999.9999999999999\n")
        file_out.write("1,2,999.9999999999999\n")
        file_out.write("102,1,999.9999999999999\n")
        file_out.write("102,102,0.0\n")
        file_out.write("102,109,999.9999999999999\n")
        file_out.write("102,126,999.9999999999999\n")
        file_out.write("102,128,999.9999999999999\n")
        file_out.write("102,134,999.9999999999999\n")
        file_out.write("102,139,999.9999999999999\n")
        file_out.write("102,140,999.9999999999999\n")
        file_out.write("102,2,999.9999999999999\n")
        file_out.write("109,1,999.9999999999999\n")
        file_out.write("109,102,999.9999999999999\n")
        file_out.write("109,109,0.0\n")
        file_out.write("109,126,999.9999999999999\n")
        file_out.write("109,128,999.9999999999999\n")
        file_out.write("109,134,999.9999999999999\n")
        file_out.write("109,139,999.9999999999999\n")
        file_out.write("109,140,999.9999999999999\n")
        file_out.write("109,2,999.9999999999999\n")
        file_out.write("126,1,999.9999999999999\n")
        file_out.write("126,102,999.9999999999999\n")
        file_out.write("126,109,999.9999999999999\n")
        file_out.write("126,126,0.0\n")
        file_out.write("126,128,999.9999999999999\n")
        file_out.write("126,134,999.9999999999999\n")
        file_out.write("126,139,999.9999999999999\n")
        file_out.write("126,140,999.9999999999999\n")
        file_out.write("126,2,999.9999999999999\n")
        file_out.write("128,1,999.9999999999999\n")
        file_out.write("128,102,999.9999999999999\n")
        file_out.write("128,109,999.9999999999999\n")
        file_out.write("128,126,999.9999999999999\n")
        file_out.write("128,128,0.0\n")
        file_out.write("128,134,999.9999999999999\n")
        file_out.write("128,139,999.9999999999999\n")
        file_out.write("128,140,999.9999999999999\n")
        file_out.write("128,2,999.9999999999999\n")
        file_out.write("134,1,999.9999999999999\n")
        file_out.write("134,102,999.9999999999999\n")
        file_out.write("134,109,999.9999999999999\n")
        file_out.write("134,126,999.9999999999999\n")
        file_out.write("134,128,999.9999999999999\n")
        file_out.write("134,134,0.0\n")
        file_out.write("134,139,999.9999999999999\n")
        file_out.write("134,140,999.9999999999999\n")
        file_out.write("134,2,999.9999999999999\n")
        file_out.write("139,1,999.9999999999999\n")
        file_out.write("139,102,999.9999999999999\n")
        file_out.write("139,109,999.9999999999999\n")
        file_out.write("139,126,999.9999999999999\n")
        file_out.write("139,128,999.9999999999999\n")
        file_out.write("139,134,999.9999999999999\n")
        file_out.write("139,139,0.0\n")
        file_out.write("139,140,999.9999999999999\n")
        file_out.write("139,2,999.9999999999999\n")
        file_out.write("140,1,999.9999999999999\n")
        file_out.write("140,102,999.9999999999999\n")
        file_out.write("140,109,999.9999999999999\n")
        file_out.write("140,126,999.9999999999999\n")
        file_out.write("140,128,999.9999999999999\n")
        file_out.write("140,134,999.9999999999999\n")
        file_out.write("140,139,999.9999999999999\n")
        file_out.write("140,140,0.0\n")
        file_out.write("140,2,999.9999999999999\n")
        file_out.write("2,1,999.9999999999999\n")
        file_out.write("2,102,999.9999999999999\n")
        file_out.write("2,109,999.9999999999999\n")
        file_out.write("2,126,999.9999999999999\n")
        file_out.write("2,128,999.9999999999999\n")
        file_out.write("2,134,999.9999999999999\n")
        file_out.write("2,139,999.9999999999999\n")
        file_out.write("2,140,999.9999999999999\n")
        file_out.write("2,2,0.0\n")

        try:
            #file_in.close()
            file_out.close()
        except:
            logger.log_warning("could not close file %s" % out_file_name)

        logger.end_block()
Esempio n. 38
0
    def on_pb_generate_results_clicked(self):
        run_name = self.current_run
        indicator_name = self.current_indicator
        indicator_dataset = self.current_indicator_dataset
        start_year = int(self.current_year)
        end_year = start_year

        if run_name is None or indicator_name is None or start_year is None:
            return

        key = (run_name, indicator_name, start_year)

        if key in self.already_browsed:
            # unpack the cached widgets before branching so they are also
            # available when the swap is queued below
            (tab_widget, map_widget) = self.already_browsed[key]
            if not self.generating_results:
                #                self.swap_visualizations(map_widget, tab_widget)
                self.pb_generate_results.setText('Results Generated')
            else:
                self.queued_results = ('swap', (map_widget, tab_widget))
            return

        self.pb_generate_results.setEnabled(False)
        self.pb_generate_results.setText('Generating Results...')

        indicator_nodes = get_available_indicator_nodes(self.project)

        dataset = None
        for indicator_node in indicator_nodes:
            ind_dataset, name = get_variable_dataset_and_name(indicator_node)
            if name == indicator_name and ind_dataset == indicator_dataset:
                dataset = ind_dataset
                break

        if dataset is None:
            raise Exception('Could not find dataset for indicator %s' %
                            indicator_name)

        table_params = {
            'name': None,
            'output_type': 'tab',
            'indicators': [indicator_name],
        }

        map_params = {'name': None, 'indicators': [indicator_name]}

        visualizations = [('table_per_year', dataset, table_params),
                          ('mapnik_map', dataset, map_params)]

        batch_processor = BatchProcessor(self.project)
        batch_processor.guiElement = self

        batch_processor.set_data(visualizations=visualizations,
                                 source_data_name=run_name,
                                 years=range(start_year, end_year + 1))

        if not self.generating_results:
            self.generating_results = True
            logger.log_note(
                'Generating results for run %s, indicator %s, year %s'
                % (run_name, indicator_name, start_year))
            self.running_key = key
            self.batch_processor = batch_processor
            batch_processor_thread = OpusGuiThread(
                parentThread=get_mainwindow_instance(),
                parentGuiElement=self,
                thread_object=self.batch_processor)

            # Use this signal from the thread if it is capable of producing its own status signal
            self.connect(batch_processor_thread,
                         SIGNAL("runFinished(PyQt_PyObject)"),
                         self._run_finished)
            self.connect(batch_processor_thread,
                         SIGNAL("runError(PyQt_PyObject)"), self._run_error)

            batch_processor_thread.start()
        else:
            self.queued_results = (key, batch_processor)
    def run(
        self,
        vacancy_table,
        history_table,
        year,
        location_set,
        resources=None,
        development_models=None,
        models_configuration=None,
        model_configuration=None,
    ):
        """
        Defining the development project types can be done in two ways;
        either by using a small part of configuration located under
        'development_project_types' that lists only the needed information
        OR: they can be defined as part of the development project models.
        Configurations that pass the development_models argument assume to 
        use the latter method.
        """
        # check that we get the correct arguments
        if development_models is not None and models_configuration is None:
            raise StandardError(
                "Configurations that pass a list of development"
                ' models (argument: "development_models") must '
                "also pass a reference to the entire models "
                'configuration (argument: "models_'
                'configuration") note: plural model[s].'
            )

        dev_model_configs = {}
        if development_models is None:  # assume this means that we use old conf
            # try to get a reference to the external information for development
            # project types
            try:
                dev_model_configs = model_configuration["development_project_types"]
            except:
                dev_model_configs = models_configuration["development_project_types"]
        else:
            # pull in information from the specified development project models
            for dev_proj_model in development_models:
                model_conf = models_configuration[dev_proj_model]
                proj_type = model_conf["controller"]["init"]["arguments"]["project_type"].strip("'\"")
                dev_model_configs[proj_type] = {}
                dev_model_configs[proj_type]["units"] = model_conf["controller"]["init"]["arguments"]["units"].strip(
                    "'\""
                )
                dev_model_configs[proj_type]["residential"] = model_conf["controller"]["init"]["arguments"][
                    "residential"
                ]
                dev_model_configs[proj_type]["categories"] = model_conf["controller"]["prepare_for_estimate"][
                    "arguments"
                ]["categories"]

        self.pre_check(location_set, vacancy_table, dev_model_configs)
        target_residential_vacancy_rate, target_non_residential_vacancy_rate = self._get_target_vacancy_rates(
            vacancy_table, year
        )
        self._compute_vacancy_variables(location_set, dev_model_configs, resources)
        projects = {}
        for project_type in dev_model_configs:
            # determine current-year vacancy rates
            vacant_units_sum = location_set.get_attribute(self.variable_for_vacancy[project_type]).sum()
            units_sum = float(location_set.get_attribute(self.units_variable[project_type]).sum())
            if dev_model_configs[project_type]["residential"]:
                target_vacancy_rate = target_residential_vacancy_rate
            else:
                target_vacancy_rate = target_non_residential_vacancy_rate
            should_develop_units = int(
                round(max(0, (target_vacancy_rate * units_sum - vacant_units_sum) / (1 - target_vacancy_rate)))
            )
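            # For example (hypothetical numbers): with units_sum = 1000,
            # vacant_units_sum = 30 and target_vacancy_rate = 0.05, this gives
            # round((0.05 * 1000 - 30) / 0.95) = 21 units to develop; after
            # building them, 51 of 1021 units are vacant, i.e. ~5%.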
            logger.log_status(
                project_type
                + ": vacant units: %d, should be vacant: %f, sum units: %d"
                % (vacant_units_sum, target_vacancy_rate * units_sum, units_sum)
            )

            if not should_develop_units:
                logger.log_note(
                    (
                        "Will not build any "
                        + project_type
                        + " units, because the current vacancy of %d units\n"
                        + "is more than the %d units desired for the vacancy rate of %f."
                    )
                    % (vacant_units_sum, target_vacancy_rate * units_sum, target_vacancy_rate)
                )
            # create projects
            if should_develop_units > 0:
                projects[project_type] = self._create_projects(
                    should_develop_units,
                    project_type,
                    history_table,
                    location_set,
                    units_sum,
                    dev_model_configs,
                    resources,
                )
                projects[project_type].add_submodel_categories()
            else:
                projects[project_type] = None
        return projects
    def run(self,
            vacancy_table,
            frequency_table,
            template_table,
            year,
            location_set,
            resources=None):
        self.pre_check(location_set, vacancy_table, [])
        target_residential_vacancy_rate = vacancy_table.get_data_element_by_id(
            year).target_total_residential_vacancy
        target_non_residential_vacancy_rate = vacancy_table.get_data_element_by_id(
            year).target_total_non_residential_vacancy
        compute_resources = Resources(resources)
        #        compute_resources.merge({"household":household_set, "job":job_set, "debug":self.debug})
        location_set.compute_variables([
            "urbansim.gridcell.vacant_residential_units",
            "urbansim.gridcell.vacant_commercial_sqft",
            "urbansim.gridcell.vacant_industrial_sqft"
        ],
                                       resources=compute_resources)

        # determine current-year vacancy rates
        vacant_resunits_sum = location_set.get_attribute(
            "vacant_residential_units").sum()
        resunits_sum = float(
            location_set.get_attribute("residential_units").sum())
        vacant_residential_rate = self.safe_divide(vacant_resunits_sum,
                                                   resunits_sum)

        vacant_commercial_sqft_sum = location_set.get_attribute(
            "vacant_commercial_sqft").sum()
        commercial_sqft_sum = float(
            location_set.get_attribute("commercial_sqft").sum())
        vacant_commercial_rate = self.safe_divide(vacant_commercial_sqft_sum,
                                                  commercial_sqft_sum)

        vacant_industrial_sqft_sum = location_set.get_attribute(
            "vacant_industrial_sqft").sum()
        industrial_sqft_sum = float(
            location_set.get_attribute("industrial_sqft").sum())
        vacant_industrial_rate = self.safe_divide(vacant_industrial_sqft_sum,
                                                  industrial_sqft_sum)

        logger.log_status(
            "Res: vacant res units: %d, should be vacant: %f, sum res units: %d"
            % (vacant_resunits_sum,
               target_residential_vacancy_rate * resunits_sum, resunits_sum))
        logger.log_status(
            "Com: vacant sqft: %d, should be vacant: %f, sum sqft: %d" %
            (vacant_commercial_sqft_sum, target_non_residential_vacancy_rate *
             commercial_sqft_sum, commercial_sqft_sum))
        logger.log_status(
            "Ind: vacant sqft: %d, should be vacant: %f, sum sqft: %d" %
            (vacant_industrial_sqft_sum, target_non_residential_vacancy_rate *
             industrial_sqft_sum, industrial_sqft_sum))

        should_develop_resunits = max(
            0, (target_residential_vacancy_rate * resunits_sum -
                vacant_resunits_sum) / (1 - target_residential_vacancy_rate))
        if not should_develop_resunits:
            logger.log_note((
                "Will not build any residential units, because the current residential vacancy of %d units\n"
                +
                "is more than the %d units desired for the vacancy rate of %f."
            ) % (vacant_resunits_sum, target_residential_vacancy_rate *
                 resunits_sum, target_residential_vacancy_rate))
        should_develop_commercial = max(
            0, (target_non_residential_vacancy_rate * commercial_sqft_sum -
                vacant_commercial_sqft_sum) /
            (1 - target_non_residential_vacancy_rate))
        if not should_develop_commercial:
            logger.log_note((
                "Will not build any commercial sqft, because the current commercial vacancy of %d sqft\n"
                +
                "is more than the %d sqft desired for the vacancy rate of %f."
            ) % (vacant_commercial_sqft_sum,
                 target_non_residential_vacancy_rate * commercial_sqft_sum,
                 target_non_residential_vacancy_rate))
        should_develop_industrial = max(
            0, (target_non_residential_vacancy_rate * industrial_sqft_sum -
                vacant_industrial_sqft_sum) /
            (1 - target_non_residential_vacancy_rate))
        if not should_develop_industrial:
            logger.log_note((
                "Will not build any industrial sqft, because the current industrial vacancy of %d sqft\n"
                +
                "is more than the %d sqft desired for the vacancy rate of %f."
            ) % (vacant_industrial_sqft_sum,
                 target_non_residential_vacancy_rate * industrial_sqft_sum,
                 target_non_residential_vacancy_rate))

#        projects = {}
#        should_develop = {"residential":should_develop_resunits,
#                          "commercial":should_develop_commercial,
#                          "industrial":should_develop_industrial}

#        average_improvement_value = {}
#        average_improvement_value["residential"] = self.safe_divide(
#            location_set.get_attribute("residential_improvement_value" ).sum(), resunits_sum)
#        average_improvement_value["commercial"] = self.safe_divide(
#            location_set.get_attribute("commercial_improvement_value" ).sum(), commercial_sqft_sum)
#        average_improvement_value["industrial"] = self.safe_divide(
#            location_set.get_attribute("industrial_improvement_value" ).sum(), industrial_sqft_sum)

#create projects

        development_type_ids = []
        units = []
        com_sqfts = []
        ind_sqfts = []
        gov_sqfts = []
        while should_develop_resunits > 0 or should_develop_commercial > 0 or should_develop_industrial > 0:
            n = 1  # sample n developments at a time
            sampled_ids = probsample_replace(
                frequency_table.get_attribute('development_type_id'), n,
                frequency_table.get_attribute('frequency').astype(float32) /
                frequency_table.get_attribute('frequency').sum())
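            # the normalized 'frequency' column serves as the sampling
            # probabilities; e.g. (hypothetical numbers) frequencies [6, 3, 1]
            # give the first development type a 0.6 chance on each draw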
            for id in sampled_ids:
                index = where(
                    template_table.get_attribute('development_type_id') ==
                    id)[0]
                res_unit = template_table.get_attribute_by_index(
                    'residential_units', index)
                com_sqft = template_table.get_attribute_by_index(
                    'commercial_sqft', index)
                ind_sqft = template_table.get_attribute_by_index(
                    'industrial_sqft', index)
                gov_sqft = template_table.get_attribute_by_index(
                    'governmental_sqft', index)

                should_develop_resunits -= res_unit[0]
                should_develop_commercial -= com_sqft[0]
                should_develop_industrial -= ind_sqft[0]

                development_type_ids.append(id)
                units.append(res_unit)
                com_sqfts.append(com_sqft)
                ind_sqfts.append(ind_sqft)
                gov_sqfts.append(gov_sqft)

        sizes = len(development_type_ids)
        if sizes > 0:
            storage = StorageFactory().get_storage('dict_storage')

            developments_table_name = 'developments'
            storage.write_table(
                table_name=developments_table_name,
                table_data={
                    "landuse_development_id": arange(sizes),
                    "grid_id": -1 * ones((sizes, ), dtype=int32),
                    "development_type_id": array(development_type_ids),
                    "residential_units": array(units),
                    "commercial_sqft": array(com_sqfts),
                    "industrial_sqft": array(ind_sqfts),
                    "governmental_sqft": array(gov_sqfts),
                    "improvement_value": zeros((sizes, ), dtype="int32"),
                },
            )

            developments = LandUseDevelopmentDataset(
                in_storage=storage,
                in_table_name=developments_table_name,
            )

        else:
            developments = None

        return developments
    def run(self,
            vacancy_table,
            history_table,
            year,
            location_set,
            resources=None,
            development_models=None,
            models_configuration=None,
            model_configuration=None):
        """
        Defining the development project types can be done in two ways;
        either by using a small part of configuration located under
        'development_project_types' that lists only the needed information
        OR: they can be defined as part of the development project models.
        Configurations that pass the development_models argument assume to 
        use the latter method.
        """
        # check that we get the correct arguments
        if development_models is not None and models_configuration is None:
            raise StandardError(
                'Configurations that pass a list of development'
                ' models (argument: "development_models") must '
                'also pass a reference to the entire models '
                'configuration (argument: "models_'
                'configuration") note: plural model[s].')

        dev_model_configs = {}
        if development_models is None:  # assume this means that we use old conf
            # try to get a reference to the external information for development
            # project types
            try:
                dev_model_configs = model_configuration[
                    'development_project_types']
            except:
                dev_model_configs = models_configuration[
                    'development_project_types']
        else:
            # pull in information from the specified development project models
            for dev_proj_model in development_models:
                model_conf = models_configuration[dev_proj_model]
                proj_type = model_conf['controller']['init']['arguments'][
                    'project_type'].strip('\'"')
                dev_model_configs[proj_type] = {}
                dev_model_configs[proj_type]['units'] = model_conf[
                    'controller']['init']['arguments']['units'].strip('\'"')
                dev_model_configs[proj_type]['residential'] = model_conf[
                    'controller']['init']['arguments']['residential']
                dev_model_configs[proj_type]['categories'] = model_conf[
                    'controller']['prepare_for_estimate']['arguments'][
                        'categories']

        self.pre_check(location_set, vacancy_table, dev_model_configs)
        target_residential_vacancy_rate, target_non_residential_vacancy_rate = self._get_target_vacancy_rates(
            vacancy_table, year)
        self._compute_vacancy_variables(location_set, dev_model_configs,
                                        resources)
        projects = {}
        for project_type in dev_model_configs:
            # determine current-year vacancy rates
            vacant_units_sum = location_set.get_attribute(
                self.variable_for_vacancy[project_type]).sum()
            units_sum = float(
                location_set.get_attribute(
                    self.units_variable[project_type]).sum())
            if dev_model_configs[project_type]['residential']:
                target_vacancy_rate = target_residential_vacancy_rate
            else:
                target_vacancy_rate = target_non_residential_vacancy_rate
            should_develop_units = int(
                round(
                    max(0,
                        (target_vacancy_rate * units_sum - vacant_units_sum) /
                        (1 - target_vacancy_rate))))
            logger.log_status(
                project_type +
                ": vacant units: %d, should be vacant: %f, sum units: %d" %
                (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note((
                    "Will not build any " + project_type +
                    " units, because the current vacancy of %d units\n" +
                    "is more than the %d units desired for the vacancy rate of %f."
                ) % (vacant_units_sum, target_vacancy_rate * units_sum,
                     target_vacancy_rate))
            #create projects
            if should_develop_units > 0:
                projects[project_type] = self._create_projects(
                    should_develop_units, project_type, history_table,
                    location_set, units_sum, dev_model_configs, resources)
                projects[project_type].add_submodel_categories()
            else:
                projects[project_type] = None
        return projects
                     dataset_name='zone',
                     name='zone Indicators',
                     output_type='dbf',
                     attributes=attrs)
    ]

    IndicatorFactory().create_indicators(indicators=zonedbf_indicators,
                                         display_error_box=False,
                                         show_results=False)


if __name__ == '__main__':

    # takes 9.5 mins.  :p
    starttime = time()
    logger.log_note(strftime("%x %X", localtime(starttime)) + ": Starting")

    cache_directory = r'C:\opus\data\sanfrancisco\runs\run_46.2010_09_06_12_00'

    make_multiyear_workbook(cache_directory=cache_directory,
                            yearstart=2010,
                            yearend=2035)
    make_topsheet(cache_directory)
    make_zone_dbfs(cache_directory)

    endtime = time()
    logger.log_note(
        strftime("%x %X", localtime(endtime)) + " Completed. Total time: " +
        str((endtime - starttime) / 60.0) + " mins")

#===============================================================================
Esempio n. 43
0
    def run(
            self,
            building_set,
            #             building_use_table,
            building_use_classification_table,
            vacancy_table,
            history_table,
            year,
            location_set,
            resources=None):
        building_classes = building_use_classification_table.get_attribute(
            "name")
        unit_attributes = building_use_classification_table.get_attribute(
            'units')
        building_id_name = building_set.get_id_name()[0]
        location_id_name = location_set.get_id_name()[0]
        new_buildings = {
            building_id_name: array([], dtype='int32'),
            "building_use_id": array([], dtype=int8),
            "year_built": array([], dtype='int32'),
            #                         "building_sqft": array([], dtype='int32'),
            #                         "residential_units": array([], dtype='int32'),
            "unit_price": array([], dtype=float32),
            location_id_name: array([], dtype='int32')
        }
        for attribute in unit_attributes:
            new_buildings[attribute] = array([], dtype='int32')

        max_id = building_set.get_id_attribute().max()
        building_set_size_orig = building_set.size()

        for itype in range(building_use_classification_table.size()
                           ):  # iterate over building types
            building_class = building_classes[itype]
            #            type_code = building_types_table.get_id_attribute()[itype]
            vacancy_attribute = 'target_total_%s_vacancy' % building_class
            if vacancy_attribute not in vacancy_table.get_known_attribute_names(
            ):
                logger.log_warning(
                    "No target vacancy for building class '%s'. Transition model for this building class skipped."
                    % building_class)
                continue
            vacancy_table.get_attribute(
                vacancy_attribute)  # ensures that the attribute is loaded
            target_vacancy_rate = getattr(
                vacancy_table.get_data_element_by_id(year), vacancy_attribute)

            compute_resources = Resources(resources)
            compute_resources.merge({"debug": self.debug})
            units_attribute = unit_attributes[itype]
            vacant_units_attribute = 'vacant_' + units_attribute

            # determine current-year vacancy rates
            building_set.compute_variables("urbansim_parcel.building." +
                                           vacant_units_attribute,
                                           resources=compute_resources)

            vacant_units_sum = building_set.get_attribute(
                vacant_units_attribute).sum()
            units_sum = float(
                building_set.get_attribute(units_attribute).sum())
            vacant_rate = self.safe_divide(vacant_units_sum, units_sum)

            should_develop_units = max(
                0, (target_vacancy_rate * units_sum - vacant_units_sum) /
                (1 - target_vacancy_rate))
            logger.log_status(
                building_class +
                ": vacant units: %d, should be vacant: %f, sum units: %d" %
                (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note((
                    "Will not build any " + building_class +
                    " units, because the current vacancy of %d units\n" +
                    "is more than the %d units desired for the vacancy rate of %f."
                ) % (vacant_units_sum, target_vacancy_rate * units_sum,
                     target_vacancy_rate))
                continue

#            average_buildings_value = None
#            if (type+"_improvement_value") in location_set.get_known_attribute_names():
#                average_buildings_value = self.safe_divide(
#                    location_set.get_attribute(type+"_improvement_value" ).sum(), units_sum)

#create buildings

            history_values = history_table.get_attribute(units_attribute)
            index_non_zeros_values = where(history_values > 0)[0]
            history_values_without_zeros = history_values[
                index_non_zeros_values]
            history_type = history_table.get_attribute("building_use_id")
            history_type_without_zeros = history_type[index_non_zeros_values]
            history_price = history_table.get_attribute("unit_price")
            history_price_without_zeros = history_price[index_non_zeros_values]

            #TODO: what happens if history has only zeroes?
            mean_size = history_values_without_zeros.mean()
            idx = array([], dtype="int32")
            # Ensure that there are some development projects to choose from.
            #TODO: should the 'int' in the following line be 'ceil'?
            num_of_projects_to_select = max(
                10, int(should_develop_units / mean_size))
            while True:
                idx = concatenate((idx,
                                   randint(0,
                                           history_values_without_zeros.size,
                                           size=num_of_projects_to_select)))
                csum = history_values_without_zeros[idx].cumsum()
                idx = idx[where(csum <= should_develop_units)]
                if csum[-1] >= should_develop_units:
                    break
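            # Sketch of the loop above (hypothetical numbers): with
            # should_develop_units = 100 and sampled project sizes
            # [40, 35, 30, 20], the cumulative sums are [40, 75, 105, 125];
            # the prefix not exceeding 100 (the first two projects, 75 units)
            # is kept, and the loop stops because the running total (125) has
            # reached the target.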

            nbuildings = idx.size

            for attribute in unit_attributes:

                #if attribute == units_attribute:
                #new_unit_values = history_values_without_zeros[idx]
                #else:
                #new_unit_values = zeros(nbuildings)
                #to accommodate mixed-use buildings, allow attributes other than units_attribute to be non-zero
                new_unit_values = history_table.get_attribute(attribute)[
                    index_non_zeros_values[idx]]

                new_buildings[attribute] = concatenate(
                    (new_buildings[attribute], new_unit_values))

            new_max_id = max_id + nbuildings
            new_buildings[building_id_name] = concatenate(
                (new_buildings[building_id_name],
                 arange(max_id + 1, new_max_id + 1)))
            new_buildings["building_use_id"] = concatenate(
                (new_buildings["building_use_id"],
                 history_type_without_zeros[idx]))
            new_buildings["year_built"] = concatenate(
                (new_buildings["year_built"],
                 year * ones(nbuildings, dtype="int32")))
            new_buildings["unit_price"] = concatenate(
                (new_buildings["unit_price"],
                 history_price_without_zeros[idx]))
            new_buildings[location_id_name] = concatenate(
                (new_buildings[location_id_name],
                 zeros(nbuildings, dtype="int32")))
            logger.log_status("Creating %s %s of %s %s buildings." %
                              (history_values_without_zeros[idx].sum(),
                               units_attribute, nbuildings, building_class))

        building_set.add_elements(new_buildings, require_all_attributes=False)

        difference = building_set.size() - building_set_size_orig
        index = arange(difference) + building_set_size_orig
        return index
Esempio n. 44
0
    def __init__(self):
        config = AbstractUrbansimConfiguration()

        config_changes = {
            'project_name':
            'washtenaw',
            'description':
            'Region Pilot Baseline',
            'scenario_database_configuration':
            ScenarioDatabaseConfiguration(database_name='washtenaw_class', ),
            'models': [
                'prescheduled_events',
                'events_coordinator',
                'residential_land_share_model',
                'land_price_model',
                'regional_development_project_transition_model',
                'residential_regional_development_project_location_choice_model',
                'commercial_regional_development_project_location_choice_model',
                'industrial_regional_development_project_location_choice_model',
                'development_event_transition_model',
                'events_coordinator',
                'residential_land_share_model',
                'jobs_event_model',
                #'households_event_model',
                'regional_household_transition_model',
                'regional_household_relocation_model',
                'regional_household_location_choice_model',
                'regional_employment_transition_model',
                'regional_employment_relocation_model',
                {
                    'regional_employment_location_choice_model': {
                        'group_members': ['_all_']
                    }
                },
                'regional_distribute_unplaced_jobs_model'
            ],
            'cache_directory':
            None,  ### TODO: Set this cache_directory to something useful.
            'creating_baseyear_cache_configuration':
            CreatingBaseyearCacheConfiguration(
                cache_directory_root="/urbansim_cache/washtenaw",
                cache_from_database=True,
                baseyear_cache=BaseyearCacheConfiguration(
                    existing_cache_to_copy=
                    "/urbansim_cache/washtenaw/cache_source", ),
                cache_scenario_database=
                'urbansim.model_coordinators.cache_scenario_database',
                tables_to_cache=self.tables_to_cache,
                tables_to_cache_nchunks={'gridcells': 1},
                tables_to_copy_to_previous_years=self.
                tables_to_copy_to_previous_years,
            ),
            'datasets_to_preload': {
                'development_constraint': {},
                'development_event_history': {},
                'development_type': {},
                'gridcell': {
                    'nchunks': 2
                },
                'household': {},
                'job': {},
                'job_building_type': {},
                'target_vacancy': {},
                'zone': {},
                'jobs_event': {},
                #'households_event': {},
            },
            'dataset_pool_configuration':
            DatasetPoolConfiguration(
                package_order=['washtenaw', 'urbansim', 'opus_core'], ),
            'base_year':
            2005,
            'years': (2006, 2010),
        }
        config.merge(config_changes)
        self.merge(config)
        self.merge_with_controller()
        try:
            exec('from %s_local_config import my_configuration' % getuser())
            local_config = True
        except:
            logger.log_note(
                "No user settings found, or an error occurred while loading them.")
            local_config = False
        if local_config:
            self.merge(my_configuration)
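        # A hypothetical <username>_local_config.py only needs to define a
        # my_configuration dict with the entries to override, e.g.
        #   my_configuration = {'cache_directory': '/urbansim_cache/washtenaw/runs'}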
    def on_pb_generate_results_released(self):
        run_name = self.current_run
        indicator_name = self.current_indicator
        indicator_dataset = self.current_indicator_dataset
        start_year = int(self.current_year)
        end_year = start_year

        if run_name is None or indicator_name is None or start_year is None:
            return

        key = (run_name, indicator_name, start_year)

        if key in self.already_browsed:
            # unpack the cached widgets before branching so they are also
            # available when the swap is queued below
            (tab_widget, map_widget) = self.already_browsed[key]
            if not self.generating_results:
#                self.swap_visualizations(map_widget, tab_widget)
                self.pb_generate_results.setText('Results Generated')
            else:
                self.queued_results = ('swap', (map_widget, tab_widget))
            return

        self.pb_generate_results.setEnabled(False)
        self.pb_generate_results.setText('Generating Results...')

        indicator_nodes = get_available_indicator_nodes(self.project)

        dataset = None
        for indicator_node in indicator_nodes:
            ind_dataset, name = get_variable_dataset_and_name(indicator_node)
            if name == indicator_name and ind_dataset == indicator_dataset:
                dataset = ind_dataset
                break

        if dataset is None:
            raise Exception('Could not find dataset for indicator %s' % indicator_name)

        table_params = {
            'name': None,
            'output_type' : 'tab',
            'indicators' : [indicator_name],
        }

        map_params = {'name':None,
                      'indicators':[indicator_name]}

        visualizations = [
            ('table_per_year', dataset, table_params),
            ('mapnik_map', dataset, map_params)
        ]

        batch_processor = BatchProcessor(self.project)
        batch_processor.guiElement = self

        batch_processor.set_data(
            visualizations = visualizations,
            source_data_name = run_name,
            years = range(start_year, end_year + 1))

        if not self.generating_results:
            self.generating_results = True
            logger.log_note('Generating results for run %s, indicator %s, year %s' % (run_name, indicator_name, start_year))
            self.running_key = key
            self.batch_processor = batch_processor
            batch_processor_thread = OpusGuiThread(
                                  parentThread = get_mainwindow_instance(),
                                  parentGuiElement = self,
                                  thread_object = self.batch_processor)

            # Use this signal from the thread if it is capable of producing its own status signal
            self.connect(batch_processor_thread, SIGNAL("runFinished(PyQt_PyObject)"), self._run_finished)
            self.connect(batch_processor_thread, SIGNAL("runError(PyQt_PyObject)"), self._run_error)

            batch_processor_thread.start()
        else:
            self.queued_results = (key, batch_processor)
Esempio n. 46
0
    def run(self,
            building_set,
            new_building_copy_attrs,
            building_type_table,
            building_type_classification_table,
            vacancy_table,
            history_table,
            year,
            location_set,
            resources=None):
        building_classes = building_type_classification_table.get_attribute(
            "name")
        unit_attributes = building_type_classification_table.get_attribute(
            'units')
        building_id_name = building_set.get_id_name()[0]
        location_id_name = location_set.get_id_name()[0]
        calc_attributes = [building_id_name, location_id_name, "year_built"]
        new_buildings = {}
        for attribute in new_building_copy_attrs:
            new_buildings[attribute] = array(
                [], dtype=building_set.get_data_type(attribute))
        for attribute in calc_attributes:
            new_buildings[attribute] = array(
                [], dtype=building_set.get_data_type(attribute))

        # for convenience, make a map of building_type_id => (building_type)class_id
        # these names are hard-wired elsewhere
        building_type_id_to_class_id = {}
        building_type_ids = building_type_table.get_attribute(
            "building_type_id")
        for idx in range(building_type_table.size()):
            building_type_id_to_class_id[building_type_ids[idx]] = \
                building_type_table.get_attribute("class_id")[idx]
        logger.log_status("building_type_id_to_class_id = " +
                          str(building_type_id_to_class_id))

        # and make a column of use classes for the history table
        history_type_classes = zeros((history_table.size()), dtype=int8)
        history_types = history_table.get_attribute("building_type_id")
        for idx in range(history_table.size()):
            history_type_classes[idx] = building_type_id_to_class_id[
                history_types[idx]]
        logger.log_status("history_types=" + str(history_types))
        logger.log_status("history_type_classes=" + str(history_type_classes))

        max_id = building_set.get_id_attribute().max()
        new_building_id_start = max_id + 1
        new_building_id_end = max_id + 1
        building_set_size_orig = building_set.size()

        for itype in range(building_type_classification_table.size()
                           ):  # iterate over building types
            building_class = building_classes[itype]
            building_class_id = building_type_classification_table.get_attribute(
                "class_id")[itype]

            vacancy_attribute = 'target_total_%s_vacancy' % building_class.lower(
            )
            if vacancy_attribute not in vacancy_table.get_known_attribute_names(
            ):
                logger.log_warning(
                    "No target vacancy for building class '%s' (e.g. no '%s' in target_vacancies). Transition model for this building class skipped."
                    % (building_class, vacancy_attribute))
                continue
            vacancy_table.get_attribute(
                vacancy_attribute)  # ensures that the attribute is loaded
            target_vacancy_rate = getattr(
                vacancy_table.get_data_element_by_id(year), vacancy_attribute)
            logger.log_status(
                "Target vacancy rate for building_class %s is %f" %
                (building_class, target_vacancy_rate))

            compute_resources = Resources(resources)
            compute_resources.merge({"debug": self.debug})
            units_attribute = unit_attributes[itype]
            occupied_sqft_attribute = 'occupied_sqft_of_typeclass_%s' % building_class.lower(
            )
            total_sqft_attribute = 'where(sanfrancisco.building.building_typeclass_name==\'%s\',sanfrancisco.building.building_sqft,0)' % building_class.lower(
            )

            # determine current-year vacancy rates
            building_set.compute_variables(
                ("sanfrancisco.building." + occupied_sqft_attribute,
                 total_sqft_attribute),
                resources=compute_resources)

            occupied_sqft_sum = building_set.get_attribute(
                occupied_sqft_attribute).sum()
            total_sqft_sum = float(
                building_set.get_attribute(total_sqft_attribute).sum())
            occupancy_rate = self.safe_divide(occupied_sqft_sum,
                                              total_sqft_sum)
            # cap it at 1.0
            if occupancy_rate > 1.0: occupancy_rate = 1.0
            vacancy_rate = 1.0 - occupancy_rate
            vacant_sqft_sum = vacancy_rate * total_sqft_sum

            should_develop_sqft = (target_vacancy_rate *
                                   total_sqft_sum) - vacant_sqft_sum
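            # For example (hypothetical numbers): with total_sqft_sum = 100000,
            # vacant_sqft_sum = 3000 and target_vacancy_rate = 0.05,
            # should_develop_sqft = 0.05 * 100000 - 3000 = 2000 sqft.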
            logger.log_status(
                "%s: vacancy rate: %4.3f   occupancy rate: %4.3f" %
                (building_class, vacancy_rate, occupancy_rate))
            logger.log_status(
                "%s: vacant: %d, should be vacant: %f, sum units: %d" %
                (building_class, vacant_sqft_sum,
                 target_vacancy_rate * total_sqft_sum, total_sqft_sum))

            if should_develop_sqft <= 0:
                logger.log_note((
                    "Will not build any %s units, because the current vacancy of %d sqft\n"
                    +
                    "is more than the %d sqft desired for the vacancy rate of %f."
                ) % (building_class, vacant_sqft_sum, target_vacancy_rate *
                     total_sqft_sum, target_vacancy_rate))
                continue

            #create buildings

            # find sample set of qualifying buildings in the events history,
            # e.g. where the building_type is in the correct class, and a positive
            # number of units or sqft (or whatever) were present
            history_sqft = history_table.get_attribute('building_sqft')
            index_sampleset = where((history_sqft > 0) & (
                history_type_classes == building_class_id))[0]

            # Ensure that there are some development projects to choose from.
            logger.log_status("should_develop_sqft=" +
                              str(should_develop_sqft))
            if index_sampleset.shape[0] == 0:
                logger.log_warning(
                    "Cannot create new buildings for building use class %s; no buildings in the event history table from which to sample."
                    % building_class)
                continue

            history_sqft_sampleset = history_sqft[index_sampleset]
            logger.log_status("history_sqft_sampleset = " +
                              str(history_sqft_sampleset))

            mean_size = history_sqft_sampleset.mean()
            idx = array([], dtype="int32")
            #TODO: should the 'int' in the following line be 'ceil'?
            num_of_projects_to_select = max(
                10, int(should_develop_sqft / mean_size))
            while True:
                idx = concatenate((idx,
                                   randint(0,
                                           history_sqft_sampleset.size,
                                           size=num_of_projects_to_select)))
                csum = history_sqft_sampleset[idx].cumsum()
                idx = idx[where(csum <= should_develop_sqft)]
                if csum[-1] >= should_develop_sqft:
                    break

            logger.log_status("idx = " + str(idx))

            nbuildings = idx.size
            if nbuildings == 0: continue

            new_building_id_end = new_building_id_start + nbuildings

            # copy_attributes
            for attribute in new_building_copy_attrs:
                attr_values = history_table.get_attribute(attribute)[
                    index_sampleset[idx]]
                new_buildings[attribute] = concatenate(
                    (new_buildings[attribute], attr_values))

            # calc_attributes
            new_buildings[building_id_name] = concatenate(
                (new_buildings[building_id_name],
                 arange(new_building_id_start, new_building_id_end)))
            new_buildings[location_id_name] = concatenate(
                (new_buildings[location_id_name], zeros(nbuildings)))
            new_buildings["year_built"] = concatenate(
                (new_buildings["year_built"], year * ones(nbuildings)))
            logger.log_status("Creating %s sqft of %s %s buildings." %
                              (history_sqft_sampleset[idx].sum(), nbuildings,
                               building_class))
            new_building_id_start = new_building_id_end + 1
            logger.log_status(new_buildings)
        building_set.add_elements(new_buildings, require_all_attributes=False)

        difference = building_set.size() - building_set_size_orig
        index = arange(difference) + building_set_size_orig
        return index
Esempio n. 47
0
 def __init__(self):
     config = AbstractUrbansimConfiguration()
     
     config_changes = {
         'project_name':'washtenaw',
         'description':'Region Pilot Baseline',
         'scenario_database_configuration': ScenarioDatabaseConfiguration(
             database_name = 'washtenaw_class',
             ),
         'models': [
             'prescheduled_events',
             'events_coordinator',
             'residential_land_share_model',
             'land_price_model',
             'regional_development_project_transition_model',
             'residential_regional_development_project_location_choice_model',
             'commercial_regional_development_project_location_choice_model',
             'industrial_regional_development_project_location_choice_model',
             'development_event_transition_model',
             'events_coordinator',
             'residential_land_share_model',
             'jobs_event_model',
             #'households_event_model',
             'regional_household_transition_model',
             'regional_household_relocation_model',
             'regional_household_location_choice_model',
             'regional_employment_transition_model',
             'regional_employment_relocation_model',
             {'regional_employment_location_choice_model': {'group_members': ['_all_']}},
             'regional_distribute_unplaced_jobs_model'
             ],
         'cache_directory':None, ### TODO: Set this cache_directory to something useful.
         'creating_baseyear_cache_configuration':CreatingBaseyearCacheConfiguration(
             cache_directory_root = "/urbansim_cache/washtenaw",
             cache_from_database = True,
              baseyear_cache = BaseyearCacheConfiguration(
                 existing_cache_to_copy = "/urbansim_cache/washtenaw/cache_source",
                 ),
             cache_scenario_database = 'urbansim.model_coordinators.cache_scenario_database',
             tables_to_cache = self.tables_to_cache,
             tables_to_cache_nchunks = {'gridcells': 1},
             tables_to_copy_to_previous_years = self.tables_to_copy_to_previous_years,
             ),
         'datasets_to_preload': {
             'development_constraint': {},
             'development_event_history': {},
             'development_type': {},
             'gridcell': {'nchunks': 2},
             'household': {},
             'job': {},
             'job_building_type': {},
             'target_vacancy': {},
             'zone': {},
             'jobs_event': {},
             #'households_event': {},
             
             },
         'dataset_pool_configuration': DatasetPoolConfiguration(
             package_order=['washtenaw', 'urbansim', 'opus_core'],
             ),
         'base_year':2005,
         'years':(2006, 2010),
         }
     config.merge(config_changes)
     self.merge(config)
     self.merge_with_controller()
     try:
         exec('from %s_local_config import my_configuration' % getuser())
         local_config = True
     except:
         logger.log_note("No user's settings found or error occured when loading.")
         local_config = False
     if local_config:
         self.merge(my_configuration)
    def run(self,
            vacancy_table,
            history_table,
            year,
            location_set,
            dataset_pool=None,
            resources=None):
        self.dataset_pool = dataset_pool
        building_types = self.dataset_pool.get_dataset('building_type')
        target_vacancy_this_year = DatasetSubset(
            vacancy_table,
            index=where(vacancy_table.get_attribute("year") == year)[0])
        building_type_ids = target_vacancy_this_year.get_attribute(
            'building_type_id')
        building_type_idx = building_types.get_id_index(building_type_ids)
        self.used_building_types = DatasetSubset(building_types,
                                                 index=building_type_idx)
        project_types = self.used_building_types.get_attribute(
            'building_type_name')
        is_residential = self.used_building_types.get_attribute(
            'is_residential')
        unit_names = where(is_residential, 'residential_units',
                           'non_residential_sqft')
        specific_unit_names = where(is_residential, 'residential_units',
                                    '_sqft')
        rates = target_vacancy_this_year.get_attribute('target_total_vacancy')
        self.project_units = {}
        self.project_specific_units = {}
        target_rates = {}
        for i in range(self.used_building_types.size()):
            self.project_units[project_types[i]] = unit_names[i]
            if is_residential[i]:
                self.project_specific_units[
                    project_types[i]] = specific_unit_names[i]
            else:
                self.project_specific_units[project_types[i]] = "%s%s" % (
                    project_types[i], specific_unit_names[i])
            target_rates[building_type_ids[i]] = rates[i]
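        # e.g. (hypothetical type names) a residential building type maps to
        # 'residential_units', while a non-residential type named 'commercial'
        # would map to 'commercial_sqft'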

        self._compute_vacancy_and_total_units_variables(
            location_set, project_types, resources)
        self.pre_check(location_set, target_vacancy_this_year, project_types)

        projects = None
        for project_type_id, target_vacancy_rate in target_rates.iteritems():
            # determine current-year vacancy rates
            project_type = building_types.get_attribute_by_id(
                'building_type_name', project_type_id)
            vacant_units_sum = location_set.get_attribute(
                self.variable_for_vacancy[project_type]).sum()
            units_sum = float(
                location_set.get_attribute(
                    self.variable_for_total_units[project_type]).sum())
            should_develop_units = int(
                round(
                    max(0,
                        (target_vacancy_rate * units_sum - vacant_units_sum) /
                        (1 - target_vacancy_rate))))
            logger.log_status(
                project_type +
                ": vacant units: %d, should be vacant: %f, sum units: %d" %
                (vacant_units_sum, target_vacancy_rate * units_sum, units_sum))

            if not should_develop_units:
                logger.log_note((
                    "Will not build any " + project_type +
                    " units, because the current vacancy of %d units\n" +
                    "is more than the %d units desired for the vacancy rate of %f."
                ) % (vacant_units_sum, target_vacancy_rate * units_sum,
                     target_vacancy_rate))
            #create projects
            if should_develop_units > 0:
                this_project = self._create_projects(
                    should_develop_units, project_type, project_type_id,
                    history_table, location_set, units_sum, resources)
                if projects is None:
                    projects = this_project
                else:
                    projects.join_by_rows(this_project,
                                          change_ids_if_not_unique=True)
        return projects