Code example #1
    def get_od_pair_index_not_in_dataset(self, O, D):
        """Return indices to O (D) from whose elements an od pair is not included in the travel data
        see unittest for an example
        """

        assert O.shape == D.shape

        origin_ids = self.get_attribute(self.origin_id_name)
        destination_ids = self.get_attribute(self.destination_id_name)

        max_id = max(O.max(), D.max(), origin_ids.max(), destination_ids.max())
        digits = len(str(max_id)) + 1
        multiplier = 10**digits

        ODpair = O * multiplier + D
        idpair = origin_ids * multiplier + destination_ids
        missing_pairs = setdiff1d(unique(ODpair), unique(idpair))

        results = zeros_like(D)
        for pair in missing_pairs:
            results += logical_and(O == pair // multiplier,
                                   D == pair % multiplier)

        results += logical_or(O < origin_ids.min(), O > origin_ids.max())
        results += logical_or(D < destination_ids.min(),
                              D > destination_ids.max())

        return where(results)
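
The core trick above is to encode each (origin, destination) pair as a single integer so that set operations such as setdiff1d apply directly. A minimal standalone sketch of the encoding, with all values invented for illustration:

from numpy import array, setdiff1d, unique

O = array([1, 1, 2, 3])              # origins of the queried pairs
D = array([1, 2, 2, 1])              # destinations of the queried pairs
known_O = array([1, 1, 2])           # pairs present in the travel data
known_D = array([1, 2, 2])

multiplier = 10                      # one decimal digit is enough here
od = O * multiplier + D              # encode each pair as a single integer
known = known_O * multiplier + known_D
print(setdiff1d(unique(od), unique(known)))   # [31] -> pair (3, 1) is missing
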
Code example #2
 def run(self, year, job_set, control_totals, job_building_types, data_objects=None, resources=None):
     self._do_initialize_for_run(job_set, job_building_types, data_objects)
     large_area_ids = control_totals.get_attribute("large_area_id")
     jobs_large_area_ids = job_set.compute_variables("washtenaw.job.large_area_id")
     unique_large_areas = unique(large_area_ids)
     is_year = control_totals.get_attribute("year")==year
     all_jobs_index = arange(job_set.size())
     sectors = unique(control_totals.get_attribute("sector_id")[is_year])
     self._compute_sector_variables(sectors, job_set)
     for area in unique_large_areas:
         idx = where(logical_and(is_year, large_area_ids == area))[0]
         self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
         jobs_index = where(jobs_large_area_ids == area)[0]
         jobs_for_this_area = DatasetSubset(job_set, jobs_index)
         logger.log_status("ETM for area %s (currently %s jobs)" % (area, jobs_for_this_area.size()))
         last_remove_idx = self.remove_jobs.size
         self._do_run_for_this_year(jobs_for_this_area)
          add_jobs_size = self.new_jobs[self.location_id_name].size - self.new_jobs["large_area_id"].size
         remove_jobs_size = self.remove_jobs.size-last_remove_idx
         logger.log_status("add %s, remove %s, total %s" % (add_jobs_size, remove_jobs_size,
                                                            jobs_for_this_area.size()+add_jobs_size-remove_jobs_size))
         self.new_jobs["large_area_id"] = concatenate((self.new_jobs["large_area_id"],
                 array(add_jobs_size*[area], dtype="int32")))
         # transform indices of removing jobs into indices of the whole dataset
          self.remove_jobs[last_remove_idx:self.remove_jobs.size] = \
              all_jobs_index[jobs_index[self.remove_jobs[last_remove_idx:self.remove_jobs.size]]]
     self._update_job_set(job_set)
     idx_new_jobs = arange(job_set.size()-self.new_jobs["large_area_id"].size, job_set.size())
     jobs_large_area_ids = job_set.compute_variables("washtenaw.job.large_area_id")
     jobs_large_area_ids[idx_new_jobs] = self.new_jobs["large_area_id"]
     job_set.delete_one_attribute("large_area_id")
     job_set.add_attribute(jobs_large_area_ids, "large_area_id", metadata=AttributeType.PRIMARY)
     # return an index of new jobs
     return arange(job_set.size()-self.new_jobs["large_area_id"].size, job_set.size())  
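
The per-area loop rests on a small numpy idiom: partition records by a categorical id with unique and where. A standalone sketch of just that pattern, independent of the opus_core Dataset machinery:

from numpy import array, unique, where

area_ids = array([10, 20, 10, 30, 20])
for area in unique(area_ids):
    idx = where(area_ids == area)[0]     # indices of records in this area
    print(area, idx)
# 10 [0 2]
# 20 [1 4]
# 30 [3]
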
Code example #4
 def run_chunk(self, index, dataset, specification, coefficients):
     self.specified_coefficients = SpecifiedCoefficients().create(coefficients, specification, neqs=1)
     compute_resources = Resources({"debug":self.debug})
     submodels = self.specified_coefficients.get_submodels()
     self.get_status_for_gui().update_pieces_using_submodels(submodels=submodels, leave_pieces=2)
     self.map_agents_to_submodels(submodels, self.submodel_string, dataset, index,
                                   dataset_pool=self.dataset_pool, resources = compute_resources)
     variables = self.specified_coefficients.get_full_variable_names_without_constants()
     self.debug.print_debug("Compute variables ...",4)
     self.increment_current_status_piece()
     dataset.compute_variables(variables, dataset_pool = self.dataset_pool, resources = compute_resources)
     data = {}
     coef = {}
      outcome = self.initial_values[index].copy()
     for submodel in submodels:
         coef[submodel] = SpecifiedCoefficientsFor1Submodel(self.specified_coefficients,submodel)
         self.coefficient_names[submodel] = coef[submodel].get_coefficient_names_without_constant()[0,:]
         self.debug.print_debug("Compute regression for submodel " +str(submodel),4)
         self.increment_current_status_piece()
         self.data[submodel] = dataset.create_regression_data(coef[submodel],
                                                             index = index[self.observations_mapping[submodel]])
         nan_index = where(isnan(self.data[submodel]))[1]
         inf_index = where(isinf(self.data[submodel]))[1]
         if nan_index.size > 0:
             nan_var_index = unique(nan_index)
             raise ValueError, "NaN(Not A Number) is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % coef[submodel].get_variable_names()[nan_var_index]
         if inf_index.size > 0:
             inf_var_index = unique(inf_index)
             raise ValueError, "Inf is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % coef[submodel].get_variable_names()[inf_var_index]
         
         if (self.data[submodel].shape[0] > 0) and (self.data[submodel].size > 0): # observations for this submodel available
             outcome[self.observations_mapping[submodel]] = \
                 self.regression.run(self.data[submodel], coef[submodel].get_coefficient_values()[0,:],
                     resources=self.run_config).astype(outcome.dtype)
     return outcome
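
The NaN/Inf guard can be exercised without the regression machinery. A sketch, assuming a 2-D data matrix with one column per variable, that locates offending columns the same way the where(...)[1] calls above do:

from numpy import array, isnan, isinf, nan, inf, unique, where

data = array([[1.0, nan, 3.0],
              [4.0, 5.0, inf]])
nan_cols = unique(where(isnan(data))[1])     # columns containing NaN
inf_cols = unique(where(isinf(data))[1])     # columns containing Inf
print(nan_cols, inf_cols)                    # [1] [2]
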
Code example #5
 def run(self,
         year,
         job_set,
         control_totals,
         job_building_types,
         data_objects=None,
         resources=None):
     self._do_initialize_for_run(job_set, job_building_types, data_objects)
     subarea_ids = control_totals.get_attribute(self.subarea_id_name)
     jobs_subarea_ids = job_set.compute_one_variable_with_unknown_package(
         variable_name="%s" % (self.subarea_id_name),
         dataset_pool=self.dataset_pool)
     unique_subareas = unique(subarea_ids)
     is_year = control_totals.get_attribute("year") == year
     all_jobs_index = arange(job_set.size())
     sectors = unique(control_totals.get_attribute("sector_id")[is_year])
     self._compute_sector_variables(sectors, job_set)
     for area in unique_subareas:
         idx = where(logical_and(is_year, subarea_ids == area))[0]
         self.control_totals_for_this_year = DatasetSubset(
             control_totals, idx)
         jobs_index = where(jobs_subarea_ids == area)[0]
         jobs_for_this_area = DatasetSubset(job_set, jobs_index)
         logger.log_status("ETM for area %s (currently %s jobs)" %
                           (area, jobs_for_this_area.size()))
         last_remove_idx = self.remove_jobs.size
         self._do_run_for_this_year(jobs_for_this_area)
          add_jobs_size = (self.new_jobs[self.location_id_name].size
                           - self.new_jobs[self.subarea_id_name].size)
         remove_jobs_size = self.remove_jobs.size - last_remove_idx
         logger.log_status(
             "add %s, remove %s, total %s" %
             (add_jobs_size, remove_jobs_size,
              jobs_for_this_area.size() + add_jobs_size - remove_jobs_size))
         self.new_jobs[self.subarea_id_name] = concatenate(
             (self.new_jobs[self.subarea_id_name],
              array(add_jobs_size * [area], dtype="int32")))
         # transform indices of removing jobs into indices of the whole dataset
          self.remove_jobs[last_remove_idx:self.remove_jobs.size] = \
              all_jobs_index[jobs_index[self.remove_jobs[last_remove_idx:self.remove_jobs.size]]]
     self._update_job_set(job_set)
     idx_new_jobs = arange(
         job_set.size() - self.new_jobs[self.subarea_id_name].size,
         job_set.size())
     jobs_subarea_ids = job_set.compute_one_variable_with_unknown_package(
         variable_name="%s" % (self.subarea_id_name),
         dataset_pool=self.dataset_pool)
     jobs_subarea_ids[idx_new_jobs] = self.new_jobs[self.subarea_id_name]
     job_set.delete_one_attribute(self.subarea_id_name)
     job_set.add_attribute(jobs_subarea_ids,
                           self.subarea_id_name,
                           metadata=AttributeType.PRIMARY)
     # return an index of new jobs
     return arange(
         job_set.size() - self.new_jobs[self.subarea_id_name].size,
         job_set.size())
Code example #6
    def run_chunk(self, index, dataset, specification, coefficients):
        self.specified_coefficients = SpecifiedCoefficients().create(
            coefficients, specification, neqs=1)
        compute_resources = Resources({"debug": self.debug})
        submodels = self.specified_coefficients.get_submodels()
        self.get_status_for_gui().update_pieces_using_submodels(
            submodels=submodels, leave_pieces=2)
        self.map_agents_to_submodels(submodels,
                                     self.submodel_string,
                                     dataset,
                                     index,
                                     dataset_pool=self.dataset_pool,
                                     resources=compute_resources)
        variables = self.specified_coefficients.get_full_variable_names_without_constants()
        self.debug.print_debug("Compute variables ...", 4)
        self.increment_current_status_piece()
        dataset.compute_variables(variables,
                                  dataset_pool=self.dataset_pool,
                                  resources=compute_resources)
        data = {}
        coef = {}
        outcome = self.initial_values[index].copy()
        for submodel in submodels:
            coef[submodel] = SpecifiedCoefficientsFor1Submodel(
                self.specified_coefficients, submodel)
            self.coefficient_names[submodel] = \
                coef[submodel].get_coefficient_names_without_constant()[0, :]
            self.debug.print_debug(
                "Compute regression for submodel " + str(submodel), 4)
            self.increment_current_status_piece()
            self.data[submodel] = dataset.create_regression_data(
                coef[submodel],
                index=index[self.observations_mapping[submodel]])
            nan_index = where(isnan(self.data[submodel]))[1]
            inf_index = where(isinf(self.data[submodel]))[1]
            vnames = asarray(coef[submodel].get_variable_names())
            if nan_index.size > 0:
                nan_var_index = unique(nan_index)
                self.data[submodel] = nan_to_num(self.data[submodel])
                logger.log_warning(
                    "NaN(Not A Number) is returned from variable %s; it is replaced with %s."
                    % (vnames[nan_var_index], nan_to_num(nan)))
                #raise ValueError, "NaN(Not A Number) is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[nan_var_index]
            if inf_index.size > 0:
                inf_var_index = unique(inf_index)
                self.data[submodel] = nan_to_num(self.data[submodel])
                logger.log_warning(
                    "Inf is returned from variable %s; it is replaced with %s."
                    % (vnames[inf_var_index], nan_to_num(inf)))
                #raise ValueError, "Inf is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[inf_var_index]

            if (self.data[submodel].shape[0] >
                    0) and (self.data[submodel].size >
                            0):  # observations for this submodel available
                outcome[self.observations_mapping[submodel]] = \
                    self.regression.run(self.data[submodel], coef[submodel].get_coefficient_values()[0,:],
                        resources=self.run_config).astype(outcome.dtype)
        return outcome
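
This variant substitutes finite values and logs a warning instead of raising. numpy's nan_to_num, used as the fallback above, maps NaN to 0.0 and ±Inf to the largest finite floats:

from numpy import array, nan, inf, nan_to_num

print(nan_to_num(array([nan, inf, -inf, 1.5])))
# NaN -> 0.0, Inf -> ~1.8e308, -Inf -> ~-1.8e308, 1.5 unchanged
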
Code example #7
def get_category_and_frequency(agent_set, agent_category_definition,
                               choice_set, choice_category_definition,
                               agent_filter_attribute,
                               category_inflating_factor, dataset_pool):

    agent_category_variable = []
    for i in range(len(agent_category_definition)):
        agent_category_variable.append( VariableName(agent_category_definition[i]).get_alias() + \
                '*%i' % category_inflating_factor**i )
    if len(agent_category_variable) > 0:
        agent_category_id = agent_set.compute_variables(
            "agent_category_id=" + ' + '.join(agent_category_variable),
            dataset_pool=dataset_pool)
    else:
        agent_category_id = agent_set.get_id_attribute()
    unique_agent_category_id = unique(agent_category_id)

    choice_category_variable = []
    for i in range(len(choice_category_definition)):
        #choice_category_variable.append( VariableName(choice_category_definition[i]).get_alias() + \
        #        '*%i' % category_inflating_factor**i )
        choice_category_variable.append( choice_category_definition[i] + \
                '*%i' % category_inflating_factor**i )
    if len(choice_category_variable) > 0:
        choice_category_id = choice_set.compute_variables(
            "choice_category_id=" + ' + '.join(choice_category_variable),
            dataset_pool=dataset_pool)
        agent_choice_category_id = agent_set.compute_variables('choice_category_id=%s.disaggregate(%s.choice_category_id)' % \
                                                               (agent_set.get_dataset_name(), choice_set.get_dataset_name()),
                                                               dataset_pool=dataset_pool)
    else:
        choice_category_id = choice_set.get_id_attribute()
        agent_choice_category_id = agent_set.get_attribute(
            choice_set.get_id_name()[0])

    unique_choice_category_id = unique(choice_category_id)

    if agent_filter_attribute is not None and len(agent_filter_attribute) > 0:
        agent_filter = agent_set.get_attribute(agent_filter_attribute)
    else:
        agent_filter = ones(agent_set.size(), dtype='bool')

    frequency = zeros(
        (unique_agent_category_id.size, unique_choice_category_id.size),
        dtype="int32")
    for i in range(unique_agent_category_id.size):  # iterate over agent categories
        is_agent_of_this_category = logical_and(
            agent_filter,
            agent_category_id == unique_agent_category_id[i]).astype("int32")
        frequency[i, :] = ndimage.sum(
            is_agent_of_this_category,
            labels=agent_choice_category_id.astype("int32"),
            index=unique_choice_category_id.astype("int32"))
    return frequency, unique_agent_category_id, unique_choice_category_id, agent_category_id, choice_category_id
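
The frequency matrix is a label-based cross-tabulation: scipy.ndimage.sum totals an indicator array within groups defined by a label array. A minimal sketch with invented categories:

from numpy import array, unique, zeros
from scipy import ndimage

agent_cat = array([1, 1, 2, 2, 2])       # agent category per agent
choice_cat = array([7, 8, 7, 7, 8])      # choice category per agent
rows, cols = unique(agent_cat), unique(choice_cat)
freq = zeros((rows.size, cols.size), dtype="int32")
for i, a in enumerate(rows):
    indicator = (agent_cat == a).astype("int32")
    freq[i, :] = ndimage.sum(indicator, labels=choice_cat, index=cols)
print(freq)   # [[1 1]
              #  [2 1]]
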
Code example #8
    def simulate_chunk(self):
        """ Like ChoiceModel, but here we need to simulate probabilities directly in the submodel loop, 
         because the logsums are passed directly from utilities to probabilities.
         Does not work with availability and prices.
         """
        self.debug.print_debug("Compute variables ...",4)
        self.increment_current_status_piece()
        self.model_interaction.compute_variables()
        
        logger.log_status("Choice set size: %i" % self.get_choice_set_size())
        
        coef = {}
        index = self.run_config["index"]
        self.debug.print_debug("Simulate ...",4)
        choices = empty((self.observations_mapping["index"].size,), dtype='int32')

        for submodel in self.model_interaction.get_submodels():
            self.model_interaction.prepare_data_for_simulation(submodel)
            coef[submodel] = self.model_interaction.get_submodel_coefficients(submodel)
            self.coefficient_names[submodel] = self.model_interaction.get_variable_names_for_simulation(submodel)
            self.debug.print_debug("   submodel: %s   nobs: %s" % (submodel, self.observations_mapping[submodel].size), 5)
            self.increment_current_status_piece()
            if self.model_interaction.is_there_data(submodel): # observations for this submodel available
                self.run_config.merge({"specified_coefficients": coef[submodel]})
                coef_vals = coef[submodel].get_coefficient_values()
                coef_names = coef[submodel].get_coefficient_names()
                data = self.get_all_data(submodel)

                nan_index = where(isnan(data))[2]
                inf_index = where(isinf(data))[2]
                vnames = asarray(coef[submodel].get_variable_names())
                if nan_index.size > 0:
                    nan_var_index = unique(nan_index)
                    data = nan_to_num(data)
                    logger.log_warning("NaN(Not A Number) is returned from variable %s; it is replaced with %s." % (vnames[nan_var_index], nan_to_num(nan)))
                    #raise ValueError, "NaN(Not a Number) is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[nan_var_index]
                if inf_index.size > 0:
                    inf_var_index = unique(inf_index)
                    data = nan_to_num(data)
                    logger.log_warning("Inf is returned from variable %s; it is replaced with %s." % (vnames[inf_var_index], nan_to_num(inf)))                    
                    #raise ValueError, "Inf is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[inf_var_index]
                    
                choices[self.observations_mapping[submodel]] = self.upc_sequence.run(data, coef_vals, resources=self.run_config)

        if self.run_config.get("export_simulation_data", False):
            self.export_probabilities(self.upc_sequence.probabilities, 
                                      self.run_config.get("simulation_data_file_name", './choice_model_data.txt'))
       
        if self.compute_demand_flag:
            self.compute_demand(self.upc_sequence.probabilities)

        return choices
Code example #9
def create_building_sqft_per_job_dataset(dataset_pool,
                                         minimum_median=25,
                                         maximum_median=2000):
    buildings = dataset_pool.get_dataset('building')
    jobs = dataset_pool.get_dataset('job')
    job_sqft = jobs.get_attribute('sqft')
    has_sqft = job_sqft > 0
    job_building_index = buildings.try_get_id_index(
        jobs.get_attribute('building_id'))
    is_valid = logical_and(job_building_index >= 0, has_sqft)
    job_building_index = job_building_index[is_valid]
    sqft_of_jobs = buildings.sum_over_ids(
        jobs.get_attribute("building_id")[is_valid], job_sqft[is_valid])
    buildings._compute_if_needed("urbansim_parcel.building.zone_id",
                                 dataset_pool=dataset_pool)
    building_zones = buildings.get_attribute("zone_id")
    building_types = buildings.get_attribute("building_type_id")
    unique_zones = unique(building_zones)
    unique_types = unique(building_types)
    result_zone = []
    result_bt = []
    result_sqft = []
    for zone in unique_zones:
        is_zone = building_zones == zone
        for bt in unique_types:
            is_bt = logical_and(is_zone, building_types == bt)
            if (is_bt.sum() > 0) and sqft_of_jobs[is_bt].sum() > 0:
                result_zone.append(zone)
                result_bt.append(bt)
                mid = min(maximum_median,
                          max(minimum_median,
                              round(median(job_sqft[is_valid][is_bt[job_building_index]]))))
                result_sqft.append(mid)

    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='building_sqft_per_job',
                        table_data={
                            "zone_id": array(result_zone),
                            "building_type_id": array(result_bt),
                            "building_sqft_per_job": array(result_sqft, dtype="int32"),
                        })
    return BuildingSqftPerJobDataset(in_storage=storage)
Code example #10
    def compute(self, dataset_pool):
        proposals = self.get_dataset()
        templates = dataset_pool.get_dataset("development_template")
        parcels = dataset_pool.get_dataset("parcel")
        constraints = dataset_pool.get_dataset("development_constraint")
        try:
            index1 = proposals.index1
        except AttributeError:  # proposals may not carry index1
            index1 = None
        parcels.get_development_constraints(constraints, dataset_pool, index=index1)
        parcel_index = parcels.get_id_index(proposals.get_attribute("parcel_id"))
        # transform parcel_index to be relative to index of parcels.development_constraints
        i_sort = parcels.development_constraints["index"].argsort()
        # i_sort_sort = i_sort.argsort()
        parcel_index = parcels.development_constraints["index"][i_sort].searchsorted(parcel_index)
        constraint_types = unique(constraints.get_attribute("constraint_type"))
        templates.compute_variables(
            map(lambda x: "%s.%s" % (self.template_opus_path, x), constraint_types), dataset_pool
        )
        template_ids = templates.get_id_attribute()
        generic_land_use_type_ids = templates.get_attribute("generic_land_use_type_id")
        proposal_template_ids = proposals.get_attribute("template_id")
        results = zeros(proposals.size(), dtype=bool8)
        unique_templates = unique(proposal_template_ids)
        for this_template_id in unique_templates:
            i_template = templates.get_id_index(this_template_id)
            fit_indicator = proposal_template_ids == this_template_id
            building_type_id = generic_land_use_type_ids[i_template]
            for constraint_type, constraint in parcels.development_constraints[building_type_id].iteritems():
                template_attribute = templates.get_attribute(constraint_type)[
                    i_template
                ]  # density converted to constraint variable name
                min_constraint = constraint[:, 0][parcel_index].copy()
                max_constraint = constraint[:, 1][parcel_index].copy()
                ## treat -1 as a constant for unconstrained
                w_unconstr = min_constraint == -1
                if w_unconstr.any():
                    min_constraint[w_unconstr] = template_attribute.min()

                w_unconstr = max_constraint == -1
                if w_unconstr.any():
                    max_constraint[w_unconstr] = template_attribute.max()

                fit_indicator = logical_and(
                    fit_indicator,
                    logical_and(template_attribute >= min_constraint, template_attribute <= max_constraint),
                )
            results[fit_indicator] = True
        return results
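
The -1 sentinel handling is worth isolating: wherever a bound equals -1, it is widened to the attribute's own extreme, so the comparison never excludes a proposal on that side. A sketch with made-up bounds:

from numpy import array, logical_and

template_attribute = array([0.5, 1.0, 2.0])
min_c = array([-1.0, 0.8, 0.8])          # -1 means: no lower bound
max_c = array([1.5, -1.0, 1.5])          # -1 means: no upper bound
min_c[min_c == -1] = template_attribute.min()
max_c[max_c == -1] = template_attribute.max()
print(logical_and(template_attribute >= min_c,
                  template_attribute <= max_c))   # [ True  True False]
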
Code example #11
    def run(self, utilities, resources=None):
        """ Compute probabilities of a discrete choice model from the given utitlities.
        'utilities' is a 2D array (nobservations x nequations).
        The return value is a 2D array (same shape as utilities).
        """
        if utilities.ndim < 2:
            raise StandardError, "Argument 'utilities' must be a 2D numpy array."

        util_min = utilities.min()
        util_max = utilities.max()
        if (util_min < self.computable_range[0]) or (util_max > self.computable_range[1]):
            # shift utilities to zero (maximum is at zero)
            to_be_transformed = where((utilities < self.computable_range[0]) +
                                      (utilities > self.computable_range[1]))
            to_be_transformed = unique(to_be_transformed[0])
            for idx in arange(to_be_transformed.size):
                i = to_be_transformed[idx]
                this_max = utilities[i, :].max()
                utilities[i, :] = utilities[i, :] - this_max

        availability = resources.get('availability', None)
        if availability is None:
            exponentiated_utility = exp(utilities)
        else:
            exponentiated_utility = exp(utilities) * (availability).astype('b')
        sum_exponentiated_utility = sum(exponentiated_utility, axis=1, dtype="float64")
        sum_exponentiated_utility = ma.masked_where(sum_exponentiated_utility==0, sum_exponentiated_utility)
        return ma.filled(exponentiated_utility/reshape(sum_exponentiated_utility,(utilities.shape[0], 1)), 0)
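
Shifting each out-of-range row so its maximum sits at zero is the standard guard against overflow in exp, and it leaves the resulting probabilities unchanged. A compact standalone version of the same computation, without the availability mask:

from numpy import array, exp

utilities = array([[800.0, 801.0],
                   [0.0, 1.0]])
shifted = utilities - utilities.max(axis=1, keepdims=True)  # row maxima at zero
p = exp(shifted)
p /= p.sum(axis=1, keepdims=True)
print(p)   # both rows give [0.26894142 0.73105858]; no overflow in exp
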
Code example #12
 def run(self,
         specification,
         coefficients,
         agent_set,
         agents_index=None,
         **kwargs):
     choices = ChoiceModel.run(self,
                               specification,
                               coefficients,
                               agent_set,
                               agents_index=agents_index,
                               **kwargs)
     if agents_index is None:
         agents_index = arange(agent_set.size())
     movers_indices = agents_index[where(choices > 0)]
     if self.movers_ratio is not None:
         n = rint(self.movers_ratio * agents_index.size)
         if n < movers_indices.size:
             movers_indices = sample_noreplace(movers_indices, n)
     # add unplaced agents
     unplaced_agents = agents_index[agent_set.get_attribute_by_index(
         self.location_id_name, agents_index) <= 0]
     logger.log_status(
         "%s agents selected by the logit model; %s agents without %s." %
         (movers_indices.size, unplaced_agents.size, self.location_id_name))
     movers_indices = unique(concatenate((movers_indices, unplaced_agents)))
     logger.log_status("Number of movers: " + str(movers_indices.size))
     return movers_indices
Code example #13
File: scaling_agents_model.py  Project: psrc/urbansim
 def run(self, location_set, agent_set, agents_index=None, ignore_agents_distribution=False):
     """
          'location_set' and 'agent_set' are of type Dataset.
          'agents_index' are indices of individuals in the agent_set for which
         the model runs. If it is None, the whole agent_set is considered.
         If ignore_agents_distribution is True, the agents in place are ignored and 
         the scaling is done proportionally to the weights only.
     """
     if agents_index is None:
         agents_index=arange(agent_set.size())
     
     if agents_index.size == 0:
         logger.log_status('Nothing to be done.')
         return array([], dtype='int32')
     if self.submodel_string is not None:
         submodels = unique(agent_set[self.submodel_string][agents_index])
     else:
         submodels = [-2]    
     self.map_agents_to_submodels(submodels, self.submodel_string, agent_set, agents_index,
                                   dataset_pool=self.dataset_pool, 
                                   resources = Resources({"debug": self.debug}))
     result = array(agents_index.size*[-1], dtype="int32")
     if self.observations_mapping['mapped_index'].size == 0:
         logger.log_status("No agents mapped to submodels.")
         return result
     
     for submodel in submodels:
         result[self.observations_mapping[submodel]] = self._simulate_submodel(submodel, 
                             location_set, agent_set, agents_index, ignore_agents_distribution=ignore_agents_distribution).astype(result.dtype)
     return result
Code example #14
def compute_lambda(nbs):
    com_dept_id = nbs.get_attribute("dept")
    depts = unique(com_dept_id)
    unitsvac9 = []
    unitssec9 = []
    units9 = []
    stayers98 = []
    for d in depts:
        com_in_this_dept = where(com_dept_id == d)[0]
        unitsvac9.append(
            nbs.get_attribute("unitsvac9")[com_in_this_dept].sum())
        unitssec9.append(
            nbs.get_attribute("unitssec9")[com_in_this_dept].sum())
        units9.append(nbs.get_attribute("units9")[com_in_this_dept].sum())
        stayers98.append(
            nbs.get_attribute("stayers98")[com_in_this_dept].sum())

    unitsvac9 = array(unitsvac9)
    unitssec9 = array(unitssec9)
    units9 = array(units9)
    stayers98 = array(stayers98)

    movers98 = units9 - stayers98 - unitssec9 - unitsvac9
    availableratio = unitsvac9 / units9.astype(float32)
    lambda_value = (units9 - unitssec9).astype(float32) / (
        units9 - unitssec9 - unitsvac9) - unitsvac9.astype(float32) / movers98

    lambda_value = lambda_value * 0.9

    return (depts, lambda_value)
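
With the department-level sums in hand, the lambda formula is plain arithmetic. A worked example with invented counts for a single department:

units9, unitssec9, unitsvac9, stayers98 = 1000.0, 100.0, 50.0, 700.0
movers98 = units9 - stayers98 - unitssec9 - unitsvac9            # 150.0
lambda_value = ((units9 - unitssec9) / (units9 - unitssec9 - unitsvac9)
                - unitsvac9 / movers98)                          # ~0.7255
print(lambda_value * 0.9)                                        # ~0.6529
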
Code example #15
    def run(self, agent_set, 
            resources=None, 
            reset_attribute_value={},
            append_unplaced_agents_index=True):
        self.resources.merge(resources)
        
        if agent_set.size()<=0:
            agent_set.get_id_attribute()
            if agent_set.size()<= 0:
                self.debug.print_debug("Nothing to be done.",2)
                return array([], dtype='int32')

        if self.upc_sequence and (self.upc_sequence.probability_class.rate_set or self.resources.get('relocation_rate', None)):
            self.resources.merge({agent_set.get_dataset_name():agent_set}) #to be compatible with old-style one-relocation_probabilities-module-per-model
            self.resources.merge({'agent_set':agent_set})
            choices = self.upc_sequence.run(resources=self.resources)
            # choices have value 1 for agents that should be relocated, otherwise 0.
            movers_indices = where(choices>0)[0]
        else:
            movers_indices = array([], dtype='int32')

        if reset_attribute_value and movers_indices.size > 0:
            for key, value in reset_attribute_value.items():
                agent_set.modify_attribute(name=key, 
                                           data=resize(asarray(value), movers_indices.size),
                                           index=movers_indices)            
        if append_unplaced_agents_index:
            # add unplaced agents
            unplaced_agents = where(agent_set.get_attribute(self.location_id_name) <= 0)[0]
            movers_indices = unique(concatenate((movers_indices, unplaced_agents)))
        
        logger.log_status("Number of movers: " + str(movers_indices.size))
        return movers_indices
Code example #16
    def run(self, utilities, resources=None):
        """ Compute probabilities of a discrete choice model from the given utitlities.
        'utilities' is a 2D array (nobservations x nequations).
        The return value is a 2D array (same shape as utilities).
        """
        if utilities.ndim < 2:
            raise StandardError, "Argument 'utilities' must be a 2D numpy array."

        util_min = utilities.min()
        util_max = utilities.max()
        if (util_min < self.computable_range[0]) or (util_max >
                                                     self.computable_range[1]):
            # shift utilities to zero (maximum is at zero)
            to_be_transformed = where((utilities < self.computable_range[0]) +
                                      (utilities > self.computable_range[1]))
            to_be_transformed = unique(to_be_transformed[0])
            for idx in arange(to_be_transformed.size):
                i = to_be_transformed[idx]
                this_max = utilities[i, :].max()
                utilities[i, :] = utilities[i, :] - this_max

        exponentiated_utility = exp(utilities)
        sum_exponentiated_utility = sum(exponentiated_utility,
                                        axis=1,
                                        dtype="float64")
        return exponentiated_utility / reshape(sum_exponentiated_utility,
                                               (utilities.shape[0], 1))
Code example #17
 def choose_agents_to_move_from_overfilled_locations(self, capacity,
                                                     agent_set, agents_index, agents_locations):
     """Agents with the smallest number of units should move again.
     """
     if capacity is None:
         return array([], dtype='int32')
     index_valid_agents_locations = where(agents_locations > 0)[0]
     valid_agents_locations = agents_locations[index_valid_agents_locations].astype("int32")
     unique_locations = unique(valid_agents_locations).astype("int32")
     index_consider_capacity = self.choice_set.get_id_index(unique_locations)
     capacity_of_affected_locations = capacity[index_consider_capacity]
     overfilled = where(capacity_of_affected_locations < 0)[0]
     movers = array([], dtype='int32')
     indexed_individuals = DatasetSubset(agent_set, agents_index[index_valid_agents_locations])
     ordered_agent_indices = self.get_agents_order(indexed_individuals)
     sizes = indexed_individuals.get_attribute(self.units_full_name)[ordered_agent_indices]
     choice_ids = self.choice_set.get_id_attribute()
     for loc in overfilled:
         agents_to_move = where(valid_agents_locations == choice_ids[index_consider_capacity[loc]])[0]
         if agents_to_move.size > 0:
             n = int(-1*capacity_of_affected_locations[loc])
             this_sizes = sizes[agents_to_move]
             csum = this_sizes[arange(this_sizes.size-1,-1,-1)].cumsum() # ordered increasingly
             csum = csum[arange(csum.size-1, -1,-1)] # ordered back decreasingly
             w = where(csum < n)[0]
             if w.size < agents_to_move.size: #add one more agent in order the cumsum be larger than n
                 w = concatenate((array([agents_to_move.size-w.size-1]), w))
             idx = ordered_agent_indices[agents_to_move[w]]
             movers = concatenate((movers, idx))
     return movers
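
The double-reversed cumsum picks, from the tail of the ordering, just enough agents that their unit counts reach the overflow n. The same selection in isolation, with sizes assumed ordered decreasingly as in the model:

from numpy import array, concatenate, where

sizes = array([9, 5, 3, 2, 1])       # unit counts, ordered decreasingly
n = 4                                # units that must be freed
csum = sizes[::-1].cumsum()[::-1]    # csum[k] = sizes[k:].sum()
w = where(csum < n)[0]               # tail agents whose total is still below n
if w.size < sizes.size:              # add one more so the sum reaches at least n
    w = concatenate((array([sizes.size - w.size - 1]), w))
print(w, sizes[w].sum())             # [2 3 4] 6
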
Code example #18
    def create_interaction_dataset(self, agent_set, agents_index, config, *args, **kwargs):
        if config is not None and config.get("estimate", False):
            id_name = self.choice_set.get_id_name()[0]
            mod_id_name = "__%s__" % id_name
            if mod_id_name in agent_set.get_known_attribute_names():
                agent_set.set_values_of_one_attribute(id_name, agent_set.get_attribute(mod_id_name))
            result = LocationChoiceModel.create_interaction_dataset(self, agent_set,
                                                                    agents_index, config, **kwargs)
            # select buildings to unplace at random
            ntounplace = int(agents_index.size/4.0)
            #ntounplace = 1
            #self.dataset_pool.get_dataset("urbansim_constant")["recent_years"])
            #idx = sample_noreplace(agents_index, ntounplace)
            tmp = randint(0, agents_index.size, ntounplace)
            utmp = unique(tmp)
            idx = agents_index[utmp]
            logger.log_status("Unplace %s buildings." % utmp.size)
            if mod_id_name not in agent_set.get_known_attribute_names():
                agent_set.add_attribute(name=mod_id_name, data=array(agent_set.get_attribute(id_name)))
            agent_set.set_values_of_one_attribute(id_name, -1.0*ones((idx.size,)), idx)

            return result

        return LocationChoiceModel.create_interaction_dataset(self, agent_set,
                                                              agents_index, config, **kwargs)
Code example #20
    def compute(self, dataset_pool):
        dataset = self.get_dataset()
        #zones = dataset_pool.get_dataset('zone')
        travel_data = dataset_pool.get_dataset('travel_data')
        travel_data_attr_mat = travel_data.get_attribute_as_matrix(self.travel_data_attribute, 
                                                                   fill=self.travel_data_attribute_default_value)
        agent_resource = dataset.get_attribute(self.agent_resource)
        from_zone = dataset.get_attribute(self.origin_zone_id).astype("int32")
        to_zone = dataset.get_attribute(self.destination_zone_id).astype("int32")
        
        #zone_ids = zones.get_id_attribute()
        zone_ids = unique(travel_data["from_zone_id"])
        results = zeros((dataset.size(), zone_ids.max()+1), dtype='bool')        
        for zone in zone_ids:
            tmp_zone = zone * ones(from_zone.shape, dtype="int32")
            t1 = travel_data_attr_mat[from_zone, tmp_zone]
            t2 = travel_data_attr_mat[tmp_zone, to_zone]
            results[where(t1 + t2 <= agent_resource)[0], zone] = 1
        
#        missing_pairs_index = travel_data.get_od_pair_index_not_in_dataset(from_zone, to_zone)
#        if missing_pairs_index[0].size > 0:
#            results[missing_pairs_index] = self.default_value
#            logger.log_warning("zone pairs at index %s are not in travel data; value set to %s." % ( str(missing_pairs_index), self.default_value) )

        return results
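
The loop builds a boolean agent-by-zone matrix marking every zone that can serve as an intermediate stop within the agent's resource budget. A self-contained sketch with a small invented travel-time matrix indexed directly by zone number:

from numpy import array, where, zeros

tt = array([[ 0, 10, 30],            # symmetric travel times, zones 0..2
            [10,  0, 15],
            [30, 15,  0]])
from_zone = array([0, 0])            # one entry per agent
to_zone = array([2, 1])
budget = array([25, 20])             # the agent_resource above
reachable = zeros((from_zone.size, tt.shape[0]), dtype='bool')
for zone in range(tt.shape[0]):
    detour = tt[from_zone, zone] + tt[zone, to_zone]   # go via this zone
    reachable[where(detour <= budget)[0], zone] = 1
print(reachable)   # [[False  True False]
                   #  [ True  True False]]
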
Code example #21
 def run(self, individual_dataset, fraction_dataset, id_name1='blockgroup_id', 
         id_name2='zone_id', fraction_attribute_name='fraction'):
     
     """
     """
     assert id_name1 in individual_dataset.get_known_attribute_names()
     if id_name2 not in individual_dataset.get_known_attribute_names():           
         individual_dataset.add_primary_attribute(-1*ones(individual_dataset.size()), id_name2)
     fraction_id1 = fraction_dataset.get_attribute(id_name1)
     individual_id1 = individual_dataset.get_attribute(id_name1)
     unique_ids = unique(fraction_id1)
     
     for id1 in unique_ids:
         individual_of_id1 = where(individual_id1==id1)[0]
         n = individual_of_id1.size
         logger.log_status("Processing %s %s: %s individuals" % (id_name1, id1, n) )
         if n > 0:
             fractions = fraction_dataset.get_attribute(fraction_attribute_name)[fraction_id1==id1]
             id2 = fraction_dataset.get_attribute(id_name2)[fraction_id1==id1]
              ## ignore households in geographies whose fractions sum to less than 0.01
             if fractions.sum() < 1.0e-2:
                 continue
             if not allclose(fractions.sum(), 1.0, rtol=1.e-2):
                 fractions = normalize(fractions)
             fractions_cumsum = ncumsum(fractions)
             R = random(n)
             index = searchsorted(fractions_cumsum, R)
             individual_dataset.modify_attribute(id_name2, id2[index], index=individual_of_id1)
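
Drawing a zone for each individual in proportion to the given fractions reduces to searchsorted on the cumulative fractions. The sampling step without the Dataset wrappers:

from numpy import array, searchsorted
from numpy.random import random

fractions = array([0.2, 0.5, 0.3])    # must sum to 1
zone_ids = array([101, 102, 103])
R = random(10)                        # one uniform draw per individual
index = searchsorted(fractions.cumsum(), R)
print(zone_ids[index])                # ~20% 101, ~50% 102, ~30% 103
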
Code example #22
 def _get_distinct_values(self, values):
     # for each coefficient index used in the mapping, pick the first value assigned to it
     coefmap = self.get_coefficient_mapping()
     coefmapflat = coefmap.ravel()
     coefmapflat = coefmapflat[where(coefmapflat >= 0)]  # drop negative (unused) entries
     coef_idx = unique(coefmapflat)
     result = map(lambda x: values[coefmap == x][0], coef_idx)
     return array(result)
Code example #23
    def run(self, run_choice_model=True, choose_job_only_in_residence_zone=False, *args, **kwargs):
        agent_set = kwargs['agent_set']
        if run_choice_model:
            choices = ChoiceModel.run(self, *args, **kwargs)
            #prob_work_at_home = self.upc_sequence.probabilities[:, 0]
                
            agent_set.set_values_of_one_attribute(self.choice_attribute_name, 
                                                  choices, 
                                                  index=kwargs['agents_index'])
            at_home_worker_index = kwargs['agents_index'][choices==1]
            logger.log_status("%s workers choose to work at home, %s workers chose to work out of home." % 
                              (where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 1)[0].size,
                               where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 0)[0].size))            
        else:
            at_home_worker_index = where(logical_and( 
                                                     agent_set.get_attribute(self.choice_attribute_name) == 1,
                                                     agent_set.get_attribute('job_id') <= 0
                                                     )
                                        )[0]
        
        if self.filter is not None:
            jobs_set_index = where( self.job_set.compute_variables(self.filter) )[0]
        else:
            jobs_set_index = arange( self.job_set.size() )
            
        logger.log_status("Total: %s workers work at home, (%s workers work out of home), will try to assign %s workers to %s jobs." % 
                          (where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
                           where(agent_set.get_attribute(self.choice_attribute_name) == 0)[0].size,
                          at_home_worker_index.size,
                          jobs_set_index.size
                          ))

        if not choose_job_only_in_residence_zone:
            assigned_worker_index, assigned_job_index = self._assign_job_to_worker(at_home_worker_index, jobs_set_index)
        else:
            agent_set.compute_variables("urbansim_parcel.person.zone_id")
            self.job_set.compute_variables("urbansim_parcel.job.zone_id")
            agent_zone_ids = agent_set.get_attribute_by_index('zone_id', at_home_worker_index)
            job_zone_ids = self.job_set.get_attribute_by_index('zone_id', jobs_set_index)
            unique_zones = unique(job_zone_ids)
            assigned_worker_index = array([], dtype="int32")
            assigned_job_index = array([], dtype="int32")
            for this_zone in unique_zones:
                logger.log_status("zone_id: %s" % this_zone)
                if this_zone <= 0: continue
                at_home_worker_in_this_zone = where(agent_zone_ids == this_zone)[0]
                job_set_in_this_zone = where(job_zone_ids == this_zone)[0]
                assigned_worker_in_this_zone, assigned_job_set_in_this_zone = self._assign_job_to_worker(at_home_worker_in_this_zone, job_set_in_this_zone)
                assigned_worker_index = concatenate((assigned_worker_index, at_home_worker_index[assigned_worker_in_this_zone]))
                assigned_job_index = concatenate((assigned_job_index, jobs_set_index[assigned_job_set_in_this_zone]))

        ## each worker can only be assigned to 1 job
        #assert assigned_worker_index.size == unique(assigned_worker_index).size
        agent_set.set_values_of_one_attribute(self.job_set.get_id_name()[0], 
                                              self.job_set.get_id_attribute()[assigned_job_index], 
                                              index=assigned_worker_index)
        agent_set.compute_variables([self.location_id_name], dataset_pool=self.dataset_pool)
        self.job_set.modify_attribute(name=VariableName(self.location_id_name).get_alias(), 
                                      data=agent_set.get_attribute_by_index(self.location_id_name, assigned_worker_index),
                                      index=assigned_job_index)
Code example #24
 def create_edges(self, input_file_dir, input_file_name, output_file_name):
     storage = StorageFactory().get_storage(type='tab_storage', subdir='store', 
         storage_location=input_file_dir)
     dataset = Dataset(in_storage = storage, id_name = ['stop_id','sch_time'], in_table_name = input_file_name)
     
     n = dataset.size()
     trip_ids = dataset.get_attribute("stop_id")
     unique_trip_ids = unique(trip_ids)
     source_list = list()
     target_list = list()
     time_list = list()
     
     for trip in unique_trip_ids:
         idx = where(dataset.get_attribute("stop_id") == trip)[0]
         nodes = dataset.get_attribute_by_index("node_id", idx)
         times = dataset.get_attribute_by_index("sch_time", idx)
         for inode in range(nodes.size-1):
             source_list.append(nodes[inode])
             target_list.append(nodes[inode+1])
             time_list.append(times[inode+1] - times[inode])
    
     storage = StorageFactory().get_storage('dict_storage')
     
     storage.write_table(table_name='edges',
         table_data={
             'edge_id': arange(len(source_list))+1, 
             'source': array(source_list), #type=int64), # <<<< OUTPUT FIELD, USE array
             'target': array(target_list), #type=int64), # <<<< OUTPUT FIELD, USE array
             'cost': array(time_list, dtype=int32)
             }
         )
    
     edges = Dataset(in_storage=storage, in_table_name='edges', id_name = "edge_id")
     
     edges.write_dataset(attributes = ["source", "target", "cost"], out_storage = storage, out_table_name = output_file_name)
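
The nested loop links each stop to its successor within a trip, with the schedule-time difference as the edge cost. The core of it, stripped of the storage plumbing, with invented ids:

from numpy import array, unique, where

stop_id = array([1, 1, 1, 2, 2])     # trip each record belongs to
node_id = array([10, 11, 12, 20, 21])
sch_time = array([0, 5, 9, 0, 7])
edges = []
for trip in unique(stop_id):
    idx = where(stop_id == trip)[0]
    nodes, times = node_id[idx], sch_time[idx]
    for i in range(nodes.size - 1):  # consecutive stops form an edge
        edges.append((int(nodes[i]), int(nodes[i + 1]), int(times[i + 1] - times[i])))
print(edges)   # [(10, 11, 5), (11, 12, 4), (20, 21, 7)]
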
Code example #26
    def run(self, location_set, agent_set, agents_index=None, data_objects=None, **kwargs):
        if agents_index is None:
            agents_index = arange(agent_set.size())
        regions = agent_set.get_attribute(self.subarea_id_name)

        location_region = location_set.compute_one_variable_with_unknown_package(variable_name="%s" % (self.subarea_id_name), dataset_pool=self.dataset_pool)
        valid_region = where(regions[agents_index] > 0)[0]
        if valid_region.size > 0:
            unique_regions = unique(regions[agents_index][valid_region])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[agents_index[valid_region]] = True
            for area in unique_regions:
                new_index = where(logical_and(cond_array, regions == area))[0]
                self.filter = "%s.%s == %s" % (location_set.get_dataset_name(), self.subarea_id_name, area)
                logger.log_status("SJM for area %s" % area)
                ScalingJobsModel.run(self, location_set, agent_set, agents_index=new_index, **kwargs)

        no_region = where(regions[agents_index] <= 0)[0]
        if no_region.size > 0: # run the model for jobs that don't have assigned region
            self.filter = None
            logger.log_status("SJM for jobs with no area assigned")
            choices = ScalingJobsModel.run(self, location_set, agent_set, agents_index=agents_index[no_region], **kwargs)
            where_valid_choice = where(choices > 0)[0]
            choices_index = location_set.get_id_index(choices[where_valid_choice])
            chosen_regions = location_set.get_attribute_by_index(self.subarea_id_name, choices_index)
            agent_set.modify_attribute(name=self.subarea_id_name, data=chosen_regions, 
                                       index=no_region[where_valid_choice])
Code example #28
    def run(self, location_set, agent_set, agents_index=None, data_objects=None, **kwargs):
        if agents_index is None:
            agents_index = arange(agent_set.size())
        large_areas = agent_set.get_attribute(self.large_area_id_name)
        location_large_area = location_set.compute_variables(["washtenaw.%s.%s" % (location_set.get_dataset_name(), self.large_area_id_name)],
                                                  dataset_pool=self.dataset_pool)
        valid_large_area = where(large_areas[agents_index] > 0)[0]
        if valid_large_area.size > 0:
            unique_large_areas = unique(large_areas[agents_index][valid_large_area])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[agents_index[valid_large_area]] = True
            for area in unique_large_areas:
                new_index = where(logical_and(cond_array, large_areas == area))[0]
                self.filter = "%s.%s == %s" % (location_set.get_dataset_name(), self.large_area_id_name, area)
                logger.log_status("%s for area %s" % (self.model_short_name, area))
                ScalingJobsModel.run(self, location_set, agent_set, agents_index=new_index, **kwargs)

        no_large_area = where(large_areas[agents_index] <= 0)[0]
        if no_large_area.size > 0: # run the model for jobs that don't have assigned large_area
            self.filter = None
            logger.log_status("%s for jobs with no area assigned" % self.model_short_name)
            choices = ScalingJobsModel.run(self, location_set, agent_set, agents_index=agents_index[no_large_area], **kwargs)
            where_valid_choice = where(choices > 0)[0]
            choices_index = location_set.get_id_index(choices[where_valid_choice])
            chosen_large_areas = location_set.get_attribute_by_index(self.large_area_id_name, choices_index)
            agent_set.modify_attribute(name=self.large_area_id_name, data=chosen_large_areas, 
                                       index=no_large_area[where_valid_choice])
Code example #29
 def demolish_buildings(self, buildings_to_be_demolished, building_dataset, dataset_pool):
     if isinstance(buildings_to_be_demolished, list):
         buildings_to_be_demolished = array(buildings_to_be_demolished)
         
     if buildings_to_be_demolished.size <= 0:
         return
     
     id_index_in_buildings = building_dataset.get_id_index(buildings_to_be_demolished)
     parcels = dataset_pool.get_dataset('parcel')
     idx_pcl = parcels.get_id_index(unique(building_dataset['parcel_id'][id_index_in_buildings]))
     # remove occupants from buildings to be demolished
     JAMM = JoinAttributeModificationModel()
     for agent_name in ['household', 'job']:            
         agents = dataset_pool.get_dataset(agent_name)
         JAMM.run(agents, building_dataset, index=id_index_in_buildings, value=-1)
         
     building_dataset.remove_elements(id_index_in_buildings)
     logger.log_status("%s buildings demolished." % buildings_to_be_demolished.size)
     
     # set land_use_type 'vacant' to parcels with demolished buildings
     land_types = dataset_pool.get_dataset('land_use_type')
     vac_idx = land_types["land_use_name"] == 'vacant'
     if vac_idx.sum() > 0:
         code = land_types.get_id_attribute()[vac_idx][0]
         nvac = (parcels['land_use_type_id'][idx_pcl] == code).sum()
         parcels['land_use_type_id'][idx_pcl] = code
         logger.log_status("%s parcels set to vacant." % (idx_pcl.size - nvac))
Code example #30
 def run(self, specification, coefficients, agent_set, agents_index=None, **kwargs):
     if agents_index is None:
         agents_index = arange(agent_set.size())
     large_areas = agent_set.get_attribute(self.large_area_id_name)
     self.choice_set.compute_variables(["washtenaw.%s.%s" % (self.choice_set.get_dataset_name(), self.large_area_id_name)],
                                               dataset_pool=self.dataset_pool)
     valid_large_area = where(large_areas[agents_index] > 0)[0]
     if valid_large_area.size > 0:
         unique_large_areas = unique(large_areas[agents_index][valid_large_area])
         cond_array = zeros(agent_set.size(), dtype="bool8")
         cond_array[agents_index[valid_large_area]] = True
         for area in unique_large_areas:
             new_index = where(logical_and(cond_array, large_areas == area))[0]
             self.filter = "%s.%s == %s" % (self.choice_set.get_dataset_name(), self.large_area_id_name, area)
             logger.log_status("ELCM for area %s" % area)
             EmploymentLocationChoiceModel.run(self, specification, coefficients, agent_set, 
                                              agents_index=new_index, **kwargs)
     agent_index_no_large_area = agents_index[ large_areas[agents_index] <= 0 ]
     if agent_index_no_large_area.size > 0: # run the ELCM for jobs that don't have assigned large_area
         self.filter = None
         logger.log_status("ELCM for jobs with no area assigned")
         choices = EmploymentLocationChoiceModel.run(self, specification, coefficients, agent_set, 
                                                     agents_index=agent_index_no_large_area, **kwargs)
         where_valid_choice = where(choices > 0)[0]
         choices_index = self.choice_set.get_id_index(choices[where_valid_choice])
         chosen_large_areas = self.choice_set.get_attribute_by_index(self.large_area_id_name, choices_index)
         agent_set.modify_attribute(name=self.large_area_id_name, data=chosen_large_areas, 
                                    index=agent_index_no_large_area[where_valid_choice])
Code example #31
    def log_descriptives(self, data_array=None, dataset=None, attribute=None, by=None, show_values=None):
        if data_array is None:
            data_array=dataset.get_attribute(attribute)
        if by is None:
            logger.log_status("#N: %s\tmean: %.3f\tstd: %.3f" % (data_array.size, data_array.mean(), data_array.std()) )
            logger.log_status("min: %.3f\tmedian: %.3f\tmax: %.3f" % (data_array.min(), median(data_array), data_array.max()) )
        else:
            #if type(by)==tuple:
                #by_value, show_values = by
            #else:
                #by_value = by

            if type(by)==str:
                by_str = by
                by_value = dataset.get_attribute(by_str)
            else:
                by_str = "" 
                by_value = by

            if show_values is None or show_values == {}:
                show_values = unique(by_value)

            assert(by_value.size==data_array.size)

            # show_values is either a dict of {value: description} or an array of values
            items = show_values.iteritems() if isinstance(show_values, dict) else show_values
            for item in items:
                if type(item) == tuple:
                    value, description = item
                    description_str = "(%s)" % description
                else:
                    value = item
                    description_str = ""
                data = data_array[by_value==value]
                logger.log_status("%s %s%s:" % (by_str, value, description_str))
                logger.log_status("#N: %s\tmean: %.3f\tstd: %.3f" % (data.size, data.mean(), data.std()) )
                logger.log_status("min: %.3f\tmedian: %.3f\tmax: %.3f" % (data.min(), median(data), data.max()) )
Code example #32
    def compute(self, dataset_pool):
        proposals = self.get_dataset()
        templates = dataset_pool.get_dataset("development_template")
        parcels = dataset_pool.get_dataset("parcel")
        constraints = dataset_pool.get_dataset("development_constraint") 
        try:
            index1 = proposals.index1
        except AttributeError:
            index1 = None
        parcels.get_development_constraints(constraints, dataset_pool, 
                                            index= index1)       
        parcel_index = parcels.get_id_index(proposals.get_attribute("parcel_id"))
        # transform parcel_index to be relative to index of parcels.development_constraints
        i_sort = parcels.development_constraints['index'].argsort()
        #i_sort_sort = i_sort.argsort()
        parcel_index = parcels.development_constraints['index'][i_sort].searchsorted(parcel_index)
        constraint_types = unique(constraints.get_attribute("constraint_type"))
        templates.compute_variables(map(lambda x: "%s.%s" % (self.template_opus_path, x), constraint_types), dataset_pool)
        template_ids = templates.get_id_attribute()
        generic_land_use_type_ids = templates.get_attribute("generic_land_use_type_id")
        proposal_template_ids = proposals.get_attribute("template_id")
        results = zeros(proposals.size(), dtype=bool8)
        unique_templates = unique(proposal_template_ids)
        for this_template_id in unique_templates:
            i_template = templates.get_id_index(this_template_id)
            fit_indicator = (proposal_template_ids == this_template_id )
            building_type_id = generic_land_use_type_ids[i_template]
            for constraint_type, constraint in parcels.development_constraints[building_type_id].iteritems():                
                template_attribute = templates.get_attribute(constraint_type)[i_template]  #density converted to constraint variable name           
                min_constraint = constraint[:, 0][parcel_index].copy() 
                max_constraint = constraint[:, 1][parcel_index].copy()
                ## treat -1 as a code for unconstrained
                w_unconstr = min_constraint == -1
                if w_unconstr.any():
                    min_constraint[w_unconstr] = template_attribute.min()

                w_unconstr = max_constraint == -1
                if w_unconstr.any():
                    max_constraint[w_unconstr] = template_attribute.max()
                
                fit_indicator = logical_and(fit_indicator,
                                            logical_and(template_attribute >= min_constraint,
                                                        template_attribute <= max_constraint)
                                            )
            results[fit_indicator] = True
        return results
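The argsort/searchsorted remapping in the middle of example #32 is the trickiest line there; here is the general idiom in isolation, assuming the goal is to locate each query value's position in an unsorted index array.

import numpy as np

constraint_index = np.array([7, 2, 5])     # parcel indices, arbitrary order
parcel_index     = np.array([5, 7, 7, 2])  # one entry per proposal
i_sort = constraint_index.argsort()        # order that sorts the index
rows_sorted = constraint_index[i_sort].searchsorted(parcel_index)
rows = i_sort[rows_sorted]                 # positions in the original order
assert (constraint_index[rows] == parcel_index).all()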
Code example #33
    def run(self, refinement_dataset=None, current_year=None, 
            action_order=['subtract', 'add', 'multiple', 'set_value', 'convert', 'demolish', 'delete'],
            dataset_pool=None):
        
        """
        """
        
        if refinement_dataset is None:
            refinement_dataset = dataset_pool.get_dataset('refinement')
        self.id_names = (refinement_dataset.get_id_name()[0], 'transaction_id')
        
        if current_year is None:
            current_year = SimulationState().get_current_time()
        
        #refinements_this_year = copy.deepcopy(refinement_dataset)
        refinements_this_year = refinement_dataset
        this_year_index = where(refinement_dataset.get_attribute('year')==current_year)[0]
        all_years_index = where(refinement_dataset.get_attribute('year')==-1)[0]
        refinements_this_year.subset_by_index(concatenate( (this_year_index, all_years_index) ), 
                                              flush_attributes_if_not_loaded=False)
        
        transactions = refinements_this_year.get_attribute('transaction_id')
        actions = refinements_this_year.get_attribute('action')
        for this_transaction in sort( unique(transactions) ):
            #transaction_list = [] # list of each action in this transaction
            agents_pool = []  # index to agents to keep track agent within 1 transaction
            logger.start_block("Transaction %i" % this_transaction)
            for action_type in action_order:
                action_function = getattr(self, '_' + action_type)
                for refinement_index in where( logical_and(transactions==this_transaction, actions == action_type))[0]:
                    this_refinement = refinements_this_year.get_data_element(refinement_index)
                    ## get agent_dataset and location_dataset if specified
                    agent_dataset_name = this_refinement.agent_dataset
                    agent_dataset = dataset_pool.get_dataset( agent_dataset_name )
                    location_dataset = None
                    logger.log_status("Action: %s\nAmount: %s\nAttribute: %s\nFilter: %s" % \
                                      (action_type, this_refinement.amount, this_refinement.agent_attribute, 
                                       this_refinement.agent_filter
                                       ) )
                    action_function( agents_pool, this_refinement.amount,
                                     agent_dataset, location_dataset, 
                                     this_refinement, 
                                     dataset_pool )
                    
                    agent_dataset.flush_dataset()
                    dataset_pool._remove_dataset(agent_dataset.get_dataset_name())
                    if location_dataset is not None:
                        location_dataset.flush_dataset()
                        dataset_pool._remove_dataset(location_dataset.get_dataset_name())
                    
            ## delete agents still in agents_pool at the end of the transaction
            #agent_dataset.remove_elements( array(agents_pool) )
            
            
#            dataset_pool.flush_loaded_datasets()
#            dataset_pool.remove_all_datasets()
                        
            logger.end_block()
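The scheduling logic of example #33 — group refinements by transaction, then run actions in a fixed order — reduced to a runnable toy; arrays and values are illustrative only.

import numpy as np

transactions = np.array([2, 1, 1, 2])
actions      = np.array(['add', 'subtract', 'add', 'demolish'])
action_order = ['subtract', 'add', 'multiple', 'set_value',
                'convert', 'demolish', 'delete']
for t in np.sort(np.unique(transactions)):
    for a in action_order:
        for i in np.where((transactions == t) & (actions == a))[0]:
            print("transaction %d: run '%s' on refinement row %d" % (t, a, i))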
Code example #35
 def plot_choice_set(self):
     """Plot map of the sampled choice set."""
     choice_set = self.get_choice_set()
     result = zeros(choice_set.size(), dtype='int16')
     result[unique(self.get_choice_set_index().ravel())] = 1
     dummy_attribute_name = '__sampled_choice_set__'
     choice_set.add_attribute(name=dummy_attribute_name, data=result)
     choice_set.plot_map(dummy_attribute_name, background=-1)
     choice_set.delete_one_attribute(dummy_attribute_name)
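What plot_choice_set computes before handing off to plot_map, as plain numpy: a 0/1 flag over the full choice set marking every sampled alternative (sizes here are made up).

import numpy as np

choice_set_index = np.array([[0, 3], [3, 1]])  # sampled alternatives per agent
flag = np.zeros(5, dtype='int16')              # 5 alternatives in total
flag[np.unique(choice_set_index.ravel())] = 1
print(flag)   # [1 1 0 1 0]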
Code example #36
File: units.py  Project: christianurich/VIBe2UrbanSim
 def compute(self,  dataset_pool):
     bldg = self.get_dataset()
     bt = dataset_pool.get_dataset("building_type")
     results = zeros(bldg.size(), dtype = self._return_type)
     for unit_name in unique(bt.get_attribute('unit_name')):
         self.add_and_solve_dependencies("sanfrancisco.building.%s" % unit_name, dataset_pool)
         is_of_this_unit_name = bldg.get_attribute('unit_name')==unit_name
         results[is_of_this_unit_name] = bldg.get_attribute(unit_name)[is_of_this_unit_name].astype(self._return_type)
     return results
Code example #37
 def plot_choice_set_attribute(self, name):
     """Plot map of the given attribute for the sampled choice set."""
     choice_set = self.get_choice_set()
     filter_var = ones(choice_set.size(), dtype='int16')
     filter_var[unique(self.get_choice_set_index().ravel())] = 0
     dummy_attribute_name = '__sampled_choice_set_filter__'
     choice_set.add_attribute(name=dummy_attribute_name, data=filter_var)
     choice_set.plot_map(name, filter=dummy_attribute_name)
     choice_set.delete_one_attribute(dummy_attribute_name)
Code example #39
File: total_spaces.py  Project: psrc/urbansim
 def compute(self,  dataset_pool):
     buildings = self.get_dataset()
     results = zeros(buildings.size(), dtype=self._return_type)        
     for unit_name in unique(buildings["unit_name"]):
         #should not count parcel_sqft
         if unit_name == "parcel_sqft" or unit_name == "":continue
         self.add_and_solve_dependencies(["urbansim_parcel.building." + unit_name], dataset_pool)
         matched = buildings["unit_name"] == unit_name
         results[matched] = buildings[unit_name][matched].astype(self._return_type)
     return results
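The dispatch pattern shared by examples #36, #39, #41 and their siblings — one pass per distinct unit_name, filling only the matching rows — in a self-contained form with invented columns.

import numpy as np

unit_name = np.array(["residential_units", "parcel_sqft",
                      "residential_units", "office_sqft"])
columns = {"residential_units": np.array([4, 0, 12, 0]),
           "office_sqft":       np.array([0, 0, 0, 900])}
results = np.zeros(unit_name.size, dtype="int32")
for name in np.unique(unit_name):
    if name in ("parcel_sqft", ""):
        continue
    matched = unit_name == name
    results[matched] = columns[name][matched]
print(results)   # [  4   0  12 900]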
Code example #41
File: units_occupied.py  Project: psrc/urbansim
 def compute(self,  dataset_pool):
     buildings = self.get_dataset()
     results = zeros(buildings.size(), dtype=self._return_type)
     ##TODO: these dummy values are used when the businesses and households tables aren't ready
     for unit_name in unique(dataset_pool.get_dataset("building_type").get_attribute("unit_name")):
         #should not count parcel_sqft
         if unit_name == "parcel_sqft":continue
         matched = buildings.get_attribute("unit_name") == unit_name
         results[matched] = buildings.get_attribute(unit_name)[matched].astype(self._return_type)
     return results
Code example #42
    def compute(self,  dataset_pool):
        bldg_x_parcel = self.get_dataset()
        bt = dataset_pool.get_dataset("building_type")
        results = zeros(bldg_x_parcel.get_2d_index().shape, dtype=self._return_type)
        for unit_capacity_name in unique(bt.get_attribute('unit_capacity_name')):
            self.add_and_solve_dependencies("sanfrancisco.parcel.%s" % unit_capacity_name, dataset_pool)
            is_of_this_unit_capacity_name = bldg_x_parcel.get_attribute('unit_capacity_name')==unit_capacity_name
            results[is_of_this_unit_capacity_name] = bldg_x_parcel.get_attribute(unit_capacity_name)[is_of_this_unit_capacity_name]

        return results
Code example #43
File: total_spaces.py  Project: psrc/urbansim
 def compute(self,  dataset_pool):
     dppc = self.get_dataset()
     results = zeros(dppc.size(), dtype=self._return_type)
     for unit_name in unique(dppc["unit_name"]):
         #should not count parcel_sqft
         if unit_name == "parcel_sqft":continue
         self.add_and_solve_dependencies(["urbansim_parcel.development_project_proposal_component." + unit_name], dataset_pool)
         matched = dppc["unit_name"] == unit_name
         results[matched] = dppc[unit_name][matched].astype(self._return_type)
     return results
Code example #44
    def run(self,
            specification,
            coefficients,
            agent_set,
            agents_index=None,
            **kwargs):
        if agents_index is None:
            agents_index = arange(agent_set.size())
        large_areas = agent_set.get_attribute(self.large_area_id_name)
        self.choice_set.compute_variables([
            "washtenaw.%s.%s" %
            (self.choice_set.get_dataset_name(), self.large_area_id_name)
        ],
                                          dataset_pool=self.dataset_pool)
        valid_large_area = where(large_areas[agents_index] > 0)[0]
        if valid_large_area.size > 0:
            unique_large_areas = unique(
                large_areas[agents_index][valid_large_area])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[agents_index[valid_large_area]] = True
            for area in unique_large_areas:
                new_index = where(logical_and(cond_array,
                                              large_areas == area))[0]
                self.filter = "%s.%s == %s" % (
                    self.choice_set.get_dataset_name(),
                    self.large_area_id_name, area)
                logger.log_status("HLCM for area %s" % area)
                HouseholdLocationChoiceModel.run(self,
                                                 specification,
                                                 coefficients,
                                                 agent_set,
                                                 agents_index=new_index,
                                                 **kwargs)

        agent_index_no_large_area = agents_index[
            large_areas[agents_index] <= 0]
        if agent_index_no_large_area.size > 0:  # run the HLCM for households that don't have assigned large_area
            self.filter = None
            logger.log_status("HLCM for households with no area assigned")
            choices = HouseholdLocationChoiceModel.run(
                self,
                specification,
                coefficients,
                agent_set,
                agents_index=agent_index_no_large_area,
                **kwargs)
            where_valid_choice = where(choices > 0)[0]
            choices_index = self.choice_set.get_id_index(
                choices[where_valid_choice])
            chosen_large_areas = self.choice_set.get_attribute_by_index(
                self.large_area_id_name, choices_index)
            agent_set.modify_attribute(
                name=self.large_area_id_name,
                data=chosen_large_areas,
                index=agent_index_no_large_area[where_valid_choice])
Code example #45
    def run(self, developments, year=0, landuse_types=None, units=None, resources=None):
#        landuse_types = ['residential', 'commercial', 'industrial', 'governmental']
#        units=['residential_units', 'commercial_sqft','industrial_sqft','governmental_sqft']
        
        if not isinstance(resources, Resources):
            resources = Resources()

        grid_ids_for_project = array([], dtype=int32)
        if developments is not None:
            grid_ids_for_project = developments.get_attribute("grid_id")
        grid_ids_for_project = unique(grid_ids_for_project)
        grid_ids_for_project = grid_ids_for_project[where(grid_ids_for_project>0)]
        
        if len(grid_ids_for_project)==0: return
        sizes = grid_ids_for_project.size
        result_data = {"grid_id": grid_ids_for_project, 
                       "scheduled_year":(year*ones((sizes,), dtype=int16)),
                       "development_type_id": zeros((sizes,),dtype=int16),
                   }
        
        for unit in units:
            result_data[unit] = zeros((sizes,), dtype=int32)
        for project_type in landuse_types:
            result_data["%s_improvement_value" % project_type] = zeros((sizes,), dtype=int32)
            
        grid_idx=0
        for grid_id in grid_ids_for_project:
            w = where(developments.get_attribute('grid_id') == grid_id)[0]
            if w.size>0:
                result_data["development_type_id"][grid_idx] = \
                    developments.get_attribute_by_index("development_type_id", w[0])
                for unit_variable in units:
                    result_data[unit_variable][grid_idx] = \
                        developments.get_attribute_by_index(unit_variable , w).sum()
                    result_data["%s_improvement_value" % unit_variable.split('_')[0]][grid_idx] = \
                        developments.get_attribute_by_index("improvement_value", w).sum()
            grid_idx += 1
            
        storage = StorageFactory().get_storage('dict_storage')

        eventset_table_name = 'eventset'        
        storage.write_table(
                table_name=eventset_table_name,
                table_data=result_data,
            )
        
        eventset = DevelopmentEventDataset(
            in_storage = storage,
            in_table_name = eventset_table_name, 
            id_name=['grid_id', 'scheduled_year'],
            )
            
        self.debug.print_debug('Number of events: ' + str(grid_ids_for_project.size), 3)
        
        return eventset
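Example #45 aggregates per grid_id with an explicit where loop; the same labelled sum can be done in one call with scipy.ndimage.sum, which example #54 below uses. A toy comparison with made-up numbers:

import numpy as np
from scipy.ndimage import sum as ndimage_sum

grid_id = np.array([3, 1, 3, 2])
units   = np.array([10, 5, 7, 2])
ids     = np.unique(grid_id)
totals  = np.array(ndimage_sum(units, labels=grid_id, index=ids))
print(totals)   # [ 5.  2. 17.]  -> totals for grid_ids 1, 2, 3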
Code example #46
 def compute(self, dataset_pool):
     bldg = self.get_dataset()
     bt = dataset_pool.get_dataset("building_type")
     results = zeros(bldg.size(), dtype=self._return_type)
     for unit_name in unique(bt.get_attribute('unit_name')):
         self.add_and_solve_dependencies(
             "sanfrancisco.building.%s" % unit_name, dataset_pool)
         is_of_this_unit_name = bldg.get_attribute('unit_name') == unit_name
         results[is_of_this_unit_name] = bldg.get_attribute(
             unit_name)[is_of_this_unit_name].astype(self._return_type)
     return results
Code example #47
 def get_active_choice_set(self, submodel=None):
     """Return choice set as seen by agents in the model.
     Works only for the ChoiceModel class.
     """
     if submodel is None:
         choices = self.get_choice_set_index()
     else:
         choices = self.get_choice_set_index_for_submodel(submodel)
     choices = unique(choices.flatten())
     ds = self.get_choice_set()
     return DatasetSubset(ds, choices)
Code example #48
 def compute(self,  dataset_pool):
     buildings = self.get_dataset()
     results = zeros(buildings.size(), dtype=self._return_type)        
     for unit_name in unique(buildings["unit_name"]):
         #should not count parcel_sqft
         if unit_name == "parcel_sqft":continue
         vname = "occupied_" + unit_name
         self.add_and_solve_dependencies(["urbansim_parcel.building." + vname], dataset_pool)
         matched = buildings["unit_name"] == unit_name
         results[matched] = buildings[vname][matched].astype(self._return_type)
     return results
Code example #49
    def run(self,
            location_set,
            agent_set,
            agents_index=None,
            data_objects=None,
            **kwargs):
        if agents_index is None:
            agents_index = arange(agent_set.size())
        large_areas = agent_set.get_attribute(self.large_area_id_name)
        location_large_area = location_set.compute_variables(
            [
                "washtenaw.%s.%s" %
                (location_set.get_dataset_name(), self.large_area_id_name)
            ],
            dataset_pool=self.dataset_pool)
        valid_large_area = where(large_areas[agents_index] > 0)[0]
        if valid_large_area.size > 0:
            unique_large_areas = unique(
                large_areas[agents_index][valid_large_area])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[agents_index[valid_large_area]] = True
            for area in unique_large_areas:
                new_index = where(logical_and(cond_array,
                                              large_areas == area))[0]
                self.filter = "%s.%s == %s" % (location_set.get_dataset_name(),
                                               self.large_area_id_name, area)
                logger.log_status("%s for area %s" %
                                  (self.model_short_name, area))
                ScalingJobsModel.run(self,
                                     location_set,
                                     agent_set,
                                     agents_index=new_index,
                                     **kwargs)

        no_large_area = where(large_areas[agents_index] <= 0)[0]
        if no_large_area.size > 0:  # run the model for jobs that don't have assigned large_area
            self.filter = None
            logger.log_status("%s for jobs with no area assigned" %
                              self.model_short_name)
            choices = ScalingJobsModel.run(
                self,
                location_set,
                agent_set,
                agents_index=agents_index[no_large_area],
                **kwargs)
            where_valid_choice = where(choices > 0)[0]
            choices_index = location_set.get_id_index(
                choices[where_valid_choice])
            chosen_large_areas = location_set.get_attribute_by_index(
                self.large_area_id_name, choices_index)
            agent_set.modify_attribute(name=self.large_area_id_name,
                                       data=chosen_large_areas,
                                       index=no_large_area[where_valid_choice])
Code example #50
File: faz_dataset.py  Project: urban-ai/VIBe2UrbanSim
 def create_same_age_table(self, agents, age_name="age_of_head"):
     max_age = 120
     n = self.size()
     hh_age = agents.get_attribute(age_name).astype(int16)
     ages = unique(hh_age)
     self.same_age_table = zeros((ages.size, n), dtype=int32)
     self.same_age_table_mapping = {}
     faz_ids = agents.get_attribute(self.get_id_name()[0])
     for iage in arange(ages.size):
         is_age = hh_age == ages[iage]
         self.same_age_table[iage, :] = self.sum_over_ids(
             faz_ids, is_age.astype(int8))
         self.same_age_table_mapping[ages[iage]] = iage
Code example #51
 def compute(self, dataset_pool):
     buildings = self.get_dataset()
     results = zeros(buildings.size(), dtype=self._return_type)
     ##TODO: these dummy values are used when the businesses and households tables aren't ready
     for unit_name in unique(
             dataset_pool.get_dataset("building_type").get_attribute(
                 "unit_name")):
         #should not count parcel_sqft
         if unit_name == "parcel_sqft": continue
         matched = buildings.get_attribute("unit_name") == unit_name
         results[matched] = buildings.get_attribute(
             unit_name)[matched].astype(self._return_type)
     return results
Code example #52
    def run(self, year, household_set, control_totals, characteristics, resources=None):
#        self.person_set = person_set
        self._do_initialize_for_run(household_set)
        control_totals.get_attribute("total_number_of_households") # to make sure they are loaded
        self.characteristics = characteristics
        self.all_categories = self.characteristics.get_attribute("characteristic")
        self.all_categories = array(map(lambda x: x.lower(), self.all_categories))
        self.scaled_characteristic_names = get_distinct_names(self.all_categories).tolist()
        self.marginal_characteristic_names = copy(control_totals.get_id_name())
        index_year = self.marginal_characteristic_names.index("year")
        self.marginal_characteristic_names.remove("year")
        self.marginal_characteristic_names.remove(self.subarea_id_name)
        region_ids = control_totals.get_attribute(self.subarea_id_name)
        households_region_ids = household_set.compute_one_variable_with_unknown_package(variable_name="%s" % (self.subarea_id_name), dataset_pool=self.dataset_pool)

        unique_regions = unique(region_ids)
        is_year = control_totals.get_attribute("year")==year
        all_households_index = arange(household_set.size())
        for area in unique_regions:
            idx = where(logical_and(is_year, region_ids == area))[0]
            self.control_totals_for_this_year = DatasetSubset(control_totals, idx)
            households_index = where(households_region_ids == area)[0]
            if households_index.size == 0:
                continue
            households_for_this_area = DatasetSubset(household_set, households_index)
            logger.log_status("HTM for area %s (currently %s households)" % (area, households_for_this_area.size()))
            last_remove_idx = self.remove_households.size
            last_new_hhs_idx = self.mapping_existing_hhs_to_new_hhs.size
            self._do_run_for_this_year(households_for_this_area)
            add_hhs_size = self.new_households[self.location_id_name].size-self.new_households[self.subarea_id_name].size+self.mapping_existing_hhs_to_new_hhs.size-last_new_hhs_idx
            remove_hhs_size = self.remove_households.size-last_remove_idx
            logger.log_status("add %s, remove %s, total %s" % (add_hhs_size, remove_hhs_size,
                                                               households_for_this_area.size()+add_hhs_size-remove_hhs_size
                                                               ))
            self.new_households[self.subarea_id_name] = concatenate((self.new_households[self.subarea_id_name],
                                            array((self.new_households[self.location_id_name].size-self.new_households[self.subarea_id_name].size)*[area], dtype="int32")))
            # transform indices of removing households into indices of the whole dataset
            self.remove_households[last_remove_idx:self.remove_households.size] = all_households_index[households_index[self.remove_households[last_remove_idx:self.remove_households.size]]]
            # do the same for households to be duplicated
            self.mapping_existing_hhs_to_new_hhs[last_new_hhs_idx:self.mapping_existing_hhs_to_new_hhs.size] = all_households_index[households_index[self.mapping_existing_hhs_to_new_hhs[last_new_hhs_idx:self.mapping_existing_hhs_to_new_hhs.size]]]
            
        self._update_household_set(household_set)
        idx_new_households = arange(household_set.size()-self.new_households[self.subarea_id_name].size, household_set.size())
        #household_region_ids = household_set.compute_variables("urbansim_parcel.household.%s" % self.subarea_id_name)
        #household_region_ids[idx_new_households] = self.new_households[self.subarea_id_name]
        region_ids = household_set.get_attribute(self.subarea_id_name).copy()
        household_set.delete_one_attribute(self.subarea_id_name)
        household_set.add_attribute(region_ids, self.subarea_id_name, metadata=AttributeType.PRIMARY)
        # return an index of new households
        return idx_new_households
Code example #53
    def __init__(self, id_values=None, gridcellset=None, **kwargs):

        UrbansimDataset.__init__(self, **kwargs)

        if id_values is not None:
            self._add_id_attribute(data=id_values, name=self.get_id_name()[0])
        elif gridcellset is not None:
            if (self.get_id_name()[0] not in gridcellset.get_attribute_names()) and \
                (self.get_id_name()[0] not in gridcellset.get_primary_attribute_names()):
                raise StandardError("Given gridcellset does not contain " + self.get_id_name()[0])
            large_area_ids = gridcellset.get_attribute(self.get_id_name()[0])
            idx = large_area_ids >=0
            unique_ids = unique(large_area_ids[idx])
            self._add_id_attribute(data=unique_ids, name=self.get_id_name()[0])
        self._create_id_mapping_array()
Code example #54
 def _unroll_field(self, attr_name, gridcells, buildings, bldgs_idx, dataset_pool=None):
     """Unroll the values for this field.
     """
     grid_ids = buildings.get_attribute('grid_id')[bldgs_idx]
     unique_grid_ids = unique(grid_ids)
     grid_idx = gridcells.get_id_index(unique_grid_ids)
     attr_values = gridcells.get_attribute_by_index(attr_name, grid_idx)
     buildings.compute_variables("urbansim.%s.%s" % (buildings.get_dataset_name(), attr_name), 
                                 dataset_pool=dataset_pool)
     # sum the amount over the same gridcells
     change_amounts = array(ndimage_sum(buildings.get_attribute(attr_name)[bldgs_idx], labels=grid_ids,
                                         index=unique_grid_ids))
     attr_values = clip(attr_values - change_amounts,
                        0, attr_values.max())
     gridcells.set_values_of_one_attribute(attr_name, attr_values, index=grid_idx)
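The subtract-and-clip step of _unroll_field in isolation, with toy numbers: per-gridcell change amounts are summed by label, subtracted, and clipped so no attribute goes negative.

import numpy as np
from scipy.ndimage import sum as ndimage_sum

grid_ids = np.array([1, 1, 2])                 # gridcell of each building
change = np.array(ndimage_sum(np.array([40, 30, 10]),
                              labels=grid_ids, index=np.unique(grid_ids)))
attr = np.array([100.0, 5.0])                  # current gridcell totals
attr = np.clip(attr - change, 0, attr.max())
print(attr)   # [30.  0.]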
Code example #55
    def compute(self, dataset_pool):
        interaction_dataset = self.get_dataset()
        choice_set_name = interaction_dataset.get_dataset(2).get_dataset_name()
        agent_set = interaction_dataset.get_dataset(1)
        ## TODO: this is a work-around to the problem that a 1d array indexed by None becomes a 2d array (numpy 1.0.4 - 1.1.1 at least)
        agents_index = interaction_dataset.get_index(1)
        if agents_index is None:
            agents_index = arange(agent_set.size())
        agent_attr = agent_set.get_attribute_by_index(
            self.agent_attribute_name, agents_index)
        if not hasattr(self, 'agent_attribute_unique_values'
                       ) or self.agent_attribute_unique_values is None or len(
                           self.agent_attribute_unique_values) == 0:
            self.agent_attribute_unique_values = unique(agent_attr)

        agents_of_attribute_by_geography = [
            eval(
                self.expression_agents_of_attribute_by_geography % {
                    'agent_attribute_name': self.agent_attribute_name,
                    'agent_attribute_value': i,
                    'geography_dataset_name': self.geography_dataset_name,
                    'choice_set_name': choice_set_name
                }) for i in self.agent_attribute_unique_values
        ]
        self.add_and_solve_dependencies(agents_of_attribute_by_geography,
                                        dataset_pool)

        agents_of_attribute = [
            eval(
                self.expression_agents_of_attribute % {
                    'agent_attribute_name': self.agent_attribute_name,
                    'agent_attribute_value': i,
                    'geography_dataset_name': self.geography_dataset_name,
                    'choice_set_name': choice_set_name
                }) for i in self.agent_attribute_unique_values
        ]
        self.add_and_solve_dependencies(agents_of_attribute, dataset_pool)

        results = zeros(interaction_dataset.get_2d_index().shape,
                        dtype=self._return_type)
        for i in self.agent_attribute_unique_values:
            index_agents_of_this_value = where(agent_attr == i)[0]
            results[
                index_agents_of_this_value, :] = interaction_dataset.get_2d_dataset_attribute(
                    "agents_of_attribute_%s" %
                    i)[index_agents_of_this_value, :]

        return results
Code example #56
File: faz_dataset.py  Project: urban-ai/VIBe2UrbanSim
 def create_same_job_sector_table(self,
                                  agents,
                                  sector_field_name="sector_id"):
     max_sector = agents.get_attribute(sector_field_name).max()
     n = self.size()
     sectors = agents.get_attribute(sector_field_name).astype(int16)
     unique_sectors = unique(sectors)
     self.same_job_sector_table = zeros((unique_sectors.size, n),
                                        dtype=int32)
     self.same_job_sector_table_mapping = {}
     faz_ids = agents.get_attribute(self.get_id_name()[0])
     for isec in arange(unique_sectors.size):
         is_sector = sectors == unique_sectors[isec]
         self.same_job_sector_table[isec, :] = self.sum_over_ids(
             faz_ids, is_sector.astype(int8))
         self.same_job_sector_table_mapping[unique_sectors[isec]] = isec
Code example #57
    def compute(self, dataset_pool):
        bldg_x_parcel = self.get_dataset()
        bt = dataset_pool.get_dataset("building_type")
        results = zeros(bldg_x_parcel.get_2d_index().shape,
                        dtype=self._return_type)
        for unit_capacity_name in unique(
                bt.get_attribute('unit_capacity_name')):
            self.add_and_solve_dependencies(
                "sanfrancisco.parcel.%s" % unit_capacity_name, dataset_pool)
            is_of_this_unit_capacity_name = bldg_x_parcel.get_attribute(
                'unit_capacity_name') == unit_capacity_name
            results[
                is_of_this_unit_capacity_name] = bldg_x_parcel.get_attribute(
                    unit_capacity_name)[is_of_this_unit_capacity_name]

        return results
Code example #58
    def run(self,
            location_set,
            agent_set,
            agents_index=None,
            data_objects=None,
            **kwargs):
        if agents_index is None:
            agents_index = arange(agent_set.size())
        regions = agent_set.get_attribute(self.subarea_id_name)

        location_region = location_set.compute_one_variable_with_unknown_package(
            variable_name="%s" % (self.subarea_id_name),
            dataset_pool=self.dataset_pool)
        valid_region = where(regions[agents_index] > 0)[0]
        if valid_region.size > 0:
            unique_regions = unique(regions[agents_index][valid_region])
            cond_array = zeros(agent_set.size(), dtype="bool8")
            cond_array[agents_index[valid_region]] = True
            for area in unique_regions:
                new_index = where(logical_and(cond_array, regions == area))[0]
                self.filter = "%s.%s == %s" % (location_set.get_dataset_name(),
                                               self.subarea_id_name, area)
                logger.log_status("SJM for area %s" % area)
                ScalingJobsModel.run(self,
                                     location_set,
                                     agent_set,
                                     agents_index=new_index,
                                     **kwargs)

        no_region = where(regions[agents_index] <= 0)[0]
        if no_region.size > 0:  # run the model for jobs that don't have assigned region
            self.filter = None
            logger.log_status("SJM for jobs with no area assigned")
            choices = ScalingJobsModel.run(
                self,
                location_set,
                agent_set,
                agents_index=agents_index[no_region],
                **kwargs)
            where_valid_choice = where(choices > 0)[0]
            choices_index = location_set.get_id_index(
                choices[where_valid_choice])
            chosen_regions = location_set.get_attribute_by_index(
                self.subarea_id_name, choices_index)
            agent_set.modify_attribute(name=self.subarea_id_name,
                                       data=chosen_regions,
                                       index=no_region[where_valid_choice])