def __init__(self, proposal_set, sampler="opus_core.samplers.weighted_sampler",
             weight_string=None, run_config=None, estimate_config=None,
             debuglevel=0, dataset_pool=None,
             filter="development_project_proposal.status_id==%s" % DevelopmentProjectProposalDataset.id_tentative,
             choice_attribute_name='is_chosen', **kwargs):
    """Binary choice model over a development-project-proposal set.

    proposal_set          - DevelopmentProjectProposalDataset to choose from.
    sampler               - name of the sampling module (previously ignored; now
                            passed through to the sampling-model constructor).
    weight_string         - expression used for sampling weights; defaults to
                            selecting proposals with status_id == id_selected
                            (previously this argument was silently ignored).
    filter                - expression selecting the proposals considered for choice;
                            defaults to tentative proposals.
    choice_attribute_name - attribute on the proposal set that records the choice.
    """
    # NOTE(review): 9 appears to be the 'selected' status id — confirm against
    # the DevelopmentProjectProposalDataset status constants.
    self.id_selected = 9
    self.proposal_set = proposal_set
    self.filter = filter
    self.choice_attribute_name = copy.copy(choice_attribute_name)
    # Choices are the two alternatives {1, 2} (chosen / not chosen).
    ChoiceModel.__init__(self, [1, 2], choice_attribute_name=choice_attribute_name, **kwargs)
    if weight_string is None:
        weight_string = "development_project_proposal.status_id==%s" % self.id_selected
    DevelopmentProjectProposalSamplingModel.__init__(self, proposal_set,
                          sampler=sampler,
                          weight_string=weight_string,
                          run_config=run_config, estimate_config=estimate_config,
                          debuglevel=debuglevel, dataset_pool=dataset_pool)
def __init__(self, choice_set=None, location_id_name="grid_id", movers_ratio=None, **kwargs):
    """Binary stay/move choice model.

    choice_set       - alternatives; defaults to [0, 1]. The default is created
                       per-instance (was a mutable default argument shared by
                       all instances, a classic Python pitfall).
    location_id_name - name of the agents' location attribute.
    movers_ratio     - optional cap on the fraction of agents allowed to move.
    """
    if choice_set is None:
        choice_set = [0, 1]
    self.location_id_name = location_id_name
    self.movers_ratio = movers_ratio
    ChoiceModel.__init__(self, choice_set, **kwargs)
def __init__(self, location_set, sampler="opus_core.samplers.weighted_sampler",
             utilities="opus_core.linear_utilities",
             probabilities="opus_core.mnl_probabilities",
             choices="opus_core.random_choices",
             interaction_pkg="urbansim.datasets",
             filter=None, submodel_string=None, location_id_string=None,
             run_config=None, estimate_config=None, debuglevel=0, dataset_pool=None, **kwargs):
    """
    Arguments:
        location_set - Dataset of locations to be chosen from.
        sampler - name of sampling module to be used for sampling locations. If it is None, no sampling
                  is performed and all locations are considered for choice.
        utilities - name of utilities module
        probabilities - name of probabilities module
        choices - name of module for computing agent choices
        filter - filter is applied on location weights for sampling (by multiplication). It is either a string
                 specifying an attribute name of the filter, or a 1D/2D array giving the filter directly,
                 or a dictionary specifying filter for each submodel. If it is None, no filter is applied.
        submodel_string - character string specifying what agent attribute determines submodels.
        location_id_string - character string giving the fully qualified name of the agent attribute
                 that specifies the location. Only needed when the attribute is a variable.
                 Use it without the "as" clausel, since the code adds an alias which is the id name
                 of the location set.
        run_config - collection of additional arguments that control a simulation run. It is of class Resources.
        estimate_config - collection of additional arguments that control an estimation run. It is of class Resources.
        debuglevel - debuglevel for the constructor. The level is overwritten by the argument in the run
                 and estimate method.

    An instance of upc_sequence class with components utilities, probabilities and choices is created.
    Also an instance of Sampler class for given sampler procedure is created.
    """
    # Default dataset pool searches the urbansim and opus_core packages.
    self.dataset_pool = self.create_dataset_pool(dataset_pool, ["urbansim", "opus_core"])
    ChoiceModel.__init__(self, choice_set=location_set, utilities=utilities,
                         probabilities=probabilities, choices=choices, sampler=sampler,
                         submodel_string=submodel_string, interaction_pkg=interaction_pkg,
                         run_config=run_config, estimate_config=estimate_config,
                         debuglevel=debuglevel, dataset_pool=dataset_pool, **kwargs)
    self.filter = filter
    self.location_id_string = location_id_string
    if self.location_id_string is not None:
        # Wrap in VariableName so downstream code can ask for its alias.
        self.location_id_string = VariableName(self.location_id_string)
def __init__(self, choice_set, filter=None, choice_attribute_name='work_at_home',
             location_id_name='urbansim_parcel.person.building_id',
             match_number_of_jobs=False, **kwargs):
    """Binary work-at-home choice model over the given job set.

    If match_number_of_jobs is True, the choices are drawn from the probability
    distribution in a way that the final number matches the number of jobs.
    """
    self.filter = filter
    self.job_set = choice_set
    self.location_id_name = location_id_name
    self.choice_attribute_name = choice_attribute_name
    self.match_number_of_jobs = match_number_of_jobs
    # Alternatives are 0 (work out of home) and 1 (work at home).
    ChoiceModel.__init__(self, [0, 1], choice_attribute_name=choice_attribute_name, **kwargs)
def __init__(self, choice_set, nested_structure=None, stratum=None, **kwargs):
    """'nested_structure' is a dictionary with keys being the nest identifiers and each value
    being a list of identifiers of the elemental alternatives belonging to that nest.
    'stratum' is either a string giving the name of variable/expression determining
    the membership of choice's elements to nests. Or, it is an array of the size as choice set
    giving directly the membership of choice's elements to nests.
    Either 'nested_structure' or 'stratum' must be given.
    All arguments of the Choice Model can be used.
    """
    ChoiceModel.__init__(self, choice_set, **kwargs)
    # Order matters: the nest/tree structure must exist before the model
    # interaction is configured.
    self.create_nested_and_tree_structure(nested_structure, stratum, **kwargs)
    self.set_model_interaction(**kwargs)
def __init__(self, choice_set, filter=None, choice_attribute_name='work_at_home',
             location_id_name='urbansim_parcel.person.building_id', **kwargs):
    """Binary work-at-home choice model; 'choice_set' is the set of home-based jobs
    that at-home workers can be assigned to.
    """
    self.filter = filter
    self.job_set = choice_set
    self.location_id_name = location_id_name
    self.choice_attribute_name = choice_attribute_name
    # Alternatives are 0 (work out of home) and 1 (work at home).
    ChoiceModel.__init__(self, [0, 1], choice_attribute_name=choice_attribute_name, **kwargs)
def get_sampling_weights(self, config, **kwargs):
    """Reset the filter index and delegate to ChoiceModel.get_sampling_weights.

    There are cases where filter and weights are mutually dependent (e.g. DPLCM).
    The filter is passed through self.filter_index to apply_filter, which is
    either a boolean array of the same size as self.choice_set or an index
    into self.choice_set.
    """
    self.filter_index = None
    weights = ChoiceModel.get_sampling_weights(self, config, **kwargs)
    return weights
def estimate(self, specification, agent_set_year1, agent_set_year2, agents_index=None,
             procedure="opus_core.bhhh_mnl_estimation", calibrate_constants=False,
             data_objects=None, estimate_config=None, debuglevel=0):
    """Set calibrate_constants to True only if agent_set_year1 is the full dataset. Otherwise,
    the calibration can be done separately by calling the method "calibrate" with the full dataset.
    """
    # Make sure the choice attribute exists on the year-2 set and the submodel
    # attribute on the year-1 set before reading them.
    if self.choice_attribute_name.get_alias() not in agent_set_year2.get_known_attribute_names():
        agent_set_year2.compute_variables([self.choice_attribute_name])
    if self.submodel_string not in agent_set_year1.get_known_attribute_names():
        agent_set_year1.compute_variables([self.submodel_string])
    # Year-2 value of the choice attribute (e.g. land cover type).
    lct_y2 = agent_set_year2.get_attribute(self.choice_attribute_name)
    attributes_switched = False
    if self.submodel_string == self.choice_attribute_name.get_alias():
        # Submodel attribute and choice attribute collide: temporarily move the
        # year-1 values to "<name>_start" and put the year-2 values under the
        # original name so estimation sees the transition.
        new_submodel_string = self.choice_attribute_name.get_alias() + "_start"
        original_submodel_string = self.submodel_string
        agent_set_year1.add_attribute(name=new_submodel_string,
                                      data=agent_set_year1.get_attribute(original_submodel_string).astype(int16))
        agent_set_year1.add_attribute(name=original_submodel_string, data=lct_y2.astype(int16))
        self.submodel_string = new_submodel_string
        attributes_switched = True
    self.specification = specification
    results = ChoiceModel.estimate(self, specification, agent_set_year1, agents_index, procedure,
                                   data_objects, estimate_config, debuglevel=debuglevel)
    if calibrate_constants:
        self.calibrate(agent_set_year1, agent_set_year2, agents_index)
    if attributes_switched:
        # Undo the swap: keep the year-2 values as "<name>_end" and restore the
        # original attribute from the "_start" copy.
        agent_set_year1.add_attribute(name=self.choice_attribute_name.get_alias() + "_end",
                                      data=agent_set_year1.get_attribute(self.choice_attribute_name))
        agent_set_year1.add_attribute(name=original_submodel_string,
                                      data=agent_set_year1.get_attribute(new_submodel_string))
        agent_set_year1.delete_one_attribute(new_submodel_string)
    return self.coefficients, results[1]
def simulate_submodel(self, data, coefficients, submodel):
    """Restrict the per-agent units to this submodel's observations, then delegate
    to ChoiceModel.simulate_submodel.
    """
    units_all = self.run_config.get("agent_units_all", None)
    if units_all is not None:
        self.run_config["agent_units"] = units_all[self.observations_mapping[submodel]]
    return ChoiceModel.simulate_submodel(self, data, coefficients, submodel)
def run_chunk(self, agents_index, agent_set, specification, coefficients):
    """Unplace the chunk's agents, prepare capacity/unit settings in run_config,
    and delegate to ChoiceModel.run_chunk. Returns the chosen location ids.
    """
    # unplaced agents in agents_index
    location_id_name = self.choice_set.get_id_name()[0]
    agent_set.set_values_of_one_attribute(location_id_name,
                                          resize(array([-1]), agents_index.size),
                                          agents_index)
    ## capacity may need to be re-computed for every chunk
    if self.compute_capacity_flag:
        self.capacity = ma.filled(self.determine_capacity(
                capacity_string=self.run_config.get("capacity_string", None),
                agent_set=agent_set, agents_index=agents_index), 0.0)
    if self.capacity is not None:
        logger.log_status("Available capacity: %s units." % self.capacity.sum())
        self.run_config.merge({"capacity": self.capacity})
    if self.run_config.get("agent_units_string", None):
        self.run_config["agent_units_all"] = agent_set.get_attribute_by_index(
                self.run_config["agent_units_string"], agents_index)
    choices = ChoiceModel.run_chunk(self, agents_index, agent_set, specification, coefficients)
    ## this is done in choice_model
    #modify locations
    #agent_set.set_values_of_one_attribute(location_id_name, choices, agents_index)
    # dict.has_key() is Python-2-only; 'in' behaves identically and also works on Python 3.
    if "capacity" in self.run_config:
        del self.run_config["capacity"]
    return choices
def run(self, run_choice_model=True, choose_job_only_in_residence_zone=False, *args, **kwargs):
    """Decide which workers work at home (optionally via the choice model) and
    assign them to home-based jobs, optionally restricted to their residence zone.
    Expects 'agent_set' and 'agents_index' in kwargs.
    """
    agent_set = kwargs['agent_set']
    if run_choice_model:
        choices = ChoiceModel.run(self, *args, **kwargs)
        #prob_work_at_home = self.upc_sequence.probabilities[:, 0]
        agent_set.set_values_of_one_attribute(self.choice_attribute_name, choices,
                                              index=kwargs['agents_index'])
        at_home_worker_index = kwargs['agents_index'][choices==1]
        logger.log_status("%s workers choose to work at home, %s workers chose to work out of home." %
                          (where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 1)[0].size,
                           where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 0)[0].size))
    else:
        # Reuse existing choice attribute: at-home workers without an assigned job.
        at_home_worker_index = where(logical_and(
                agent_set.get_attribute(self.choice_attribute_name) == 1,
                agent_set.get_attribute('job_id') <= 0
                ))[0]
    # Candidate jobs: those passing the filter, or all jobs when no filter is set.
    if self.filter is not None:
        jobs_set_index = where( self.job_set.compute_variables(self.filter) )[0]
    else:
        jobs_set_index = arange( self.job_set.size() )
    logger.log_status("Total: %s workers work at home, (%s workers work out of home), will try to assign %s workers to %s jobs."
                      % (where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
                         where(agent_set.get_attribute(self.choice_attribute_name) == 0)[0].size,
                         at_home_worker_index.size,
                         jobs_set_index.size))
    if not choose_job_only_in_residence_zone:
        assigned_worker_index, assigned_job_index = self._assign_job_to_worker(at_home_worker_index, jobs_set_index)
    else:
        # Zone-constrained assignment: match workers to jobs within each zone.
        agent_set.compute_variables("urbansim_parcel.person.zone_id")
        self.job_set.compute_variables("urbansim_parcel.job.zone_id")
        agent_zone_ids = agent_set.get_attribute_by_index('zone_id', at_home_worker_index)
        job_zone_ids = self.job_set.get_attribute_by_index('zone_id', jobs_set_index)
        unique_zones = unique(job_zone_ids)
        assigned_worker_index = array([], dtype="int32")
        assigned_job_index = array([], dtype="int32")
        for this_zone in unique_zones:
            logger.log_status("zone_id: %s" % this_zone)
            if this_zone <= 0: continue
            at_home_worker_in_this_zone = where(agent_zone_ids == this_zone)[0]
            job_set_in_this_zone = where(job_zone_ids == this_zone)[0]
            assigned_worker_in_this_zone, assigned_job_set_in_this_zone = \
                self._assign_job_to_worker(at_home_worker_in_this_zone, job_set_in_this_zone)
            assigned_worker_index = concatenate((assigned_worker_index,
                                                 at_home_worker_index[assigned_worker_in_this_zone]))
            assigned_job_index = concatenate((assigned_job_index,
                                              jobs_set_index[assigned_job_set_in_this_zone]))
    ## each worker can only be assigned to 1 job
    #assert assigned_worker_index.size == unique(assigned_worker_index).size
    # Write the job ids onto the workers, then propagate the workers' location
    # (e.g. home building) back onto the assigned jobs.
    agent_set.set_values_of_one_attribute(self.job_set.get_id_name()[0],
                                          self.job_set.get_id_attribute()[assigned_job_index],
                                          index=assigned_worker_index)
    agent_set.compute_variables([self.location_id_name], dataset_pool=self.dataset_pool)
    self.job_set.modify_attribute(name=VariableName(self.location_id_name).get_alias(),
                                  data=agent_set.get_attribute_by_index(self.location_id_name, assigned_worker_index),
                                  index=assigned_job_index)
def run(self, specification, coefficients, agent_set, agents_index=None, **kwargs):
    """Run the binary logit and return the indices (into agent_set) of agents
    selected to move, capped by movers_ratio if set, plus all unplaced agents.
    """
    choices = ChoiceModel.run(self, specification, coefficients, agent_set,
                              agents_index=agents_index, **kwargs)
    if agents_index is None:
        agents_index = arange(agent_set.size())
    movers_indices = agents_index[where(choices > 0)]
    if self.movers_ratio is not None:
        # rint returns a float; sample_noreplace needs an integer sample size,
        # so cast explicitly.
        n = int(rint(self.movers_ratio * agents_index.size))
        if n < movers_indices.size:
            movers_indices = sample_noreplace(movers_indices, n)
    # add unplaced agents
    unplaced_agents = agents_index[agent_set.get_attribute_by_index(
            self.location_id_name, agents_index) <= 0]
    logger.log_status("%s agents selected by the logit model; %s agents without %s." %
                      (movers_indices.size, unplaced_agents.size, self.location_id_name))
    movers_indices = unique(concatenate((movers_indices, unplaced_agents)))
    logger.log_status("Number of movers: " + str(movers_indices.size))
    return movers_indices
def run_chunk(self, agents_index, agent_set, specification, coefficients):
    """Unplace the chunk's agents, prepare capacity/unit settings in run_config,
    and delegate to ChoiceModel.run_chunk. Returns the chosen location ids.
    """
    # unplaced agents in agents_index
    location_id_name = self.choice_set.get_id_name()[0]
    agent_set.set_values_of_one_attribute(location_id_name,
                                          resize(array([-1]), agents_index.size),
                                          agents_index)
    ## capacity may need to be re-computed for every chunk
    if self.compute_capacity_flag:
        self.capacity = ma.filled(self.determine_capacity(
                capacity_string=self.run_config.get("capacity_string", None),
                agent_set=agent_set, agents_index=agents_index), 0.0)
    if self.capacity is not None:
        logger.log_status("Available capacity: %s units." % self.capacity.sum())
        self.run_config.merge({"capacity": self.capacity})
    if self.run_config.get("agent_units_string", None):
        self.run_config["agent_units_all"] = agent_set.get_attribute_by_index(
                self.run_config["agent_units_string"], agents_index)
    choices = ChoiceModel.run_chunk(self, agents_index, agent_set, specification, coefficients)
    ## this is done in choice_model
    #modify locations
    #agent_set.set_values_of_one_attribute(location_id_name, choices, agents_index)
    # dict.has_key() is Python-2-only; 'in' behaves identically and also works on Python 3.
    if "capacity" in self.run_config:
        del self.run_config["capacity"]
    return choices
def estimate(self, specification, agent_set, agents_index=None, procedure=None,
             data_objects=None, estimate_config=None, debuglevel=0):
    """ Computes capacity if required and calls the estimate method of ChoiceModel.
    See ChoiceModel.estimate for details on arguments.
    """
    # Use 'is None' rather than '== None': agents_index may be a numpy array,
    # for which '== None' compares elementwise and is ambiguous in an 'if'.
    if agents_index is None:
        agents_index = arange(agent_set.size())
    if agents_index.size <= 0:
        logger.log_status("Nothing to be done.")
        return (None, None)
    if estimate_config is None:
        estimate_config = Resources()
    self.estimate_config = estimate_config.merge_with_defaults(self.estimate_config)
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    if self.location_id_string is not None:
        agent_set.compute_variables(self.location_id_string, dataset_pool=self.dataset_pool)
    capacity_for_estimation = None
    if self.estimate_config.get("compute_capacity_flag", False):
        capacity_string_for_estimation = self.estimate_config.get("capacity_string", None)
        capacity_for_estimation = self.determine_capacity(
                capacity_string=capacity_string_for_estimation,
                agent_set=agent_set, agents_index=agents_index)
    # capacity stays None when the flag is off.
    self.estimate_config.merge({"capacity": capacity_for_estimation})
    return ChoiceModel.estimate(self, specification, agent_set, agents_index, procedure,
                                estimate_config=self.estimate_config, debuglevel=debuglevel)
def __init__(self, choice_set, utilities="opus_core.linear_utilities",
             probabilities="opus_core.mnl_probabilities",
             choices="opus_core.random_choices",
             interaction_pkg="biocomplexity.datasets",
             submodel_string="lct", choice_attribute_name="lct",
             run_config=None, estimate_config=None, debuglevel=0):
    """Land-cover-type choice model: a ChoiceModel configured with 'lct' as both
    the submodel attribute and the choice attribute.
    """
    attr_name = VariableName(choice_attribute_name)
    self.choice_attribute_name = attr_name
    ChoiceModel.__init__(self,
                         choice_set=choice_set,
                         utilities=utilities,
                         probabilities=probabilities,
                         choices=choices,
                         submodel_string=submodel_string,
                         interaction_pkg=interaction_pkg,
                         choice_attribute_name=attr_name.get_alias(),
                         run_config=run_config,
                         estimate_config=estimate_config,
                         debuglevel=debuglevel)
def __init__(self, location_set, sampler="opus_core.samplers.weighted_sampler",
             utilities="opus_core.linear_utilities",
             probabilities="opus_core.mnl_probabilities",
             choices="opus_core.random_choices",
             interaction_pkg="urbansim.datasets",
             filter=None, submodel_string=None, location_id_string = None,
             run_config=None, estimate_config=None, debuglevel=0, dataset_pool=None, **kwargs):
    """
    Arguments:
        location_set - Dataset of locations to be chosen from.
        sampler - name of sampling module to be used for sampling locations. If it is None, no sampling
                  is performed and all locations are considered for choice.
        utilities - name of utilities module
        probabilities - name of probabilities module
        choices - name of module for computing agent choices
        filter - filter is applied on location weights for sampling (by multiplication). It is either a string
                 specifying an attribute name of the filter, or a 1D/2D array giving the filter directly,
                 or a dictionary specifying filter for each submodel. If it is None, no filter is applied.
        submodel_string - character string specifying what agent attribute determines submodels.
        location_id_string - character string giving the fully qualified name of the agent attribute
                 that specifies the location. Only needed when the attribute is a variable.
                 Use it without the "as" clausel, since the code adds an alias which is the id name
                 of the location set.
        run_config - collection of additional arguments that control a simulation run. It is of class Resources.
        estimate_config - collection of additional arguments that control an estimation run. It is of class Resources.
        debuglevel - debuglevel for the constructor. The level is overwritten by the argument in the run
                 and estimate method.

    An instance of upc_sequence class with components utilities, probabilities and choices is created.
    Also an instance of Sampler class for given sampler procedure is created.
    """
    # Default dataset pool searches the urbansim and opus_core packages.
    self.dataset_pool = self.create_dataset_pool(dataset_pool, ["urbansim", "opus_core"])
    ChoiceModel.__init__(self, choice_set=location_set, utilities=utilities,
                         probabilities=probabilities, choices=choices, sampler=sampler,
                         submodel_string=submodel_string, interaction_pkg=interaction_pkg,
                         run_config=run_config, estimate_config=estimate_config,
                         debuglevel=debuglevel, dataset_pool=dataset_pool, **kwargs)
    self.filter = filter
    self.location_id_string = location_id_string
    if self.location_id_string is not None:
        # Wrap in VariableName so downstream code can ask for its alias.
        self.location_id_string = VariableName(self.location_id_string)
def run(self, zones, run_choice_model=True, choose_job_only_in_residence_zone=True, **kwargs):
    """Zone-by-zone work-at-home model: for each zone, optionally run the choice
    model, draw at-home workers to match the number of home-based jobs in that
    zone, and assign workers to those jobs. Expects 'agent_set' (and optionally
    'agents_index') in kwargs.
    """
    agent_set = kwargs['agent_set']
    agents_index = kwargs.get('agents_index', None)
    if agents_index is None:
        agents_index = arange(agent_set.size())
    # Boolean mask over the whole agent set marking the agents considered here.
    cond_array = zeros(agent_set.size(), dtype="bool8")
    cond_array[agents_index] = True
    zone_ids = zones.get_id_attribute()
    agents_zones = agent_set.compute_variables(
            ['urbansim_parcel.%s.%s' % (agent_set.get_dataset_name(), zones.get_id_name()[0])],
            dataset_pool=self.dataset_pool)
    # Candidate jobs: those passing the filter, or all jobs when no filter is set.
    if self.filter is not None:
        jobs_set_index = where( self.job_set.compute_variables(self.filter) )[0]
    else:
        jobs_set_index = arange( self.job_set.size() )
    #self.job_set.compute_variables("urbansim_parcel.job.zone_id")
    agent_set.compute_variables("urbansim_parcel.person.zone_id")
    # remove job links from all workers
    agent_set.set_values_of_one_attribute(self.choice_attribute_name,
                                          -1*ones(agents_index.size, dtype='int32'),
                                          index=agents_index)
    for zone_id in zone_ids:
        new_index = where(logical_and(cond_array, agents_zones == zone_id))[0]
        logger.log_status("%s for zone %s" % (self.model_short_name, zone_id))
        if run_choice_model:
            kwargs['agents_index'] = new_index
            choices = ChoiceModel.run(self, **kwargs)
            # Probability of the 'work at home' alternative (column 1).
            prob_work_at_home = self.upc_sequence.get_probabilities()[:, 1]
            job_set_in_this_zone = jobs_set_index[self.job_set['zone_id'][jobs_set_index] == zone_id]
            number_of_hb_jobs = job_set_in_this_zone.size
            # sample workers for the number of jobs
            draw = probsample_noreplace(kwargs['agents_index'],
                                        min(kwargs['agents_index'].size, number_of_hb_jobs),
                                        prob_work_at_home)
            agent_set.set_values_of_one_attribute(self.choice_attribute_name,
                                                  ones(draw.size, dtype=agent_set[self.choice_attribute_name].dtype),
                                                  index=draw)
            logger.log_status("%s workers choose to work at home, %s workers chose to work out of home."
                              % (where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 1)[0].size,
                                 where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 0)[0].size))
        # NOTE(review): job_set_in_this_zone and kwargs['agents_index'] are only
        # (re)defined inside the run_choice_model branch above — confirm that this
        # model is never run with run_choice_model=False.
        at_home_worker_in_this_zone = kwargs['agents_index'][agent_set[self.choice_attribute_name][kwargs['agents_index']] == 1]
        assigned_worker_in_this_zone, assigned_job_set_in_this_zone = \
            self._assign_job_to_worker(at_home_worker_in_this_zone, job_set_in_this_zone)
        # Link workers to jobs, then propagate the workers' location onto the jobs.
        agent_set.set_values_of_one_attribute(self.job_set.get_id_name()[0],
                                              self.job_set.get_id_attribute()[assigned_job_set_in_this_zone],
                                              index=assigned_worker_in_this_zone)
        agent_set.compute_variables([self.location_id_name], dataset_pool=self.dataset_pool)
        self.job_set.modify_attribute(name=VariableName(self.location_id_name).get_alias(),
                                      data=agent_set.get_attribute_by_index(self.location_id_name, assigned_worker_in_this_zone),
                                      index=assigned_job_set_in_this_zone)
    agent_set.flush_dataset()
    self.job_set.flush_dataset()
    logger.log_status("Total: %s workers work at home, %s workers work out of home." %
                      (where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
                       where(agent_set.get_attribute(self.choice_attribute_name) == 0)[0].size))
def run_chunk(self, index, agent_set, *args, **kwargs):
    """Run one ChoiceModel chunk, then store the per-choice probabilities on the
    agents as attributes named "probs_<choice_id>".
    """
    # **kwargs hold EquationSpecification and Coefficients objects
    result = ChoiceModel.run_chunk(self, index, agent_set, *args, **kwargs)
    # store probabilities
    # logger.log_status("choice set size: %s" % self.choice_set.size())
    for ichoice in range(self.choice_set.size()):
        probname = "probs_" + str(self.choice_set.get_id_attribute()[ichoice])
        try: # because some problem (need to investigate it)
            agent_set.modify_attribute(name=probname,
                                       data=self.get_probabilities()[:, ichoice],
                                       index=index)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the underlying issue still needs a look.
            logger.log_warning("Something wrong with probabilities for choice %s"
                               % self.choice_set.get_id_attribute()[ichoice])
    return result
def run(self, specification, coefficients, agent_set, agents_index=None,
        chunk_specification=None, data_objects=None, run_config=None, debuglevel=0):
    """ Run a simulation and return a numpy array of length agents_index, giving agent choices
    (ids of locations).
        'specification' is of type EquationSpecification,
        'coefficients' is of type Coefficients,
        'agent_set' is of type Dataset,
        'agent_index' are indices of individuals in the agent_set for which the model runs.
            If it is None, the whole agent_set is considered.
        'chunk_specification' determines number of chunks in which the simulation is processed.
            Default is to use 300 rows per chunk.
        'data_objects' is a dictionary where each key is the name of an data object
            ('zone', ...) and its value is an object of class Dataset.
        'run_config' is of type Resources, it gives additional arguments for the run.
        'debuglevel' overwrites the constructor 'debuglevel'.
    """
    # 'is None' instead of '== None' (Resources is dict-like; identity test is the
    # correct and unambiguous check).
    if run_config is None:
        run_config = Resources()
    self.run_config = run_config.merge_with_defaults(self.run_config)
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    self.dataset_pool.add_datasets_if_not_included({agent_set.get_dataset_name(): agent_set})
    ## what is the use of compute location_id string in run? it gets new values anyway
    #if self.location_id_string is not None:
    #    location_id = agent_set.compute_variables(self.location_id_string, dataset_pool=self.dataset_pool)
    ## done in choice_model
    #location_id_name = self.choice_set.get_id_name()[0]
    #if (location_id_name not in agent_set.get_known_attribute_names()):
    #    agent_set.add_attribute(name=location_id_name, data=resize(array([-1]), agent_set.size()))
    if self.run_config.get("agent_units_string", None):
        # used when agents take different amount of capacity from the total capacity
        agent_set.compute_variables([self.run_config["agent_units_string"]],
                                    dataset_pool=self.dataset_pool)
    self.compute_capacity_flag = self.run_config.get("compute_capacity_flag", False)
    capacity_string = None
    self.capacity = None
    if self.compute_capacity_flag:
        capacity_string = self.run_config.get("capacity_string", None)
        if capacity_string is None:
            # Parenthesized raise works on both Python 2 and 3 (the old
            # 'raise KeyError, "..."' form is Python-2-only syntax).
            raise KeyError("Entry 'capacity_string' has to be specified in 'run_config' if 'compute_capacity_flag' is True")
    ## if weights is None, use capacity for weights
    if self.run_config.get("weights_for_simulation_string", None) is None and capacity_string is not None:
        self.run_config.merge({"weights_for_simulation_string": capacity_string})
    return ChoiceModel.run(self, specification, coefficients, agent_set,
                           agents_index=agents_index,
                           chunk_specification=chunk_specification,
                           run_config=self.run_config, debuglevel=debuglevel)
def run(self, specification, coefficients, agent_set, agents_index=None, **kwargs):
    """Initialize the 'workers' attribute for households that have workers == -1,
    then repair predictions: cap workers at the number of work-eligible persons
    and resolve the '3+ workers' category into an exact count using empirical
    probabilities (or a uniform draw for 7+ eligible persons).
    """
    # Households whose workers attribute is uninitialized (-1).
    hh_to_init_workers = agent_set.compute_variables("_hh_to_init = ((household.workers)==-1)")
    idx_to_init = where(hh_to_init_workers)[0]
    if idx_to_init.size > 0:
        results = ChoiceModel.run(self, specification, coefficients, agent_set,
                                  agents_index=idx_to_init, **kwargs)
        agent_set.modify_attribute('workers', results, idx_to_init)
    #Ensure that predicted workers does not exceed # persons who are eligible to work
    workeligible = agent_set.compute_variables('_work_eligible = household.aggregate(person.age>15)')
    agent_set.add_attribute(name='work_eligible',
                            data=agent_set.compute_variables('household.aggregate(person.age>15)'))
    workers_exceeds_eligible = agent_set.compute_variables("_overpredict_workers = ((household.workers) > _work_eligible)")
    idx_excess_workers = where(logical_and(workers_exceeds_eligible,hh_to_init_workers))[0]
    if idx_excess_workers.size > 0:
        # Clamp over-predicted households down to their eligible-worker count.
        agent_set.modify_attribute('workers', workeligible[idx_excess_workers], idx_excess_workers)
    #When a household with 4+ eligible workers is assigned "3+ workers", a more specific number of workers needs to be assigned
    #When household with 4 eligible workers is predicted to have 3+ workers, the household can be assigned 3 workers or 4 workers
    four_eligible_three_predicted = agent_set.compute_variables('_four_eligible_three_predicted = (_work_eligible==4)*((household.workers)==3)')
    idx_four_elig = where(logical_and(four_eligible_three_predicted,hh_to_init_workers))[0]
    if idx_four_elig.size > 0:
        four_numworker_prob = ([.62,.38]) #probabilities from crosstab of base-year workers vs. base-year persons eligible to work
        four_cum_prob = cumsum(four_numworker_prob)
        for hh in idx_four_elig:
            # Inverse-CDF draw: offset by 3 because the categories start at 3 workers.
            r = uniform(0,1)
            agent_set['workers'][hh] = searchsorted(four_cum_prob, r) + 3
    #When household with 5 eligible workers is predicted to have 3+ workers, the household can be assigned 3, 4, or 5 workers
    five_eligible_three_predicted = agent_set.compute_variables('_five_eligible_three_predicted = (_work_eligible==5)*((household.workers)==3)')
    idx_five_elig = where(logical_and(five_eligible_three_predicted,hh_to_init_workers))[0]
    if idx_five_elig.size > 0:
        five_numworker_prob = ([.44,.33,.23])
        five_cum_prob = cumsum(five_numworker_prob)
        for hh in idx_five_elig:
            r = uniform(0,1)
            agent_set['workers'][hh] = searchsorted(five_cum_prob, r) + 3
    #When household with 6 eligible workers is predicted to have 3+ workers, the household can be assigned 3, 4, 5, or 6 workers
    six_eligible_three_predicted = agent_set.compute_variables('_six_eligible_three_predicted = (_work_eligible==6)*((household.workers)==3)')
    idx_six_elig = where(logical_and(six_eligible_three_predicted,hh_to_init_workers))[0]
    if idx_six_elig.size > 0:
        six_numworker_prob = ([.32,.23,.24,.21])
        six_cum_prob = cumsum(six_numworker_prob)
        for hh in idx_six_elig:
            r = uniform(0,1)
            agent_set['workers'][hh] = searchsorted(six_cum_prob, r) + 3
    #When household with 7+ eligible workers is predicted to have 3+ workers, the household is randomly asssigned a number of workers no less than 3 and no greater than the number of persons eligible to work
    many_eligible_three_predicted = agent_set.compute_variables('_many_eligible_three_predicted = (_work_eligible>6)*((household.workers)==3)')
    idx_many_elig = where(logical_and(many_eligible_three_predicted,hh_to_init_workers))[0]
    if idx_many_elig.size > 0:
        for hh in idx_many_elig:
            # randint's upper bound is exclusive, hence +1 to include all eligible persons.
            agent_set['workers'][hh] = randint(3, ((agent_set['work_eligible'][hh])+1))
    # Clean up the temporary attributes added above.
    if 'numworker_id' in agent_set.get_primary_attribute_names():
        agent_set.delete_one_attribute('numworker_id')
    agent_set.delete_one_attribute('work_eligible')
def run(self, specification, coefficients, agent_set, agents_index=None, **kwargs):
    """Run the binary logit and return the indices (into agent_set) of agents
    selected to move, capped by movers_ratio if set, plus all unplaced agents.
    """
    choices = ChoiceModel.run(self, specification, coefficients, agent_set,
                              agents_index=agents_index, **kwargs)
    if agents_index is None:
        agents_index = arange(agent_set.size())
    movers_indices = agents_index[where(choices > 0)]
    if self.movers_ratio is not None:
        # rint returns a float; sample_noreplace needs an integer sample size,
        # so cast explicitly.
        n = int(rint(self.movers_ratio * agents_index.size))
        if n < movers_indices.size:
            movers_indices = sample_noreplace(movers_indices, n)
    # add unplaced agents
    unplaced_agents = agents_index[agent_set.get_attribute_by_index(
            self.location_id_name, agents_index) <= 0]
    logger.log_status("%s agents selected by the logit model; %s agents without %s." %
                      (movers_indices.size, unplaced_agents.size, self.location_id_name))
    movers_indices = unique(concatenate((movers_indices, unplaced_agents)))
    logger.log_status("Number of movers: " + str(movers_indices.size))
    return movers_indices
def run(self, agents_index=None, n=500, *args, **kwargs):
    """Run the choice model over the proposal set (restricted by self.filter when
    set) and record the result in the configured choice attribute.
    """
    agent_set = self.proposal_set
    if self.filter is not None:
        agents_index = where( self.proposal_set.compute_variables(self.filter) )[0]
    choices = ChoiceModel.run(self, agent_set=agent_set, agents_index=agents_index, *args, **kwargs)
    # Use the configured attribute rather than the hard-coded 'is_chosen' literal,
    # so a caller-supplied choice_attribute_name is respected (it is stored in
    # __init__ as self.choice_attribute_name).
    self.proposal_set.set_values_of_one_attribute(self.choice_attribute_name, choices,
                                                  index=agents_index)
def estimate(self, specification, agent_set, agents_index=None, procedure=None,
             data_objects=None, estimate_config=None, debuglevel=0):
    """ Computes capacity if required and calls the estimate method of ChoiceModel.
    See ChoiceModel.estimate for details on arguments.
    """
    # Use 'is None' rather than '== None': agents_index may be a numpy array,
    # for which '== None' compares elementwise and is ambiguous in an 'if'.
    if agents_index is None:
        agents_index = arange(agent_set.size())
    if agents_index.size <= 0:
        logger.log_status("Nothing to be done.")
        return (None, None)
    if estimate_config is None:
        estimate_config = Resources()
    self.estimate_config = estimate_config.merge_with_defaults(self.estimate_config)
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    if self.location_id_string is not None:
        agent_set.compute_variables(self.location_id_string, dataset_pool=self.dataset_pool)
    capacity_for_estimation = None
    if self.estimate_config.get("compute_capacity_flag", False):
        capacity_string_for_estimation = self.estimate_config.get("capacity_string", None)
        capacity_for_estimation = self.determine_capacity(
                capacity_string=capacity_string_for_estimation,
                agent_set=agent_set, agents_index=agents_index)
    # capacity stays None when the flag is off.
    self.estimate_config.merge({"capacity": capacity_for_estimation})
    return ChoiceModel.estimate(self, specification, agent_set, agents_index, procedure,
                                estimate_config=self.estimate_config, debuglevel=debuglevel)
def setUp(self):
    # Synthetic mode-choice fixture: 21 persons choosing between car (1) and bus (2),
    # with observed travel times for each mode.
    # create a dataset
    self.dataset = Dataset(data={
            "car_time": array([
                52.9, 4.10, 4.10, 56.20, 51.80, 0.20, 27.60, 89.90, 41.50, 95.00,
                99.10, 18.50, 82.00, 8.60, 22.50, 51.40, 81.00, 51.00, 62.20, 95.10, 41.60
            ]),
            "bus_time": array([
                4.4, 28.50, 86.90, 31.60, 20.20, 91.20, 79.70, 2.20, 24.50, 43.50,
                8.40, 84.00, 38.00, 1.60, 74.10, 83.80, 19.20, 85.00, 90.10, 22.20, 91.50
            ]),
            "choice": array([
                2, 2, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1
            ]),
            "id": arange(21) + 1
        }, id_name="id", dataset_name="person")
    # The choice set: two transport alternatives.
    choices = Dataset(data={
            "choice": array([1, 2]),
            "names": array(["car", "bus"])
        }, id_name="choice", dataset_name="transport")
    self.choicemodel = ChoiceModel(
        choice_set=choices,
        utilities="opus_core.linear_utilities",
        probabilities="opus_core.mnl_probabilities",
        choices="opus_core.random_choices")
    # "beta2" is shared by the time variable in both equations (a generic
    # coefficient on travel time); "beta1" is the alternative-specific constant.
    self.specification = EquationSpecification(
        coefficients=("beta1", "beta2", "beta2"),
        variables=("constant", "biogeme.person_transport.time", "biogeme.person_transport.time"),
        equations=(1, 1, 2))
def run(self, specification, coefficients, agent_set, agents_index=None, chunk_specification=None,
        data_objects=None, run_config=None, debuglevel=0):
    """Run the choice model, making sure a 'probs_<choice_id>' attribute exists on
    agent_set for every alternative, then write the (recoded) land-cover choices back.
    Returns the raw result array from ChoiceModel.run."""
    self.lct_probabilities = {}
    # Ensure one probability attribute per alternative exists before the run.
    choice_ids = self.choice_set.get_id_attribute()
    for ichoice in range(self.choice_set.size()):
        attr_name = "probs_" + str(choice_ids[ichoice])
        if attr_name not in agent_set.get_known_attribute_names():
            agent_set.add_attribute(name=attr_name,
                                    data=zeros(agent_set.size(), dtype=float32))
    result = ChoiceModel.run(self, specification, coefficients, agent_set,
                             agents_index=agents_index,
                             chunk_specification=chunk_specification,
                             data_objects=data_objects,
                             run_config=run_config,
                             debuglevel=debuglevel)
    if agents_index is None:
        agents_index = arange(agent_set.size())
    # Write back only entries with a positive choice (creates recoded lct; shifts index).
    changed_idx = where(result > 0)[0]
    agent_set.modify_attribute(data=result[changed_idx].astype(int8),
                               name=self.choice_attribute_name.get_alias(),
                               index=agents_index[changed_idx])  # <-- lct recode occurs here
    agent_set.compute_variables("biocomplexity.land_cover.lct_recoded")
    return result
def estimate_step(self):
    """Run one estimation step with sampling correction and nest memberships set up,
    then attach the logsum terms to the estimated coefficients."""
    self.set_correct_for_sampling()
    self.init_membership_in_nests()
    estimation_result = ChoiceModel.estimate_step(self)
    self.add_logsum_to_coefficients(estimation_result)
    return estimation_result
def run(self, run_choice_model=True, choose_job_only_in_residence_zone=False, residence_id='zone_id', *args, **kwargs):
    # Decide which workers work at home (optionally via the choice model) and assign
    # them to home-based jobs, optionally restricted to jobs in the worker's residence
    # zone (given by the 'residence_id' attribute).
    # Required kwargs: 'agent_set'; optional: 'agents_index'.
    agent_set = kwargs['agent_set']
    agents_index = kwargs.get('agents_index', None)
    if agents_index is None:
        agents_index = arange(agent_set.size())
    if agents_index.size <= 0:
        logger.log_status("Nothing to be done.")
        return
    # Candidate jobs: those passing self.filter (if given), otherwise all jobs.
    if self.filter is not None:
        jobs_set_index = where( self.job_set.compute_variables(self.filter) )[0]
    else:
        jobs_set_index = arange( self.job_set.size() )
    if run_choice_model:
        choices = ChoiceModel.run(self, *args, **kwargs)
        if self.match_number_of_jobs:
            # Column 1 is presumably the probability of the work-at-home alternative
            # -- TODO confirm alternative ordering in upc_sequence.
            prob_work_at_home = self.upc_sequence.probabilities[:, 1]
            # sample as many workers as there are jobs
            draw = probsample_noreplace(arange(agents_index.size),
                                        min(agents_index.size, jobs_set_index.size),
                                        prob_work_at_home)
            # Override the model's choices: exactly the sampled workers work at home.
            choices = zeros(agents_index.size, dtype='int32')
            choices[draw] = 1
        agent_set.set_values_of_one_attribute(self.choice_attribute_name, choices, index=agents_index)
        at_home_worker_index = agents_index[choices==1]
        logger.log_status("%s workers choose to work at home, %s workers chose to work out of home." %
                          (where(agent_set.get_attribute_by_index(self.choice_attribute_name, agents_index) == 1)[0].size,
                           where(agent_set.get_attribute_by_index(self.choice_attribute_name, agents_index) == 0)[0].size))
    else:
        # No model run: take existing at-home workers that do not have a job yet.
        at_home_worker_index = where(logical_and(
                                         agent_set.get_attribute(self.choice_attribute_name) == 1,
                                         agent_set.get_attribute('job_id') <= 0
                                    ))[0]
    logger.log_status("Total: %s workers work at home, (%s workers work out of home), will try to assign %s workers to %s jobs."
                      % (where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
                         where(agent_set.get_attribute(self.choice_attribute_name) == 0)[0].size,
                         at_home_worker_index.size,
                         jobs_set_index.size))
    if not choose_job_only_in_residence_zone:
        assigned_worker_index, assigned_job_index = self._assign_job_to_worker(at_home_worker_index, jobs_set_index)
    else:
        # Match workers to jobs zone by zone: both sides get the residence_id
        # attribute computed, then assignment happens within each positive zone id.
        agent_set.compute_one_variable_with_unknown_package(residence_id, dataset_pool=self.dataset_pool)
        self.job_set.compute_one_variable_with_unknown_package(residence_id, dataset_pool=self.dataset_pool)
        agent_zone_ids = agent_set.get_attribute_by_index(residence_id, at_home_worker_index)
        job_zone_ids = self.job_set.get_attribute_by_index(residence_id, jobs_set_index)
        unique_zones = unique(job_zone_ids)
        assigned_worker_index = array([], dtype="int32")
        assigned_job_index = array([], dtype="int32")
        for this_zone in unique_zones:
            logger.log_status("%s: %s" % (residence_id, this_zone))
            if this_zone <= 0: continue
            at_home_worker_in_this_zone = where(agent_zone_ids == this_zone)[0]
            job_set_in_this_zone = where(job_zone_ids == this_zone)[0]
            # Per-zone assignment returns positions into the per-zone arrays;
            # translate back to global indices before accumulating.
            assigned_worker_in_this_zone, assigned_job_set_in_this_zone = self._assign_job_to_worker(at_home_worker_in_this_zone, job_set_in_this_zone)
            assigned_worker_index = concatenate((assigned_worker_index, at_home_worker_index[assigned_worker_in_this_zone]))
            assigned_job_index = concatenate((assigned_job_index, jobs_set_index[assigned_job_set_in_this_zone]))
    ## each worker can only be assigned to 1 job
    #assert assigned_worker_index.size == unique(assigned_worker_index).size
    # Record the assigned job id on each worker...
    agent_set.set_values_of_one_attribute(self.job_set.get_id_name()[0],
                                          self.job_set.get_id_attribute()[assigned_job_index],
                                          index=assigned_worker_index)
    agent_set.compute_variables([self.location_id_name], dataset_pool=self.dataset_pool)
    # ...and move each assigned job to its worker's location.
    self.job_set.modify_attribute(name=VariableName(self.location_id_name).get_alias(),
                                  data=agent_set.get_attribute_by_index(self.location_id_name, assigned_worker_index),
                                  index=assigned_job_index)
def create_interaction_datasets(self, agent_set, agents_index, config, submodels=[], **kwargs):
    """Create interactiondataset with or without sampling of alternatives

    arguments to sampler_class is passed through config (run_config or
    estimation_config in configuration file), such as: 'include_chosen_choice',
    'with_replacement', 'stratum', 'sample_size_from_each_stratum',
    'sample_size_from_chosen_stratum' (for stratified sampler)

    NOTE: 'submodels' defaults to a mutable list; it is only read here, never mutated.
    """
    nchoices = self.get_choice_set_size()
    if nchoices == self.choice_set.size():
        if self.filter is None:
            # No sampling and no filter: plain interaction dataset over all choices.
            ChoiceModel.create_interaction_datasets(self, agent_set, agents_index, config)
            return
        else:
            # apply filter without doing sampling
            filter_index = self.apply_filter(self.filter, agent_set, agents_index)
            self.model_interaction.create_interaction_datasets(agents_index, filter_index)
            self.update_choice_set_size(filter_index.size)
            return

    sampling_weights = self.get_sampling_weights(config, agent_set=agent_set, agents_index=agents_index)
    # if filter is specified by submodel in a dict, call sampler submodel by submodel
    if isinstance(self.filter, dict) or config.get("sample_alternatives_by_submodel", False):
        index2 = -1 + zeros((agents_index.size, nchoices), dtype="int32")
        attributes = {}
        ###TODO: it may be possible to merge this loop with sample_alternatives_by_chunk or put it in a common function
        for submodel in submodels:
            agents_index_in_submodel = agents_index[self.observations_mapping[submodel]]
            if agents_index_in_submodel.size == 0:
                continue
            choice_index = self.apply_filter(self.filter, agent_set=agent_set,
                                             agents_index=agents_index_in_submodel,
                                             submodel=submodel)
            if choice_index is not None and choice_index.size == 0:
                # Fix: report the ids of this submodel's agents (the original logged
                # ids for the full agents_index while counting only the submodel).
                logger.log_error("There is no alternative that passes filter %s; %s agents with id %s will remain unplaced." %
                                 (self.filter, agents_index_in_submodel.size,
                                  agent_set.get_id_attribute()[agents_index_in_submodel]))
                continue
            submodel_sampling_weights = sampling_weights
            if isinstance(sampling_weights, str):
                # Substitute the submodel number into a weight expression template.
                submodel_sampling_weights = re.sub('SUBMODEL', str(submodel), sampling_weights)
            chunk_specification = config.get("chunk_specification_for_sampling", {"nchunks": 1})
            if isinstance(chunk_specification, str):
                # SECURITY: eval of a config-supplied string; config is assumed trusted.
                chunk_specification = eval(chunk_specification)
            chunk_specification = ChunkSpecification(chunk_specification)
            nchunks = chunk_specification.nchunks(agents_index_in_submodel)
            chunksize = chunk_specification.chunk_size(agents_index_in_submodel)
            interaction_dataset = self.sample_alternatives_by_chunk(
                agent_set, agents_index_in_submodel,
                choice_index, nchoices,
                weights=submodel_sampling_weights,
                config=config,
                nchunks=nchunks, chunksize=chunksize)
            if len(submodels) > 1:
                # Collect per-submodel sampling results so they can be merged below.
                index2[self.observations_mapping[submodel], :] = interaction_dataset.index2
                for name in interaction_dataset.get_known_attribute_names():
                    attr_val = interaction_dataset.get_attribute(name)
                    if name not in attributes:  # fix: Py2-only dict.has_key
                        attributes[name] = zeros(index2.shape, dtype=attr_val.dtype)
                    attributes[name][self.observations_mapping[submodel], :] = attr_val
        if len(submodels) > 1:
            ## if there are more than 1 submodel, merge the data by submodel and recreate interaction_dataset
            interaction_dataset = self.sampler_class.create_interaction_dataset(
                interaction_dataset.dataset1, interaction_dataset.dataset2,
                index1=agents_index, index2=index2)
            for name in attributes.keys():
                interaction_dataset.add_attribute(attributes[name], name)
        self.update_choice_set_size(interaction_dataset.get_reduced_m())
    else:
        choice_index = self.apply_filter(self.filter, agent_set=agent_set, agents_index=agents_index)
        if choice_index is not None and choice_index.size == 0:
            logger.log_error("There is no alternative that passes filter %s; %s agents with id %s will remain unplaced." %
                             (self.filter, agents_index.size, agent_set.get_id_attribute()[agents_index]))
            return  # OR raise?
        chunk_specification = config.get("chunk_specification_for_sampling", {"nchunks": 1})
        if isinstance(chunk_specification, str):
            # SECURITY: eval of a config-supplied string; config is assumed trusted.
            chunk_specification = eval(chunk_specification)
        chunk_specification = ChunkSpecification(chunk_specification)
        nchunks = chunk_specification.nchunks(agents_index)
        chunksize = chunk_specification.chunk_size(agents_index)
        interaction_dataset = self.sample_alternatives_by_chunk(
            agent_set, agents_index, choice_index, nchoices,
            weights=sampling_weights, config=config,
            nchunks=nchunks, chunksize=chunksize)
        self.update_choice_set_size(interaction_dataset.get_reduced_m())
    self.model_interaction.interaction_dataset = interaction_dataset
def estimate(self, specification, *args, **kwargs):
    """Prepare nest memberships, strip GUI-injected __logsum_ variables from the
    specification, and delegate the actual estimation to ChoiceModel.estimate."""
    self.init_membership_in_nests()
    # Specifications configured from the GUI contain __logsum_ variables (used only
    # to define starting values); they must not be part of the estimated specification.
    self.delete_logsum_from_specification(specification)
    estimation_result = ChoiceModel.estimate(self, specification, *args, **kwargs)
    return estimation_result
def create_interaction_datasets(self, agent_set, agents_index, config, submodels=[], **kwargs):
    """Create interactiondataset with or without sampling of alternatives

    arguments to sampler_class is passed through config (run_config or
    estimation_config in configuration file), such as: 'include_chosen_choice',
    'with_replacement', 'stratum', 'sample_size_from_each_stratum',
    'sample_size_from_chosen_stratum' (for stratified sampler)

    NOTE: 'submodels' defaults to a mutable list; it is only read here, never mutated.
    """
    nchoices = self.get_choice_set_size()
    if nchoices == self.choice_set.size():
        if self.filter is None:
            # No sampling and no filter: plain interaction dataset over all choices.
            ChoiceModel.create_interaction_datasets(self, agent_set, agents_index, config)
            return
        else:
            # apply filter without doing sampling
            filter_index = self.apply_filter(self.filter, agent_set, agents_index)
            self.model_interaction.create_interaction_datasets(agents_index, filter_index)
            self.update_choice_set_size(filter_index.size)
            return

    sampling_weights = self.get_sampling_weights(config, agent_set=agent_set, agents_index=agents_index)
    # if filter is specified by submodel in a dict, call sampler submodel by submodel
    if isinstance(self.filter, dict) or config.get("sample_alternatives_by_submodel", False):
        index2 = -1 + zeros((agents_index.size, nchoices), dtype="int32")
        attributes = {}
        ###TODO: it may be possible to merge this loop with sample_alternatives_by_chunk or put it in a common function
        for submodel in submodels:
            agents_index_in_submodel = agents_index[self.observations_mapping[submodel]]
            if agents_index_in_submodel.size == 0:
                continue
            choice_index = self.apply_filter(self.filter, agent_set=agent_set,
                                             agents_index=agents_index_in_submodel,
                                             submodel=submodel)
            if choice_index is not None and choice_index.size == 0:
                # Fix: report the ids of this submodel's agents (the original logged
                # ids for the full agents_index while counting only the submodel).
                logger.log_error("There is no alternative that passes filter %s; %s agents with id %s will remain unplaced." %
                                 (self.filter, agents_index_in_submodel.size,
                                  agent_set.get_id_attribute()[agents_index_in_submodel]))
                continue
            submodel_sampling_weights = sampling_weights
            if isinstance(sampling_weights, str):
                # Substitute the submodel number into a weight expression template.
                submodel_sampling_weights = re.sub('SUBMODEL', str(submodel), sampling_weights)
            chunk_specification = config.get("chunk_specification_for_sampling", {"nchunks": 1})
            if isinstance(chunk_specification, str):
                # SECURITY: eval of a config-supplied string; config is assumed trusted.
                chunk_specification = eval(chunk_specification)
            chunk_specification = ChunkSpecification(chunk_specification)
            nchunks = chunk_specification.nchunks(agents_index_in_submodel)
            chunksize = chunk_specification.chunk_size(agents_index_in_submodel)
            interaction_dataset = self.sample_alternatives_by_chunk(
                agent_set, agents_index_in_submodel,
                choice_index, nchoices,
                weights=submodel_sampling_weights,
                config=config,
                nchunks=nchunks, chunksize=chunksize)
            if len(submodels) > 1:
                # Collect per-submodel sampling results so they can be merged below.
                index2[self.observations_mapping[submodel], :] = interaction_dataset.index2
                for name in interaction_dataset.get_known_attribute_names():
                    attr_val = interaction_dataset.get_attribute(name)
                    if name not in attributes:  # fix: Py2-only dict.has_key
                        attributes[name] = zeros(index2.shape, dtype=attr_val.dtype)
                    attributes[name][self.observations_mapping[submodel], :] = attr_val
        if len(submodels) > 1:
            ## if there are more than 1 submodel, merge the data by submodel and recreate interaction_dataset
            interaction_dataset = self.sampler_class.create_interaction_dataset(
                interaction_dataset.dataset1, interaction_dataset.dataset2,
                index1=agents_index, index2=index2)
            for name in attributes.keys():
                interaction_dataset.add_attribute(attributes[name], name)
        self.update_choice_set_size(interaction_dataset.get_reduced_m())
    else:
        choice_index = self.apply_filter(self.filter, agent_set=agent_set, agents_index=agents_index)
        if choice_index is not None and choice_index.size == 0:
            logger.log_error("There is no alternative that passes filter %s; %s agents with id %s will remain unplaced." %
                             (self.filter, agents_index.size, agent_set.get_id_attribute()[agents_index]))
            return  # OR raise?
        chunk_specification = config.get("chunk_specification_for_sampling", {"nchunks": 1})
        if isinstance(chunk_specification, str):
            # SECURITY: eval of a config-supplied string; config is assumed trusted.
            chunk_specification = eval(chunk_specification)
        chunk_specification = ChunkSpecification(chunk_specification)
        nchunks = chunk_specification.nchunks(agents_index)
        chunksize = chunk_specification.chunk_size(agents_index)
        interaction_dataset = self.sample_alternatives_by_chunk(
            agent_set, agents_index, choice_index, nchoices,
            weights=sampling_weights, config=config,
            nchunks=nchunks, chunksize=chunksize)
        self.update_choice_set_size(interaction_dataset.get_reduced_m())
    self.model_interaction.interaction_dataset = interaction_dataset
def run(self, run_choice_model=True, choose_job_only_in_residence_zone=False, *args, **kwargs):
    # Decide which workers work at home (optionally via the choice model) and assign
    # them to home-based jobs, optionally restricted to the worker's residence zone.
    # Unlike the residence_id-parameterized variant, this version hard-codes
    # urbansim_parcel zone_id variables. Required kwargs: 'agent_set', 'agents_index'.
    agent_set = kwargs['agent_set']
    if run_choice_model:
        choices = ChoiceModel.run(self, *args, **kwargs)
        #prob_work_at_home = self.upc_sequence.probabilities[:, 0]
        agent_set.set_values_of_one_attribute(self.choice_attribute_name,
                                              choices,
                                              index=kwargs['agents_index'])
        at_home_worker_index = kwargs['agents_index'][choices == 1]
        logger.log_status(
            "%s workers choose to work at home, %s workers chose to work out of home." %
            (where(agent_set.get_attribute_by_index(self.choice_attribute_name,
                                                    kwargs['agents_index']) == 1)[0].size,
             where(agent_set.get_attribute_by_index(self.choice_attribute_name,
                                                    kwargs['agents_index']) == 0)[0].size))
    else:
        # No model run: take existing at-home workers that do not have a job yet.
        at_home_worker_index = where(
            logical_and(
                agent_set.get_attribute(self.choice_attribute_name) == 1,
                agent_set.get_attribute('job_id') <= 0))[0]
    # Candidate jobs: those passing self.filter (if given), otherwise all jobs.
    if self.filter is not None:
        jobs_set_index = where(self.job_set.compute_variables(self.filter))[0]
    else:
        jobs_set_index = arange(self.job_set.size())
    logger.log_status(
        "Total: %s workers work at home, (%s workers work out of home), will try to assign %s workers to %s jobs."
        % (where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
           where(agent_set.get_attribute(self.choice_attribute_name) == 0)[0].size,
           at_home_worker_index.size,
           jobs_set_index.size))
    if not choose_job_only_in_residence_zone:
        assigned_worker_index, assigned_job_index = self._assign_job_to_worker(
            at_home_worker_index, jobs_set_index)
    else:
        # Match workers to jobs zone by zone within each positive zone_id.
        agent_set.compute_variables("urbansim_parcel.person.zone_id")
        self.job_set.compute_variables("urbansim_parcel.job.zone_id")
        agent_zone_ids = agent_set.get_attribute_by_index('zone_id', at_home_worker_index)
        job_zone_ids = self.job_set.get_attribute_by_index('zone_id', jobs_set_index)
        unique_zones = unique(job_zone_ids)
        assigned_worker_index = array([], dtype="int32")
        assigned_job_index = array([], dtype="int32")
        for this_zone in unique_zones:
            logger.log_status("zone_id: %s" % this_zone)
            if this_zone <= 0: continue
            at_home_worker_in_this_zone = where(agent_zone_ids == this_zone)[0]
            job_set_in_this_zone = where(job_zone_ids == this_zone)[0]
            # Per-zone assignment returns positions into the per-zone arrays;
            # translate back to global indices before accumulating.
            assigned_worker_in_this_zone, assigned_job_set_in_this_zone = self._assign_job_to_worker(
                at_home_worker_in_this_zone, job_set_in_this_zone)
            assigned_worker_index = concatenate(
                (assigned_worker_index,
                 at_home_worker_index[assigned_worker_in_this_zone]))
            assigned_job_index = concatenate(
                (assigned_job_index,
                 jobs_set_index[assigned_job_set_in_this_zone]))
    ## each worker can only be assigned to 1 job
    #assert assigned_worker_index.size == unique(assigned_worker_index).size
    # Record the assigned job id on each worker...
    agent_set.set_values_of_one_attribute(
        self.job_set.get_id_name()[0],
        self.job_set.get_id_attribute()[assigned_job_index],
        index=assigned_worker_index)
    agent_set.compute_variables([self.location_id_name], dataset_pool=self.dataset_pool)
    # ...and move each assigned job to its worker's location.
    self.job_set.modify_attribute(
        name=VariableName(self.location_id_name).get_alias(),
        data=agent_set.get_attribute_by_index(self.location_id_name,
                                              assigned_worker_index),
        index=assigned_job_index)
def __init__(self, choice_set=[0,1], location_id_name="grid_id", movers_ratio=None, **kwargs):
    """Binary choice model over 'choice_set'; remembers the location id attribute
    name and an optional movers ratio before delegating to ChoiceModel.__init__.
    (The mutable default for 'choice_set' is kept for interface compatibility; it
    is never mutated here.)"""
    self.movers_ratio = movers_ratio
    self.location_id_name = location_id_name
    ChoiceModel.__init__(self, choice_set, **kwargs)
def simulate_submodel(self, data, coefficients, submodel):
    """Restrict 'agent_units_all' in run_config to this submodel's observations
    (stored under 'agent_units'), then delegate to ChoiceModel.simulate_submodel."""
    units_for_all_agents = self.run_config.get("agent_units_all", None)
    if units_for_all_agents is not None:
        self.run_config["agent_units"] = units_for_all_agents[self.observations_mapping[submodel]]
    return ChoiceModel.simulate_submodel(self, data, coefficients, submodel)
def set_choice_set_size(self):
    """Default the sampler size to the total number of alternatives across all
    nests (when not explicitly set), then delegate to ChoiceModel.

    Fix: replaced the Python-2-only dict.iteritems() loop with a sum over
    .values(), which behaves identically and also works on Python 3.
    """
    if self.sampler_size is None:
        self.sampler_size = sum(len(members) for members in self.nested_structure.values())
    ChoiceModel.set_choice_set_size(self)
def simulate_submodel(self, data, coefficients, submodel=0):
    """Delegate to ChoiceModel.simulate_submodel, caching this submodel's
    probability matrix in self.lct_probabilities before returning the result."""
    simulated = ChoiceModel.simulate_submodel(self, data, coefficients, submodel)
    self.lct_probabilities[submodel] = self.upc_sequence.get_probabilities()
    return simulated
# --- Tutorial snippet: working with datasets and models ---
# 'households' (and the commented-out 'storage') are created earlier in this
# script, outside this excerpt.
households.summary()
# Attach a new primary attribute "location", one value per household.
households.add_primary_attribute(data=[4,6,9,2,4,8,2,1,3,2], name="location")
households.get_attribute_names()
# Overwrite "location" for the households at index 0 and 1.
households.modify_attribute(name="location", data=[0,0], index=[0,1])
households.get_attribute("location")
households.get_data_element_by_id(5).location
#households.write_dataset(out_storage=storage, out_table_name="households_output")
households.get_dataset_name()

# Working with models
from opus_core.choice_model import ChoiceModel
# A three-alternative logit model: linear utilities, MNL probabilities,
# random draws from the resulting probabilities.
choicemodel = ChoiceModel(choice_set=[1,2,3],
                          utilities = "opus_core.linear_utilities",
                          probabilities = "opus_core.mnl_probabilities",
                          choices = "opus_core.random_choices")
from numpy import array
from opus_core.equation_specification import EquationSpecification
# One equation per alternative; alternatives 1 and 3 get a constant,
# alternatives 2 and 3 get a household-size term.
specification = EquationSpecification(
    coefficients = array([ "beta01", "beta12", "beta03", "beta13" ]),
    variables = array([ "constant","household.persons", "constant", "household.persons" ]),
    equations = array([ 1, 2, 3, 3 ])
)
def run_chunk(self, agents_index, agent_set, specification, coefficients):
    """Inject logsum terms into the specification and refresh nest memberships,
    then delegate the chunk simulation to ChoiceModel.run_chunk."""
    self.add_logsum_to_specification(specification, coefficients)
    self.init_membership_in_nests()
    chunk_result = ChoiceModel.run_chunk(self, agents_index, agent_set, specification, coefficients)
    return chunk_result
def run(self, specification, coefficients, agent_set, agents_index=None, chunk_specification=None,
        data_objects=None, run_config=None, debuglevel=0):
    """ Run a simulation and return a numpy array of length agents_index, giving agent choices
    (ids of locations).
        'specification' is of type EquationSpecification,
        'coefficients' is of type Coefficients,
        'agent_set' is of type Dataset,
        'agent_index' are indices of individuals in the agent_set for which
                    the model runs. If it is None, the whole agent_set is considered.
        'chunk_specification' determines number of chunks in which the simulation is processed.
                    Default is to use 300 rows per chunk.
        'data_objects' is a dictionary where each key is the name of an data object
                    ('zone', ...) and its value is an object of class  Dataset.
        'run_config' is of type Resources, it gives additional arguments for the run.
        'debuglevel' overwrites the constructor 'debuglevel'.
    """
    # Fix: identity comparison with None ('is None'), not equality ('== None').
    if run_config is None:
        run_config = Resources()
    self.run_config = run_config.merge_with_defaults(self.run_config)
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    self.dataset_pool.add_datasets_if_not_included({agent_set.get_dataset_name(): agent_set})
    ## what is the use of compute location_id string in run? it gets new values anyway
    #if self.location_id_string is not None:
    #    location_id = agent_set.compute_variables(self.location_id_string, dataset_pool=self.dataset_pool)
    ## done in choice_model
    #location_id_name = self.choice_set.get_id_name()[0]
    #if (location_id_name not in agent_set.get_known_attribute_names()):
    #    agent_set.add_attribute(name=location_id_name, data=resize(array([-1]), agent_set.size()))
    if self.run_config.get("agent_units_string", None):
        # used when agents take different amount of capacity from the total capacity
        agent_set.compute_variables([self.run_config["agent_units_string"]],
                                    dataset_pool=self.dataset_pool)
    self.compute_capacity_flag = self.run_config.get("compute_capacity_flag", False)
    capacity_string = None
    self.capacity = None
    if self.compute_capacity_flag:
        capacity_string = self.run_config.get("capacity_string", None)
        if capacity_string is None:
            # Fix: Python-2-only 'raise KeyError, "..."' statement replaced with the
            # call form, which is valid on both Python 2 and 3.
            raise KeyError(
                "Entry 'capacity_string' has to be specified in 'run_config' if 'compute_capacity_flag' is True")
    ## if weights is None, use capacity for weights
    if self.run_config.get("weights_for_simulation_string", None) is None and capacity_string is not None:
        self.run_config.merge({"weights_for_simulation_string": capacity_string})
    # data_objects were already added to the dataset pool above, so they are not
    # forwarded to ChoiceModel.run.
    return ChoiceModel.run(self, specification, coefficients, agent_set,
                           agents_index=agents_index,
                           chunk_specification=chunk_specification,
                           run_config=self.run_config,
                           debuglevel=debuglevel)