コード例 #1
0
 def test_interaction_set_component(self):
     """A fully-qualified variable that applies to one component of an interaction set."""
     expr = "opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={'id': array([1, 2, 3]),
                                     'income': array([1, 20, 500])})
     storage.write_table(table_name='test_locations',
                         table_data={'id': array([1, 2]),
                                     'cost': array([1000, 2000])})
     pool = DatasetPool(package_order=['opus_core'], storage=storage)
     interaction_set = pool.get_dataset('test_agent_x_test_location')
     should_be = array([[2, 2], [40, 40], [1000, 1000]])
     result = interaction_set.compute_variables(expr, dataset_pool=pool)
     self.assert_(ma.allclose(result, should_be, rtol=1e-6), msg="Error in " + expr)
     # the interaction set should now carry the computed values as an attribute ...
     result2 = interaction_set.get_attribute('income_times_2')
     self.assert_(ma.allclose(result2, should_be, rtol=1e-6), msg="Error in " + expr)
     # ... and the short name should compute to the same values
     result3 = interaction_set.compute_variables(['income_times_2'])
     self.assert_(ma.allclose(result3, should_be, rtol=1e-6), msg="Error in " + expr)
     # expr only mentions the component set, so that component is its dataset name,
     # even though the expression was evaluated on the interaction set
     name = VariableName(expr)
     self.assertEqual(name.get_dataset_name(), 'test_agent', msg="bad value for dataset")
コード例 #2
0
 def compute(self, indicator, year):
     """Compute the indicator's attribute for 'year' and record the computation."""
     # substitute the concrete year for the DDDD placeholder in the attribute name
     attribute = indicator.attribute.replace('DDDD', repr(year))
     name = VariableName(attribute)
     known = self.dataset.get_known_attribute_names()
     if name.get_alias() not in known:
         self.dataset.compute_variables(name)
     self.computed.append((attribute, year))
コード例 #3
0
 def _do_flush_dependent_variables_if_required(self):
     """Flush each dependent attribute of this variable to disk, provided the
     'flush_variables' session option is enabled.

     Attributes on interaction datasets are never flushed, and a dataset's
     id attributes are kept in memory.
     """
     try:
         if not SessionConfiguration().get('flush_variables', False):
             return
     except Exception:
         # No usable session configuration -- nothing to flush.
         # (Narrowed from a bare except, which would also swallow
         # KeyboardInterrupt/SystemExit.)
         return
     from opus_core.datasets.interaction_dataset import InteractionDataset
     dataset = self.get_dataset()
     my_dataset_name = dataset.get_dataset_name()
     for dependency in self.get_current_dependencies():  # iterate over dependent variables
         dep_item = dependency[0]
         if isinstance(dep_item, str):
             depvar_name = VariableName(dep_item)
         else:
             # dep_item should be an instance of AttributeBox
             depvar_name = dep_item.get_variable_name()
         dataset_name = depvar_name.get_dataset_name()
         if dataset_name == my_dataset_name:
             ds = dataset
         else:
             ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
         if isinstance(ds, InteractionDataset):
             continue  # interaction sets are kept in memory
         short_name = depvar_name.get_alias()
         if short_name not in ds.get_id_name():
             ds.flush_attribute(depvar_name)
コード例 #4
0
 def test_alias_fully_qualified_variable_same_name(self):
     """Alias a fully-qualified variable using the variable's own short name."""
     expr = "a_test_variable = opus_core.tests.a_test_variable"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
         table_name='tests',
         table_data={"a_dependent_variable": array([1, 5, 10]),
                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     should_be = array([10, 50, 100])
     self.assert_(ma.allclose(dataset.compute_variables([expr]), should_be, rtol=1e-6),
                  "Error in test_alias_fully_qualified_variable")
     # the alias alone should now resolve to the same values
     self.assert_(ma.allclose(dataset.compute_variables(['a_test_variable']), should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
     v = VariableName(expr)
     # this form should not have generated an autogen class
     self.assertEqual(v.get_autogen_class(), None, msg="bad value for autogen_class")
     # and the alias should be the short name itself
     self.assertEqual(v.get_alias(), 'a_test_variable', msg="bad value for alias")
コード例 #5
0
 def test_alias_attribute_same_name(self):
     """An alias for a primary attribute that reuses the attribute's own name."""
     expr = "persons = persons"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
         table_name='tests',
         table_data={"persons": array([1, 5, 10]), "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     values = dataset.compute_variables([expr])
     self.assertEqual(ma.allclose(values, [1, 5, 10], rtol=1e-7), True,
                      msg="error in test_alias_attribute")
     # for a self-alias, the short name and alias are both the attribute name,
     # and no autogen class is created
     name = VariableName(expr)
     self.assertEqual(name.get_short_name(), 'persons', msg="bad value for shortname")
     self.assertEqual(name.get_alias(), 'persons', msg="bad value for alias")
     self.assertEqual(name.get_autogen_class(), None, msg="bad value for autogen_class")
コード例 #6
0
 def test_alias_complex_expression(self):
     """Aliasing a complex arithmetic expression."""
     expr = "x = 2*sqrt(var1+var2)"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
         table_name='dataset',
         table_data={"var1": array([4, -8, 0.5, 1]),
                     "var2": array([3, 3, 7, 7]),
                     "id": array([1, 2, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='dataset',
                       id_name="id", dataset_name="mydataset")
     should_be = array([5.29150262, 0.0, 5.47722558, 5.65685425])
     self.assert_(ma.allclose(dataset.compute_variables([expr]), should_be, rtol=1e-6),
                  "Error in test_alias_complex_expression")
     # the new variable should carry x as its alias ...
     v = VariableName(expr)
     self.assertEqual(v.get_alias(), 'x', msg="bad value for alias")
     # ... and computing via the alias should give the same values
     self.assert_(ma.allclose(dataset.compute_variables(['x']), should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
コード例 #7
0
 def _compute_if_needed(self, name, dataset_pool, resources=None, quiet=False, version=None):
     """ Compute variable given by the argument 'name' only if this variable
     has not been computed before.
     Check first if this variable belongs to dataset1 or dataset2.
     dataset_pool holds available datasets.
     Returns the version number of the (possibly newly computed) attribute.
     """
     # Normalize 'name' to a VariableName instance.
     if not isinstance(name, VariableName):
         variable_name = VariableName(name)
     else:
         variable_name = name
     short_name = variable_name.get_alias()
     # Already present and all dependencies up to date -- nothing to do.
     if (short_name in self.get_attribute_names()) and (self.are_dependent_variables_up_to_date(
                         variable_name, version=version)):
         return version #nothing to be done
     dataset_name = variable_name.get_dataset_name()
     if dataset_name == self.get_dataset_name():
         # The variable lives on this set itself.
         new_version = self._compute_one_variable(variable_name, dataset_pool, resources)
     else:
         # The variable belongs to a component dataset; locate the owner and
         # the index that maps the owner's values onto this set.
         owner_dataset, index = self.get_owner_dataset_and_index(dataset_name)
         if owner_dataset is None:
             self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
                             variable_name.get_expression())
         # Compute on the owner, then copy the indexed values in as a
         # COMPUTED attribute of this set.
         owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
         new_version = self.add_attribute(data = owner_dataset.get_attribute_by_index(variable_name, index),
             name = variable_name, metadata = AttributeType.COMPUTED)
         # Share the owner's variable instance with our attribute box so both
         # datasets refer to the same variable object.
         attribute_box = owner_dataset._get_attribute_box(variable_name)
         variable = attribute_box.get_variable_instance()
         my_attribute_box = self._get_attribute_box(variable_name)
         my_attribute_box.set_variable_instance(variable)
     return new_version
コード例 #8
0
 def compute_expression(self, attribute_name):
     """Compute any expression and return its values."""
     name = VariableName(attribute_name)
     # the expression is evaluated on the dataset it names
     target = self.get_dataset(name.get_dataset_name())
     pool = self.get_dataset_pool()
     return target.compute_variables([name], dataset_pool=pool)
コード例 #9
0
 def run(self, year, condition=None, max_iter=10):
     """Run the set of models until 'condition' holds, or 'max_iter' is exceeded.

     'year' is the current year of the simulation.
     'condition' should be a boolean expression defined on any dataset.
     The method iterates over the given models until all values of the expression are True.
     'max_iter' gives the maximum number of iterations to run, if 'condition' is not fulfilled.
     If it is None, there is no limit and thus, the condition must be fulfilled in order to terminate.
     If 'condition' is None, the set of models is run only once.
     Returns the result of the last model-system run (None if no run happened).
     """
     self.config['years'] = (year, year)
     if condition is None:
         return self.model_system.run_in_same_process(self.config)
     dataset_pool = SessionConfiguration().get_dataset_pool()
     variable_name = VariableName(condition)
     dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
     condition_value = dataset.compute_variables(variable_name, dataset_pool=dataset_pool)
     result = None
     iteration = 1  # renamed from 'iter', which shadowed the builtin
     while not alltrue(condition_value):
         result = self.model_system.run_in_same_process(self.config)
         # BUG FIX: the original test was 'max_iter is None or iteration > max_iter',
         # which broke out after a single iteration whenever max_iter was None --
         # the opposite of the documented "no limit" behavior.
         if max_iter is not None and iteration > max_iter:
             break
         iteration = iteration + 1
         # force to recompute the condition on a fresh dataset from the pool
         dataset = SessionConfiguration().get_dataset_pool().get_dataset(variable_name.get_dataset_name())
         dataset.delete_computed_attributes()
         condition_value = dataset.compute_variables(variable_name,
                                                     dataset_pool=SessionConfiguration().get_dataset_pool())
     if not alltrue(condition_value):
         logger.log_status('%s did not converge. Maximum number of iterations (%s) reached.' % (self.model_name, max_iter))
     else:
         logger.log_status('%s converged in %s iterations.' % (self.model_name, iteration-1))
     return result
コード例 #10
0
 def test_multiply(self):
     """Multiplying attributes drawn from both components of an interaction set."""
     expr = 'test_agent.income*test_location.cost'
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={'id': array([1, 2, 3]),
                                     'income': array([1, 20, 500])})
     storage.write_table(table_name='test_locations',
                         table_data={'id': array([1, 2]),
                                     'cost': array([1000, 2000])})
     pool = DatasetPool(package_order=['opus_core'], storage=storage)
     interaction_set = pool.get_dataset('test_agent_x_test_location')
     result = interaction_set.compute_variables(expr, dataset_pool=pool)
     should_be = array([[1000, 2000],
                        [20000, 40000],
                        [500000, 1000000]])
     self.assert_(ma.allclose(result, should_be, rtol=1e-6), msg="Error in " + expr)
     # both component sets occur in expr, so there is no single dataset name;
     # the interaction set names may come back in either order
     name = VariableName(expr)
     self.assertEqual(name.get_dataset_name(), None)
     names = name.get_interaction_set_names()
     self.assertEqual(len(names), 2)
     self.assert_('test_agent' in names)
     self.assert_('test_location' in names)
コード例 #11
0
    def get_attribute(self, name):
        """ Return an array of the (by the argument name) given attribute.

        Lookup order: (1) an attribute already stored on this interaction
        set; (2) a fully-qualified dataset-name match against component 1,
        then component 2; (3) a short-name (alias) match against the known
        attributes of component 1, then component 2.  Component values are
        fetched through the corresponding 2d index.  Raises NameError when
        nothing matches.
        """
        # Normalize to a VariableName.
        if not isinstance(name, VariableName):
            attr_name = VariableName(name)
        else:
            attr_name = name
        alias = attr_name.get_alias()
        dataset_name = attr_name.get_dataset_name()
        if not (alias in self.get_attribute_names()):
            # Fully-qualified lookup against the two component datasets.
            if dataset_name == self.get_dataset(1).dataset_name:
                index = self.get_2d_index_of_dataset1()
                return self.get_dataset(1).get_attribute_by_index(
                    attr_name, index)
            if dataset_name == self.get_dataset(2).dataset_name:
                index = self.get_2d_index()
                return self.get_dataset(2).get_attribute_by_index(
                    attr_name, index)

            # Fall back to a short-name lookup; component 1 wins on a tie.
            if alias in self.get_dataset(1).get_known_attribute_names():
                index = self.get_2d_index_of_dataset1()
                return self.get_dataset(1).get_attribute_by_index(
                    attr_name, index)
            if alias in self.get_dataset(2).get_known_attribute_names():
                index = self.get_2d_index()
                return self.get_dataset(2).get_attribute_by_index(
                    attr_name, index)
            self._raise_error(NameError, "Variable %s not found!" % alias)
        return self.attribute_boxes[alias].get_data()
コード例 #12
0
    def __init__(self,
                 location_set,
                 project_type,
                 units,
                 developable_maximum_unit_variable_full_name,
                 developable_minimum_unit_variable_full_name=None,
                 model_name=None,
                 **kargs):
        """
        'project_type' is a string such as 'Residential', or 'Commercial'.

        'developable_*_unit_variable_full_name' are fully-qualified variable
        names; their aliases are stored as the corresponding short names.
        NOTE(review): when 'model_name' is None, the default is built from
        self.model_name (and self.model_short_name is always built from
        self.model_short_name), so both are presumably provided as class
        attributes by a subclass -- confirm.
        Remaining keyword arguments are forwarded to LocationChoiceModel.
        """
        self.project_type = project_type
        self.units = units
        if model_name is not None:
            self.model_name = model_name
        else:
            # prefix the inherited model name with the project type
            self.model_name = "%s %s" % (self.project_type, self.model_name)
        # short name uses only the first three letters of the project type
        self.model_short_name = "%s %s" % (self.project_type[:3],
                                           self.model_short_name)

        self.developable_maximum_unit_full_name = developable_maximum_unit_variable_full_name
        self.developable_maximum_unit_short_name = VariableName(
            self.developable_maximum_unit_full_name).get_alias()
        self.developable_minimum_unit_full_name = developable_minimum_unit_variable_full_name
        if self.developable_minimum_unit_full_name is not None:
            self.developable_minimum_unit_short_name = VariableName(
                self.developable_minimum_unit_full_name).get_alias()
        else:
            self.developable_minimum_unit_short_name = None
        LocationChoiceModel.__init__(self, location_set=location_set, **kargs)
コード例 #13
0
    def apply_filter(self, filter, agent_set, agents_index, submodel=-2):
        """ apply filter comparing to mean project size by submodel instead of 0, by shifting self.filter

        'filter' may be a dict keyed by submodel or a single filter value.
        A string filter is computed on the choice set and then shifted down
        by the mean project size of the agents in 'agents_index'; a numeric
        filter is shifted directly.  Delegates to
        LocationChoiceModel.apply_filter with the shifted filter.
        """
        project_size_filter = None
        if (filter is not None):
            # pick the filter for this submodel when a dict is given
            if isinstance(filter, dict):
                submodel_filter = filter[submodel]
            else:
                submodel_filter = filter

            # mean size of the projects the filter is compared against
            mean_project_size = agent_set.get_attribute(
                agent_set.get_attribute_name())[agents_index].mean()

            if isinstance(submodel_filter, str):
                # string filter: compute it on the choice set first
                resources = Resources({"debug": self.debug})
                self.choice_set.compute_variables(
                    [submodel_filter],
                    dataset_pool=self.dataset_pool,
                    resources=resources)
                filter_name = VariableName(submodel_filter)
                # shift the computed values by the mean project size
                project_size_filter = self.choice_set.get_attribute(
                    filter_name.get_alias()) - mean_project_size
            else:
                # numeric filter: shift directly
                project_size_filter = submodel_filter - mean_project_size

        return LocationChoiceModel.apply_filter(self,
                                                project_size_filter,
                                                agent_set=agent_set,
                                                agents_index=agents_index,
                                                submodel=submodel)
コード例 #14
0
 def test_constants(self):
     """An expression involving two dataset names, one of which is *_constant."""
     expr = "test_agent.age<=opus_constant.young_age"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={"age": array([30, 20, 60, 80]),
                                     "id": array([1, 3, 4, 10])})
     storage.write_table(table_name='opus_constants',
                         table_data={"young_age": array([35]),
                                     "opus_constant_id": array([1])})
     dataset_pool = DatasetPool(storage=storage)
     # opus_constant just holds constants and is ignored when determining the
     # dataset name for the expression, so the name should be test_agent
     name = VariableName(expr)
     autogen = name.get_autogen_class()
     self.assertEqual(name.get_package_name(), None)
     self.assertEqual(name.get_dataset_name(), 'test_agent')
     # an instance of the autogen class should not depend on the constant set
     self.assertEqual(autogen().dependencies(), ['test_agent.age'])
     dataset = Dataset(in_storage=storage, in_table_name='test_agents',
                       id_name="id", dataset_name="test_agent")
     result = dataset.compute_variables([expr], dataset_pool=dataset_pool)
     should_be = array([True, True, False, False])
     self.assertEqual(ma.allequal(result, should_be), True)
コード例 #15
0
 def test_constants(self):
     """An expression involving two dataset names, one of which is *_constant."""
     expr = "test_agent.age<=opus_constant.young_age"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
         table_name='test_agents',
         table_data={
             "age":array([30,20,60,80]),
             "id":array([1,3,4,10])
             }
         )
     storage.write_table(
         table_name='opus_constants',
         table_data={
             "young_age":array([35]),
             "opus_constant_id":array([1])
             }
         )
     dataset_pool = DatasetPool(storage=storage)
     # Test that the dataset name is correct for expr.  It should be test_agent -- opus_constant just holds constants,
     # and is ignored as far as finding the dataset name for the expression.
     name = VariableName(expr)
     autogen = name.get_autogen_class()
     self.assertEqual(name.get_package_name(), None)
     self.assertEqual(name.get_dataset_name(), 'test_agent')
     # make an instance of the class and check the dependencies (it shouldn't depend on urbansim_constant)
     self.assertEqual(autogen().dependencies(), ['test_agent.age'])
     # finally evaluate the expression against the data and check the values
     dataset = Dataset(in_storage=storage, in_table_name='test_agents', id_name="id", dataset_name="test_agent")
     result = dataset.compute_variables([expr], dataset_pool=dataset_pool)
     should_be = array( [True,True,False,False] )
     self.assertEqual( ma.allequal( result, should_be), True)
コード例 #16
0
 def _do_flush_dependent_variables_if_required(self):
     """Flush each dependent attribute of this variable to disk when the
     'flush_variables' session option is enabled.

     Attributes on interaction datasets are never flushed, and a dataset's
     id attributes are kept in memory.
     """
     try:
         if not SessionConfiguration().get('flush_variables', False):
             return
     except Exception:
         # No usable session configuration -- nothing to flush.
         # (Narrowed from a bare except, which would also swallow
         # KeyboardInterrupt/SystemExit.)
         return
     from opus_core.datasets.interaction_dataset import InteractionDataset
     dataset = self.get_dataset()
     my_dataset_name = dataset.get_dataset_name()
     for dependency in self.get_current_dependencies():  # iterate over dependent variables
         dep_item = dependency[0]
         if isinstance(dep_item, str):
             depvar_name = VariableName(dep_item)
         else:
             # dep_item should be an instance of AttributeBox
             depvar_name = dep_item.get_variable_name()
         dataset_name = depvar_name.get_dataset_name()
         if dataset_name == my_dataset_name:
             ds = dataset
         else:
             ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
         if isinstance(ds, InteractionDataset):
             continue  # interaction sets are kept in memory
         short_name = depvar_name.get_alias()
         if short_name not in ds.get_id_name():
             ds.flush_attribute(depvar_name)
コード例 #17
0
 def compute_expression(self, attribute_name):
     """Evaluate an arbitrary expression and return its computed values."""
     name = VariableName(attribute_name)
     # dispatch the computation to the dataset the expression names
     owner = self.get_dataset(name.get_dataset_name())
     return owner.compute_variables([name],
                                    dataset_pool=self.get_dataset_pool())
コード例 #18
0
 def check_parse_errors(self, variables):
     """Check that every entry of 'variables' parses consistently with its
     declared source.

     'variables' is a sequence of (var_name, dataset_name, use, source, expr)
     tuples, where source is 'primary attribute', 'expression' or
     'Python class'.  Returns a list of error-message strings (empty when
     everything parses cleanly).
     """
     errors = []
     for (var_name, dataset_name, use, source, expr) in variables:
         # special case -- the 'constant' expression always passes
         if expr.strip() == 'constant' and var_name == 'constant':
             continue
         try:
             n = VariableName(expr)
             # check that the expression is of the correct form given the source
             if source == 'primary attribute':
                 if n.get_autogen_class() is not None:
                     errors.append("Error - this is parsing as an expression rather than as a primary attribute: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif n.get_dataset_name() is None:
                     errors.append("Error in primary attribute - missing dataset name: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif dataset_name != n.get_dataset_name():
                     errors.append("Error in primary attribute - dataset name mismatch: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif n.get_package_name() is not None:
                     errors.append("Error in primary attribute - shouldn't have package name: (%s, %s): %s" % (var_name, dataset_name, expr))
             elif source == 'expression':
                 if n.get_autogen_class() is None:
                     errors.append("Error - this doesn't seem to be an expression.  Maybe it should be a Python class or primary attribute?: (%s, %s): %s" % (var_name, dataset_name, expr))
             elif source == 'Python class':
                 if n.get_autogen_class() is not None:
                     errors.append("Error - this is parsing as an expression rather than as a Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif n.get_package_name() is None:
                     errors.append("Error - missing package name in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif n.get_dataset_name() is None:
                     errors.append("Error - missing dataset name in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                 elif dataset_name != n.get_dataset_name():
                     errors.append("Error - dataset name  mismatch in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
             else:
                 errors.append("Unknown source type %s: (%s, %s): %s" % (source, var_name, dataset_name, expr))
         except (SyntaxError, ValueError) as e:
             # 'as e' replaces the Python-2-only 'except X, e' spelling
             errors.append("Parsing error: (%s, %s): %s" % (var_name, dataset_name, str(e)))
     # BUG FIX: the collected error list was previously built and then discarded
     return errors
コード例 #19
0
 def test_expression(self):
     """An expression: the generated variable must be accessible through its
     short name, and its dependencies must be correct."""
     expr = "2*sqrt(my_variable+10)"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset',
                         table_data={"my_variable": array([4, -8, 0.5, 1]),
                                     "id": array([1, 2, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='dataset',
                       id_name="id", dataset_name="mydataset")
     should_be = array([7.48331477, 2.82842712, 6.4807407, 6.63324958])
     self.assert_(ma.allclose(dataset.compute_variables([expr]), should_be, rtol=1e-6),
                  "Error in test_expression")
     # the factory-built variable keeps the expression as its name
     v = VariableName(expr)
     var = VariableFactory().get_variable(v, dataset)
     self.assertEqual(var.name(), expr, msg="name is incorrect")
     # and depends only on the primary attribute it references
     self.assertEqual(var.dependencies(), ['mydataset.my_variable'],
                      msg="dependencies are incorrect")
     # the short name should now compute to the same values
     result2 = dataset.compute_variables([v.get_short_name()])
     self.assert_(ma.allclose(result2, should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
    def _compute_if_needed(self, name, dataset_pool, resources=None, quiet=False, version=None):
        """ Compute variable given by the argument 'name' only if this variable
        has not been computed before.
        Check first if this variable belongs to dataset1 or dataset2.
        dataset_pool holds available datasets.
        Returns the resulting attribute version number.
        """
        # Normalize 'name' to a VariableName instance.
        if not isinstance(name, VariableName):
            variable_name = VariableName(name)
        else:
            variable_name = name
        short_name = variable_name.get_alias()

        dataset_name = variable_name.get_dataset_name()
        if dataset_name == self.get_dataset_name():
            # The variable lives on this dataset; defer to the parent class.
            new_version = UrbansimDataset._compute_if_needed(self, variable_name, dataset_pool, resources, quiet=quiet, version=version)
        else:
            # Otherwise the owner must be one of the two component datasets.
            if dataset_name == self.dataset1.get_dataset_name():
                owner_dataset = self.dataset1
            elif dataset_name == self.dataset2.get_dataset_name():
                owner_dataset = self.dataset2
            else:
                self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
                                variable_name.get_expression())
            # Compute on the owner, then pull the values over onto this
            # dataset via a disaggregate expression under the short name.
            owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
            new_version =  self.compute_variables_return_versions_and_final_value("%s = %s.disaggregate(%s.%s)" % \
                                   ( short_name, self.get_dataset_name(), owner_dataset.get_dataset_name(), short_name ),
                                   dataset_pool=dataset_pool, resources=resources, quiet=quiet )[0]
        return new_version
コード例 #21
0
 def __init__(self, variable_name, observed_data, filename=None,  transformation=None, inverse_transformation=None,
              filter=None, match=False, dependent_datasets=None, **kwargs):
     """  'variable_name' is a quantity about which we have data available.
     'observed_data' is of type ObservedData, it is the grouping parent.
     'filename' is the name of file where
     the data is stored. It can be None, if the observed_data.directory is a cache.
     'transformation' is an operation to be performed on the data (e.g. sqrt, log),
     'inverse_transformation' is the inverse function of 'transformation'. If it not given, it
     is determined automatically.
     'filter' is a variable that will be applied to both, the observed data and the simulated data.
     'match' (logical) determines if the dataset should be matched (by ids) with the simulated dataset. Elements
     that don't match are eliminated from the simulated dataset.
     'dependent_datasets' (if any) should be a dictionary of dataset_name:{'filename': filename, 'match': True|False, **kwargs}.
     They will be added to the dataset_pool.
     Remaining arguments are passed into DatasetFactory, thus it can contain information about how
     to create the corresponding dataset.
     """
     # BUG FIX: the default used to be the mutable literal {}, which is
     # shared across all calls; None + a local fallback is safe.
     if dependent_datasets is None:
         dependent_datasets = {}
     self.variable_name = VariableName(variable_name)
     self.dataset_name = self.variable_name.get_dataset_name()
     dataset_pool = observed_data.get_dataset_pool()
     self.matching_datasets = {}

     if dataset_pool is None:
         # No pool available -- build the dataset straight from storage.
         kwargs.update({'in_storage': observed_data.get_storage(), 'in_table_name': filename})
         try:
             self.dataset = DatasetFactory().search_for_dataset(self.dataset_name, observed_data.get_package_order(), arguments=kwargs)
         except Exception: # take generic dataset
             self.dataset = Dataset(dataset_name=self.dataset_name, **kwargs)
     else:
         self.dataset = dataset_pool.get_dataset(self.dataset_name)
     if match:
         self.add_match(self.dataset)
     # Create any dependent datasets and place them into the pool.
     for dep_dataset_name, info in dependent_datasets.iteritems():
         if dataset_pool is None:
             dataset_pool = DatasetPool(storage=observed_data.get_storage(), package_order=observed_data.get_package_order())
         info.update({'in_storage': observed_data.get_storage(), 'in_table_name': info.get('filename')})
         del info['filename']
         # use a separate local so the 'match' argument of this constructor
         # is not clobbered (the original reused the parameter name)
         dep_match = info.pop('match', False)
         try:
             dep_dataset = DatasetFactory().search_for_dataset(dep_dataset_name, observed_data.get_package_order(), arguments=info)
         except Exception: # take generic dataset
             dep_dataset = Dataset(dataset_name=dep_dataset_name, **info)
         dataset_pool.replace_dataset(dep_dataset_name, dep_dataset)
         if dep_match:
             self.add_match(dep_dataset)
     # Make sure the quantity itself is computed on the dataset.
     if self.variable_name.get_alias() not in self.dataset.get_known_attribute_names():
         self.dataset.compute_variables([self.variable_name], dataset_pool=dataset_pool)
     if filter is not None:
         # Keep only elements passing the filter, and remember the index
         # so the simulated data can be matched to the same subset.
         filter_values = self.dataset.compute_variables([filter], dataset_pool=dataset_pool)
         idx = where(filter_values > 0)[0]
         self.add_match(self.dataset, idx)
         self.dataset.subset_by_index(idx)
     self.transformation = transformation
     self.inverse_transformation = inverse_transformation
     if (self.transformation is not None) and (self.inverse_transformation is None):
         self.inverse_transformation = self.transformation_pairs[self.transformation]
コード例 #22
0
 def __init__(self, name, constant_string="constant", reserved_name_prefix='__'):
     """Parse 'name' and flag whether it denotes a constant or a reserved name."""
     VariableName.__init__(self, name)
     # classification flags derived from the parsed name
     self.is_constant = self.is_variable_constant(constant_string)
     self.is_reserved_name = self.is_variable_reserved_name(reserved_name_prefix)
コード例 #23
0
 def compute_expression(self, attribute_name):
     """Compute any expression and return its values."""
     name = VariableName(attribute_name)
     # the expression is evaluated on the dataset it names, using the pool
     # from the running model system's year namespace
     owner = self.get_dataset(name.get_dataset_name())
     pool = self.model_system.run_year_namespace["dataset_pool"]
     return owner.compute_variables([name], dataset_pool=pool)
コード例 #24
0
    def compute(self, indicator, year):
        """Compute the indicator's attribute for *year* and record it.

        'DDDD' in the attribute string is a placeholder replaced by the year.
        """
        attribute = indicator.attribute.replace('DDDD', repr(year))
        vname = VariableName(attribute)
        # Only compute when the alias is not already known on the dataset.
        if vname.get_alias() not in self.dataset.get_known_attribute_names():
            self.dataset.compute_variables(vname)
        self.computed.append((attribute, year))
コード例 #25
0
 def compute_m(self, year, quantity_of_interest):
     """Fill self.m (dataset size x number_of_runs) with the transformed
     values of *quantity_of_interest* computed for each run in *year*;
     self.m_ids gets the dataset's id attribute."""
     variable_name = VariableName(quantity_of_interest)
     dataset_name = variable_name.get_dataset_name()
     transformation = self.transformation_pair_for_prediction[0]
     for run in range(self.number_of_runs):
         ds = self._compute_variable_for_one_run(
             run, variable_name, dataset_name, year,
             self.observed_data.get_quantity_object(quantity_of_interest))
         if run == 0:
             # Allocate the result matrix once the dataset size is known.
             self.m = zeros((ds.size(), self.number_of_runs), dtype=float32)
             self.m_ids = ds.get_id_attribute()
         self.m[:, run] = try_transformation(ds.get_attribute(variable_name),
                                             transformation)
コード例 #26
0
 def prepare_for_run(self, expressions_to_compute=None, dataset_pool=None):
     """Pre-compute the given variable expressions on their datasets.

     expressions_to_compute: sequence of (fully-qualified) expressions; each
         is computed on the dataset named in it.
     dataset_pool: pool used to resolve datasets; defaults to the pool of the
         current SessionConfiguration.
     """
     if expressions_to_compute is None:
         return
     if dataset_pool is None:
         dataset_pool = SessionConfiguration().get_dataset_pool()
     for expression in expressions_to_compute:
         vn = VariableName(expression)
         dataset = dataset_pool[vn.get_dataset_name()]
         # Bug fix: pass the pool so cross-dataset dependencies resolve
         # (it was fetched above but never forwarded).
         dataset.compute_variables(expression, dataset_pool=dataset_pool)
コード例 #27
0
 def run(self, year, cache_directory=None):
     """The class is initialized with the appropriate configuration info from the 
     travel_model_configuration part of this config, and then copies the specified 
     UrbanSim data into files for daysim to read.
     The variables/expressions to export are defined in the node travel_model_configuration/urbansim_to_tm_variable_mapping
     of the configuration file.

     Returns the result of self._call_input_file_writer.
     """
     if cache_directory is None:
         cache_directory = self.config['cache_directory']
     # Point the simulation state at the cache for the requested year.
     simulation_state = SimulationState()
     simulation_state.set_cache_directory(cache_directory)
     simulation_state.set_current_time(year)
     attribute_cache = AttributeCache()
     sc = SessionConfiguration(new_instance=True,
                               package_order=self.config['dataset_pool_configuration'].package_order,
                               in_storage=attribute_cache)
     dataset_pool = sc.get_dataset_pool()
     tm_config = self.config['travel_model_configuration']
     data_to_export = tm_config['urbansim_to_tm_variable_mapping']
     
     table_names = data_to_export.keys()
     variable_names = {}   # dataset name -> list of exported attribute aliases
     datasets = {}         # dataset name -> dataset (possibly a filtered subset)
     filenames = {}        # dataset name -> output table name
     in_table_names = {}   # dataset name -> source table name in the mapping
     for table_name in table_names:
         # '__filter__' and '__out_table_name__' are control entries of the
         # mapping, not variables to export; pop them out first.
         filter = data_to_export[table_name].get('__filter__', None)
         if filter is not None:
             del data_to_export[table_name]['__filter__']
         out_table_name = data_to_export[table_name].get('__out_table_name__', None)
         if out_table_name is not None:
             del data_to_export[table_name]['__out_table_name__']
         else:
             out_table_name = table_name
         # Turn each alias/expression pair into an 'alias = expression' string.
         variables_to_export = map(lambda alias: "%s = %s" % (alias, data_to_export[table_name][alias]), data_to_export[table_name].keys())
         dataset_name = None            
         for var in variables_to_export:
             var_name = VariableName(var)
             if dataset_name is None:
                 # The first variable of a table determines its dataset.
                 dataset_name = var_name.get_dataset_name()
                 ds = dataset_pool.get_dataset(dataset_name)
                 
                 datasets[dataset_name] = ds
                 filenames[dataset_name] = out_table_name
                 in_table_names[dataset_name] = table_name
                 if dataset_name not in variable_names.keys():
                     variable_names[dataset_name] = []
             variable_names[dataset_name].append(var_name.get_alias())                
             ds.compute_variables([var_name], dataset_pool=dataset_pool)
         if filter is not None:
             # Restrict the dataset to members for which the filter is positive.
             filter_idx = where(ds.compute_variables(["__filter__ = %s" % filter], dataset_pool=dataset_pool)>0)[0]
             ds = DatasetSubset(ds, index = filter_idx)
             datasets[dataset_name] = ds
             
     return self._call_input_file_writer(year, datasets, in_table_names, filenames, variable_names, dataset_pool)
コード例 #28
0
 def test_fully_qualified_variable(self):
     # an expression consisting of nothing but a fully-qualified variable name
     expr = "opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage("dict_storage")
     storage.write_table(table_name="test_agents",
                         table_data={"income": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     ds = Dataset(in_storage=storage, in_table_name="test_agents", id_name="id", dataset_name="test_agent")
     values = ds.compute_variables([expr])
     expected = array([2, 10, 20])
     self.assert_(ma.allclose(values, expected, rtol=1e-6), "Error in test_fully_qualified_variable")
     # expr should now be in the cache of known expressions
     # (normally we shouldn't be accessing this private field, but just this once ...)
     cache = VariableName._cache
     self.assert_(expr in cache, msg="did not find expr in cache")
     # the access methods of the parsed name should all agree
     vn = VariableName(expr)
     self.assertEqual(vn.get_package_name(), "opus_core", msg="bad value for package")
     self.assertEqual(vn.get_dataset_name(), "test_agent", msg="bad value for dataset")
     self.assertEqual(vn.get_short_name(), "income_times_2", msg="bad value for shortname")
     self.assertEqual(vn.get_alias(), "income_times_2", msg="bad value for alias")
     self.assertEqual(vn.get_autogen_class(), None, msg="bad value for autogen_class")
     # the short name should now work in an expression as well
     short_values = ds.compute_variables(["income_times_2"])
     self.assert_(ma.allclose(short_values, expected, rtol=1e-6), "Error in accessing a_test_variable")
     # the cache key should be the expression with whitespace removed
     size_before = len(cache)
     expr_with_spaces = "opus_core . test_agent. income_times_2  "
     vn_spaces = VariableName(expr_with_spaces)
     size_after = len(cache)
     self.assertEqual(size_before, size_after, msg="caching error")
     self.assert_(expr_with_spaces not in cache, msg="caching error")
     self.assertEqual(expr_with_spaces, vn_spaces.get_expression(), msg="caching error")
     self.assertEqual(vn_spaces.get_short_name(), "income_times_2", msg="bad value for shortname")
    def __init__(
            self,
            proposal_set,
            weight_string="exp_roi=exp(urbansim_parcel.development_project_proposal.expected_rate_of_return_on_investment)",
            filter_attribute=None,
            run_config=None,
            estimate_config=None,
            debuglevel=0,
            dataset_pool=None):
        """
        This model samples project proposals from proposal set weighted by weight_string.

        proposal_set: dataset of development project proposals to sample from.
        weight_string: expression computed on the proposal set and used as the
            sampling weight; if None, all proposals get equal weight.
        filter_attribute: optional expression whose values multiply the
            weights (e.g. a 0/1 filter zeroing out ineligible proposals).
        """
        self.proposal_set = proposal_set
        if self.proposal_set.n <= 0:
            ## to be skipped if proposal_set has no data
            return

        self.dataset_pool = self.create_dataset_pool(
            dataset_pool,
            pool_packages=['urbansim_parcel', 'urbansim', 'opus_core'])
        self.dataset_pool.add_datasets_if_not_included(
            {proposal_set.get_dataset_name(): proposal_set})

        # Build the proposal-component set from templates unless the pool
        # already provides one.
        if not self.dataset_pool.has_dataset(
                "development_project_proposal_component"):
            self.proposal_component_set = create_from_proposals_and_template_components(
                proposal_set,
                self.dataset_pool.get_dataset(
                    'development_template_component'))
            self.dataset_pool.replace_dataset(
                self.proposal_component_set.get_dataset_name(),
                self.proposal_component_set)
        else:
            self.proposal_component_set = self.dataset_pool.get_dataset(
                "development_project_proposal_component")

        if weight_string is not None:
            # Compute the weight expression only when not already known.
            if VariableName(weight_string).get_alias(
            ) not in self.proposal_set.get_known_attribute_names():
                self.proposal_set.compute_variables(
                    weight_string, dataset_pool=self.dataset_pool)
            self.weight = self.proposal_set.get_attribute(
                weight_string).astype("float64")
        else:
            self.weight = ones(self.proposal_set.size(),
                               dtype="float32")  #equal weight

        ## handling of filter_attribute
        if filter_attribute is not None:
            if VariableName(filter_attribute).get_alias(
            ) not in self.proposal_set.get_known_attribute_names():
                self.proposal_set.compute_variables(filter_attribute)

            # Weights are multiplied by the filter values, so zeros drop
            # the corresponding proposals from sampling.
            self.weight = self.weight * self.proposal_set.get_attribute(
                filter_attribute)
コード例 #30
0
 def get_before_after_attribute(self, attribute_name):
     """Return {'before': values reloaded from the cache,
     'after': current in-memory values} for the given attribute."""
     from opus_core.store.attribute_cache import AttributeCache
     var_name = VariableName(attribute_name)
     cache_storage = AttributeCache(self.simulation_state.get_cache_directory())
     package_order = self.get_dataset_pool().get_package_order()
     ds = self._get_before_after_dataset_from_attribute(
         var_name, storage=cache_storage, package_order=package_order)
     alias = var_name.get_alias()
     return {
         'after': ds[alias],
         'before': ds.get_attribute('%s_reload__' % alias),
     }
コード例 #31
0
 def get_before_after_attribute(self, attribute_name):
     """Return a dict with the attribute 'before' (array reloaded from the
     cache) and 'after' (array with the current values)."""
     from opus_core.store.attribute_cache import AttributeCache
     vname = VariableName(attribute_name)
     ds = self._get_before_after_dataset_from_attribute(
         vname,
         storage=AttributeCache(self.simulation_state.get_cache_directory()),
         package_order=self.get_dataset_pool().get_package_order())
     alias = vname.get_alias()
     before_values = ds.get_attribute('%s_reload__' % alias)
     return {'after': ds[alias], 'before': before_values}
コード例 #32
0
 def compute_m(self, year, quantity_of_interest, values=None, ids=None):
     """Set self.m to the across-runs average (as a column vector) of the
     transformed *quantity_of_interest*; when *values*/*ids* are supplied,
     delegate to _get_m_from_values instead of recomputing per run."""
     if values is not None and ids is not None:
         self._get_m_from_values(values, ids)
         return
     variable_name = VariableName(quantity_of_interest)
     dataset_name = variable_name.get_dataset_name()
     n_runs = self.cache_set.size
     m = None
     for run in range(n_runs):
         ds = self._compute_variable_for_one_run(run, variable_name, dataset_name, year)
         if m is None:
             # Allocate once the dataset size is known (first run).
             m = zeros((ds.size(), n_runs), dtype=float32)
             self.m_ids = ds.get_id_attribute()
         m[:, run] = try_transformation(ds.get_attribute(variable_name),
                                        self.transformation_pair_for_prediction[0])
     self.m = resize(average(m, axis=1), (m.shape[0], 1))
コード例 #33
0
 def _get_data(self, year, dataset_name, attribute_name):
     """Compute *attribute_name* on *dataset_name* for *year* and return
     its values, restoring the simulation clock afterwards."""
     saved_year = SimulationState().get_current_time()
     SimulationState().set_current_time(year)
     dataset = DatasetFactory().get_dataset(
         dataset_name, package='urbansim', subdir='datasets',
         arguments={'in_storage': AttributeCache()})
     dataset.compute_variables(attribute_name,
                               resources=self.simulation.config)
     values = dataset.get_attribute(VariableName(attribute_name).get_short_name())
     SimulationState().set_current_time(saved_year)
     return values
 def __init__(self, regression_procedure="opus_core.linear_regression",
              submodel_string=None, outcome_attribute = None,
              run_config=None, estimate_config=None, debuglevel=None, dataset_pool=None):
     """'outcome_attribute' must be specified in order to compute the residuals.
     It is wrapped into a VariableName if it is not one already.
     """
     RegressionModel.__init__(self,
                              regression_procedure=regression_procedure,
                              submodel_string=submodel_string,
                              run_config=run_config,
                              estimate_config=estimate_config,
                              debuglevel=debuglevel, dataset_pool=dataset_pool)
     if outcome_attribute is None or isinstance(outcome_attribute, VariableName):
         self.outcome_attribute = outcome_attribute
     else:
         self.outcome_attribute = VariableName(outcome_attribute)
コード例 #35
0
def get_variables_coefficients_from_list(list_spec, definition=None):
    """Parse a list specification into parallel lists of variables,
    coefficient names and fixed values.

    Each item of *list_spec* may be:
      - a string: a variable long name or alias; the coefficient name
        defaults to the variable's alias,
      - a tuple/list: (variable[, coefficient[, fixed_value]]),
      - a dict with one entry: {variable: coefficient}.

    Returns (variables, coefficients, fixed_values, error) where *error*
    is True when an item had an unsupported format.
    """
    # Avoid the shared-mutable-default pitfall ({} as default argument).
    if definition is None:
        definition = {}
    variables = []
    coefficients = []
    fixed_values = []
    error = False

    for var_coef in list_spec:
        if isinstance(var_coef, str):
            # var_coef is just a variable long name or alias
            var_name, var_index = get_full_variable_specification_for_var_coef(
                var_coef, definition)
            variables.append(var_name)
            if var_index is not None:
                coefficients.append(definition["coefficients"][var_index])
                fixed_values.append(definition["fixed_values"][var_index])
            else:
                coefficients.append(VariableName(var_coef).get_alias())
                fixed_values.append(0)
        elif isinstance(var_coef, (tuple, list)):
            var_name, var_index = get_full_variable_specification_for_var_coef(
                var_coef[0], definition)
            variables.append(var_name)
            if len(var_coef) == 1:  # coefficient name is created from variable alias
                coefficients.append(VariableName(var_coef[0]).get_alias())
                fixed_values.append(0)
            elif len(var_coef) > 1:  # coefficient names explicitly given
                coefficients.append(var_coef[1])
                # third item, when present, is the coefficient fixed value
                fixed_values.append(var_coef[2] if len(var_coef) > 2 else 0)
            else:
                logger.log_error("Wrong specification format for variable %s" %
                                 var_coef)
                error = True
        elif isinstance(var_coef, dict):
            # next(iter(...)) works on both Python 2 and 3;
            # dict.keys()[0] / dict.values()[0] is Python-2-only.
            key = next(iter(var_coef))
            var_name, var_index = get_full_variable_specification_for_var_coef(
                key, definition)
            variables.append(var_name)
            coefficients.append(var_coef[key])
            fixed_values.append(0)
        else:
            logger.log_error("Wrong specification format for variable %s" %
                             var_coef)
            error = True

    return (variables, coefficients, fixed_values, error)
コード例 #36
0
 def compute_values_from_multiple_runs(self, year, quantity_of_interest, dtype='float32', dataset_arguments=None):
     """
     'quantity_of_interest' is a variable name in its fully-qualified name.
     Return a matrix of size (dataset.size x number_of_runs), with values of the variable
     for each dataset member and run. Dataset is the one to which the 
     quantity_of_interest belongs to. 
     """
     # None instead of a mutable {} default avoids cross-call sharing.
     if dataset_arguments is None:
         dataset_arguments = {}
     variable_name = VariableName(quantity_of_interest)
     dataset_name = variable_name.get_dataset_name()
     result = None
     for i in range(self.cache_set.size):
         ds = self._compute_variable_for_one_run(i, variable_name, dataset_name, year,
                                                 dataset_arguments=dataset_arguments)
         if result is None:  # first run: allocate once the dataset size is known
             result = zeros((ds.size(), self.cache_set.size), dtype=dtype)
         result[:, i] = ds.get_attribute(variable_name)
     return result
コード例 #37
0
 def create_and_check_qualified_variable_name(self, name):
     """Convert name to a VariableName if it isn't already, and add dataset_name to
     the VariableName if it is missing.  If it already has a dataset_name, make sure
     it is the same as the name of this dataset.
     """
     vname = name if isinstance(name, VariableName) else VariableName(name)
     if vname.get_dataset_name() is None:
         # Unqualified name: qualify it with this dataset's name.
         vname.set_dataset_name(self.get_dataset_name())
     else:
         self._check_dataset_name(vname)
     return vname
コード例 #38
0
 def test_expression_2vars(self):
     # an expression combining two primary attributes
     expr = "2*sqrt(var1+var2)"
     storage = StorageFactory().get_storage('dict_storage')
     table = {
         "var1": array([4, -8, 0.5, 1]),
         "var2": array([3, 3, 7, 7]),
         "id": array([1, 2, 3, 4]),
     }
     storage.write_table(table_name='dataset', table_data=table)
     ds = Dataset(in_storage=storage,
                  in_table_name='dataset',
                  id_name="id",
                  dataset_name="mydataset")
     values = ds.compute_variables([expr])
     expected = array([5.29150262, 0.0, 5.47722558, 5.65685425])
     self.assert_(ma.allclose(values, expected, rtol=1e-6),
                  "Error in test_expression_2vars")
     # the expression must depend on both primary attributes; compare as
     # sets since the order of the dependencies is unspecified
     var = VariableFactory().get_variable(VariableName(expr), ds)
     self.assertEqual(set(var.dependencies()),
                      set(['mydataset.var1', 'mydataset.var2']),
                      msg="dependencies are incorrect")
コード例 #39
0
    def _get_data(self, year, dataset_name, attribute_name):
        """Return the values of *attribute_name* computed on *dataset_name*
        for *year*; the simulation clock is restored before returning."""
        saved_year = SimulationState().get_current_time()
        SimulationState().set_current_time(year)
        factory_args = {
            'package': 'urbansim',
            'subdir': 'datasets',
            'arguments': {'in_storage': AttributeCache()},
        }
        dataset = DatasetFactory().get_dataset(dataset_name, **factory_args)
        dataset.compute_variables(attribute_name,
                                  resources=self.simulation.config)
        values = dataset.get_attribute(
            VariableName(attribute_name).get_short_name())
        SimulationState().set_current_time(saved_year)
        return values
コード例 #40
0
    def prepare_for_run(self,
                        specification_storage=None,
                        specification_table=None,
                        coefficients_storage=None,
                        coefficients_table=None,
                        agent_set=None,
                        agents_filter=None,
                        data_objects=None,
                        **kwargs):
        """Load specification and coefficients; when *agents_filter* is given,
        compute it on *agent_set* and return the indices of agents that pass.

        Returns (specification, coefficients, index); *index* is None when no
        filter was supplied.
        """
        spec, coeff = prepare_specification_and_coefficients(
            specification_storage=specification_storage,
            specification_table=specification_table,
            coefficients_storage=coefficients_storage,
            coefficients_table=coefficients_table,
            **kwargs)

        # Bug fix: 'index' was previously unbound (NameError at the return)
        # whenever agents_filter was None.
        index = None
        if agents_filter is not None:
            agent_set.compute_variables(agents_filter,
                                        resources=Resources(data_objects))
            filter_alias = VariableName(agents_filter).get_alias()
            index = where(agent_set.get_attribute(filter_alias) > 0)[0]

        return (spec, coeff, index)
コード例 #41
0
 def aggregate(self, base_pkg, base_dataset, base_attribute, intermediates,
               function):
     """Aggregate 'base_attribute' of 'base_dataset' onto this variable's
     dataset, optionally through the chain of 'intermediates' datasets,
     combining values with 'function' (None means default aggregation).

     Returns the aggregated values coerced to this dataset.
     """
     dataset = self._get_dataset()
     if intermediates == []:
         # Direct aggregation: use the attribute of base_dataset as-is.
         aggregated_dataset = base_dataset
         dependent_attribute = base_attribute
     else:
         # Build a nested 'aggregate(...)' expression through the
         # intermediate datasets; the last intermediate is the dataset whose
         # values are finally aggregated onto this one.
         expr = make_aggregation_call('aggregate', base_pkg, base_dataset,
                                      base_attribute, function,
                                      intermediates)
         aggregated_dataset = intermediates[-1]
         dependent_attribute = VariableName(expr).get_alias()
     ds = self._dataset_pool.get_dataset(aggregated_dataset)
     # The aggregated dataset needs this dataset's id attribute available
     # for aggregate_dataset_over_ids below.
     ds.compute_one_variable_with_unknown_package(
         dataset.get_id_name()[0], dataset_pool=self._dataset_pool)
     if function is None:
         result = dataset.aggregate_dataset_over_ids(
             ds, attribute_name=dependent_attribute)
     else:
         result = dataset.aggregate_dataset_over_ids(
             ds, function=function, attribute_name=dependent_attribute)
     # Register the id attribute as a solved dependency of this variable.
     self._var.add_and_solve_dependencies(
         [ds._get_attribute_box(dataset.get_id_name()[0])],
         dataset_pool=self._dataset_pool)
     return self._coerce_result(result, dataset)
コード例 #42
0
class ln_sampling_probability_for_bias_correction_mnl(Variable):
    """Abstract variable to be used for correcting for sampling bias when sampling alternatives.
    It is assumed to be an interaction variable. The init function gets the name of the attribute that is used 
    for weighting alternatives in the model. It doesn't need to be normalized, that is done within the function.
    """
    def __init__(self, weights_attribute):
        # Name of the attribute (on the alternatives dataset) used as the
        # sampling weight.
        self.weights_attribute_name = weights_attribute
        Variable.__init__(self)
        
    def dependencies_to_add(self, dataset_name, package="urbansim"):
        """Will be added to the dependencies from the compute method, because before that we don't 
        know the dataset name."""
        self.weights_attribute = VariableName("%s.%s.%s" % (package, dataset_name, self.weights_attribute_name))
        # Four expressions: the raw weights, their normalization to sum 1,
        # and the logs of the normalized weights and of their complement.
        return [self.weights_attribute.get_expression(),
                "_normalized_weights_%s = %s/float(sum(%s))" % (self.weights_attribute_name, self.weights_attribute.get_expression(), self.weights_attribute.get_expression()),
                "_log_weights_%s = ln(%s._normalized_weights_%s)" % (self.weights_attribute_name, self.weights_attribute.get_dataset_name(), self.weights_attribute_name),
                "_log_1_minus_weights_%s = ln(1 - %s._normalized_weights_%s)" % (self.weights_attribute_name, self.weights_attribute.get_dataset_name(), self.weights_attribute_name)]
        
    def compute(self, dataset_pool):
        # This variable's dataset is an interaction set; dataset 2 holds
        # the alternatives carrying the weight attribute.
        ds = self.get_dataset() # interaction dataset
        self.add_and_solve_dependencies(self.dependencies_to_add(ds.get_dataset(2).get_dataset_name()), dataset_pool)
        log_1_minus_weights = ds.get_dataset(2).get_attribute("_log_1_minus_weights_%s" % self.weights_attribute_name)
        # Total log(1-w) over all alternatives, minus the per-row sums over
        # the sampled alternatives, with the log-weight correction terms.
        result = log_1_minus_weights.sum() - ds.get_attribute("_log_1_minus_weights_%s" % self.weights_attribute_name).sum(axis=1).reshape((ds.get_reduced_n(),1)) - \
               ds.get_attribute("_log_weights_%s" % self.weights_attribute_name) + ds.get_attribute("_log_weights_%s" % self.weights_attribute_name).sum(axis=1).reshape((ds.get_reduced_n(),1))
        return result - result.max() # shift the values to zero
コード例 #43
0
    def get_indicator(self,
                      indicator_name,
                      dataset_name,
                      indicator_definition=None):
        """Build an Indicator for (indicator_name, dataset_name).

        When *indicator_definition* (an (attribute, source) pair) is not
        given, the pair is looked up among the project's indicator nodes.
        """
        if indicator_definition is not None:
            attribute, source = indicator_definition
        else:
            attribute = None
            for indicator_node in get_available_indicator_nodes(self.project):
                node_dataset, node_name = get_variable_dataset_and_name(indicator_node)
                if node_name == indicator_name and node_dataset == dataset_name:
                    attribute = (indicator_node.text or '').strip()
                    source = indicator_node.get('source')
                    break
            if attribute is None:
                raise Exception('Could not find an indicator %s for dataset %s'\
                                 %(indicator_name, dataset_name))

        # Make sure that expressions are prepended by their names
        # WAS a check on source == 'expression', but that fails if the
        # expression includes an argument like 'function=mean'
        if is_anonymous_autogen_name(VariableName(attribute).get_short_name()):
            attribute = str(indicator_name) + '=' + attribute

        return Indicator(name=indicator_name,
                         dataset_name=dataset_name,
                         attribute=attribute)
コード例 #44
0
    def generate_posterior_distribution(
            self,
            year,
            quantity_of_interest,
            procedure="opus_core.bm_normal_posterior",
            use_bias_and_variance_from=None,
            transformed_back=True,
            aggregate_to=None,
            intermediates=None,
            **kwargs):
        """
        'quantity_of_interest' is a variable name about which we want to get the posterior distribution.
        If there is multiple known_output, it must be made clear from which one the bias and variance
        is to be used (argument use_bias_and_variance_from) If it is None, the first known output is used.

        If 'transformed_back' is True and a transformation was used for the
        prediction, the sampled values are transformed back. When
        'aggregate_to' is given, values are aggregated from the dataset of
        'quantity_of_interest' to that dataset through 'intermediates'.
        """
        # 'intermediates' used a mutable [] default before; use None to avoid
        # sharing one list object across calls.
        if intermediates is None:
            intermediates = []
        self.set_posterior(year, quantity_of_interest,
                           use_bias_and_variance_from)
        procedure_class = ModelComponentCreator().get_model_component(
            procedure)
        self.simulated_values = procedure_class.run(self, **kwargs)
        if transformed_back and (self.transformation_pair_for_prediction[0]
                                 is not None):  # need to transform back
            self.simulated_values = try_transformation(
                self.simulated_values,
                self.transformation_pair_for_prediction[1])
        if aggregate_to is not None:
            self.simulated_values = self.aggregate(
                self.simulated_values,
                aggregate_from=VariableName(
                    quantity_of_interest).get_dataset_name(),
                aggregate_to=aggregate_to,
                intermediates=intermediates)

        return self.simulated_values
コード例 #45
0
 def summary_before_after(self, attribute_name):
     """Print a summary of the given attribute 'before' (values reloaded
     from the cache) and 'after' (current values)."""
     from opus_core.store.attribute_cache import AttributeCache
     vname = VariableName(attribute_name)
     ds = self._get_before_after_dataset_from_attribute(
         vname,
         storage=AttributeCache(self.simulation_state.get_cache_directory()),
         package_order=self.model_system.run_year_namespace["dataset_pool"].get_package_order())
     # Single-argument print(...) prints identically under Python 2 and 3.
     print('')
     print('Before model run:')
     print('=================')
     ds.summary(names=['%s_reload__' % vname.get_alias()])
     print('')
     print('After model run:')
     print('=================')
     ds.summary(names=[vname.get_alias()])
コード例 #46
0
 def variable_dependencies(self, name):
     """Prints out dependencies of this variable. 'name' can be either an alias from 
     the model specification or an expression."""
     from opus_core.variables.dependency_query import DependencyChart
     varname = None
     # Look for a specification variable whose alias matches 'name'
     # (iterate directly instead of the former range(len(...)) loop).
     for spec_var in self.get_specification().get_variable_names():
         if not isinstance(spec_var, VariableName):
             spec_var = VariableName(spec_var)
         if name == spec_var.get_alias():
             varname = spec_var
             break
     if varname is None:
         # Not a known alias -- treat 'name' itself as an expression.
         varname = VariableName(name)
     chart = DependencyChart(self.xml_configuration)
     chart.print_dependencies(varname.get_expression())
コード例 #47
0
 def dependencies_to_add(self, dataset_name, package="urbansim"):
     """Will be added to the dependencies from the compute method, because before that we don't 
     know the dataset name."""
     self.weights_attribute = VariableName("%s.%s.%s" % (package, dataset_name, self.weights_attribute_name))
     expr = self.weights_attribute.get_expression()
     attr = self.weights_attribute_name
     owner = self.weights_attribute.get_dataset_name()
     # Raw weights, their normalization, and logs of the normalized
     # weights and of their complement.
     return [
         expr,
         "_normalized_weights_%s = %s/float(sum(%s))" % (attr, expr, expr),
         "_log_weights_%s = ln(%s._normalized_weights_%s)" % (attr, owner, attr),
         "_log_1_minus_weights_%s = ln(1 - %s._normalized_weights_%s)" % (attr, owner, attr),
     ]
コード例 #48
0
 def match_agent_attribute_to_choice(self, name, dataset_pool=None):
     """ Return a tuple where the first element is a 2D array of the attribute 'name_{postfix}'. 
     It is assumed to be an attribute
     of dataset1 (possibly computed). {postfix} is created either by values of the attribute
     'name' of dataset2 (if it has any such attribute), or by the id values of dataset2.
     The second value of the resulting tuple is a list of dependent variables.
     """
     # NOTE(review): this tests for the literal attribute 'name', not for the
     # value of the 'name' parameter -- confirm this is intended.
     if 'name' in self.get_dataset(2).get_known_attribute_names():
         name_postfix = self.get_attribute_of_dataset('name', 2)
     else:
         name_postfix = self.get_id_attribute_of_dataset(2)
     # Fallback postfixes: always the id values of dataset2.
     name_postfix_alt = self.get_id_attribute_of_dataset(2)
     
     dependencies = []
     for i in range(self.get_reduced_m()):
         full_name = VariableName("%s_%s" % (name, name_postfix[i]))
         if full_name.get_dataset_name() is None:
             # Qualify an unqualified attribute with dataset1's name.
             full_name = VariableName("%s.%s" % (self.get_dataset(1).get_dataset_name(), full_name.get_expression()))
         try:
             self.get_dataset(1).compute_variables(full_name, dataset_pool=dataset_pool)
         except:
             # NOTE(review): bare except -- any failure triggers a retry with
             # the id-based postfix; consider narrowing the exception type.
             full_name = VariableName("%s_%s" % (name, name_postfix_alt[i]))
             if full_name.get_dataset_name() is None:
                 full_name = VariableName("%s.%s" % (self.get_dataset(1).get_dataset_name(), full_name.get_expression()))
             self.get_dataset(1).compute_variables(full_name, dataset_pool=dataset_pool)
         
         dependencies.append(full_name.get_expression())
         if i == 0:
             # The first column establishes the 2D result array.
             result = self.get_attribute(full_name)
         else:
             result[:,i] = self.get_attribute_of_dataset(full_name, 1)
     return result, dependencies
コード例 #49
0
 def run(self, datasets_variables=None, dataset_pool=None, flush_dataset=True):
     """
     datasets_variables is a dictionary where keys are dataset objects and each 
     value is a list of variables (as fully qualified names) to be computed.
     dataset_pool is passed into the variable computation.
     If 'flush_dataset' is True, the datasets given as keys in 'datasets_variables'
     are flushed to cache.
     """
     # Mutable {} default replaced by None (shared-default anti-pattern).
     if datasets_variables is None:
         datasets_variables = {}
     for dataset, variables in datasets_variables.items():
         dataset.compute_variables(variables, dataset_pool=dataset_pool)
         for var in variables:
             varname = VariableName(var)
             values = dataset.get_attribute(varname)
             # Re-register the computed values as a primary attribute
             # under the variable's alias.
             dataset.delete_one_attribute(varname)
             dataset.add_primary_attribute(values, varname.get_alias())
         if flush_dataset:
             dataset.flush_dataset()
コード例 #50
0
 def test_alias_attribute_same_name(self):
     # an alias that has the same name as the primary attribute it refers to
     expr = "persons = persons"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='tests',
                         table_data={"persons": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     ds = Dataset(in_storage=storage, in_table_name='tests', id_name="id", dataset_name="tests")
     values = ds.compute_variables([expr])
     self.assertEqual(ma.allclose(values, [1, 5, 10], rtol=1e-7), True, msg="error in test_alias_attribute")
     # the parsed name should use the attribute's own name throughout
     vn = VariableName(expr)
     self.assertEqual(vn.get_short_name(), 'persons', msg="bad value for shortname")
     self.assertEqual(vn.get_alias(), 'persons', msg="bad value for alias")
     self.assertEqual(vn.get_autogen_class(), None, msg="bad value for autogen_class")
コード例 #51
0
 def test_alias_complex_expression(self):
     # An alias bound to a full expression ("x = 2*sqrt(var1+var2)") must compute
     # correctly and afterwards be retrievable through the alias alone.
     expr = "x = 2*sqrt(var1+var2)"
     data = {"var1": array([4, -8, 0.5, 1]), "var2": array([3, 3, 7, 7]), "id": array([1, 2, 3, 4])}
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset', table_data=data)
     dataset = Dataset(in_storage=storage, in_table_name='dataset', id_name="id", dataset_name="mydataset")
     should_be = array([5.29150262, 0.0, 5.47722558, 5.65685425])
     result = dataset.compute_variables([expr])
     self.assert_(ma.allclose(result, should_be, rtol=1e-6), "Error in test_alias_complex_expression")
     # The parsed expression carries 'x' as its alias.
     parsed = VariableName(expr)
     self.assertEqual(parsed.get_alias(), 'x', msg="bad value for alias")
     # Recomputing through the alias alone yields the same values.
     result2 = dataset.compute_variables(['x'])
     self.assert_(ma.allclose(result2, should_be, rtol=1e-6), "Error in accessing a_test_variable")
コード例 #52
0
 def _solve_dependencies(self, dataset_pool):
     """Compute every current dependency of this variable and record the
     resulting attribute box and version back into self.dependencies_list."""
     owner = self.get_dataset()
     owner_name = owner.get_dataset_name()
     for idx, (item, version) in enumerate(self.get_current_dependencies()):
         if isinstance(item, str):
             dep_name = VariableName(item)
         else:
             # item should be an instance of AttributeBox
             dep_name = item.get_variable_name()
         target_name = dep_name.get_dataset_name()
         # Dependencies may live on this variable's own dataset or on another
         # dataset fetched from the pool.
         target = owner if target_name == owner_name else dataset_pool.get_dataset(target_name)
         (new_versions, value) = target.compute_variables_return_versions_and_final_value(
             [(dep_name, version)], dataset_pool)
         self.dependencies_list[idx] = (target._get_attribute_box(dep_name), new_versions[0])
コード例 #53
0
 def add_prefix_to_variable_names(self, variable_names, dataset, variable_package, resources):
     """Add a prefix of 'package.dataset_name.' to variable_names from resources.

     Each entry in variable_names is looked up in resources; if its value parses
     to an unqualified, non-autogenerated variable, the value is rewritten in
     resources with the package (when missing) and dataset-name prefix prepended.
     Does nothing when resources is None.
     """
     if resources is None:
         return
     if not isinstance(variable_names, list):
         variable_names = [variable_names]
     for variable_name in variable_names:
         variable_string = resources.get(variable_name, None)
         if variable_string is None:
             continue
         variable_string_name = VariableName(variable_string)
         # Only unqualified (no dataset name) non-autogenerated expressions
         # need qualifying; PEP 8: compare to None with 'is', not '=='.
         if (variable_string_name.get_dataset_name() is None) and \
                     (variable_string_name.get_autogen_class() is None):
             add_string = ""
             if variable_string_name.get_package_name() is None:
                 add_string = "%s." % variable_package
             add_string = add_string + dataset.get_dataset_name() + "."
             resources.merge({
                 variable_name: add_string + variable_string})