Example #1
0
 def test_alias_file(self):
     """Check that aliases defined in 'aliases.py' evaluate correctly.

     The first two expressions are aliases, the third is an ordinary
     variable, included to verify that aliases and plain variables
     interoperate correctly.
     """
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(
         table_name='test_agents',
         table_data={"income": array([1, 5, 10]), "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='test_agents',
                       id_name="id", dataset_name="test_agent")
     cases = (
         ("opus_core.test_agent.income_times_10", array([10, 50, 100])),
         ("opus_core.test_agent.income_times_5", array([5, 25, 50])),
         ("opus_core.test_agent.income_times_2", array([2, 10, 20])),
     )
     for expr, expected in cases:
         actual = dataset.compute_variables([expr])
         self.assert_(ma.allclose(actual, expected, rtol=1e-6),
                      "Error in test_alias_file")
Example #2
0
    def test_deletion_of_jobs_with_one_characteristics(self):
        """Verify AgentEventModel removes the expected jobs per sector.

        Runs the model for 2000 and 2001 and checks the per-gridcell job
        counts for sectors 1, 2 and 4 after each run.
        """
        dataset_pool = DatasetPool(storage=self.storage,
                                   package_order=["washtenaw", "urbansim", "opus_core"])
        gridcell_set = dataset_pool.get_dataset('gridcell')
        event_set = self._create_job_deletion_event_set_with_characteristics()
        jobs = dataset_pool.get_dataset("job")

        def counts(variable_name):
            # Recompute a per-gridcell job count after a model run.
            return gridcell_set.compute_variables(variable_name,
                                                  dataset_pool=dataset_pool)

        AgentEventModel().run(gridcell_set, event_set, jobs, 2000, dataset_pool)
        sector_1 = counts("urbansim.gridcell.number_of_jobs_of_sector_1")
        sector_2 = counts("urbansim.gridcell.number_of_jobs_of_sector_2")
        sector_4 = counts("urbansim.gridcell.number_of_jobs_of_sector_4")
        # 2000: the model removes 2 sector-1 jobs from gridcell 1 and
        # 5 sector-1 jobs from gridcell 5.
        self.assert_(ma.allclose(sector_1, array([2, 4, 4, 4, 0, 4, 4, 4, 4, 4])))
        # Other sectors are untouched.
        self.assert_(ma.allclose(sector_2, array(10 * [3])))
        self.assert_(ma.allclose(sector_4, array(10 * [3])))

        AgentEventModel().run(gridcell_set, event_set, jobs, 2001, dataset_pool)
        sector_1 = counts("urbansim.gridcell.number_of_jobs_of_sector_1")
        sector_2 = counts("urbansim.gridcell.number_of_jobs_of_sector_2")
        sector_4 = counts("urbansim.gridcell.number_of_jobs_of_sector_4")
        # 2001: remove 2 sector-2 jobs from gridcell 5, 1 sector-1 job from
        # gridcell 1, all sector-2 jobs from gridcell 2 and 70% of sector-2
        # jobs from gridcell 3.
        self.assert_(ma.allclose(sector_1, array([1, 4, 4, 4, 0, 4, 4, 4, 4, 4])))
        self.assert_(ma.allclose(sector_2, array([3, 0, 1, 3, 1, 3, 3, 3, 3, 3])))
Example #3
0
 def test_interaction_set_component(self):
     """A fully-qualified variable applied to one component of an interaction set."""
     expr = "opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={'id': array([1, 2, 3]),
                                     'income': array([1, 20, 500])})
     storage.write_table(table_name='test_locations',
                         table_data={'id': array([1,2]),
                                     'cost': array([1000, 2000])})
     dataset_pool = DatasetPool(package_order=['opus_core'], storage=storage)
     interaction = dataset_pool.get_dataset('test_agent_x_test_location')
     should_be = array([[2, 2], [40, 40], [1000, 1000]])
     # Computing the expression, reading the stored attribute and using the
     # short name must all yield the same values.
     computed = interaction.compute_variables(expr, dataset_pool=dataset_pool)
     self.assert_(ma.allclose(computed, should_be, rtol=1e-6), msg="Error in " + expr)
     stored = interaction.get_attribute('income_times_2')
     self.assert_(ma.allclose(stored, should_be, rtol=1e-6), msg="Error in " + expr)
     by_short_name = interaction.compute_variables(['income_times_2'])
     self.assert_(ma.allclose(by_short_name, should_be, rtol=1e-6), msg="Error in " + expr)
     # The dataset name of expr is the component set's name, since that is
     # the only dataset the expression mentions.
     self.assertEqual(VariableName(expr).get_dataset_name(), 'test_agent',
                      msg="bad value for dataset")
Example #4
0
 def test_join_by_rows(self):
     """join_by_rows appends ds2's rows to ds1 and leaves ds2 unchanged."""
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset1',
                         table_data={'id': array([2,4,6,8]),
                                     'attr': array([4,7,2,1])})
     storage.write_table(table_name='dataset2',
                         table_data={'id': array([1,5,9]),
                                     'attr': array([55,66,100])})
     ds1 = Dataset(in_storage=storage, in_table_name='dataset1', id_name='id')
     ds2 = Dataset(in_storage=storage, in_table_name='dataset2', id_name='id')
     ds1.join_by_rows(ds2)
     # ds1 now holds its own rows followed by ds2's; ds2 keeps only its own.
     self.assert_(ma.allclose(ds1.get_attribute('attr'),
                              array([4,7,2,1,55,66,100])))
     self.assert_(ma.allclose(ds2.get_attribute('attr'),
                              array([55,66,100])))
Example #5
0
 def test_alias_fully_qualified_variable_same_name(self):
     """Alias a fully-qualified variable to its own short name."""
     expr = "a_test_variable = opus_core.tests.a_test_variable"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='tests',
                         table_data={"a_dependent_variable": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     should_be = array([10, 50, 100])
     self.assert_(ma.allclose(dataset.compute_variables([expr]),
                              should_be, rtol=1e-6),
                  "Error in test_alias_fully_qualified_variable")
     # The alias alone must now resolve to the same values.
     self.assert_(ma.allclose(dataset.compute_variables(['a_test_variable']),
                              should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
     v = VariableName(expr)
     # Since the alias equals the short name, no autogen class is generated.
     self.assertEqual(v.get_autogen_class(), None,
                      msg="bad value for autogen_class")
     # And the alias is parsed as expected.
     self.assertEqual(v.get_alias(), 'a_test_variable',
                      msg="bad value for alias")
Example #6
0
def test():
    """Run the Tukey53H spike check on a tiny synthetic profile and compare
    the produced features and flags against precomputed reference values."""
    dummy_data = {
        'PRES': ma.masked_array([1.0, 100, 200, 300, 500, 5000]),
        'TEMP': ma.masked_array([27.44, 14.55, 11.96, 11.02, 7.65, 2.12]),
        'PSAL': ma.masked_array([35.71, 35.50, 35.13, 35.02, 34.72, 35.03])
        }
    # Reference feature values; the ends of the profile are masked because
    # the 5-point filter cannot be evaluated there.
    expected_features = {
            'tukey53H': ma.masked_array([0, 0, 0.3525000000000009,
                0.35249999999999915, 0, 0],
                mask=[True, True, False, False, True, True]),
            'tukey53H_norm': ma.masked_array([0, 0, 0.07388721803621254,
                0.07388721803621218, 0, 0],
                mask=[True, True, False, False, True, True])
            }
    expected_flags = {'tukey53H_norm': np.array([0, 0, 1, 1, 0, 0], dtype='i1')}

    cfg = {'l': 5, 'threshold': 6, 'flag_good': 1, 'flag_bad': 4}

    y = Tukey53H(dummy_data, 'TEMP', cfg)
    y.test()

    assert type(y.features) is dict
    for name in y.features:
        assert ma.allclose(y.features[name], expected_features[name])
    for name in y.flags:
        assert ma.allclose(y.flags[name], expected_flags[name])
Example #7
0
 def test_alias_with_delete_computed_attributes(self):
     """Deleting computed attributes must also clear the alias dictionary,
     so the same alias can later be bound to a different expression."""
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset',
                         table_data={"var1": array([4, -8, 0.5, 1]),
                                     "var2": array([3, 3, 7, 7]),
                                     "id": array([1, 2, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='dataset',
                       id_name="id", dataset_name="mydataset")
     first = dataset.compute_variables(["x = 2*sqrt(var1+var2)"])
     self.assert_(ma.allclose(first,
                              array([5.29150262, 0.0, 5.47722558, 5.65685425]),
                              rtol=1e-6),
                  "Error in test_alias_with_delete_computed_attributes")
     dataset.delete_computed_attributes()
     # Rebind x to a different expression; the stale alias must be gone.
     second = dataset.compute_variables(["x = var1+10"])
     self.assert_(ma.allclose(second, array([14, 2, 10.5, 11]), rtol=1e-6),
                  "Error in test_alias_with_delete_computed_attributes")
Example #8
0
def test_old_style_process_class(mp_tmpdir, cleantopo_tl, old_style_process_py):
    """Test correct processing using MapcheteProcess class.

    For every process tile at zooms 0-5: execute the tile, validate the
    output array, write it, then mosaic all tiles of the zoom level and
    compare the mosaic against a GDAL-built VRT of the written files.
    """
    config = cleantopo_tl.dict
    config.update(process_file=old_style_process_py)
    with mapchete.open(config) as mp:
        for zoom in range(6):
            tiles = []
            for tile in mp.get_process_tiles(zoom):
                output = mp.execute(tile)
                tiles.append((tile, output))
                assert isinstance(output, ma.MaskedArray)
                # BUGFIX: the original asserted `output.shape == output.shape`,
                # which is a tautology and always passes; compare the output
                # against the shape of the tile it was computed for instead.
                assert output.shape == tile.shape
                # at least some pixels must be valid (not fully masked)
                assert not ma.all(output.mask)
                mp.write(tile, output)
            mosaic, mosaic_affine = create_mosaic(tiles)
            try:
                temp_vrt = os.path.join(mp_tmpdir, str(zoom)+".vrt")
                # NOTE(review): os.system with a shell string is acceptable in
                # a test, but subprocess.run([...]) would avoid quoting issues.
                gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
                    temp_vrt, mp_tmpdir, zoom)
                os.system(gdalbuildvrt)
                with rasterio.open(temp_vrt, "r") as testfile:
                    # geotransforms of VRT and mosaic must match element-wise
                    for file_item, mosaic_item in zip(
                        testfile.meta["transform"], mosaic_affine
                    ):
                        assert file_item == mosaic_item
                    band = testfile.read(1, masked=True)
                    assert band.shape == mosaic.shape
                    assert ma.allclose(band, mosaic)
                    assert ma.allclose(band.mask, mosaic.mask)
            finally:
                # clean up written output regardless of assertion failures
                shutil.rmtree(mp_tmpdir, ignore_errors=True)
Example #9
0
 def test_fully_qualified_variable(self):
     """An expression consisting solely of a fully-qualified variable name."""
     expr = "opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage("dict_storage")
     storage.write_table(table_name="test_agents",
                         table_data={"income": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name="test_agents",
                       id_name="id", dataset_name="test_agent")
     should_be = array([2, 10, 20])
     self.assert_(ma.allclose(dataset.compute_variables([expr]), should_be, rtol=1e-6),
                  "Error in test_fully_qualified_variable")
     # expr should now be in the cache of known expressions (normally we
     # shouldn't access this private field, but just this once ...).
     cache = VariableName._cache
     self.assert_(expr in cache, msg="did not find expr in cache")
     # Every accessor of the parsed name returns the expected piece.
     name = VariableName(expr)
     for accessor, expected, what in (
             (name.get_package_name, "opus_core", "package"),
             (name.get_dataset_name, "test_agent", "dataset"),
             (name.get_short_name, "income_times_2", "shortname"),
             (name.get_alias, "income_times_2", "alias"),
             (name.get_autogen_class, None, "autogen_class")):
         self.assertEqual(accessor(), expected, msg="bad value for " + what)
     # The short name alone is now usable in an expression.
     self.assert_(ma.allclose(dataset.compute_variables(["income_times_2"]),
                              should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
     # The cache key has whitespace stripped, so a spaced variant of the same
     # expression must not grow the cache.
     oldsize = len(cache)
     expr_with_spaces = "opus_core . test_agent. income_times_2  "
     name2 = VariableName(expr_with_spaces)
     newsize = len(cache)
     self.assertEqual(oldsize, newsize, msg="caching error")
     self.assert_(expr_with_spaces not in cache, msg="caching error")
     self.assertEqual(expr_with_spaces, name2.get_expression(), msg="caching error")
     self.assertEqual(name2.get_short_name(), "income_times_2",
                      msg="bad value for shortname")
    def test_addition_of_households(self):
        """AgentEventModel should add households according to the event set."""
        dataset_pool = DatasetPool(
            storage=self.storage,
            package_order=["washtenaw", "urbansim", "opus_core"])
        gridcell_set = dataset_pool.get_dataset('gridcell')
        event_set = self._create_household_addition_event_set()
        households = dataset_pool.get_dataset("household")

        def household_counts():
            # Recompute the per-gridcell household count after a model run.
            return gridcell_set.compute_variables(
                "urbansim.gridcell.number_of_households",
                dataset_pool=dataset_pool)

        AgentEventModel().run(gridcell_set, event_set, households, 2000,
                              dataset_pool)
        # 2000: the model adds 6 households to gridcell 1.
        self.assert_(ma.allclose(household_counts(),
                                 array([16, 0, 30, 0, 5, 0, 0, 0, 0, 0])))

        AgentEventModel().run(gridcell_set, event_set, households, 2001,
                              dataset_pool)
        # 2001: +50% in gridcell 1 (8 households) and 25 households added to
        # gridcell 3.
        self.assert_(ma.allclose(household_counts(),
                                 array([24, 0, 55, 0, 5, 0, 0, 0, 0, 0])))
 def test_fully_qualified_DDD_SSS_variable(self):
     """Resolves through the test variable template a_test_SSS_variable_DDD_SSS."""
     expr = "opus_core.tests.a_test_squid_variable_42_clam"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='tests',
                         table_data={"a_dependent_variable": array([1,5,10]),
                                     "id": array([1,3,4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     should_be = array([10,50,100])
     self.assert_(ma.allclose(dataset.compute_variables([expr]), should_be, rtol=1e-6),
                  "Error in test_fully_qualified_DDD_SSS_variable")
     # Every accessor of the parsed name returns the expected piece.
     name = VariableName(expr)
     for accessor, expected, what in (
             (name.get_package_name, 'opus_core', "package"),
             (name.get_dataset_name, 'tests', "dataset"),
             (name.get_short_name, 'a_test_squid_variable_42_clam', "shortname"),
             (name.get_alias, 'a_test_squid_variable_42_clam', "alias"),
             (name.get_autogen_class, None, "autogen_class")):
         self.assertEqual(accessor(), expected, msg="bad value for " + what)
     # The short name alone is now usable in an expression.
     self.assert_(ma.allclose(dataset.compute_variables(['a_test_squid_variable_42_clam']),
                              should_be, rtol=1e-6),
                  "Error in accessing a_test_squid_variable_42_clam")
Example #12
0
 def test_alias_fully_qualified_variable(self):
     """Alias a fully-qualified variable under a new name."""
     expr = "x = opus_core.tests.a_test_variable"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='tests',
                         table_data={"a_dependent_variable": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     should_be = array([10, 50, 100])
     self.assert_(ma.allclose(dataset.compute_variables([expr]),
                              should_be, rtol=1e-6),
                  "Error in test_alias_fully_qualified_variable")
     # The parsed name carries x as the alias and gets an autogenerated class.
     v = VariableName(expr)
     self.assertEqual(v.get_package_name(), None,
                      msg="bad value for package_name")
     self.assertEqual(v.get_dataset_name(), 'tests',
                      msg="bad value for dataset_name")
     self.assert_(v.get_short_name().startswith('autogen'),
                  msg="bad value for shortname")
     self.assertEqual(v.get_alias(), 'x', msg="bad value for alias")
     self.assertNotEqual(v.get_autogen_class(), None,
                         msg="bad value for autogen_class")
     # The alias alone evaluates to the same values.
     self.assert_(ma.allclose(dataset.compute_variables(['x']),
                              should_be, rtol=1e-6),
                  "Error in accessing a_test_variable")
Example #13
0
 def test_casts_fully_qualified_variable(self):
     """astype casts applied to fully-qualified variables and expressions."""
     error_msg = "Error in test_casts_fully_qualified_variable"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={"income": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='test_agents',
                       id_name="id", dataset_name="test_agent")
     cases = (
         ("opus_core.test_agent.income_times_10.astype(int32)",
          array([10, 50, 100])),
         ("opus_core.test_agent.income_times_10.astype(int32)**2",
          array([100, 2500, 10000])),
         ("(2*opus_core.test_agent.income_times_10).astype(int32)",
          array([20, 100, 200])),
     )
     for expr, expected in cases:
         result = dataset.compute_variables([expr])
         # Each result must both be int32 and hold the expected values.
         self.assertEqual(type(result[0]), int32, error_msg)
         self.assert_(ma.allclose(result, expected, rtol=1e-6), error_msg)
Example #14
0
 def test_interaction_set_component_expression_alias(self):
     """Alias an expression over one component of an interaction set."""
     expr = "squid = 3+opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={'id': array([1, 2, 3]),
                                     'income': array([1, 20, 500])})
     storage.write_table(table_name='test_locations',
                         table_data={'id': array([1, 2]),
                                     'cost': array([1000, 2000])})
     dataset_pool = DatasetPool(package_order=['opus_core'], storage=storage)
     interaction = dataset_pool.get_dataset('test_agent_x_test_location')
     should_be = array([[5, 5], [43, 43], [1003, 1003]])
     computed = interaction.compute_variables(expr, dataset_pool=dataset_pool)
     self.assert_(ma.allclose(computed, should_be, rtol=1e-6),
                  msg="Error in " + expr)
     # The interaction set now stores the result under the alias ...
     stored = interaction.get_attribute('squid')
     self.assert_(ma.allclose(stored, should_be, rtol=1e-6),
                  msg="Error in " + expr)
     # ... and the alias alone recomputes the same values.
     by_alias = interaction.compute_variables(['squid'])
     self.assert_(ma.allclose(by_alias, should_be, rtol=1e-6),
                  msg="Error in " + expr)
Example #15
0
def test_processing():
    """Test correct processing (read and write) outputs."""
    # For each fixture config: execute every process tile at zooms 0-5,
    # write the outputs, then mosaic them and compare the mosaic against a
    # GDAL-built VRT of the files written to OUT_DIR.
    for cleantopo_process in [
            "testdata/cleantopo_tl.mapchete", "testdata/cleantopo_br.mapchete"
    ]:
        mp = mapchete.open(os.path.join(SCRIPTDIR, cleantopo_process))
        for zoom in range(6):
            tiles = []
            for tile in mp.get_process_tiles(zoom):
                output = mp.execute(tile)
                tiles.append(output)
                assert isinstance(output, BufferedTile)
                assert isinstance(output.data, ma.MaskedArray)
                assert output.data.shape == output.shape
                # at least some pixels must be valid (not fully masked)
                assert not ma.all(output.data.mask)
                mp.write(output)
            mosaic, mosaic_affine = create_mosaic(tiles)
            try:
                temp_vrt = os.path.join(OUT_DIR, str(zoom) + ".vrt")
                # build a VRT over everything written for this zoom level
                gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
                    temp_vrt, OUT_DIR, zoom)
                os.system(gdalbuildvrt)
                with rasterio.open(temp_vrt, "r") as testfile:
                    # geotransforms of VRT and mosaic must match element-wise
                    for file_item, mosaic_item in zip(
                            testfile.meta["transform"], mosaic_affine):
                        assert file_item == mosaic_item
                    band = testfile.read(1, masked=True)
                    assert band.shape == mosaic.shape
                    assert ma.allclose(band, mosaic)
                    assert ma.allclose(band.mask, mosaic.mask)
            finally:
                # clean up written output regardless of assertion failures
                shutil.rmtree(OUT_DIR, ignore_errors=True)
Example #16
0
 def test_casts_attribute(self):
     """astype casts applied to a primary attribute and expressions of it."""
     error_msg = "Error in test_casts_attribute"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='tests',
                         table_data={"persons": array([1, 5, 10]),
                                     "id": array([1, 3, 4])})
     dataset = Dataset(in_storage=storage, in_table_name='tests',
                       id_name="id", dataset_name="tests")
     cases = (
         ("persons.astype(float64)", array([1, 5, 10])),
         ("persons.astype(float64)**2", array([1, 25, 100])),
         ("(2*persons).astype(float64)", array([2, 10, 20])),
     )
     for expr, expected in cases:
         result = dataset.compute_variables([expr])
         # Each result must both be float64 and hold the expected values.
         self.assertEqual(type(result[0]), float64, error_msg)
         self.assert_(ma.allclose(result, expected, rtol=1e-6), error_msg)
Example #17
0
    def test_land_price_model(self):
        """LandPriceModel should fill residential and nonresidential land
        values for each gridcell from the given specification."""
        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name='gridcell_set',
            table_data={
                "percent_residential_within_walking_distance": array([30, 0, 90, 100]),
                "gridcell_year_built": array([2002, 1968, 1880, 1921]),
                "fraction_residential_land": array([0.5, 0.1, 0.3, 0.9]),
                "residential_land_value": array([0, 0, 0, 0]),
                "nonresidential_land_value": array([0, 0, 0, 0]),
                "development_type_id": array([1, 1, 1, 1]),
                "grid_id": array([1, 2, 3, 4])
            })
        gridcell_set = GridcellDataset(in_storage=storage,
                                       in_table_name='gridcell_set')

        specification = EquationSpecification(
            variables=("percent_residential_within_walking_distance",
                       "gridcell_year_built", "constant"),
            coefficients=("PRWWD", "YB", "constant"))
        coefficients = Coefficients(names=("constant", "PRWWD", "YB"),
                                    values=(10.0, -0.0025, 0.0001))
        model = LandPriceModel(filter=None, debuglevel=3)
        model.run(specification, coefficients, gridcell_set)
        residential = gridcell_set.get_attribute("residential_land_value")
        nonresidential = gridcell_set.get_attribute("nonresidential_land_value")
        # Expected values precomputed for the coefficients above.
        self.assertEqual(
            ma.allclose(residential,
                        array([12482.124, 2681.723, 6367.914, 18708.617]),
                        rtol=1e-3),
            True)
        self.assertEqual(
            ma.allclose(nonresidential,
                        array([12482.124, 24135.510, 14858.466, 2078.735]),
                        rtol=1e-3),
            True)
Example #18
0
    def test_safely_divide_two_attributes(self):
        """safely_divide_two_attributes returns 0 (or a chosen fill value)
        wherever the denominator is zero."""
        from opus_core.datasets.dataset_pool import DatasetPool

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(table_name='tests',
                            table_data={'id': array([1,2,3,4]),
                                        'numerator': array([1,2,3,0]),
                                        'denominator': array([2.,0.,2.,0.])})
        dataset_pool = DatasetPool(package_order=['opus_core'],
                                   storage=storage)
        variable = Variable()
        variable.set_dataset(dataset_pool.get_dataset('test'))

        # Default: divide-by-zero slots come back as 0.
        quotient = variable.safely_divide_two_attributes(
            'opus_core.test.numerator', 'opus_core.test.denominator')
        self.assert_(ma.allclose(array([.5, 0, 1.5, 0]), quotient))

        # An explicit fill value replaces the divide-by-zero slots.
        quotient = variable.safely_divide_two_attributes(
            'opus_core.test.numerator', 'opus_core.test.denominator',
            value_for_divide_by_zero=-1.0)
        self.assert_(ma.allclose(array([.5, -1., 1.5, -1.]), quotient))
    def test_same_distribution_after_household_subtraction(self):
        """Using the control_totals and no marginal characteristics,
        subtract households and ensure that the distribution within each group stays the same
        """
        # Control totals: shrink the household population to 20000 in year 2000.
        annual_household_control_totals_data = {
            "year": array([2000]),
            "total_number_of_households": array([20000])
            }

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='hct_set', table_data=annual_household_control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set', what="household", id_name="year")

        storage.write_table(table_name='hc_set', table_data=self.household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        model = HouseholdTransitionModel()
        model.run(year=2000, household_set=hh_set, control_totals=hct_set, characteristics=hc_set)

        #check that there are indeed 20000 total households after running the model
        results = hh_set.size()
        should_be = [20000]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the distribution of households in each group is the same as before running the model
        # Each group's original count (out of 33000) is rescaled to the new
        # total of 20000; rtol=0.05 is loose — presumably the transition model
        # chooses households stochastically (TODO confirm).
        results = self.get_count_all_groups(hh_set)
        should_be = [6000.0/33000.0*20000.0, 2000.0/33000.0*20000.0, 3000.0/33000.0*20000.0, 4000.0/33000.0*20000.0,
                     2000.0/33000.0*20000.0, 5000.0/33000.0*20000.0, 3000.0/33000.0*20000.0, 8000.0/33000.0*20000.0]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.05),
                         True, "Error, should_be: %s,\n but result: %s" % (should_be, results))
    def test_hierarchical_linear_utilities_coef_1D(self):
        # Check hierarchical_linear_utilities with a 1-D coefficient vector:
        # the 4-D data below is 2 observations x 6 alternatives x 4 variables
        # x 2 "levels" (the second half of each observation mirrors the first
        # with values shifted into the second level slot).

        data = array([[[[3,0], [5,0], [6,0], [5,0]], [[2,0], [1,0], [0,0], [0,0]], [[7,0], [2,0], [3,0], [5,0]]] + \
                      [[[0,3], [0,5], [0,6], [0,5]], [[0,2], [0,1], [0,0], [0,0]], [[0,7], [0,2], [0,3], [0,5]]],
                      [[[5,0], [1,0], [5,0], [2,0]], [[4,0], [7,0], [9,0], [2,0]], [[7,0], [2,0], [3,0], [5,0]]] + \
                      [[[0,5], [0,1], [0,5], [0,2]], [[0,4], [0,7], [0,9], [0,2]], [[0,7], [0,2], [0,1], [0,3]]]])

        #data = repeat(reshape(data, list(data.shape)+[1]), repeats=2, axis=3)

        coefficients = array([2.5, 1.2, 4, 9, 0, 1])

        # run() returns the per-alternative utilities and the mu parameters.
        utilities, mu = hierarchical_linear_utilities().run(data, coefficients)

        # Expected utilities precomputed for the data/coefficients above.
        should_be1 = array([[82.5, 6.2, 76.9, 82.5, 6.2, 76.9],
                            [51.7, 72.4, 76.9, 51.7, 72.4, 50.9]])

        should_be2 = (array([0, 1]))
        self.assertEqual(
            ma.allclose(utilities, should_be1, rtol=1e-05),
            True,
            msg=
            "Error in test_hierarchical_linear_utilities_2d_tree_structure (1)"
        )
        # mu must have one entry per level ...
        self.assertEqual(
            mu.size == should_be2.size,
            True,
            msg=
            "Error in test_hierarchical_linear_utilities_2d_tree_structure (2)"
        )
        # ... and match the expected values.
        self.assertEqual(
            ma.allclose(mu, should_be2, rtol=1e-05),
            True,
            msg=
            "Error in test_hierarchical_linear_utilities_2d_tree_structure (3)"
        )
Example #21
0
    def test_safely_divide_two_attributes(self):
        """Division helper: zero denominators yield 0 by default, or the
        caller-supplied value_for_divide_by_zero."""
        from opus_core.datasets.dataset_pool import DatasetPool

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name='tests',
            table_data={'id': array([1, 2, 3, 4]),
                        'numerator': array([1, 2, 3, 0]),
                        'denominator': array([2., 0., 2., 0.])})

        pool = DatasetPool(package_order=['opus_core'], storage=storage)
        variable = Variable()
        variable.set_dataset(pool.get_dataset('test'))

        default_result = variable.safely_divide_two_attributes(
            'opus_core.test.numerator', 'opus_core.test.denominator')
        self.assert_(ma.allclose(array([.5, 0, 1.5, 0]), default_result))

        filled_result = variable.safely_divide_two_attributes(
            'opus_core.test.numerator', 'opus_core.test.denominator',
            value_for_divide_by_zero=-1.0)
        self.assert_(ma.allclose(array([.5, -1., 1.5, -1.]), filled_result))
Example #22
0
def isEqual(left, right, eps=None, masked_equal=True):
    """Check whether two numpy arrays or scalars are equal within machine
    precision and return a scalar logical.

    Both arguments must be of matching kinds (ndarray with ndarray,
    float-like with float-like, int/bool with int/bool), otherwise a
    TypeError is raised.  ``eps`` is an absolute tolerance for inexact
    comparisons; ``masked_equal`` is forwarded to ma.allclose so masked
    elements compare equal.
    """
    diff_type = "Both arguments to function 'isEqual' must be of the same class!"
    if isinstance(left, np.ndarray):
        # ndarray: cast right to left's dtype if they differ
        if not isinstance(right, np.ndarray):
            raise TypeError(diff_type)
        if not left.dtype == right.dtype:
            right = right.astype(left.dtype)  # casting='same_kind' doesn't work...
        if np.issubdtype(left.dtype, np.inexact):  # also catch float32 etc
            if eps is None:
                return ma.allclose(left, right, masked_equal=masked_equal)
            return ma.allclose(left, right, masked_equal=masked_equal, atol=eps)
        elif np.issubdtype(left.dtype, np.integer) or np.issubdtype(left.dtype, np.bool_):
            # np.bool_ replaces the np.bool alias removed in NumPy >= 1.24
            return np.all(left == right)  # need to use numpy's all()
        else:
            # e.g. string/object dtypes: exact elementwise equality.
            # (The original implicitly returned None here.)
            return np.all(left == right)
    elif isinstance(left, (float, np.inexact)):
        # scalar floats
        if not isinstance(right, (float, np.inexact)):
            raise TypeError(diff_type)
        if eps is None:
            eps = 100. * floateps  # default (module-level machine epsilon)
        # Bug fix: the original tested isinstance(right, float) twice, so a
        # plain-Python-float left crashed below on left.dtype.  If either
        # side is a plain float there is no dtype to inspect: compare directly.
        if isinstance(left, float) or isinstance(right, float) \
                or left.dtype.itemsize == right.dtype.itemsize:
            return np.absolute(left - right) <= eps
        # different-width numpy floats: cast the wider one down, then compare
        if left.dtype.itemsize < right.dtype.itemsize:
            right = left.dtype.type(right)
        else:
            left = right.dtype.type(left)
        return np.absolute(left - right) <= eps
    elif isinstance(left, (int, bool, np.integer, np.bool_)):
        # logicals / integers: exact comparison
        if not isinstance(right, (int, bool, np.integer, np.bool_)):
            raise TypeError(diff_type)
        return left == right
    else:
        raise TypeError(left)
Example #23
0
 def test_alias_complex_expression(self):
     """Aliasing a complex expression: after computing the expression,
     the alias 'x' must be registered and recompute to the same values."""
     expr = "x = 2*sqrt(var1+var2)"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset',
                         table_data={
                             "var1": array([4, -8, 0.5, 1]),
                             "var2": array([3, 3, 7, 7]),
                             "id": array([1, 2, 3, 4])
                         })
     dataset = Dataset(in_storage=storage,
                       in_table_name='dataset',
                       id_name="id",
                       dataset_name="mydataset")
     result = dataset.compute_variables([expr])
     should_be = array([5.29150262, 0.0, 5.47722558, 5.65685425])
     # assertTrue replaces the deprecated TestCase.assert_ alias
     # (deprecated since Python 3.2, removed in 3.12).
     self.assertTrue(ma.allclose(result, should_be, rtol=1e-6),
                     "Error in test_alias_complex_expression")
     # check that the new var has x as an alias
     v = VariableName(expr)
     self.assertEqual(v.get_alias(), 'x', msg="bad value for alias")
     # check that the alias gives the correct value
     result2 = dataset.compute_variables(['x'])
     self.assertTrue(ma.allclose(result2, should_be, rtol=1e-6),
                     "Error in accessing a_test_variable")
Example #24
0
def test_processing(mp_tmpdir, cleantopo_br, cleantopo_tl):
    """Test correct processing (read and write) outputs.

    Every process tile must yield a valid, not-fully-masked masked array,
    and a GDAL-built VRT over the written files must match the in-memory
    mosaic in both geotransform and pixel values.
    """
    for cleantopo_process in [cleantopo_br.path, cleantopo_tl.path]:
        with mapchete.open(cleantopo_process) as mp:
            for zoom in range(6):
                tiles = []
                for tile in mp.get_process_tiles(zoom):
                    output = mp.execute(tile)
                    tiles.append((tile, output))
                    assert isinstance(output, ma.MaskedArray)
                    # Bug fix: the original asserted output.shape == output.shape,
                    # a tautology that could never fail; compare against the
                    # process tile's shape instead.
                    assert output.shape == tile.shape
                    assert not ma.all(output.mask)
                    mp.write(tile, output)
                mosaic = create_mosaic(tiles)
                try:
                    temp_vrt = os.path.join(mp_tmpdir, str(zoom)+".vrt")
                    gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
                        temp_vrt, mp.config.output.path, zoom)
                    os.system(gdalbuildvrt)
                    with rasterio.open(temp_vrt, "r") as testfile:
                        # geotransform of the VRT must agree with the mosaic
                        for file_item, mosaic_item in zip(
                            testfile.meta["transform"], mosaic.affine
                        ):
                            assert file_item == mosaic_item
                        band = testfile.read(1, masked=True)
                        assert band.shape == mosaic.data.shape
                        assert ma.allclose(band, mosaic.data)
                        assert ma.allclose(band.mask, mosaic.data.mask)
                finally:
                    # clean up the temp dir even if an assertion fails
                    shutil.rmtree(mp_tmpdir, ignore_errors=True)
def test_mask_handling():
    """
    Test masked data handling for :func:`esmf_regrid.experimental.unstructured_scheme.GridToMeshESMFRegridder`.

    Tests masked data handling for multiple valid values for mdtol.
    """
    tgt = _flat_mesh_cube()

    n_lons, n_lats = 6, 5
    src = _grid_cube(n_lons, n_lats, (-180, 180), (-90, 90), circular=True)

    # Source field of ones with a single masked cell in the corner.
    mask = np.zeros([n_lats, n_lons])
    mask[0, 0] = 1
    src.data = ma.array(np.ones([n_lats, n_lons]), mask=mask)

    # Regrid once per mask tolerance.
    results = {
        mdtol: GridToMeshESMFRegridder(src, tgt, mdtol=mdtol)(src)
        for mdtol in (0, 0.05, 1)
    }

    # Expected masks grow as mdtol increases.
    ones = np.ones(tgt.shape)
    expected = {
        0: ma.array(ones),
        0.05: ma.array(ones, mask=[0, 0, 1, 0, 0, 0]),
        1: ma.array(ones, mask=[1, 0, 1, 0, 0, 0]),
    }

    for mdtol in (0, 0.05, 1):
        assert ma.allclose(expected[mdtol], results[mdtol].data)
 def test_expression(self):
     """Test a compound expression.  Also make sure that the generated
     variable can be accessed using its short name and that its
     dependencies are correct."""
     expr = "2*sqrt(my_variable+10)"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='dataset',
                         table_data={
                             "my_variable": array([4, -8, 0.5, 1]),
                             "id": array([1, 2, 3, 4])
                         })
     dataset = Dataset(in_storage=storage,
                       in_table_name='dataset',
                       id_name="id",
                       dataset_name="mydataset")
     result = dataset.compute_variables([expr])
     should_be = array([7.48331477, 2.82842712, 6.4807407, 6.63324958])
     # assertTrue replaces the deprecated TestCase.assert_ alias
     # (deprecated since Python 3.2, removed in 3.12).
     self.assertTrue(ma.allclose(result, should_be, rtol=1e-6),
                     "Error in test_expression")
     # check the name
     v = VariableName(expr)
     var = VariableFactory().get_variable(v, dataset)
     self.assertEqual(var.name(), expr, msg="name is incorrect")
     # check the dependencies
     self.assertEqual(var.dependencies(), ['mydataset.my_variable'],
                      msg="dependencies are incorrect")
     # test that the variable can now also be accessed using its short name in an expression
     result2 = dataset.compute_variables([v.get_short_name()])
     self.assertTrue(ma.allclose(result2, should_be, rtol=1e-6),
                     "Error in accessing a_test_variable")
def test_LocationAtSea_track():
    """LocationAtSea with per-measurement positions (track / TSG style).

       lat & lon come from the dataset itself, one pair per measurement,
       as would be the case for a TSG.  The etopo resolution is locked,
       since it can change the values.

       There is no restriction on the number of locations: here there are
       multiple depths but only 3 positions, and it is not LocationAtSea's
       job to make sense of that pairing — that belongs to the combined
       tests.
    """
    data = DummyData()
    data.data["LATITUDE"] = [15, 12, 8]
    data.data["LONGITUDE"] = [-38, 222, 0]

    result = LocationAtSea(data, cfg={"resolution": "5min"})

    assert hasattr(result, "features")
    assert hasattr(result, "flags")
    assert "bathymetry" in result.features
    assert "location_at_sea" in result.flags
    assert ma.allclose(result.features["bathymetry"], [5036, 4995, -122])
    assert ma.allclose(result.flags["location_at_sea"], [1, 1, 4])
Example #28
0
def test():
    """Run Tukey53H on a reference profile and compare the computed
    features and flags against precomputed reference values."""
    profile = {
        'PRES': ma.masked_array([1.0, 100, 200, 300, 500, 5000]),
        'TEMP': ma.masked_array([27.44, 14.55, 11.96, 11.02, 7.65, 2.12]),
        'PSAL': ma.masked_array([35.71, 35.50, 35.13, 35.02, 34.72, 35.03]),
    }
    # Reference outputs: the first/last two points are masked by the
    # 5-point window.
    expected_features = {
        'tukey53H': ma.masked_array(
            [0, 0, 0.3525000000000009, 0.35249999999999915, 0, 0],
            mask=[True, True, False, False, True, True]),
        'tukey53H_norm': ma.masked_array(
            [0, 0, 0.07388721803621254, 0.07388721803621218, 0, 0],
            mask=[True, True, False, False, True, True]),
    }
    expected_flags = {'tukey53H_norm': np.array([0, 0, 1, 1, 0, 0], dtype='i1')}

    config = {'l': 5, 'threshold': 6, 'flag_good': 1, 'flag_bad': 4}
    outcome = Tukey53H(profile, 'TEMP', config)
    outcome.test()

    assert type(outcome.features) is dict
    for name in outcome.features:
        assert ma.allclose(outcome.features[name], expected_features[name])
    for name in outcome.flags:
        assert ma.allclose(outcome.flags[name], expected_flags[name])
def test_esmpy_normalisation():
    """
    Integration test for :meth:`~esmf_regrid.esmf_regridder.Regridder`.

    Checks against ESMF to ensure results are consistent.
    """
    # Source data (3 rows x 2 columns) with the top-left cell masked.
    src_data = np.array(
        [
            [1.0, 1.0],
            [1.0, 0.0],
            [1.0, 0.0],
        ],
    )
    src_mask = np.array(
        [
            [True, False],
            [False, False],
            [False, False],
        ]
    )
    src_array = ma.array(src_data, mask=src_mask)

    # Build the source grid twice: as a GridInfo (consumed by Regridder)
    # and as a raw ESMF grid/field carrying the same data and mask.
    lon, lat, lon_bounds, lat_bounds = make_grid_args(2, 3)
    src_grid = GridInfo(lon, lat, lon_bounds, lat_bounds)
    src_esmpy_grid = src_grid._make_esmf_grid()
    src_esmpy_grid.add_item(ESMF.GridItem.MASK, staggerloc=ESMF.StaggerLoc.CENTER)
    src_esmpy_grid.mask[0][...] = src_mask
    src_field = ESMF.Field(src_esmpy_grid)
    src_field.data[...] = src_data

    # Target grid of a different shape.
    lon, lat, lon_bounds, lat_bounds = make_grid_args(3, 2)
    tgt_grid = GridInfo(lon, lat, lon_bounds, lat_bounds)
    tgt_field = tgt_grid.make_esmf_field()

    # Regridder under test.
    regridder = Regridder(src_grid, tgt_grid)

    # Reference regridders built directly through ESMF, one per
    # normalisation type; src_mask_values=[1] honours the mask set above.
    regridding_kwargs = {
        "ignore_degenerate": True,
        "regrid_method": ESMF.RegridMethod.CONSERVE,
        "unmapped_action": ESMF.UnmappedAction.IGNORE,
        "factors": True,
        "src_mask_values": [1],
    }
    esmpy_fracarea_regridder = ESMF.Regrid(
        src_field, tgt_field, norm_type=ESMF.NormType.FRACAREA, **regridding_kwargs
    )
    esmpy_dstarea_regridder = ESMF.Regrid(
        src_field, tgt_field, norm_type=ESMF.NormType.DSTAREA, **regridding_kwargs
    )

    # DSTAREA normalisation: Regridder must match raw ESMF output.
    tgt_field_dstarea = esmpy_dstarea_regridder(src_field, tgt_field)
    result_esmpy_dstarea = tgt_field_dstarea.data
    result_dstarea = regridder.regrid(src_array, norm_type="dstarea")
    assert ma.allclose(result_esmpy_dstarea, result_dstarea)

    # FRACAREA normalisation: likewise.
    tgt_field_fracarea = esmpy_fracarea_regridder(src_field, tgt_field)
    result_esmpy_fracarea = tgt_field_fracarea.data
    result_fracarea = regridder.regrid(src_array, norm_type="fracarea")
    assert ma.allclose(result_esmpy_fracarea, result_fracarea)
    def test_same_distribution_after_household_addition(self):
        """Using the control_totals and no marginal characteristics,
        add households and ensure that the distribution within each group stays the same
        """

        annual_household_control_totals_data = {
            "year": array([2000, 2000]),
            "total_number_of_households": array([20000, 30000]),
            "large_area_id": array([1,2])
            }

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name = 'hh_set', table_data = self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name = 'hct_set', table_data = annual_household_control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set', what="household")

        storage.write_table(table_name = 'hc_set', table_data = self.household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        model = RegionalHouseholdTransitionModel()
        model.run(year=2000, household_set=hh_set, control_totals=hct_set, characteristics=hc_set)

        #check that there are 20000 (area 1) and 30000 (area 2) total households after running the model
        areas = hh_set.get_attribute("large_area_id")
        results = array([0,0])
        for iarea in [0,1]:
            results[iarea] = where(areas == [1,2][iarea])[0].size
        should_be = [20000, 30000]
        # assertTrue(cond, msg) replaces the assertEqual(cond, True, msg)
        # anti-idiom throughout this test.
        self.assertTrue(ma.allclose(should_be, results, rtol=1e-1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the number of unplaced households is exactly the number of new households created
        results = where(hh_set.get_attribute("grid_id")<=0)[0].size
        should_be = [17000]
        self.assertTrue(ma.allclose(should_be, results, rtol=1e-1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the distribution of households in each group and each area is the same as before running the model
        results = self.get_count_all_groups(hh_set)
        should_be = array([
                    # area 1 
                     3000.0/16500.0*20000.0, 1000.0/16500.0*20000.0, 1500.0/16500.0*20000.0, 2000.0/16500.0*20000.0,
                     1000.0/16500.0*20000.0, 2500.0/16500.0*20000.0, 1500.0/16500.0*20000.0, 4000.0/16500.0*20000.0,
                     # area 2
                     3000.0/16500.0*30000.0, 1000.0/16500.0*30000.0, 1500.0/16500.0*30000.0, 2000.0/16500.0*30000.0,
                     1000.0/16500.0*30000.0, 2500.0/16500.0*30000.0, 1500.0/16500.0*30000.0, 4000.0/16500.0*30000.0])
        self.assertTrue(ma.allclose(results, should_be, rtol=0.1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))
        # check the types of the attributes
        self.assertEqual(hh_set.get_attribute("age_of_head").dtype, int32,
                         "Error in data type of the new household set. Should be: int32, is: %s" % str(hh_set.get_attribute("age_of_head").dtype))
        self.assertEqual(hh_set.get_attribute("income").dtype, int32,
                         "Error in data type of the new household set. Should be: int32, is: %s" % str(hh_set.get_attribute("income").dtype))
        self.assertEqual(hh_set.get_attribute("persons").dtype, int8,
                         "Error in data type of the new household set. Should be: int8, is: %s" % str(hh_set.get_attribute("persons").dtype))
Example #31
0
 def test_simple_model_with_outcome_values(self):
     """SimpleModel.run writes outcome_values into outcome_attribute;
     with dataset_filter, only rows satisfying the filter are overwritten."""
     m = SimpleModel()
     m.run(self.dataset, outcome_attribute='iniattr', outcome_values=zeros(10)-1)
     # assertTrue/assertIn replace the assertEqual(..., True) anti-idiom.
     self.assertTrue(ma.allclose(self.dataset.get_attribute('iniattr'), array(10*[-1])))
     # The outcome attribute becomes a primary attribute of the dataset.
     self.assertIn('iniattr', self.dataset.get_primary_attribute_names())
     # run with filter: only rows with attribute>1000 receive new values
     m.run(self.dataset, outcome_attribute='iniattr', outcome_values=arange(10)+1, dataset_filter='dataset.attribute>1000')
     expected = array([1, 2, -1, -1, -1, -1, 7, -1, -1, -1])
     self.assertTrue(ma.allclose(self.dataset.get_attribute('iniattr'), expected))
def test_estimate_anomaly():
    """estimate_anomaly must give identical results for a dict of features
    and for the equivalent DataFrame, and match reference values."""
    params = {'spike': dummy_params['spike']}
    from_dict = estimate_anomaly(dummy_features, params)
    from_frame = estimate_anomaly(pd.DataFrame(dummy_features), params)

    assert ma.allclose(from_dict, from_frame)
    expected = ma.masked_values(
        [-999, 0.0, -5.797359001920061, -57.564627324851145, -999,
         -9.626760611162082], -999)
    assert ma.allclose(from_dict, expected)
Example #33
0
    def test_fave_scale(self):
        """After smoothing, rebinning and cal scaling, the mean cal-on
        minus cal-off difference must be unity for polarisations 0 and 3."""
        hanning.hanning_smooth(self.Data)
        rebin_freq.rebin(self.Data, 2.)
        cal_scale.scale_by_cal(self.Data, False, True, False)
        data = self.Data.data

        # Same check for both polarisation indices.
        for pol in (0, 3):
            cal_diff = ma.mean(data[:, pol, 0, :] - data[:, pol, 1, :], -1)
            self.assertTrue(ma.allclose(cal_diff, 1.0))
    def test_controlling_with_three_marginal_characteristics(self):
        """Controlling with all three possible marginal characteristics in this example, age_of_head, income, and persons,
        this would partition the 8 groups into the same 8 groups, and with a control total specified for each group, we must
        ensure that the control totals for each group exactly meet the specifications.
        """

        #IMPORTANT: marginal characteristics grouping indices have to start at 0!
        annual_household_control_totals_data = {
            "year": array(8*[2000]),
            #"age_of_head": array(4*[0] + 4*[1]),
            "age_of_head_min": array([ 0, 0, 0, 0, 50, 50, 50, 50]),
            "age_of_head_max": array([49,49,49,49,100,100,100,100]),
            #"income": array(2*[0] + 2*[1] + 2*[0] + 2*[1]),
            "income_min": array([    0,    0,40000,40000,    0,    0,40000,40000]),
            "income_max": array([39999,39999,   -1,   -1,39999,39999,   -1,   -1]),
            #"persons": array([0,1,0,1,0,1,0,1]),
            "persons_min": array([0, 3,0, 3,0, 3,0, 3]),
            "persons_max": array([2,-1,2,-1,2,-1,2,-1]),
            "total_number_of_households": array([4000, 5000, 1000, 3000, 0, 6000, 3000, 8000])
            }
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='hct_set', table_data=annual_household_control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set', what='household', id_name=[])

        # unplace all households not on gridcell 10
        # Bug fix: '<>' is Python 2-only syntax (a SyntaxError in Python 3);
        # '!=' is equivalent and valid in both.
        where10 = where(hh_set.get_attribute("grid_id") != 10)[0]
        hh_set.modify_attribute(name="grid_id", data=zeros(where10.size), index=where10)

        model = TransitionModel(hh_set, control_total_dataset=hct_set)
        model.run(year=2000, target_attribute_name="total_number_of_households", reset_dataset_attribute_value={'grid_id':-1})

        #check that there are indeed 30000 total households after running the model
        # (the control totals above sum to 30000; an old comment said 33000)
        results = hh_set.size()
        should_be = [30000]
        # assertTrue(cond, msg) replaces the assertEqual(cond, True, msg) anti-idiom.
        self.assertTrue(ma.allclose(should_be, results, rtol=1e-1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the number of households in each group exactly match the control totals specified
        results = self.get_count_all_groups(hh_set)
        should_be = [4000, 5000, 1000, 3000, 0, 6000, 3000, 8000]
        self.assertTrue(ma.allclose(results, should_be),
                        "Error, should_be: %s, but result: %s" % (should_be, results))
Example #35
0
 def test_fully_qualified_variable(self):
     """An expression consisting of a single fully-qualified variable:
     compute it, then verify the VariableName cache and every accessor."""
     expr = "opus_core.test_agent.income_times_2"
     storage = StorageFactory().get_storage('dict_storage')
     storage.write_table(table_name='test_agents',
                         table_data={
                             "income": array([1, 5, 10]),
                             "id": array([1, 3, 4])
                         })
     dataset = Dataset(in_storage=storage,
                       in_table_name='test_agents',
                       id_name="id",
                       dataset_name="test_agent")
     result = dataset.compute_variables([expr])
     should_be = array([2, 10, 20])
     # assertTrue replaces the deprecated TestCase.assert_ alias
     # (deprecated since Python 3.2, removed in 3.12).
     self.assertTrue(ma.allclose(result, should_be, rtol=1e-6),
                     "Error in test_fully_qualified_variable")
     # check that expr is in the cache of known expressions
     # (normally we shouldn't be accessing this private field, but just this once ...)
     cache = VariableName._cache
     self.assertIn(expr, cache, msg="did not find expr in cache")
     # check that the access methods for the variable all return the correct values
     name = VariableName(expr)
     self.assertEqual(name.get_package_name(),
                      'opus_core',
                      msg="bad value for package")
     self.assertEqual(name.get_dataset_name(),
                      'test_agent',
                      msg="bad value for dataset")
     self.assertEqual(name.get_short_name(),
                      'income_times_2',
                      msg="bad value for shortname")
     self.assertEqual(name.get_alias(),
                      'income_times_2',
                      msg="bad value for alias")
     self.assertEqual(name.get_autogen_class(),
                      None,
                      msg="bad value for autogen_class")
     # test that the variable can now also be accessed using its short name in an expression
     result2 = dataset.compute_variables(['income_times_2'])
     self.assertTrue(ma.allclose(result2, should_be, rtol=1e-6),
                     "Error in accessing a_test_variable")
     # check that the cache uses the variable name with whitespace removed
     oldsize = len(cache)
     expr_with_spaces = "opus_core . test_agent. income_times_2  "
     name2 = VariableName(expr_with_spaces)
     newsize = len(cache)
     self.assertEqual(oldsize, newsize, msg="caching error")
     self.assertNotIn(expr_with_spaces, cache, msg="caching error")
     self.assertEqual(expr_with_spaces,
                      name2.get_expression(),
                      msg="caching error")
     self.assertEqual(name2.get_short_name(),
                      'income_times_2',
                      msg="bad value for shortname")
Example #36
0
 def test_safely_divide_two_arrays(self):
     """safely_divide_two_arrays: divide-by-zero entries become 0 by default
     or an explicit value_for_divide_by_zero; integer inputs keep their dtype."""
     result = Variable().safely_divide_two_arrays(array([10,20,30,0]).astype(int8), array([2,0,2,0]).astype(int8))
     # assertTrue replaces the deprecated TestCase.assert_ alias
     # (deprecated since Python 3.2, removed in 3.12).
     self.assertTrue(ma.allclose(array([5,0,15,0]), result))
     # Types are done correctly
     self.assertEqual(result.dtype.type, int8)

     result = Variable().safely_divide_two_arrays(array([1,2,3,0]), array([2.,0.,2.,0.]))
     self.assertTrue(ma.allclose(array([.5, 0, 1.5, 0]), result))

     result = Variable().safely_divide_two_arrays(array([1,2,3,0]), array([2.,0.,2.,0.]), value_for_divide_by_zero=-1.)
     self.assertTrue(ma.allclose(array([.5, -1., 1.5, -1.]), result))
Example #37
0
    def test_controlling_with_one_marginal_characteristic(self):
        """Using the age_of_head as a marginal characteristic, which would partition the 8 groups into two larger groups
        (those with age_of_head < 40 and >= 40), ensure that the control totals are met and that the distribution within
        each large group is the same before and after running the model
        """

        #IMPORTANT: marginal characteristics grouping indices have to start at 0!
        #i.e. below, there is one marg. char. "age_of_head". here we indicate that the first "large group" (groups 1-4),
        #consisting of those groups with age_of_head < 40 should total 25000 households after running this model for one year,
        #and the second large group, those groups with age_of_head > 40, should total 15000 households
        annual_household_control_totals_data = {
            "year": array([2000, 2000]),
            "age_of_head": array([0,1]),
            "total_number_of_households": array([25000, 15000])
            }

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='hct_set', table_data=annual_household_control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set', what='household', id_name=['year' ,'age_of_head'])

        storage.write_table(table_name='hc_set', table_data=self.household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        storage.write_table(table_name='prs_set', table_data=self.person_data)
        prs_set = PersonDataset(in_storage=storage, in_table_name='prs_set')

        model = HouseholdTransitionModel()
        model.run(year=2000, person_set=prs_set, household_set=hh_set, control_totals=hct_set, characteristics=hc_set)

        #check that there are indeed 40000 total households after running the model
        results = hh_set.size()
        should_be = [40000]
        # assertTrue(cond, msg) replaces the assertEqual(cond, True, msg)
        # anti-idiom throughout this test.
        self.assertTrue(ma.allclose(should_be, results, rtol=1e-1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the total number of households within first four groups increased by 10000
        #and that the total number of households within last four groups decreased by 3000
        results = self.get_count_all_groups(hh_set)
        should_be = [25000, 15000]
        self.assertTrue(ma.allclose([sum(results[0:4]), sum(results[4:8])], should_be, rtol=1e-1),
                        "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the distribution of households within groups 1-4 and 5-8 are the same before and after
        #running the model, respectively

        should_be = [6000.0/15000.0*25000.0, 2000.0/15000.0*25000.0, 3000.0/15000.0*25000.0, 4000.0/15000.0*25000.0,
                     2000.0/18000.0*15000.0, 5000.0/18000.0*15000.0, 3000.0/18000.0*15000.0, 8000.0/18000.0*15000.0]
        self.assertTrue(ma.allclose(results, should_be, rtol=0.05),
                        "Error, should_be: %s, but result: %s" % (should_be, results))
Example #38
0
def test_estimate_anomaly():
    """estimate_anomaly must give the same answer for dict- and DataFrame-shaped features."""
    spike_only = {'spike': dummy_params['spike']}
    from_dict = estimate_anomaly(dummy_features, spike_only)
    from_frame = estimate_anomaly(pd.DataFrame(dummy_features), spike_only)

    # the two input representations are equivalent
    assert ma.allclose(from_dict, from_frame)

    # reference values; -999 marks masked entries
    expected = ma.masked_values([
        -999, 0.0, -5.797359001920061, -57.564627324851145, -999,
        -9.626760611162082
    ], -999)
    assert ma.allclose(from_dict, expected)
    def test_same_distribution_after_household_subtraction(self):
        """Using the control_totals and no marginal characteristics,
        subtract households and ensure that the distribution within each group stays the same
        """
        control_totals_data = {"year": array([2000]),
                               "total_number_of_households": array([20000])}

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='hct_set', table_data=control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set',
                                      what="household", id_name="year")

        storage.write_table(table_name='hc_set',
                            table_data=self.household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        HouseholdTransitionModel().run(year=2000, household_set=hh_set,
                                       control_totals=hct_set, characteristics=hc_set)

        # the model should shrink the population to exactly the 20000 control total
        results = hh_set.size()
        should_be = [20000]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1), True,
                         "Error, should_be: %s, but result: %s" % (should_be, results))

        # group proportions must match the pre-run 33000-household distribution
        results = self.get_count_all_groups(hh_set)
        group_sizes = [6000.0, 2000.0, 3000.0, 4000.0, 2000.0, 5000.0, 3000.0, 8000.0]
        should_be = [count / 33000.0 * 20000.0 for count in group_sizes]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.05), True,
                         "Error, should_be: %s,\n but result: %s" % (should_be, results))
Example #40
0
    def test_same_distribution_after_household_addition(self):
        """Using the control_totals and no marginal characteristics,
        add households and ensure that the distribution within each group stays the same
        """
        control_totals_data = {"year": array([2000]),
                               "total_number_of_households": array([50000])}

        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='prs_set', table_data=self.person_data)
        prs_set = PersonDataset(in_storage=storage, in_table_name='prs_set')

        storage.write_table(table_name='hct_set', table_data=control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set',
                                      what="household", id_name="year")

        storage.write_table(table_name='hc_set',
                            table_data=self.household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        HouseholdTransitionModel().run(year=2000, person_set=prs_set, household_set=hh_set,
                                       control_totals=hct_set, characteristics=hc_set)

        # the model should grow the population to exactly the 50000 control total
        results = hh_set.size()
        should_be = [50000]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))

        # every newly created household starts out unplaced (building_id <= 0)
        results = where(hh_set.get_attribute("building_id") <= 0)[0].size
        should_be = [17000]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))

        # group proportions must match the pre-run 33000-household distribution
        results = self.get_count_all_groups(hh_set)
        group_sizes = [6000.0, 2000.0, 3000.0, 4000.0, 2000.0, 5000.0, 3000.0, 8000.0]
        should_be = array([count / 33000.0 * 50000.0 for count in group_sizes])
        self.assertEqual(ma.allclose(results, should_be, rtol=0.05),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))
        # check the types of the attributes
        self.assertEqual(hh_set.get_attribute("age_of_head").dtype, int32,
                         "Error in data type of the new household set. Should be: int32, is: %s" % str(hh_set.get_attribute("age_of_head").dtype))
        self.assertEqual(hh_set.get_attribute("income").dtype, int32,
                         "Error in data type of the new household set. Should be: int32, is: %s" % str(hh_set.get_attribute("income").dtype))
        self.assertEqual(hh_set.get_attribute("persons").dtype, int8,
                         "Error in data type of the new household set. Should be: int8, is: %s" % str(hh_set.get_attribute("persons").dtype))
Example #41
0
    def test_controlling_with_three_marginal_characteristics(self):
        """Controlling with all three possible marginal characteristics in this example, age_of_head, income, and persons,
        this would partition the 8 groups into the same 8 groups, and with a control total specified for each group, we must
        ensure that the control totals for each group exactly meet the specifications.
        """

        #IMPORTANT: marginal characteristics grouping indices have to start at 0!
        annual_household_control_totals_data = {
            "year": array(8*[2000]),
            "age_of_head": array(4*[0] + 4*[1]),
            "income": array(2*[0] + 2*[1] + 2*[0] + 2*[1]),
            "persons": array([0,1,0,1,0,1,0,1]),
            "total_number_of_households": array([4000, 5000, 1000, 3000, 0, 6000, 3000, 8000])
            }
        #size of columns was not even, removed last element of min and max
        household_characteristics_for_ht_data = {
            "characteristic": array(2*['age_of_head'] + 2*['income'] + 2*['persons']),
            "min": array([0, 50, 0, 40000, 0, 3]),
            "max": array([49, 100, 39999, -1, 2, -1])
            }
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='hh_set', table_data=self.households_data)
        hh_set = HouseholdDataset(in_storage=storage, in_table_name='hh_set')

        storage.write_table(table_name='hct_set', table_data=annual_household_control_totals_data)
        hct_set = ControlTotalDataset(in_storage=storage, in_table_name='hct_set', what='household', id_name=['year' ,'age_of_head', 'income', 'persons'])

        storage.write_table(table_name='hc_set', table_data=household_characteristics_for_ht_data)
        hc_set = HouseholdCharacteristicDataset(in_storage=storage, in_table_name='hc_set')

        # unplace some households
        # ('!=' replaces the Python-2-only '<>' operator; identical semantics)
        where10 = where(hh_set.get_attribute("building_id") != 10)[0]
        hh_set.modify_attribute(name="building_id", data=zeros(where10.size), index=where10)

        storage.write_table(table_name='prs_set', table_data=self.person_data)
        prs_set = PersonDataset(in_storage=storage, in_table_name='prs_set')

        model = HouseholdTransitionModel()
        model.run(year=2000, person_set=prs_set, household_set=hh_set, control_totals=hct_set, characteristics=hc_set)

        #check that there are indeed 30000 total households after running the model
        #(the control totals above sum to 30000)
        results = hh_set.size()
        should_be = [30000]
        self.assertEqual(ma.allclose(should_be, results, rtol=1e-1),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))

        #check that the number of households in each group exactly match the control totals specified
        results = self.get_count_all_groups(hh_set)
        should_be = [4000, 5000, 1000, 3000, 0, 6000, 3000, 8000]
        self.assertEqual(ma.allclose(results, should_be),
                         True, "Error, should_be: %s, but result: %s" % (should_be, results))
    def test_scaling_jobs_model(self):
        """ScalingJobsModel should distribute the 1750 unplaced sector-15 jobs
        across gridcells proportionally to the existing sector-15 counts, and
        leave sector-1 jobs untouched.

        gridcell       has              expected about
        1         4000 sector 15 jobs   5000 sector 15 jobs
                  1000 sector 1 jobs    1000 sector 1 jobs
        2         2000 sector 15 jobs   2500 sector 15 jobs
                  1000 sector 1 jobs    1000 sector 1 jobs
        3         1000 sector 15 jobs   1250 sector 15 jobs
                  1000 sector 1 jobs    1000 sector 1 jobs
        unplaced  1750 sector 15 jobs   0
        """
        storage = StorageFactory().get_storage('dict_storage')

        jobs_table_name = 'building_types'
        # job_ids 1..11750; the final 1750 jobs (grid_id == -1) are unplaced
        storage.write_table(
            table_name=jobs_table_name,
            table_data={
                "job_id":
                arange(11750) + 1,
                "sector_id":
                array(7000 * [15] + 3000 * [1] + 1750 * [15]),
                "grid_id":
                array(4000 * [1] + 2000 * [2] + 1000 * [3] + 1000 * [1] +
                      1000 * [2] + 1000 * [3] + 1750 * [-1])
            })
        jobs = JobDataset(in_storage=storage, in_table_name=jobs_table_name)

        gridcells_table_name = 'gridcells'
        storage.write_table(table_name=gridcells_table_name,
                            table_data={"grid_id": arange(3) + 1})
        gridcells = GridcellDataset(in_storage=storage,
                                    in_table_name=gridcells_table_name)

        # run model on all 1750 unplaced jobs: they occupy 0-based dataset
        # indices 10000..11749, i.e. arange(10000, 11750).
        # (previously arange(10001, 11750) -- an off-by-one that covered
        # only 1749 jobs and skipped index 10000)
        model = ScalingJobsModel(debuglevel=4)
        model.run(gridcells, jobs, agents_index=arange(10000, 11750))
        # get results
        gridcells.compute_variables([
            "urbansim.gridcell.number_of_jobs_of_sector_15",
            "urbansim.gridcell.number_of_jobs_of_sector_1"
        ],
                                    resources=Resources({"job": jobs}))
        # sector 1 jobs should be exactly the same
        result1 = gridcells.get_attribute("number_of_jobs_of_sector_1")
        self.assertEqual(
            ma.allclose(result1, array([1000, 1000, 1000]), rtol=0), True)
        # the distribution of sector 15 jobs should be the same with higher means
        result2 = gridcells.get_attribute("number_of_jobs_of_sector_15")
        #            logger.log_status(result2)
        self.assertEqual(
            ma.allclose(result2, array([5000, 2500, 1250]), rtol=0.05), True)
Example #43
0
    def test_same_distribution_after_business_addition_control_for_business(
            self):
        """Add 1,750 new businesses of sector 1 and 1000 businesses of sector 2.
        Test that the total number of businesses in each sector after the addition matches the totals specified
        in annual_business_control_totals.
        Ensure that the number of unplaced businesses after the addition is exactly 2,750 because this model
        is not responsible for placing jobs, only for creating them.
        NOTE: unplaced businesses are indicated by building_id <= 0
        """
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='business_set',
                            table_data=self.business_data)
        business_set = BusinessDataset(in_storage=storage,
                                       in_table_name='business_set')

        # override the per-sector control totals on the shared fixture
        control_totals = self.annual_business_control_totals_data
        control_totals["total_number_of_businesses"] = array([8750, 4000, 3000])

        storage.write_table(table_name='ect_set', table_data=control_totals)
        ect_set = EmploymentControlTotalDataset(in_storage=storage,
                                                in_table_name='ect_set')

        # run model
        BusinessTransitionModel().run(year=2000, business_set=business_set,
                                      control_totals=ect_set)

        #check the total
        results = business_set.size()
        should_be = [15750]
        self.assertEqual(ma.allequal(should_be, results), True,
                         "Error in total number of businesses.")

        #check that total #businesses within each sector are close to what was set in the control_totals
        results = self.get_count_all_sectors(business_set)
        should_be = [8750.0, 4000, 3000]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)

        #check that the number of unplaced businesses is the number of new businesses created (2750)
        results = where(business_set.get_attribute("building_id") <= 0)[0].size
        should_be = [2750.0]
        self.assertEqual(ma.allclose(results, should_be, rtol=0.00001), True)
    def test_deletion_of_jobs(self):
        """Run the deletion model for two consecutive years and check job counts."""
        dataset_pool = DatasetPool(storage=self.storage, package_order=["washtenaw","urbansim", "opus_core"])
        gridcell_set = dataset_pool.get_dataset('gridcell')
        event_set = self._create_simple_deletion_event_set()

        expected_by_year = {
            # 2000: remove 5 jobs from gridcell 5 and all jobs from gridcell 10
            2000: array([10,10,10,10,5,10,10,10,10,0]),
            # 2001: remove another 3 jobs from gridcell 5
            2001: array([10,10,10,10,2,10,10,10,10,0]),
        }
        for year in (2000, 2001):
            DeletionEventModel().run(gridcell_set, event_set, year, dataset_pool)
            number_of_jobs = gridcell_set.compute_variables("urbansim.gridcell.number_of_jobs", dataset_pool=dataset_pool)
            self.assert_(ma.allclose(number_of_jobs, expected_by_year[year]))
 def test_both_scale(self):
     """After scaling by both cal phases, cal-on data is exactly 1 and cal-off exactly 0."""
     data = self.Data.data
     # linear so median = mean
     time_gain = 0.5*sp.arange(10) + 3.
     freq_gain = sp.cos(0.01*sp.arange(2048)) + 4
     data[:,:,1,:] = 0
     # explicit phase closure: 2 = sqrt(4*1)
     data[:,0,0,:] = time_gain[:,sp.newaxis]*freq_gain
     data[:,3,0,:] = time_gain[:,sp.newaxis]*freq_gain*4
     data[:,1,0,:] = time_gain[:,sp.newaxis]*freq_gain*2
     data[:,2,0,:] = time_gain[:,sp.newaxis]*freq_gain*2
     cal_scale.scale_by_cal(self.Data, True, True, False)
     self.assertTrue(ma.allclose(data[:,:,0,:], 1.))
     self.assertTrue(ma.allclose(data[:,:,1,:], 0.))
Example #46
0
    def test_addition_of_jobs(self):
        """Run the agent-event model for two consecutive years and check job counts."""
        dataset_pool = DatasetPool(storage=self.storage, package_order=["washtenaw","urbansim", "opus_core"])
        gridcell_set = dataset_pool.get_dataset('gridcell')
        event_set = self._create_simple_job_addition_event_set()
        jobs = dataset_pool.get_dataset("job")

        expected_by_year = {
            # 2000: add 5 jobs to gridcell 5 and 20 jobs to gridcell 10
            2000: array([10,10,10,10,15,10,10,10,10,30]),
            # 2001: add another 3 jobs to gridcell 5
            2001: array([10,10,10,10,18,10,10,10,10,30]),
        }
        for year in (2000, 2001):
            AgentEventModel().run(gridcell_set, event_set, jobs, year, dataset_pool)
            number_of_jobs = gridcell_set.compute_variables("urbansim.gridcell.number_of_jobs", dataset_pool=dataset_pool)
            self.assert_(ma.allclose(number_of_jobs, expected_by_year[year]))
Example #47
0
 def test_two_expressions(self):
     # test having two different expressions (to make sure having two autogen'd classes at once is working)
     storage = StorageFactory().get_storage("dict_storage")
     storage.write_table(
         table_name="dataset", table_data={"my_variable": array([4, -8, 0.5, 1]), "id": array([1, 2, 3, 4])}
     )
     dataset = Dataset(in_storage=storage, in_table_name="dataset", id_name="id", dataset_name="mydataset")
     cases = [
         ("2*sqrt(my_variable+10)", array([7.48331477, 2.82842712, 6.4807407, 6.63324958])),
         ("3*sqrt(my_variable+10)", array([11.22497216, 4.24264068, 9.72111105, 9.94987437])),
     ]
     for expression, expected in cases:
         result = dataset.compute_variables([expression])
         self.assert_(ma.allclose(result, expected, rtol=1e-6), "Error in test_two_expressions")
    def test_nl_probabilities_with_sampling(self):
        """Check nested-logit probabilities with a sampling correction.

        Two agents, six alternatives grouped into two nests of three, with
        per-nest scaling parameters (0.5, 0.8) and per-nest sampling rates
        (0.02, 0.05). The expected probabilities are computed by hand below,
        first for a 2d 'membership_in_nests' (one nest structure shared by
        both agents), then for a 3d one (a different structure per agent).
        """
        utilities = (array([[ 7.3,   4,  10.9, 5.5,   6.2,  1.6], # utilities for agent 1
                            [ 0.3,  1.9,  4.8, 9.1,  5.3,    0]]),# utilities for agent 2
                     array([0.5, 0.8]) # scaling parameters
                     )
        resources = {'membership_in_nests': {0: #         nest 1          nest 2  (applies to both agents)
                                                 array([[0,0,0,1,1,1],  [1,1,1,0,0,0]])},
                     'correct_for_sampling': True,
                     'sampling_rate':  #  nest 1  nest 2
                                     array([0.02, 0.05])                                 
                                                       }
        result = nl_probabilities().run(utilities, resources=resources)
        # check the first agent
        # hand computation: within-nest exp(utility/scale) terms ...
        nom1 = exp(array([5.5,   6.2,  1.6])/0.5)
        nom2 = exp(array([7.3,   4,  10.9])/0.8)
        denom1 = nom1.sum()
        denom2 = nom2.sum()
        # ... logsums corrected by the sampling rate of each nest ...
        logsum1 = log(denom1/0.02)
        corr_nom2 = array([exp(7.3/0.8), exp(4/0.8)/0.05, exp(10.9/0.8)/0.05])
        logsum2 = log(corr_nom2.sum())
        # ... and nest-choice terms exp(scale * logsum)
        nomm1 = exp(0.5*logsum1)
        nomm2 = exp(0.8*logsum2)
        denomm = nomm1+nomm2
        # probability = P(alternative | nest) * P(nest)
        should_be = array([nom2[0]/denom2 * nomm2/denomm, nom2[1]/denom2 * nomm2/denomm, nom2[2]/denom2 * nomm2/denomm,
                    nom1[0]/denom1 * nomm1/denomm, nom1[1]/denom1 * nomm1/denomm, nom1[2]/denom1 * nomm1/denomm])

        self.assertEqual(ma.allclose(result[0,:], should_be, atol=min(1e-8, should_be.min())), True)

        # test for a 3d structure of 'membership_in_nests', i.e. each agent has different structure of the nests
        resources['membership_in_nests'] = {0: #            nest 1        nest 2
                                                array([[[0,0,0,1,1,1], [1,1,1,0,0,0]],  # agent 1
                                                       [[1,0,0,0,1,1], [0,1,1,1,0,0]]])}# agent 2
        result = nl_probabilities().run(utilities, resources=resources)

        # check the second agent
        # same hand computation as above, using agent 2's nest membership
        nom1 = exp(array([0.3,  5.3,  0])/0.5)
        nom2 = exp(array([ 1.9, 4.8,  9.1])/0.8)
        denom1 = nom1.sum()
        denom2 = nom2.sum()
        corr_nom1 = array([exp(0.3/0.5), exp(5.3/0.5)/0.02, exp(0)/0.02])
        logsum1 = log(corr_nom1.sum())
        logsum2 = log(denom2/0.05)
        nomm1 = exp(0.5*logsum1)
        nomm2 = exp(0.8*logsum2)
        denomm = nomm1+nomm2
        should_be = array([nom1[0]/denom1 * nomm1/denomm, nom2[0]/denom2 * nomm2/denomm, nom2[1]/denom2 * nomm2/denomm,
                    nom2[2]/denom2 * nomm2/denomm, nom1[1]/denom1 * nomm1/denomm, nom1[2]/denom1 * nomm1/denomm])
        self.assertEqual(ma.allclose(result[1,:], should_be, atol=min(1e-8, should_be.min()), rtol=1e-4), True)
Example #49
0
    def test_addition_of_households(self):
        """Run the agent-event model for two consecutive years and check household counts."""
        dataset_pool = DatasetPool(storage=self.storage, package_order=["washtenaw","urbansim", "opus_core"])
        gridcell_set = dataset_pool.get_dataset('gridcell')
        event_set = self._create_household_addition_event_set()
        households = dataset_pool.get_dataset("household")

        expected_by_year = {
            # 2000: the model should add 6 households to gridcell 1
            2000: array([16,0,30,0,5,0,0,0,0,0]),
            # 2001: add 50% to gridcell 1 (8) and 25 households to gridcell 3
            2001: array([24,0,55,0,5,0,0,0,0,0]),
        }
        for year in (2000, 2001):
            AgentEventModel().run(gridcell_set, event_set, households, year, dataset_pool)
            number_of_households = gridcell_set.compute_variables("urbansim.gridcell.number_of_households",
                                                                  dataset_pool=dataset_pool)
            self.assert_(ma.allclose(number_of_households, expected_by_year[year]))
    def test_3(self):
        """Each gridcell should pick up the to-zone-3 travel time of its own zone."""
        variable_name = "psrc.gridcell.travel_time_hbw_am_drive_alone_to_3"
        storage = StorageFactory().get_storage("dict_storage")

        storage.write_table(table_name="gridcells",
                            table_data={"grid_id": array([1, 2, 3]),
                                        "zone_id": array([1, 1, 3])})
        storage.write_table(table_name="zones",
                            table_data={"zone_id": array([1, 2, 3]),
                                        "travel_time_hbw_am_drive_alone_to_1": array([1.1, 2.2, 3.3]),
                                        "travel_time_hbw_am_drive_alone_to_3": array([0.1, 0.2, 0.3])})

        dataset_pool = DatasetPool(package_order=["urbansim"], storage=storage)
        gridcell = dataset_pool.get_dataset("gridcell")
        gridcell.compute_variables(variable_name, dataset_pool=dataset_pool)
        values = gridcell.get_attribute(variable_name)

        # gridcells 1 and 2 are in zone 1 (time 0.1), gridcell 3 in zone 3 (0.3)
        should_be = array([0.1, 0.1, 0.3])
        self.assert_(ma.allclose(values, should_be, rtol=1e-3), msg="Error in " + variable_name)
    def test_my_inputs(self):
        """Looks up each person's home-to-work travel time in the travel data."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='persons',
            table_data={
                'person_id':          array([1, 2, 3, 4, 5]),
                'household_id':       array([1, 1, 3, 3, 3]),
                'member_id':          array([1, 2, 1, 2, 3]),
                'home_zone_id':       array([3, 1, 1, 2, 3]),
                'work_place_zone_id': array([1, 3, 3, 1, 2]),
            },
        )
        persons = PersonDataset(in_storage=storage, in_table_name='persons')

        travel_data = {
            'from_zone_id': array([3, 3, 1, 1, 1, 2, 2, 3, 2]),
            'to_zone_id':   array([1, 3, 1, 3, 2, 1, 3, 2, 2]),
            'am_single_vehicle_to_work_travel_time': array([1.1, 2.2, 3.3, 4.4, 0.5, 0.7, 8.7, 7.8, 1.0]),
        }
        values = VariableTestToolbox().compute_variable(
            self.variable_name,
            data_dictionary={'person': persons, 'travel_data': travel_data},
            dataset='person',
        )

        # travel time for each (home_zone, work_zone) pair above
        should_be = array([1.1, 4.4, 4.4, 0.7, 7.8])
        self.assert_(ma.allclose(values, should_be, rtol=1e-2),
                     'Error in ' + self.variable_name)
Example #52
0
    def test_my_inputs(self):
        """Each person should inherit the grid_id of its household."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='persons',
            table_data={
                'person_id':    array([1, 2, 3, 4, 5]),
                'household_id': array([1, 1, 3, 3, 3]),
                'member_id':    array([1, 2, 1, 2, 3]),
            },
        )
        persons = PersonDataset(in_storage=storage, in_table_name='persons')

        households = {
            'household_id': array([1, 2, 3]),
            'grid_id':      array([9, 9, 7]),
        }
        values = VariableTestToolbox().compute_variable(
            self.variable_name,
            data_dictionary={'household': households, 'person': persons},
            dataset='person',
        )

        # persons 1-2 live in household 1 (grid 9), persons 3-5 in household 3 (grid 7)
        should_be = array([9, 9, 7, 7, 7])
        self.assert_(ma.allclose(values, should_be, rtol=1e-7),
                     'Error in ' + self.variable_name)
Example #53
0
    def test_my_inputs(self):
        """Each gridcell should pick up the zone-level cost of its own zone."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='gridcells',
                            table_data={'grid_id': array([1, 2, 3]),
                                        'zone_id': array([1, 1, 3])})
        storage.write_table(table_name='zones',
                            table_data={'zone_id': array([1, 2, 3]),
                                        "trip_weighted_average_generalized_cost_hbw_to_work_am_drive_alone": array([4.1, 5.3, 6.2])})

        dataset_pool = DatasetPool(package_order=['urbansim'], storage=storage)
        gridcell = dataset_pool.get_dataset('gridcell')
        gridcell.compute_variables(self.variable_name, dataset_pool=dataset_pool)
        values = gridcell.get_attribute(self.variable_name)

        # gridcells 1 and 2 are in zone 1 (cost 4.1), gridcell 3 in zone 3 (6.2)
        should_be = array([4.1, 4.1, 6.2])
        self.assert_(ma.allclose(values, should_be, rtol=1e-3),
                     msg="Error in " + self.variable_name)
Example #54
0
    def test_my_input(self):
        """Checks the zone variable against hand-computed reference values."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='zones',
                            table_data={"zone_id": array([1, 3]),
                                        "number_of_jobs": array([10, 1])})
        storage.write_table(table_name='travel_data',
                            table_data={"from_zone_id": array([3, 3, 1, 1]),
                                        "to_zone_id": array([1, 3, 1, 3]),
                                        "am_total_transit_time_walk": array([1, 2, 3, 4])})

        dataset_pool = DatasetPool(package_order=['urbansim'], storage=storage)
        zone = dataset_pool.get_dataset('zone')
        zone.compute_variables(self.variable_name, dataset_pool=dataset_pool)
        values = zone.get_attribute(self.variable_name)

        # hand-computed expectation for the two zones given the fixture above
        should_be = array([1.17361, 10.25])
        self.assert_(ma.allclose(values, should_be, rtol=1e-3),
                     msg="Error in " + self.variable_name)
    def test(self):
        """Checks the per-parcel surveyed-household variable against the fixture."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='parcels',
                            table_data={'parcel_id': array([1, 2, 3, 4])})
        parcels = ParcelDataset(in_storage=storage, in_table_name='parcels')

        # household ids at or above this id mark surveyed households
        sid = number_of_surveyed_households.surveyed_households_starting_id
        households = {
            'parcel_id':    array([1, 2, 3, 4, 2, 2]),
            'household_id': array([sid, sid + 11, sid - 1, sid - 5, sid + 7, sid - 6]),
        }
        values = VariableTestToolbox().compute_variable(
            self.variable_name,
            data_dictionary={'parcel': parcels, 'household': households},
            dataset='parcel',
        )

        # expected value per parcel for the household ids above
        should_be = array([1, 1, 0, 0])
        self.assert_(ma.allclose(values, should_be, rtol=1e-20),
                     'Error in ' + self.variable_name)