Example #1
class AbstractServiceTests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.database_name = 'test_services_database'
        self.config = TestDatabaseConfiguration(database_name = self.database_name)
        self.db_server = DatabaseServer(self.config)
    
    def tearDown(self):
        self.db_server.drop_database(self.database_name)
        self.db_server.close()
        
    def test_create_when_already_exists(self):
        """Shouldn't do anything if the database already exists."""
        self.db_server.create_database(self.database_name)
        db = self.db_server.get_database(self.database_name)
        self.assertFalse(db.table_exists('run_activity'))
        self.assertFalse(db.table_exists('computed_indicators'))

        services = AbstractService(self.config)
        services.services_db.close()
        self.assertTrue(db.table_exists('run_activity')) 
        self.assertTrue(db.table_exists('computed_indicators'))

    def test_create(self):
        """Should create services tables if the database doesn't exist."""
        services = AbstractService(self.config)
        services.services_db.close()
        self.assertTrue(self.db_server.has_database(self.database_name))
        db = self.db_server.get_database(self.database_name)
        self.assertTrue(db.table_exists('run_activity')) 
        self.assertTrue(db.table_exists('computed_indicators'))               
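Test modules like this one are conventionally runnable directly; a typical footer, assuming the standard opus_unittest entry point used across Opus test files, would be:

if __name__ == '__main__':
    opus_unittest.main()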
Example #2
class Tests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.db_name = 'test_create_table'
        
        self.db_server = DatabaseServer(TestDatabaseConfiguration(protocol = 'mysql'))
        self.db_server.drop_database(self.db_name)
        self.db_server.create_database(self.db_name)
        self.db = self.db_server.get_database(self.db_name)
            
        
    def tearDown(self):
        self.db.close()
        self.db_server.drop_database(self.db_name)
        self.db_server.close()
        
        
    def test_setUp(self):
        self.assert_(not self.db.table_exists('building_types'))
        
        
    def test_create_table(self):
        CreateBuildingTypesTable().create_building_types_table(
            TestDatabaseConfiguration(protocol = 'mysql'), self.db_name)
        
        self.assert_(self.db.table_exists('building_types'))
Example #3
    def combine_tables(self, db_config, db_name, from_tables_names, to_table_name):
        dbconfig = DatabaseServerConfiguration(
            host_name = db_config.host_name,
            protocol = 'mysql',
            user_name = db_config.user_name,
            password = db_config.password                                       
        )
        db_server = DatabaseServer(dbconfig)
        
        try:
            db = db_server.get_database(db_name)
        except:
            raise NameError, "Unknown database '%s'!" % db_name

        union_statements = []
        for from_table_name in from_tables_names:
            union_statements.append('(SELECT * FROM %s)' % from_table_name)
            
        create_table_query = "CREATE TABLE %s " % to_table_name
        create_table_query += ' UNION ALL '.join(union_statements)
        create_table_query += ';'
        
        try:
            db.DoQuery('DROP TABLE IF EXISTS %s;' % to_table_name)
            db.DoQuery(create_table_query)
        except:
            raise NameError, "Unknown or invalid table specified!"
Example #4
def opusRun(progressCB,logCB,params):
    param_dict = {}
    for key, val in params.iteritems():
        param_dict[str(key)] = str(val)
        
    
    # get parameter values
    database_name = param_dict['database_name']
    database_server_connection = param_dict['database_server_connection']
    households_table_name = 'raw_pums_hh_data'
    
    query = "DELETE h.* FROM %s AS h WHERE h.persons = '00' IS NULL" % (households_table_name)

    # create engine and connection
    logCB("Openeing database connection\n")
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opus_db = server.get_database(database_name=database_name)   
    # Do Query       
    logCB("Deleting empty household records...\n")
    opus_db.execute(query)
    
    # Finish up
    logCB("Closing database connection\n")
    opus_db.close()
    logCB('Finished running query\n')
Example #5
    def __init__(self, config):
        ss = SimulationState(new_instance=True)
        ss.set_current_time(config['base_year'])
        ss.set_cache_directory(config['cache_directory'])

        SessionConfiguration(new_instance=True,
                             package_order=config['dataset_pool_configuration'].package_order,
                             in_storage=AttributeCache())
        #if not os.path.exists(config['cache_directory']):  ## if cache exists, it will automatically skip
        cacher = CreateBaseyearCache()
        cache_dir = cacher.run(config)

        if 'estimation_database_configuration' in config:
            db_server = DatabaseServer(config['estimation_database_configuration'])
            db = db_server.get_database(config['estimation_database_configuration'].database_name)
            out_storage = StorageFactory().get_storage(
                'sql_storage', 
                storage_location = db)
        else:
            output_cache = os.path.join(config['cache_directory'], str(config['base_year']+1))
            out_storage = StorageFactory().get_storage('flt_storage', storage_location=output_cache)

        dataset_pool = SessionConfiguration().get_dataset_pool()
        households = dataset_pool.get_dataset("household")
        buildings = dataset_pool.get_dataset("building")
        zones = dataset_pool.get_dataset("zone")
        zone_ids = zones.get_id_attribute()
        capacity_attribute_name = "residential_units"  #_of_use_id_%s" % id
        capacity_variable_name = "%s=sanfrancisco.zone.aggregate_%s_from_building" % \
                                 (capacity_attribute_name, capacity_attribute_name)
        buildings.compute_variables("sanfrancisco.building.zone_id", dataset_pool=dataset_pool)
        zones.compute_variables(capacity_variable_name, dataset_pool=dataset_pool)

        building_zone_id = buildings.get_attribute('zone_id')
        
#        is_household_unplace = datasets['household'].get_attribute("building_id") <= 0
        is_household_unplaced = 1 #all households are unplaced
        household_building_id = zeros(households.size(), dtype='int32')-1 #datasets['household'].get_attribute("building_id")
        
        for zone_id in zone_ids:
            capacity = zones.get_attribute_by_id(capacity_attribute_name, zone_id)
            is_household_in_this_zone = (households.get_attribute('zone_id') == zone_id)
            is_unplaced_household_in_this_zone = is_household_in_this_zone * is_household_unplaced
            is_building_in_this_zone = (building_zone_id == zone_id)
#            if not is_household_in_this_zone.sum() <= capacity:
            if capacity == 0 or is_household_in_this_zone.sum()==0:
                print "WARNING: zone %s has %s households but only %s units" % (zone_id, is_household_in_this_zone.sum(), capacity)
                continue
                        
            prob = buildings.get_attribute(capacity_attribute_name) * is_building_in_this_zone / array(capacity, dtype=float64)

            r = random(sum(is_unplaced_household_in_this_zone))
            prob_cumsum = ncumsum(prob)
            index_to_bldg = searchsorted(prob_cumsum, r)

            household_building_id[where(is_unplaced_household_in_this_zone)] = buildings.get_attribute_by_index('building_id', index_to_bldg)

#        import pdb;pdb.set_trace()
        households.set_values_of_one_attribute('building_id', household_building_id)
        households.write_dataset(out_table_name='households', out_storage=out_storage)
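The placement loop above assigns each unplaced household a building by inverse-CDF sampling over the buildings' shares of zone capacity; a minimal self-contained sketch of that step, with hypothetical unit counts:

from numpy import array, cumsum, searchsorted
from numpy.random import random

units = array([10, 0, 30, 60], dtype='float64')  # residential_units per building (hypothetical)
prob = units / units.sum()                       # selection probability per building
r = random(5)                                    # one uniform draw per unplaced household
index_to_bldg = searchsorted(cumsum(prob), r)    # first building whose cumulative share exceeds the draw
print index_to_bldg                              # e.g. [3 0 2 3 3]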
Example #6
def opusRun(progressCB,logCB,params):
    param_dict = {}
    for key, val in params.iteritems():
        param_dict[str(key)] = str(val)
        
    
    # get parameter values
    database_name = param_dict['database_name']
    database_server_connection = param_dict['database_server_connection']
    query = param_dict['query']

    # create engine and connection
    logCB("Openeing database connection\n")
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opus_db = server.get_database(database_name=database_name)
    
    # Do Query
    logCB("Running Query...\n")
    opus_db.execute(query)
    
    # Finish up
    logCB("Closing database connection\n")
    opus_db.close()
    logCB('Finished running query\n')
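A hypothetical driver for this tool; the callback implementations and the 'mysql_default' connection name are assumptions, not part of the tool itself:

def progressCB(percent):
    print 'progress: %s%%' % percent

def logCB(message):
    print message

opusRun(progressCB, logCB, {
    'database_name': 'my_database',
    'database_server_connection': 'mysql_default',  # hypothetical connection name
    'query': 'SELECT COUNT(*) FROM households;',
})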
Example #7
    def convert_databases(self, db_config, config):
        databases = config['databases']
        tables = config['tables']
        
        try: backup = config['backup']
        except KeyError: backup = True
        
        try: backup_postfix = config['backup_postfix']
        except KeyError: backup_postfix = '_old'
        
        dbconfig = DatabaseServerConfiguration(
            protocol = 'mysql',
            host_name = db_config.host_name,
            user_name = db_config.user_name,
            password = db_config.password                                       
        )
        db_server = DatabaseServer(dbconfig)
        
        for db_name in databases:
            db = db_server.get_database(db_name)

            self.convert_database(db, tables[db_name], backup, backup_postfix)
                
            db.close()
            
        db_server.close()
Example #8
    def run(self):
        time = -1
        latest = ""
        directoryname = 'data/vibe_gridcell/runs/'
        if self.isParcel is True:
            directoryname = 'data/vibe_parcel/'
        for filename in os.listdir(os.path.join(os.environ['OPUS_HOME'], directoryname)):
            print filename
            if time == -1:
                time = os.path.getmtime(os.path.join(os.environ['OPUS_HOME'], directoryname, filename))
                latest = filename
            if os.path.getmtime(os.path.join(os.environ['OPUS_HOME'], directoryname, filename)) > time:
                time = os.path.getmtime(os.path.join(os.environ['OPUS_HOME'], directoryname, filename))
                latest = filename

        config = DatabaseServerConfiguration(host_name = 'localhost',
                                             user_name = 'urbansim',
                                             password = '******',
                                             protocol = 'mysql')
        db_server = DatabaseServer(config)
        for i in range(1981, 1980 + int(self.YearsToRun)):
            newdir = latest + '/' + str(i)
            flt_directory_in = os.path.join(os.environ['OPUS_HOME'], directoryname, newdir)
            input_storage = flt_storage(storage_location = flt_directory_in)
            db = db_server.get_database('ress_' + str(i))
            output_storage = StorageFactory().get_storage('sql_storage', storage_location = db)
            ExportStorage().export(in_storage=input_storage, out_storage=output_storage)
Example #9
    def create_building_types_table(self, db_config, db_name):
        table_name = 'job_building_types'

        dbconfig = DatabaseServerConfiguration(
            host_name = db_config.host_name,
            user_name = db_config.user_name,
            protocol = 'mysql',
            password = db_config.password                                       
        )
        db_server = DatabaseServer(dbconfig)
        
        try:
            db = db_server.get_database(db_name)
        except:
            raise NameError, "Unknown database '%s'!" % db_name

        logger.log_status('Creating table %s.' % table_name)
        try:
            db.DoQuery('DROP TABLE IF EXISTS %s;' % table_name)
            db.DoQuery('CREATE TABLE %s '
                '(id INT, name varchar(50), home_based INT);' 
                % table_name)
        except:
            raise NameError, "Invalid table name specified! (%s)" % table_name
            
        db.DoQuery('INSERT INTO %s (id, name, home_based) VALUES'
            '(1, "commercial", 0),' 
            '(3, "industrial", 0),'
            '(2, "governmental", 0),'
            '(4, "home_based", 1);'
                % table_name)
Example #10
    def _create_db_from_chain_via_python(self,
                                         from_database_configuration,
                                         to_database_configuration,
                                         tables_to_copy):

        db_server_from = DatabaseServer(from_database_configuration)
        db_server_to = DatabaseServer(to_database_configuration)
        db_server_to.drop_database(to_database_configuration.database_name)
        db_server_to.create_database(to_database_configuration.database_name)

        database_out = db_server_to.get_database(to_database_configuration.database_name)

        scenario_db_manager = ScenarioDatabaseManager(
            server_configuration = from_database_configuration,
            base_scenario_database_name = from_database_configuration.database_name)
        table_mapping = scenario_db_manager.get_database_to_table_mapping()

        cross_db_operations = CrossDatabaseOperations()

        # by default, copy all tables
        if tables_to_copy == []:
            tables_to_copy = sum(table_mapping.values(), [])  # flatten a list of lists
        elif 'scenario_information' not in tables_to_copy:
            tables_to_copy.append('scenario_information')

        for database_name, tables in table_mapping.items():
            database_in = db_server_from.get_database(database_name)
            for table in tables:
                if table not in tables_to_copy:
                    continue

                logger.start_block("Copying table '%s' from database '%s'"
                                   % (table, database_name))

                try:
                    cross_db_operations.copy_table(table_to_copy = table,
                                                   database_in = database_in,
                                                   database_out = database_out,
                                                   use_chunking = True)
                finally:
                    logger.end_block()
            database_in.close()

        self._fix_scenario_information_table(database_out)
        database_out.close()
        db_server_from.close()
        db_server_to.close()
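One detail worth noting: sum(list_of_lists, []) is the idiom used above to flatten the table mapping into a single list of table names; a quick sketch with hypothetical databases:

table_mapping = {'db_a': ['households', 'jobs'], 'db_b': ['zones']}
print sum(table_mapping.values(), [])  # e.g. ['households', 'jobs', 'zones']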
Example #11
    def __init__(self, config):
        if 'estimation_database_configuration' in config:
            db_server = DatabaseServer(config['estimation_database_configuration'])
            db = db_server.get_database(config['estimation_database_configuration'].database_name)
        
            out_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=db)
        else:
            out_storage = StorageFactory().get_storage(type='flt_storage',
                storage_location=os.path.join(config['cache_directory'], str(config['base_year']+1)))

        simulation_state = SimulationState()
        simulation_state.set_cache_directory(config['cache_directory'])
        simulation_state.set_current_time(config['base_year'])
        attribute_cache = AttributeCache()
        
        SessionConfiguration(new_instance=True,
                             package_order=config['dataset_pool_configuration'].package_order,
                             in_storage=attribute_cache)
        
        if not os.path.exists(os.path.join(config['cache_directory'], str(config['base_year']))):
            #raise RuntimeError, "datasets uncached; run prepare_estimation_data.py first"
            CacheScenarioDatabase().run(config, unroll_gridcells=False)

        for dataset_name in config['datasets_to_preload']:
            SessionConfiguration().get_dataset_from_pool(dataset_name)

        households = SessionConfiguration().get_dataset_from_pool("household")
        household_ids = households.get_id_attribute()
        workers = households.get_attribute("workers")
        
        hh_ids = []
        member_ids = []
        is_worker = []
        job_ids = []

        for i in range(households.size()):  
            if workers[i] > 0:
                hh_ids += [household_ids[i]] * workers[i]
                member_ids += range(1, workers[i]+1)
                is_worker += [1] * workers[i]
                job_ids += [-1] * workers[i]

        in_storage = StorageFactory().get_storage('dict_storage')
        
        persons_table_name = 'persons'
        in_storage.write_table(
                table_name=persons_table_name,
                table_data={
                    'person_id':arange(len(hh_ids))+1,
                    'household_id':array(hh_ids),
                    'member_id':array(member_ids),
                    'is_worker':array(is_worker),                    
                    'job_id':array(job_ids),
                    },
            )

        persons = PersonDataset(in_storage=in_storage, in_table_name=persons_table_name)
        persons.write_dataset(out_storage=out_storage, out_table_name=persons_table_name)
Example #12
    def get(self, database):
        db_config = ScenarioDatabaseConfiguration()
        db_server = DatabaseServer(db_config)
        db = db_server.get_database(database)

        storage = StorageFactory().get_storage(
            'sql_storage',
            storage_location = db)
        return storage
Example #13
def drop_table(table_name, dbname, schema):
    dbserverconfig = IndicatorsDatabaseConfiguration(protocol='postgres')
    server = DatabaseServer(database_server_configuration = dbserverconfig)
    db = server.get_database(database_name=dbname)
    query = 'DROP TABLE %s.%s' % (schema, table_name)
    try:
        db.execute(query)
        logCB('DROPPED TABLE %s \n' % table_name)
    except:
        return
Example #14
class TestCreateJobBuildingTypesTable(opus_unittest.OpusTestCase):
    def setUp(self):
        self.db_name = 'test_create_table'
        
        self.db_server = DatabaseServer(TestDatabaseConfiguration(protocol = 'mysql',))
        
        self.db_server.drop_database(self.db_name)
        self.db_server.create_database(self.db_name)
        self.db = self.db_server.get_database(self.db_name)
            
        
    def tearDown(self):
        self.db.close()
        self.db_server.drop_database(self.db_name)
        self.db_server.close()
        
        
    def test_setUp(self):
        try:
            self.db.DoQuery('select * from job_building_types;')
            self.fail('Output table job_building_types already exists. (Check setUp)')
        except: pass
        
        
    def test_create_table(self):
        CreateJobBuildingTypesTable().create_building_types_table(
            TestDatabaseConfiguration(protocol = 'mysql'), self.db_name)
        
        try:
            self.db.DoQuery('select * from job_building_types;')
        except:
            self.fail('Expected output table job_building_types does not exist.')
    
            
    def test_values(self):
        CreateJobBuildingTypesTable().create_building_types_table(
            TestDatabaseConfiguration(protocol = 'mysql'), self.db_name)
        
        expected_results = [
            ['id', 'name', 'home_based'],
            [1, "commercial", 0],
            [3, "industrial", 0],
            [2, "governmental", 0],
            [4, "home_based", 1]
            ]
        
        try:
            results = self.db.GetResultsFromQuery(
                'select * from job_building_types;')
        except:
            self.fail('Expected output table job_building_types does not exist.')
        
        self.assert_(expected_results == results,
            "Table job_building_types has incorrect values! "
            "Expected: %s. Received: %s" % (expected_results, results))
Example #15
    def prepare_for_run(self, database_configuration, database_name):
        ## sql protocol, hostname, username and password are set in
        ## $OPUS_HOME/settings/database_server_setting.xml
        db_config = DatabaseConfiguration(database_name=database_name, database_configuration=database_configuration)
        db_server = DatabaseServer(db_config)
        if not db_server.has_database(database_name):
            db_server.create_database(database_name)
        db = db_server.get_database(database_name)
        self.out_storage = sql_storage(storage_location=db)

        return self.out_storage
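A hypothetical call, assuming this method lives on a model object and that 'my_connection' names a configuration in $OPUS_HOME/settings/database_server_setting.xml:

out_storage = model.prepare_for_run(database_configuration='my_connection',  # hypothetical connection name
                                    database_name='my_output_db')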
Example #16
    def save_results(self, out_storage=None, model_name=None):
        if self.specification is None or self.coefficients is None:
            raise ValueError, "model specification or coefficient is None"

        #invalid = self.coefficients.is_invalid()
        if False:
            logger.log_warning('Invalid coefficients. Not saving results!')
            return

        if model_name is None:
            model_name = self.config.get('model_name_for_coefficients', None)
            
        if model_name is None:
            if self.model_name is not None:
                model_name = self.model_name
            else:
                raise ValueError, "model_name unspecified"

        out_storage_available = True
        if out_storage:
            pass
        elif 'estimation_database_configuration' in self.config:
            try:
                db_server = DatabaseServer(self.config['estimation_database_configuration'])
                database_name = self.config["estimation_database_configuration"].database_name
    
                if not db_server.has_database(database_name):
                    db_server.create_database(database_name)
    
                output_db = db_server.get_database(database_name)
                out_storage = StorageFactory().get_storage(
                    type='sql_storage',
                    storage_location=output_db)
            except:
                logger.log_warning("Problem with connecting database given by 'estimation_database_configuration'.")
                out_storage_available = False
        else:
            logger.log_warning("No estimation_database_configuration given.")
            out_storage_available = False

        # the original model name of development_project_lcm is too long for a MySQL table name; truncate it
        if model_name.rfind("_development_project_location_choice_model") >=0:
            model_name = model_name.replace('_project', '')
        specification_table = '%s_specification' % model_name
        coefficients_table = '%s_coefficients' % model_name
        if out_storage_available:
            logger.start_block("Writing specification and coefficients into storage given by 'estimation_database_configuration'")
            self.specification.write(out_storage=out_storage, out_table_name=specification_table)
            self.coefficients.write(out_storage=out_storage, out_table_name=coefficients_table)
            logger.end_block()
        logger.start_block("Writing specification and coefficients into %s" % AttributeCache().get_storage_location())
        self.specification.write(out_storage=AttributeCache(), out_table_name=specification_table)
        self.coefficients.write(out_storage=AttributeCache(), out_table_name=coefficients_table)
        logger.end_block()
Example #17
class RunManagerTests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.database_name = 'test_services_database'
        self.config = TestDatabaseConfiguration(database_name = self.database_name)
        self.db_server = DatabaseServer(self.config)

    def tearDown(self):
        self.db_server.drop_database(self.database_name)
        self.db_server.close()

    def test_setup_run(self):
        base_directory = tempfile.mkdtemp(prefix='opus_tmp')
        run_name = 'test_scenario_name'
        run_manager = RunManager(self.config)

        run_manager.setup_new_run(cache_directory = os.path.join(base_directory, run_name),
                                  configuration = {})
        resulting_cache_directory = run_manager.get_current_cache_directory()
        self.assertTrue(resulting_cache_directory.find(run_name)>-1)
        self.assertEquals(os.path.dirname(resulting_cache_directory), base_directory)
        self.assertTrue(run_manager.ready_to_run)
        self.assertTrue(not os.path.exists(resulting_cache_directory))
        run_manager.services_db.close()
        os.rmdir(base_directory)

    def test_add_row_to_history(self):
        run_manager = RunManager(self.config)
        cache_directory = tempfile.mkdtemp(prefix='opus_tmp')
        resources = {'cache_directory':cache_directory,
                     'description':'test_run',
                     'base_year':2000,
                     'project_name': 'test'}

        run_manager.add_row_to_history(run_id = 1,
                                       resources = resources,
                                       status = 'done')

        db = self.db_server.get_database(self.database_name)
        run_activity_table = db.get_table('run_activity')

        s = select([run_activity_table.c.run_description,
                    run_activity_table.c.status],
                    whereclause = run_activity_table.c.run_id == 1)

        results = db.execute(s).fetchall()
        self.assertEqual(len(results), 1)

        run_name, status = results[0]
        self.assertEqual(status, 'done')
        self.assertEqual(run_name, 'test_run')

        run_manager.services_db.close()
        os.rmdir(cache_directory)
Example #18
def opusRun(progressCB,logCB,params):
    param_dict = {}
    for key, val in params.iteritems():
        param_dict[str(key)] = str(val)
    
    # get parameter values
    pums_id_to_bg_id_file_path = param_dict['pums_id_to_bg_id_file_path']
    database_server_connection = param_dict['database_server_connection']
    database_name = param_dict['database_name']
    
    # set up database server configuration
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opus_db = server.get_database(database_name=database_name)   

    # Main application routine:

    opus_db.execute("""
            CREATE TABLE pums_id_to_bg_id (
              county int,
              tract int,
              bg int,
              puma5 int,
              tract_string text,
              number_of_digits int);
            """)
    
    opus_db.execute("""
            LOAD DATA LOCAL INFILE '%s' INTO TABLE pums_id_to_bg_id
            FIELDS TERMINATED BY ',' LINES TERMINATED BY '\r\n';
            """ % (pums_id_to_bg_id_file_path))
    
    opus_db.execute("""
            update pums_id_to_bg_id
            set tract_string = tract;
    """)
    
    opus_db.execute("""
            update pums_id_to_bg_id
            set number_of_digits = length(tract_string);
    """)

    opus_db.execute("""
            update pums_id_to_bg_id
            set tract = tract*100
            where number_of_digits <= 3;
    """)

    progressCB(90)
    logCB("Closing database connection...\n")
    opus_db.close()
    logCB('Finished running queries.\n')
    progressCB(100)
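The LOAD DATA statement above expects a comma-separated file with CRLF line endings and no header row (there is no IGNORE clause); a hypothetical first few lines follow. The trailing tract_string and number_of_digits columns are left to MySQL defaults and populated by the subsequent UPDATEs.

53,010100,1,11000
53,010200,2,11000
53,010300,1,11000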
Example #19
def opusRun(progressCB,logCB,params):

    params_dict = {}
    for key, val in params.iteritems():
        params_dict[str(key)] = str(val)

    opus_data_directory = params_dict['opus_data_directory']
    opus_data_directory = paths.prepend_opus_home_if_relative(opus_data_directory)
    opus_data_year = params_dict['opus_data_year']
    database_name = params_dict['database_name']
    table_name = params_dict['table_name']
    database_server_connection = params_dict['database_server_connection']
    overwrite = params_dict['overwrite']
    
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opusdb = server.get_database(database_name=database_name, create_if_doesnt_exist=False)
    
    input_storage = sql_storage(storage_location = opusdb)

    attribute_cache = AttributeCache(cache_directory=opus_data_directory)
    output_storage = attribute_cache.get_flt_storage_for_year(opus_data_year)
    SimulationState().set_current_time(opus_data_year)
    SessionConfiguration(new_instance=True,
                         package_order=[],
                         in_storage=AttributeCache())

    if table_name == 'ALL':
        lst = input_storage.get_table_names()
    else:
        lst = re.split(' +', table_name.strip())
        
    tables = len(lst)
    lst_out = create_list_string(lst, ', ')

    logCB('caching tables:\n%s\n' % lst_out)
        
    for j, i in enumerate(lst, start=1):
        logCB("Exporting table '%s' to year %s of cache located at %s...\n" %
                   (i, opus_data_year, opus_data_directory))
        try:
            ExportStorage().export_dataset(
                                           dataset_name = i,
                                           in_storage = input_storage,
                                           out_storage = output_storage,
                                           overwrite = overwrite,
                                           )
        except:
            logCB('Error in exporting %s.' % i)
        progressCB(100 * j / tables)

    logCB('successfully cached tables:\n%s\n' % lst_out)
Example #20
    def _get_db(self, db_config, db_name):
        dbconfig = DatabaseServerConfiguration(
            protocol = 'mysql',
            host_name = db_config.host_name,
            user_name = db_config.user_name,
            password = db_config.password
        )
        db_server = DatabaseServer(dbconfig)

        try:
            return db_server.get_database(db_name)
        except:
            raise NameError, "Unknown database '%s'!" % db_name
Example #21
def opusRun(progressCB, logCB, params):
    params_dict = {}
    for key, val in params.iteritems():
        params_dict[str(key)] = str(val)

    database_name = params_dict["database_name"]
    opus_data_directory = params_dict["opus_data_directory"]
    opus_data_year = params_dict["opus_data_year"]
    opus_table_name = params_dict["opus_table_name"]

    database_server_connection = params_dict["database_server_connection"]
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration=dbs_config)
    opusdb = server.get_database(database_name=database_name)

    attribute_cache = AttributeCache(cache_directory=opus_data_directory)
    attribute_cache_years = [int(year) for year in os.listdir(opus_data_directory) if year.isdigit() and len(year) == 4]
    if opus_data_year != "ALL":
        attribute_cache_years = [opus_data_year]

    for year in attribute_cache_years:
        # input_storage = sql_storage(storage_location = opusdb)
        input_storage = attribute_cache.get_flt_storage_for_year(year)
        # output_storage = attribute_cache.get_flt_storage_for_year(opus_data_year)
        if opus_data_year == "ALL":
            opusdb = server.get_database(database_name=database_name + "_" + str(year))
        output_storage = sql_storage(storage_location=opusdb)
        SimulationState().set_current_time(year)
        SessionConfiguration(new_instance=True, package_order=[], in_storage=AttributeCache())

        if opus_table_name != "ALL":
            opus_table_name_list = re.split(" +", opus_table_name.strip())
        else:
            opus_table_name_list = input_storage.get_table_names()

        for i in opus_table_name_list:
            logCB("Exporting %s, %s, %s\n" % (i, year, opus_data_directory))
            ExportStorage().export_dataset(dataset_name=i, in_storage=input_storage, out_storage=output_storage)
Example #22
    def __init__(self,
                 indicator_directory,
                 name = None,
                 output_type = None,
                 storage_location = None,
                 output_style = ALL,
                 fixed_field_format = None  # Only used with the 'fixed_field' output type
                ):

        if output_type == 'sql' and not isinstance(storage_location, DatabaseConfiguration):
            raise Exception("If Table output_type is 'sql', a Database object must be passed as storage_location.")
        elif output_type in ['dbf', 'csv', 'tab', 'esri', 'fixed_field'] and \
               storage_location is not None and \
               not isinstance(storage_location,str):
            raise Exception("If Table output_type is %s, storage_location must be a path to the output directory"%output_type)
        elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri', 'fixed_field']:
            raise Exception("Table output_type must be either dbf, csv, tab, sql, esri, fixed_field, not %s"%output_type)

        if output_type == "fixed_field" and not fixed_field_format:
            raise ValueError("If Table output_type is 'fixed_field', an XML format string must be passed as fixed_field_format.")
        
        self.fixed_field_format = fixed_field_format

        if output_style not in [Table.ALL,
                                Table.PER_YEAR,
                                Table.PER_ATTRIBUTE]:
            raise Exception('%s output_style is not appropriate. '
                            'Choose from Table.ALL, Table.PER_YEAR, '
                            'and Table.PER_ATTRIBUTE' % output_style)

        self.output_type = output_type
        self.output_style = output_style

        if storage_location is None:
            storage_location = indicator_directory
        elif output_type == 'sql':
            server = DatabaseServer(database_server_configuration = storage_location)
            if not server.has_database(database_name = storage_location.database_name):
                server.create_database(database_name = storage_location.database_name)
            storage_location = server.get_database(
                                   database_name = storage_location.database_name)
        self.storage_location = storage_location

        self.output_storage = StorageFactory().get_storage(
            type = '%s_storage'%(self.output_type),
            storage_location = storage_location
        )

        self.name = name
        self.indicator_directory = indicator_directory
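A minimal construction sketch, assuming CSV output into the indicator directory (names are illustrative, not from the source):

table = Table(indicator_directory='indicators',   # hypothetical directory
              name='population_by_zone',          # hypothetical indicator name
              output_type='csv')                  # storage_location defaults to indicator_directory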
Example #23
    def convert_databases(self, db_config, databases, tables, patterns, backup=True, backup_postfix='_old'):
        dbconfig = DatabaseServerConfiguration(
            host_name = db_config.host_name,
            protocol = 'mysql',
            user_name = db_config.user_name,
            password = db_config.password
        )
        db_server = DatabaseServer(dbconfig)

        for db_name in databases:
            db = db_server.get_database(db_name)
            self.convert_database(db, tables[db_name], patterns, backup, backup_postfix)
            db.close()

        db_server.close()
Example #24
def create_dataset_from_sql_storage():
    from opus_core.database_management.configurations.services_database_configuration import ServicesDatabaseConfiguration
    from opus_core.database_management.database_server import DatabaseServer
    
    # make sure the environment variables are set, or replace them with appropriate values
    db_config = ServicesDatabaseConfiguration()
    db_server = DatabaseServer(db_config)
    database = db_server.get_database('services') # name of the database
        
    storage = StorageFactory().get_storage('sql_storage',
                                           storage_location = database)
    services_dataset = Dataset(in_storage = storage, 
                           in_table_name='available_runs', # name of the table
                           id_name=[] # the table does not have a unique identifier
                           )
    return services_dataset
Example #25
    def __init__(self):
        db_config = DatabaseServerConfiguration(
            host_name=settings.get_db_host_name(),
            user_name=settings.get_db_user_name(),
            password=settings.get_db_password()
        )
        db_server = DatabaseServer(db_config)
        db = db_server.get_database(settings.db)

        in_storage = StorageFactory().get_storage(
            'sql_storage',
            storage_location = db)

        gcs = GridcellDataset(in_storage=in_storage, nchunks=5)
        print "Read and Write GridcellDataset."
        out_storage = StorageFactory().build_storage_for_dataset(type='flt_storage',
            storage_location=settings.dir)
        ReadWriteADataset(gcs, out_storage=out_storage, out_table_name=settings.gcsubdir)
Example #26
def opusRun(progressCB,logCB,params):

    params_dict = {}
    for key, val in params.iteritems():
        params_dict[str(key)] = str(val)

    opus_data_directory = params_dict['opus_data_directory']
    opus_data_year = params_dict['opus_data_year']
    database_name = params_dict['database_name']
    table_name = params_dict['table_name']
    database_server_connection = params_dict['database_server_connection']
    
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opusdb = server.get_database(database_name=database_name)
    
    input_storage = sql_storage(storage_location = opusdb)

    attribute_cache = AttributeCache(cache_directory=opus_data_directory)
    output_storage = attribute_cache.get_flt_storage_for_year(opus_data_year)
    SimulationState().set_current_time(opus_data_year)
    SessionConfiguration(new_instance=True,
                         package_order=[],
                         in_storage=AttributeCache())

    if table_name == 'ALL':
        logCB('caching all tables...\n')
        lst = input_storage.get_table_names()
        for i in lst:
            ExportStorage().export_dataset(
                dataset_name = i,
                in_storage = input_storage,
                out_storage = output_storage,
            )
    else:
        logCB("Exporting table '%s' to year %s of cache located at %s...\n" %
                   (table_name, opus_data_year, opus_data_directory))
        ExportStorage().export_dataset(
            dataset_name = table_name,
            in_storage = input_storage,
            out_storage = output_storage)
Example #27
class Tests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.db_name = 'test_create_table'
        
        self.db_server = DatabaseServer(TestDatabaseConfiguration(protocol = 'mysql'))
        
        self.db_server.drop_database(self.db_name)
        self.db_server.create_database(self.db_name)
        self.db = self.db_server.get_database(self.db_name)
            
        
    def tearDown(self):
        self.db.close()
        self.db_server.drop_database(self.db_name)
        self.db_server.close()
        
        
    def test_setUp(self):
        try:
            self.db.DoQuery('select * from building_types;')
            self.fail('Output table building_types already exists. (Check setUp)')
        except: pass
        
    def test_create_table(self):
        creator = TableCreator()
        db = creator._get_db(TestDatabaseConfiguration(protocol = 'mysql'), self.db_name)
        self.assert_(not db.table_exists('test_table'))
        self.assert_(not db.table_exists('test_table_bak'))
        
        db.DoQuery('CREATE TABLE test_table '
                '(id INT, name varchar(50), units varchar(50));')
        self.assert_(db.table_exists('test_table'))
        self.assert_(not db.table_exists('test_table_bak'))

        creator._backup_table(db, 'test_table')
        self.assert_(db.table_exists('test_table'))
        self.assert_(db.table_exists('test_table_bak'))
        
        creator._drop_table(db, 'test_table')
        self.assert_(not db.table_exists('test_table'))
        self.assert_(db.table_exists('test_table_bak'))
Example #28
    def create_storage(self):

        try:
            server = DatabaseServer(self.server_config)
        except:
            logger.log_error('Cannot connect to the database server hosting the services database (%s).' % self.server_config.database_name)
            raise
        
        if not server.has_database(self.server_config.database_name):
            server.create_database(self.server_config.database_name)

        try:
            services_db = server.get_database(self.server_config.database_name)
        except:
            logger.log_error('Cannot connect to a services database on %s.'% server.get_connection_string(scrub = True))
            raise

        metadata.bind = services_db.engine
        setup_all()
        create_all()
        
        return services_db
Example #29
    def rename_by_swapping_employment_and_commercial_or_industrial_or_home_based_for_elcm(self, 
            db_config, db_name):
                
        db_server = DatabaseServer(db_config)
        
        try:
            db = db_server.get_database(db_name)
        except:
            raise NameError, "Unknown database '%s'!" % db_name
        
        pattern_template = '%s_%s_location_choice_model_%s'
        pattern = re.compile('^%s$' % pattern_template 
            % ('(employment)',
               '(commercial|industrial|home_based)', 
               '(specification|coefficients)'
               ))
        
        new_tables = []
        for table in db.get_tables_in_database():
            match = pattern.match(table)
            if match:
                new_tables.append((pattern_template 
                        # Swap around employment and commercial/industrial/home_based
                        % (match.group(2), match.group(1), match.group(3)), 
                    table))

        if len(new_tables) == 0:
            logger.log_warning('No tables to convert!')
            return

        for new_name, old_name in new_tables:
            logger.log_status('Renaming %s to %s.' % (old_name, new_name))
            db.drop_table(new_name)
            db.DoQuery('CREATE TABLE %s SELECT * FROM %s;' 
                % (new_name, old_name))
            # Rename old table
            db.drop_table('%s_old' %old_name)
            db.DoQuery('RENAME TABLE %s TO %s_old;' % (old_name, old_name))
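A worked illustration of the rename performed above, reusing the same pattern to show the before/after table names:

import re

pattern_template = '%s_%s_location_choice_model_%s'
pattern = re.compile('^%s$' % pattern_template
    % ('(employment)', '(commercial|industrial|home_based)', '(specification|coefficients)'))
match = pattern.match('employment_commercial_location_choice_model_specification')
print pattern_template % (match.group(2), match.group(1), match.group(3))
# commercial_employment_location_choice_model_specification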
Example #30
class ResultsManagerTests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.database_name = 'test_services_database'
        self.config = TestDatabaseConfiguration(database_name = self.database_name)
        self.db_server = DatabaseServer(self.config)
    
    def tearDown(self):
        self.db_server.drop_database(self.database_name)
        self.db_server.close()
                
    def test_add_computed_indicator(self):
        result_manager = ResultsManager(self.config)
        
        indicator_name = 'test'
        dataset_name = 'ds'
        expression = 'exp'
        run_id = None
        data_path = '/home'        
        
        result_manager.add_computed_indicator(indicator_name, dataset_name, expression, run_id, data_path, project_name = 'test')
        
        db = self.db_server.get_database(self.database_name)
        computed_indicators_table = db.get_table('computed_indicators')
        
        s = select([computed_indicators_table.c.indicator_name,
                    computed_indicators_table.c.expression],
                    whereclause = computed_indicators_table.c.dataset_name == 'ds')

        results = db.execute(s).fetchall()
        self.assertEqual(len(results), 1)
        
        i_name, exp = results[0]
        self.assertEqual(indicator_name, i_name)
        self.assertEqual(expression, exp)
        
        result_manager.services_db.close()                
Example #31
class ScenarioDatabaseManager(object):
    """Extract a flattened scenario database to the cache.
    """
    def __init__(self, server_configuration, base_scenario_database_name):
        self.database_server = DatabaseServer(server_configuration)
        self.base_scenario_database_name = base_scenario_database_name

    def get_database_to_table_mapping(self):
        table_mapping = self._get_table_mapping(
            scenario_database_name=self.base_scenario_database_name,
            table_mapping={})

        database_to_table_mapping = {}
        for table_name, database_name in table_mapping.items():
            if database_name not in database_to_table_mapping:
                database_to_table_mapping[database_name] = [table_name]
            else:
                database_to_table_mapping[database_name].append(table_name)

        return database_to_table_mapping

    def _get_table_mapping(self, scenario_database_name, table_mapping):
        database = self.database_server.get_database(scenario_database_name)

        tables_in_database = database.get_tables_in_database()
        relevant_tables = [
            table_name for table_name in tables_in_database
            if table_name not in table_mapping
        ]

        for table_name in relevant_tables:
            table_mapping[table_name] = scenario_database_name
            logger.log_status('Found: ' + scenario_database_name + "." +
                              table_name,
                              tags=['database'],
                              verbosity_level=3)

        if 'scenario_information' in tables_in_database:
            scenario_info_table = database.get_table('scenario_information')
            if 'PARENT_DATABASE_URL' in scenario_info_table.c:
                col = scenario_info_table.c.PARENT_DATABASE_URL
            elif 'parent_database_url' in scenario_info_table.c:
                col = scenario_info_table.c.parent_database_url
            else:
                raise ValueError('Scenario information table contains no parent_database_url column')

            query = select(columns=[col])
            next_database_name = database.execute(query).fetchone()
            if next_database_name == () or next_database_name[0] == '':
                next_database_name = None
            else:
                next_database_name = next_database_name[0]

            if next_database_name is not None:
                #old method stored chain as a jdbc url; if this is the case, this code will update it
                match = re.search("jdbc:mysql://[^/]*/(.*)",
                                  next_database_name)
                if match is not None:
                    next_database_name = match.group(1)
                    if 'PARENT_DATABASE_URL' in scenario_info_table.c:
                        u = scenario_info_table.update(
                            values={'PARENT_DATABASE_URL': next_database_name})
                    else:
                        u = scenario_info_table.update(
                            values={'parent_database_url': next_database_name})
                    database.execute(u)
                database.close()
                table_mapping = self._get_table_mapping(
                    next_database_name, table_mapping)
            else:
                database.close()
        else:
            database.close()

        return table_mapping
Example #32
    def __init__(self):
        config = AbstractUrbansimConfiguration()
        db_server = DatabaseServer(ScenarioDatabaseConfiguration())
        db = db_server.get_database('PSRC_2000_baseyear')

        config_changes = {
            'description':
            'baseline with travel model',
            'in_storage':
            StorageFactory().get_storage('sql_storage', storage_location=db),
            'cache_directory':
            None,  ### TODO: Set this cache_directory to something useful.
            'creating_baseyear_cache_configuration':
            CreatingBaseyearCacheConfiguration(
                cache_directory_root='d:/urbansim_cache',
                cache_from_database=True,
                baseyear_cache=BaseyearCacheConfiguration(
                    r'D:\urbansim_cache\run_1417.2006_12_08_01_50'),
                cache_scenario_database=
                'urbansim.model_coordinators.cache_scenario_database',
                tables_to_cache=[
                    'annual_employment_control_totals',
                    'annual_household_control_totals', 'buildings',
                    'building_types', 'development_event_history', 'gridcells',
                    'households', 'job_building_types', 'jobs', 'travel_data',
                    'zones', 'counties',
                    'commercial_development_location_choice_model_coefficients',
                    'commercial_development_location_choice_model_specification',
                    'commercial_employment_location_choice_model_coefficients',
                    'commercial_employment_location_choice_model_specification',
                    'home_based_employment_location_choice_model_specification',
                    'home_based_employment_location_choice_model_coefficients',
                    'industrial_employment_location_choice_model_coefficients',
                    'industrial_employment_location_choice_model_specification',
                    'industrial_development_location_choice_model_coefficients',
                    'industrial_development_location_choice_model_specification',
                    'residential_development_location_choice_model_coefficients',
                    'residential_development_location_choice_model_specification',
                    'fazes', 'urbansim_constants',
                    'household_location_choice_model_coefficients',
                    'household_location_choice_model_specification',
                    'land_price_model_coefficients',
                    'land_price_model_specification',
                    'residential_land_share_model_coefficients',
                    'residential_land_share_model_specification',
                    'plan_type_group_definitions', 'plan_type_groups',
                    'large_areas', 'household_characteristics_for_ht',
                    'development_types', 'development_type_group_definitions',
                    'development_constraints',
                    'annual_relocation_rates_for_households',
                    'annual_relocation_rates_for_jobs', 'base_year', 'cities',
                    'development_events', 'development_type_groups',
                    'employment_adhoc_sector_group_definitions',
                    'employment_adhoc_sector_groups', 'employment_events',
                    'employment_sectors', 'land_use_events', 'plan_types',
                    'race_names', 'target_vacancies', 'jobs_for_estimation',
                    'households_for_estimation',
                    'development_events_exogenous', 'job_building_types'
                ],
                tables_to_cache_nchunks={'gridcells': 4},
                tables_to_copy_to_previous_years={
                    'development_type_groups': 1995,
                    'development_types': 1995,
                    'development_type_group_definitions': 1995,
                },
            ),
            'models': [],  # run no urbansim models
            'scenario_database_configuration':
            ScenarioDatabaseConfiguration(
                database_name='PSRC_2000_baseyear', ),
            'base_year':
            2000,
            'years': (2001, 2001),
            'unroll_gridcells':
            False
        }
        config.merge(config_changes)

        travel_model_config = create_travel_model_configuration(
            'baseline_travel_model_psrc_fresh',
            mode='get_emme2_data_after_run',
            years_to_run={2001: '2000_06'},
            emme2_batch_file='QUICKRUN.BAT')
        config['travel_model_configuration'] = travel_model_config
        self.merge(config)
Example #33
def opusRun(progressCB, logCB, params):
    param_dict = {}
    for key, val in params.iteritems():
        param_dict[str(key)] = str(val)

    # get parameter values
    database_name = param_dict['database_name']
    database_server_connection = param_dict['database_server_connection']
    raw_pums_households_table_name = 'raw_pums_hh_data'

    dbs_config = DatabaseServerConfiguration(
        database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration=dbs_config)
    opus_db = server.get_database(database_name=database_name)

    logCB("Creating temporary table hh_temp...\n")

    opus_db.execute("""
            drop table if exists hh_temp;
            """)
    opus_db.execute("""
            create table hh_temp
            select PUMA5, 0 as pumano,
                   SERIALNO, 0 as hhpumsid,
                   UNITTYPE, 0 as hhtype,
                   NOC, 0 as childpresence,
                   HHT, 0 as hhldtype,
                   PERSONS, 0 as hhldsize,
                   HINC, 0 as hhldinc,
                   0 as groupquarter
            from %s;
            """ % (raw_pums_households_table_name))
    progressCB(5)
    logCB("Updating values...\n")
    opus_db.execute("""
            update hh_temp
            set pumano = PUMA5,
            hhpumsid = SERIALNO;
            """)
    opus_db.execute("""        
            update hh_temp
            set hhtype = 1
            where UNITTYPE = 0;
            """)
    opus_db.execute("""
            update hh_temp
            set hhtype = 2
            where UNITTYPE = 1 or UNITTYPE = 2;
            """)
    opus_db.execute("""        
            update hh_temp
            set childpresence = 1
            where NOC <> '00';
            """)
    opus_db.execute("""        
            update hh_temp
            set childpresence = 2
            where NOC = '00';
            """)
    opus_db.execute("""
            update hh_temp
            set childpresence = -99
            where hhtype = 2;
            """)
    opus_db.execute("""         
            update hh_temp
            set hhldtype = 1
            where HHT = 1;
            """)
    opus_db.execute("""
            update hh_temp
            set hhldtype = 2
            where HHT = 2;
            """)
    opus_db.execute("""        
            update hh_temp
            set hhldtype = 3
            where HHT = 3;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldtype = 4
            where HHT = 4 or HHT = 6;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldtype = 5
            where HHT = 5 or HHT = 7;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldtype = -99
            where hhtype = 2;
            """)
    opus_db.execute("""
            update hh_temp
            set hhldsize = 1
            where persons = '01';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 2
            where persons = '02';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 3
            where persons = '03';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 4
            where persons = '04';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 5
            where persons = '05';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 6
            where persons = '06';
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = 7
            where cast(persons as signed) >= 7;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldsize = -99
            where hhtype = 2;
            """)
    opus_db.execute("""    
            update hh_temp
            set hhldinc = 1
            where cast(HINC as signed) <= 14999;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 2
            where (cast(HINC as signed) >= 15000) AND (cast(HINC as signed) <= 24999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 3
            where (cast(HINC as signed) >= 25000) AND (cast(HINC as signed) <= 34999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 4
            where (cast(HINC as signed) >= 35000) AND (cast(HINC as signed) <= 44999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 5
            where (cast(HINC as signed) >= 45000) AND (cast(HINC as signed) <= 59999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 6
            where (cast(HINC as signed) >= 60000) AND (cast(HINC as signed) <= 99999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 7
            where (cast(HINC as signed) >= 100000) AND (cast(HINC as signed) <= 149999);
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = 8
            where cast(HINC as signed) >= 150000;
            """)
    opus_db.execute("""            
            update hh_temp
            set hhldinc = -99
            where hhtype = 2;
            """)
    opus_db.execute("""
            update hh_temp
            set groupquarter = 1
            where UNITTYPE = 1;
            """)
    opus_db.execute("""            
            update hh_temp
            set groupquarter = 2
            where UNITTYPE = 2;
            """)
    opus_db.execute("""            
            update hh_temp
            set groupquarter = -99
            where UNITTYPE = 0;
            """)
    progressCB(75)
    logCB("Creating 'housing_pums' table for synthesizer...\n")
    opus_db.execute("""
            drop table if exists housing_pums;
            """)
    opus_db.execute("""
            create table housing_pums
            select pumano, hhpumsid,hhtype, childpresence, hhldtype, hhldsize, hhldinc, groupquarter
            from hh_temp;
            """)
    opus_db.execute("""            
            ALTER TABLE housing_pums ADD COLUMN hhid INTEGER NOT NULL AUTO_INCREMENT AFTER hhpumsid, add primary key (hhid);
            """)

    hhld_variables = 'childpresence, hhldtype, hhldsize, hhldinc'
    gq_variables = 'groupquarter'
    opus_db.execute(
        '''create table hhld_pums select pumano, hhpumsid, hhid, %s from housing_pums where hhtype = 1'''
        % (hhld_variables))
    opus_db.execute(
        '''create table gq_pums select pumano, hhpumsid, hhid, %s from housing_pums where hhtype = 2'''
        % (gq_variables))

    progressCB(90)
    logCB("Closing database connection...\n")
    opus_db.close()
    logCB('Finished running queries.\n')
    progressCB(100)
Example #34
class CacheScenarioDatabase(Model):
    """Extract a flattened scenario database to the cache.
    """
    def run(self, config, show_output=False):
        logger.log_status("Caching large SQL tables to: " +
                          config['cache_directory'])
        self.show_output = show_output

        #import pydevd;pydevd.settrace()

        server_configuration = config['scenario_database_configuration']

        scenario_database_manager = ScenarioDatabaseManager(
            server_configuration=server_configuration,
            base_scenario_database_name=server_configuration.database_name)

        self.database_server = DatabaseServer(server_configuration)

        database_to_table_mapping = scenario_database_manager.get_database_to_table_mapping()

        self.tables_to_cache = config[
            'creating_baseyear_cache_configuration'].tables_to_cache

        simulation_state = SimulationState()
        if 'low_memory_run' in config:
            simulation_state.set_low_memory_run(config['low_memory_run'])
        simulation_state.set_cache_directory(config['cache_directory'])
        simulation_state.set_current_time(config['base_year'])

        self.tables_cached = set()
        for database_name, tables in database_to_table_mapping.items():
            self.cache_database_tables(config, database_name, tables)

        un_cached_tables = set(self.tables_to_cache) - self.tables_cached
        if un_cached_tables:
            logger.log_warning(
                'The following requested tables were NOT cached:')
            for table_name in un_cached_tables:
                logger.log_warning('\t%s' % table_name)

    def cache_database_tables(self, config, database_name, tables):
        """Loads cache with all tables from this database.
        """
        database = self.database_server.get_database(database_name)
        in_storage = StorageFactory().get_storage(type='sql_storage',
                                                  storage_location=database)

        for table_name in tables:
            if table_name in self.tables_to_cache:
                try:
                    self.cache_database_table(table_name, config['base_year'],
                                              database, in_storage, config)
                except:
                    logger.log_error(
                        "Problem caching table %s from database %s" %
                        (table_name, database_name))
                    raise
                self.tables_cached.add(table_name)

    def cache_database_table(self, table_name, base_year, database, in_storage,
                             config):
        """Copy this table from input database into attribute cache.
        """
        logger.start_block('Caching table %s' % table_name)
        try:
            #TODO: why is the config being modified...seems like its kind of useless here...
            config['storage_location'] = os.path.join(
                config['cache_directory'], str(base_year), table_name)

            if not os.path.exists(config['storage_location']):
                flt_storage = StorageFactory().get_storage(
                    type='flt_storage',
                    subdir='store',
                    storage_location=config['storage_location'])

                table = database.get_table(table_name)

                id_name = [
                    primary_key.name.lower()
                    for primary_key in table.primary_key
                ]

                dataset = Dataset(resources=config,
                                  in_storage=in_storage,
                                  out_storage=flt_storage,
                                  in_table_name=table_name,
                                  id_name=id_name)

                nchunks = config[
                    'creating_baseyear_cache_configuration'].tables_to_cache_nchunks.get(
                        table_name, 1)
                current_time = SimulationState().get_current_time()
                SimulationState().set_current_time(base_year)
                dataset.load_dataset(nchunks=nchunks,
                                     flush_after_each_chunk=True)
                SimulationState().set_current_time(current_time)
            else:
                logger.log_status(config['storage_location'] +
                                  " already exits; skip caching " + table_name)

        finally:
            logger.end_block()