def run(self, config, show_output=False):
    """Cache the requested SQL tables from the scenario database chain.

    Resolves the database chain via ScenarioDatabaseManager, configures
    the SimulationState (cache directory, base year, optional low-memory
    mode), caches each database's tables, and finally warns about any
    requested table that was never cached.

    Parameters
    ----------
    config : mapping
        Must provide 'cache_directory', 'scenario_database_configuration',
        'creating_baseyear_cache_configuration' and 'base_year';
        'low_memory_run' is optional.
    show_output : bool
        Stored on the instance; presumably consumed by
        cache_database_tables -- TODO confirm.
    """
    logger.log_status("Caching large SQL tables to: " + config['cache_directory'])
    self.show_output = show_output

    server_configuration = config['scenario_database_configuration']
    scenario_database_manager = ScenarioDatabaseManager(
        server_configuration=server_configuration,
        base_scenario_database_name=server_configuration.database_name)
    self.database_server = DatabaseServer(server_configuration)

    database_to_table_mapping = \
        scenario_database_manager.get_database_to_table_mapping()
    self.tables_to_cache = \
        config['creating_baseyear_cache_configuration'].tables_to_cache

    simulation_state = SimulationState()
    if 'low_memory_run' in config:
        simulation_state.set_low_memory_run(config['low_memory_run'])
    simulation_state.set_cache_directory(config['cache_directory'])
    simulation_state.set_current_time(config['base_year'])

    self.tables_cached = set()
    for database_name, tables in database_to_table_mapping.items():
        self.cache_database_tables(config, database_name, tables)

    # Anything requested but never encountered in the database chain
    # was silently skipped above -- surface it here.
    un_cached_tables = set(self.tables_to_cache) - self.tables_cached
    if un_cached_tables:
        logger.log_warning('The following requested tables were NOT cached:')
        for table_name in un_cached_tables:
            logger.log_warning('\t%s' % table_name)
def _create_db_from_chain_via_python(self, from_database_configuration,
                                     to_database_configuration,
                                     tables_to_copy):
    """Flatten a scenario database chain into one freshly created database.

    Drops and recreates the target database, copies each requested table
    from the chain (resolved via ScenarioDatabaseManager) into it with
    chunking, then fixes up the scenario_information table.

    Parameters
    ----------
    from_database_configuration, to_database_configuration :
        Server configurations for the source chain and the target database.
    tables_to_copy : list of str
        Table names to copy; an empty list means "copy every table in the
        chain".  The caller's list is never modified.
    """
    db_server_from = DatabaseServer(from_database_configuration)
    db_server_to = DatabaseServer(to_database_configuration)

    # Start from a clean target database.
    db_server_to.drop_database(to_database_configuration.database_name)
    db_server_to.create_database(to_database_configuration.database_name)
    database_out = db_server_to.get_database(
        to_database_configuration.database_name)

    scenario_db_manager = ScenarioDatabaseManager(
        server_configuration=from_database_configuration,
        base_scenario_database_name=from_database_configuration.database_name)
    table_mapping = scenario_db_manager.get_database_to_table_mapping()
    cross_db_operations = CrossDatabaseOperations()

    # By default copy all tables; otherwise ensure scenario_information
    # comes along.  Rebind to a copy instead of append()-ing so the
    # caller's list is not mutated (the original code aliased it).
    if not tables_to_copy:
        tables_to_copy = sum(table_mapping.values(), [])  # flatten list of lists
    elif 'scenario_information' not in tables_to_copy:
        tables_to_copy = list(tables_to_copy) + ['scenario_information']

    for database_name, tables in table_mapping.items():
        database_in = db_server_from.get_database(database_name)
        for table in tables:
            if table not in tables_to_copy:
                continue
            logger.start_block("Copying table '%s' from database '%s'"
                               % (table, database_name))
            try:
                cross_db_operations.copy_table(table_to_copy=table,
                                               database_in=database_in,
                                               database_out=database_out,
                                               use_chunking=True)
            finally:
                logger.end_block()
        database_in.close()

    self._fix_scenario_information_table(database_out)
    database_out.close()
    db_server_from.close()
    db_server_to.close()
def _create_db_from_chain_via_python(self, from_database_configuration,
                                     to_database_configuration,
                                     tables_to_copy):
    """Collapse a scenario database chain into a single new database.

    NOTE(review): this method is defined twice in this file with identical
    behavior; this later definition shadows the earlier one -- consider
    deleting one copy.

    The target database is dropped and recreated, each requested table is
    copied from the chain into it (chunked), and scenario_information is
    fixed up afterwards.

    Parameters
    ----------
    from_database_configuration, to_database_configuration :
        Server configurations for the source chain and the target database.
    tables_to_copy : list of str
        Tables to copy; an empty list means "everything in the chain".
        The caller's list is never modified.
    """
    db_server_from = DatabaseServer(from_database_configuration)
    db_server_to = DatabaseServer(to_database_configuration)

    # Recreate the target database from scratch.
    db_server_to.drop_database(to_database_configuration.database_name)
    db_server_to.create_database(to_database_configuration.database_name)
    database_out = db_server_to.get_database(
        to_database_configuration.database_name)

    scenario_db_manager = ScenarioDatabaseManager(
        server_configuration=from_database_configuration,
        base_scenario_database_name=from_database_configuration.database_name)
    table_mapping = scenario_db_manager.get_database_to_table_mapping()
    cross_db_operations = CrossDatabaseOperations()

    # Default to every table in the chain; otherwise guarantee
    # scenario_information is included.  Rebind rather than append()
    # so the caller's list is left untouched (the original mutated it).
    if not tables_to_copy:
        tables_to_copy = sum(table_mapping.values(), [])  # flatten list of lists
    elif 'scenario_information' not in tables_to_copy:
        tables_to_copy = list(tables_to_copy) + ['scenario_information']

    for database_name, tables in table_mapping.items():
        database_in = db_server_from.get_database(database_name)
        for table in tables:
            if table not in tables_to_copy:
                continue
            logger.start_block("Copying table '%s' from database '%s'"
                               % (table, database_name))
            try:
                cross_db_operations.copy_table(table_to_copy=table,
                                               database_in=database_in,
                                               database_out=database_out,
                                               use_chunking=True)
            finally:
                logger.end_block()
        database_in.close()

    self._fix_scenario_information_table(database_out)
    database_out.close()
    db_server_from.close()
    db_server_to.close()
def run(self, config, show_output=False):
    """Cache the requested SQL tables from the scenario database chain.

    NOTE(review): this method is defined twice in this file with identical
    behavior; this later definition shadows the earlier one -- consider
    deleting one copy.

    Resolves the database chain, configures SimulationState (cache
    directory, base year, optional low-memory mode), caches each
    database's tables, and warns about any requested table that was
    never cached.

    Parameters
    ----------
    config : mapping
        Must provide 'cache_directory', 'scenario_database_configuration',
        'creating_baseyear_cache_configuration' and 'base_year';
        'low_memory_run' is optional.
    show_output : bool
        Stored on the instance; presumably consumed by
        cache_database_tables -- TODO confirm.
    """
    logger.log_status("Caching large SQL tables to: " + config['cache_directory'])
    self.show_output = show_output

    server_configuration = config['scenario_database_configuration']
    scenario_database_manager = ScenarioDatabaseManager(
        server_configuration=server_configuration,
        base_scenario_database_name=server_configuration.database_name)
    self.database_server = DatabaseServer(server_configuration)

    database_to_table_mapping = \
        scenario_database_manager.get_database_to_table_mapping()
    self.tables_to_cache = \
        config['creating_baseyear_cache_configuration'].tables_to_cache

    simulation_state = SimulationState()
    if 'low_memory_run' in config:
        simulation_state.set_low_memory_run(config['low_memory_run'])
    simulation_state.set_cache_directory(config['cache_directory'])
    simulation_state.set_current_time(config['base_year'])

    self.tables_cached = set()
    for database_name, tables in database_to_table_mapping.items():
        self.cache_database_tables(config, database_name, tables)

    # Report any requested table that never appeared in the chain.
    un_cached_tables = set(self.tables_to_cache) - self.tables_cached
    if un_cached_tables:
        logger.log_warning('The following requested tables were NOT cached:')
        for table_name in un_cached_tables:
            logger.log_warning('\t%s' % table_name)