def write_table(self, table_name, table_data):
    """Write a table to the local cache, then mirror it to the remote host.

    'table_name' specifies the subdirectory relative to base directory.
    'table_data' is a dictionary where keys are the column names and values
    are value arrays of the corresponding columns.

    Raises FltError if the remote table directory already holds more than one
    file for the same column (same short name, different extensions).
    """
    # Column sizes are validated by the helper but not needed here.
    unused_column_size, column_names = self._get_column_size_and_names(table_data)
    local_dir = os.path.join(self._get_base_directory(), table_name)
    remote_dir = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    remote_base_dir = convertntslash(self._get_base_directory_remote())
    if not self.ssh_client.exists_remotely(remote_dir):
        self.ssh_client.makedirs(remote_dir)
    else:
        ## remove any existing remote file for each column being written, so a
        ## column re-written with a different dtype does not leave a stale file
        ## with the old extension behind
        existing_files = self._get_remote_files(table_name=table_name)
        # loop variable renamed from 'file' to avoid shadowing the builtin
        existing_file_short_names = [existing_file.get_short_name()
                                     for existing_file in existing_files]
        for column_name in column_names:
            n = existing_file_short_names.count(column_name)
            if n == 0:
                continue
            elif n == 1:
                i = existing_file_short_names.index(column_name)
                self.ssh_client.remove(existing_files[i].get_name())
            elif n > 1:
                # include the conflicting file names so the message actually
                # delivers the list its trailing ":\n" promises
                conflicting_names = [f.get_name() for f in existing_files
                                     if f.get_short_name() == column_name]
                message = "Column '%s' has multiple files with different file extensions:\n" % column_name
                message += "\n".join(conflicting_names) + "\n"
                message += "Either the process of copying files into this directory is flawed, or there is a bug in Opus."
                raise FltError(message)

    file_flt_storage.write_table(self, table_name, table_data)
    self.ssh_client.mput(local_dir, remote_base_dir)
def test_writing_column_to_file_when_file_of_same_column_name_and_different_type_already_exists(self):
    """Re-writing a column with a new dtype must remove the old remote file
    (different extension) and upload the new one."""
    if skip_test():
        return
    column_name = "some_column"
    os.mkdir(os.path.join(self.storage._get_base_directory(), self.table_name))
    # 'open' replaces the deprecated 'file' builtin; create an empty stale
    # column file with a 4-byte little-endian int extension
    existing_file = open(os.path.join(self.storage._get_base_directory(),
                                      self.table_name, column_name + ".li4"), "w")
    existing_file.close()
    remote_file = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, column_name + ".li4"))
    if not self.storage.ssh_client.exists_remotely(os.path.dirname(remote_file)):
        self.storage.ssh_client.makedirs(os.path.dirname(remote_file))
    self.storage.ssh_client.mput(existing_file.name, remote_file)

    storage = sftp_flt_storage(storage_location=self.sftp_location)

    ## Test writing the same column as 8-byte ints
    my_data = {column_name: array([9, 99, 999], dtype='<i8')}
    storage.write_table(table_name=self.table_name, table_data=my_data)

    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(not self.storage.ssh_client.exists_remotely(remote_file))
    new_remote_file = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, column_name + ".li8"))
    self.assertTrue(self.storage.ssh_client.exists_remotely(new_remote_file))
def test_writing_column_to_file_when_two_files_of_same_column_name_and_different_type_already_exist(self):
    """If two remote files exist for one column (different extensions),
    write_table must raise FltError and write nothing."""
    if skip_test():
        return
    column_name = "some_column"
    os.mkdir(os.path.join(self.storage._get_base_directory(), self.table_name))
    # 'open' replaces the deprecated 'file' builtin
    existing_file_1 = open(os.path.join(self.storage._get_base_directory(),
                                        self.table_name, column_name + ".li4"), "w")
    existing_file_1.close()
    existing_file_2 = open(os.path.join(self.storage._get_base_directory(),
                                        self.table_name, column_name + ".bi4"), "w")
    existing_file_2.close()
    remote_file_1 = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, column_name + ".li4"))
    if not self.storage.ssh_client.exists_remotely(os.path.dirname(remote_file_1)):
        self.storage.ssh_client.makedirs(os.path.dirname(remote_file_1))
    self.storage.ssh_client.mput(existing_file_1.name, remote_file_1)
    remote_file_2 = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, column_name + ".bi4"))
    if not self.storage.ssh_client.exists_remotely(os.path.dirname(remote_file_2)):
        self.storage.ssh_client.makedirs(os.path.dirname(remote_file_2))
    self.storage.ssh_client.mput(existing_file_2.name, remote_file_2)

    storage = sftp_flt_storage(storage_location=self.sftp_location)

    # Test writing: ambiguous existing files must raise
    my_data = {column_name: array([9, 99, 999], dtype='<i8')}
    self.assertRaises(FltError, storage.write_table, self.table_name, my_data)

    new_remote_file = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, column_name + ".li8"))
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(not self.storage.ssh_client.exists_remotely(new_remote_file))
def test_write_int_array(self):
    """Round-trip an int column: write_table uploads it remotely, and
    load_table restores the local file with the expected values."""
    if skip_test():
        return
    expected = array([100, 70])
    table_data = {
        'int_column': expected,
    }
    # file_name is e.g. 'int_column.li4' for a little-endian 32 bit machine
    file_name = 'int_column.%(endian)si%(bytes)u' % replacements
    # numpy_dtype is e.g. '<i4' for a little-endian 32 bit machine
    numpy_dtype = '%(numpy_endian)si%(bytes)u' % replacements
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, file_name))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, file_name)

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))

    # delete the local copy so load_table must fetch it from the remote host
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, dtype=numpy_dtype)
    self.assertTrue((expected == actual).all())
def _get_remote_files(self, table_name=''):
    """Return storage_file objects for every file in the remote table directory.

    Raises FltError when the remote directory does not exist.
    """
    dataset_path = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    if not self.ssh_client.exists_remotely(dataset_path):
        raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    pattern = dataset_path + '/*.*'
    return [self.storage_file(file_name)
            for file_name in self.ssh_client.glob(pattern)]
def test_get_table_names_1981(self):
    """get_table_names must list exactly the table subdirectories of the
    1981 remote cache directory."""
    if skip_test():
        return
    self.storage._base_directory_remote = convertntslash(
        os.path.join(self.remote_temp_dir, 'data', 'test_cache', '1981'))
    expected = ['base_year', 'cities']
    actual = self.storage.get_table_names()
    # sort both: directory listing order is not guaranteed
    expected.sort()
    actual.sort()
    # assertEqual replaces the deprecated assertEquals alias
    self.assertEqual(expected, actual)
def test_get_table_names_1981(self):
    """get_table_names must list exactly the table subdirectories of the
    1981 remote cache directory."""
    if skip_test():
        return
    self.storage._base_directory_remote = convertntslash(
        os.path.join(self.remote_temp_dir, 'data', 'test_cache', '1981'))
    expected = ['base_year', 'cities']
    actual = self.storage.get_table_names()
    # sort both: directory listing order is not guaranteed
    expected.sort()
    actual.sort()
    # assertEqual replaces the deprecated assertEquals alias
    self.assertEqual(expected, actual)
def _get_remote_files(self, table_name=''):
    """Return storage_file objects for every file in the remote table directory.

    Raises FltError when the remote directory does not exist.
    """
    dataset_path = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    if not self.ssh_client.exists_remotely(dataset_path):
        raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    pattern = dataset_path + '/*.*'
    return [self.storage_file(file_name)
            for file_name in self.ssh_client.glob(pattern)]
def test_write_float_and_boolean_array(self):
    """Round-trip a float column and a boolean column through write_table /
    load_table, checking both the remote upload and the restored values."""
    if skip_test():
        return
    expected_float = array([100.17, 70.00])
    expected_bool = array([True, False])
    table_data = {
        'float_column': expected_float,
        'bool_column': expected_bool,
    }
    # float file extension encodes host byte order
    if sys.byteorder == 'little':
        file_name = 'float_column.lf8'
        numpy_ext = '<f8'
    else:
        file_name = 'float_column.bf8'
        numpy_ext = '>f8'
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, file_name))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, file_name)

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, numpy_ext)
    self.assertTrue((expected_float == actual).all())

    # booleans use the byte-order-independent '.ib1' extension
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, 'bool_column.ib1'))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, 'bool_column.ib1')

    self.storage.write_table(self.table_name, table_data)
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, '|b1')
    self.assertTrue((expected_bool == actual).all())
def write_table(self, table_name, table_data):
    """Write a table to the local cache, then mirror it to the remote host.

    'table_name' specifies the subdirectory relative to base directory.
    'table_data' is a dictionary where keys are the column names and values
    are value arrays of the corresponding columns.

    Raises FltError if the remote table directory already holds more than one
    file for the same column (same short name, different extensions).
    """
    # Column sizes are validated by the helper but not needed here.
    unused_column_size, column_names = self._get_column_size_and_names(table_data)
    local_dir = os.path.join(self._get_base_directory(), table_name)
    remote_dir = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    remote_base_dir = convertntslash(self._get_base_directory_remote())
    if not self.ssh_client.exists_remotely(remote_dir):
        self.ssh_client.makedirs(remote_dir)
    else:
        ## remove any existing remote file for each column being written, so a
        ## column re-written with a different dtype does not leave a stale file
        ## with the old extension behind
        existing_files = self._get_remote_files(table_name=table_name)
        # loop variable renamed from 'file' to avoid shadowing the builtin
        existing_file_short_names = [existing_file.get_short_name()
                                     for existing_file in existing_files]
        for column_name in column_names:
            n = existing_file_short_names.count(column_name)
            if n == 0:
                continue
            elif n == 1:
                i = existing_file_short_names.index(column_name)
                self.ssh_client.remove(existing_files[i].get_name())
            elif n > 1:
                # include the conflicting file names so the message actually
                # delivers the list its trailing ":\n" promises
                conflicting_names = [f.get_name() for f in existing_files
                                     if f.get_short_name() == column_name]
                message = "Column '%s' has multiple files with different file extensions:\n" % column_name
                message += "\n".join(conflicting_names) + "\n"
                message += "Either the process of copying files into this directory is flawed, or there is a bug in Opus."
                raise FltError(message)

    file_flt_storage.write_table(self, table_name, table_data)
    self.ssh_client.mput(local_dir, remote_base_dir)
def load_table(self, table_name, column_names=Storage.ALL_COLUMNS, lowercase=True):
    """Fetch the remote table directory into the local cache, then delegate
    to the plain file-based loader."""
    remote_path = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    local_path = self._get_base_directory()
    self.ssh_client.mget(remote_path, local_path)
    return file_flt_storage.load_table(self, table_name,
                                       column_names=column_names,
                                       lowercase=lowercase)
def get_table_names(self):
    """Return names of remote subdirectories that contain at least one column.

    Raises FltError when the remote base directory does not exist.
    """
    dataset_path = convertntslash(self._get_base_directory_remote())
    if not self.ssh_client.exists_remotely(dataset_path):
        raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    table_names = []
    for file_path in self.ssh_client.glob(dataset_path + '/*'):
        # only directories can be tables; skip plain files
        if not self.ssh_client.isdir(file_path):
            continue
        file_name = os.path.basename(file_path)
        # a directory with no recognizable column files is not a table
        if len(self.get_column_names(file_name)) > 0:
            table_names.append(file_name)
    return table_names
def test_write_float_and_boolean_array(self):
    """Round-trip a float column and a boolean column through write_table /
    load_table, checking both the remote upload and the restored values."""
    if skip_test():
        return
    expected_float = array([100.17, 70.00])
    expected_bool = array([True, False])
    table_data = {
        'float_column': expected_float,
        'bool_column': expected_bool,
    }
    # float file extension encodes host byte order
    if sys.byteorder == 'little':
        file_name = 'float_column.lf8'
        numpy_ext = '<f8'
    else:
        file_name = 'float_column.bf8'
        numpy_ext = '>f8'
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, file_name))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, file_name)

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, numpy_ext)
    self.assertTrue((expected_float == actual).all())

    # booleans use the byte-order-independent '.ib1' extension
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, 'bool_column.ib1'))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, 'bool_column.ib1')

    self.storage.write_table(self.table_name, table_data)
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, '|b1')
    self.assertTrue((expected_bool == actual).all())
def get_table_names(self):
    """Return names of remote subdirectories that contain at least one column.

    Raises FltError when the remote base directory does not exist.
    """
    dataset_path = convertntslash(self._get_base_directory_remote())
    if not self.ssh_client.exists_remotely(dataset_path):
        raise FltError("Cache directory '%s' does not exist!" % dataset_path)
    table_names = []
    for file_path in self.ssh_client.glob(dataset_path + '/*'):
        # only directories can be tables; skip plain files
        if not self.ssh_client.isdir(file_path):
            continue
        file_name = os.path.basename(file_path)
        # a directory with no recognizable column files is not a table
        if len(self.get_column_names(file_name)) > 0:
            table_names.append(file_name)
    return table_names
def test_write_char_array(self):
    """Round-trip a fixed-width string column ('.iS9' / '|S9') through
    write_table and load_table."""
    if skip_test():
        return
    expected = array(['string1', 'string227'])
    table_data = {
        'char_column': expected,
    }
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, 'char_column.iS9'))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, 'char_column.iS9')

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))

    # delete the local copy so load_table must fetch it from the remote host
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, dtype='|S9')
    self.assertTrue((expected == actual).all())
def test_write_char_array(self):
    """Round-trip a fixed-width string column ('.iS9' / '|S9') through
    write_table and load_table."""
    if skip_test():
        return
    expected = array(['string1', 'string227'])
    table_data = {
        'char_column': expected,
    }
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, 'char_column.iS9'))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, 'char_column.iS9')

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))

    # delete the local copy so load_table must fetch it from the remote host
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, dtype='|S9')
    self.assertTrue((expected == actual).all())
def test_write_int_array(self):
    """Round-trip an int column: write_table uploads it remotely, and
    load_table restores the local file with the expected values."""
    if skip_test():
        return
    expected = array([100, 70])
    table_data = {
        'int_column': expected,
    }
    # file_name is e.g. 'int_column.li4' for a little-endian 32 bit machine
    file_name = 'int_column.%(endian)si%(bytes)u' % replacements
    # numpy_dtype is e.g. '<i4' for a little-endian 32 bit machine
    numpy_dtype = '%(numpy_endian)si%(bytes)u' % replacements
    remote_file_name = convertntslash(
        os.path.join(self.storage._get_base_directory_remote(),
                     self.table_name, file_name))
    local_file_name = os.path.join(self.storage._get_base_directory(),
                                   self.table_name, file_name)

    self.storage.write_table(self.table_name, table_data)
    # assertTrue replaces the deprecated assert_ alias
    self.assertTrue(self.storage.ssh_client.exists_remotely(remote_file_name))

    # delete the local copy so load_table must fetch it from the remote host
    os.remove(local_file_name)
    self.storage.load_table(self.table_name)
    actual = fromfile(local_file_name, dtype=numpy_dtype)
    self.assertTrue((expected == actual).all())
def load_table(self, table_name, column_names=Storage.ALL_COLUMNS, lowercase=True):
    """Fetch the remote table directory into the local cache, then delegate
    to the plain file-based loader."""
    remote_path = convertntslash(
        os.path.join(self._get_base_directory_remote(), table_name))
    local_path = self._get_base_directory()
    self.ssh_client.mget(remote_path, local_path)
    return file_flt_storage.load_table(self, table_name,
                                       column_names=column_names,
                                       lowercase=lowercase)
def _do_run(self, run_id, config, start_year=None, end_year=None):
    """Run a simulation in alternating urbansim / travel-model segments.

    Splits [start_year, end_year] at each configured travel-model year,
    restarts the urbansim run for each segment (locally or over ssh), then
    runs the travel model for the segment's end year, verifying after each
    step that the expected cache-year directory was produced.

    'run_id' identifies the run in the services database; 'config' is the
    run configuration dict ('cache_directory', 'years', and optionally
    'travel_model_configuration'). start/end year default to config['years'].
    Raises StandardError when an expected cache directory is missing.
    """
    cache_directory = config['cache_directory']
    if start_year is None:
        start_year = config['years'][0]
    if end_year is None:
        end_year = config['years'][1]

    travel_model_resources = None
    travel_model_years = []
    if config.has_key('travel_model_configuration'):
        # deep copy: the travel-model resources get a rewritten
        # cache_directory while 'config' keeps the plain path
        travel_model_resources = copy.deepcopy(config)
        if not self.is_localhost(self.urbansim_server_config['hostname']):
            # urbansim runs remotely: point the travel model at the
            # urbansim server's cache via an sftp URL
            travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (
                self.urbansim_server_config['username'],
                self.urbansim_server_config['hostname'],
                cache_directory)
        elif not self.is_localhost(self.travelmodel_server_config['hostname']):
            ## urbansim runs on localhost, and travel model runs on travelmodel_server
            ## set sftp_flt_storage to the hostname of localhost
            try:
                # discover this host's outward-facing IP by opening a UDP
                # socket (no traffic is sent for SOCK_DGRAM connect)
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(('www.google.com', 80))
                urbansim_server = s.getsockname()[0]
                s.close()
            except:
                ## this won't work when the hostname cannot be converted to ip address
                urbansim_server = socket.gethostbyname(socket.gethostname())
            urbansim_user = self.urbansim_server_config.get('username')
            if urbansim_user is None or len(urbansim_user) == 0:
                urbansim_user = getuser()
            travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (
                urbansim_user, urbansim_server, cache_directory)

        # only keep sorted travel model years that fall into the years range
        for key in travel_model_resources['travel_model_configuration'].keys():
            if type(key) == int:
                if key >= start_year and key <= end_year:
                    travel_model_years.append(key)
        if end_year not in travel_model_years:
            travel_model_years.append(end_year)
            ## in the case end_year is not a travel_model year, appending it
            ## so we have 1 more iteration after the last travel_model_year
        travel_model_years.sort()

    this_start_year = start_year
    for travel_model_year in travel_model_years:
        if this_start_year > end_year:
            return  # run finished, should not be needed
        this_end_year = travel_model_year
        config['years'] = (this_start_year, this_end_year)
        ## since there is no --skip-travel-model switch for restart_run yet
        ## delete travel_model_configuration, so travel model won't run on urbansim_server
        if config.has_key('travel_model_configuration'):
            del config['travel_model_configuration']
        self.update_services_database(self.get_run_manager(), run_id, config)

        if not self.is_localhost(self.urbansim_server_config['hostname']):
            # --- urbansim segment on a remote server ---
            logger.start_block("Start UrbanSim Simulation on %s from %s to %s" %
                               (self.urbansim_server_config['hostname'],
                                this_start_year, this_end_year))
            cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                {'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config), 'opus_core.tools.restart_run'),
                 'run_id':run_id,
                 'start_year':this_start_year,
                 'services_hostname': self.services_db_config.host_name}
            cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists >> ' + 'urbansim_run_%s.log' % run_id
            ## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
            ## TODO: better handle the location of the urbansim_remote_run.log
            logger.log_status("Call " + cmd)
            ssh_client = self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config)
            self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
            logger.end_block()
            ##TODO: open_sftp may need to be closed
            # verify the remote run produced the end-year cache directory
            if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
                    convertntslash(os.path.join(cache_directory, str(this_end_year)))):
                raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
                      (this_end_year, cache_directory)
        else:
            # --- urbansim segment on localhost ---
            cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                {'module':module_path_from_opus_path('opus_core.tools.restart_run'),
                 'run_id':run_id,
                 'start_year':this_start_year,
                 'services_hostname': self.services_db_config.host_name}
            cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists'
            logger.log_status("Call " + cmd)
            os.system(cmd)
            if not os.path.exists(os.path.join(cache_directory, str(this_end_year))):
                raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
                      (this_end_year, cache_directory)

        if travel_model_resources is not None:
            # run the travel model only for configured travel-model years
            if travel_model_resources['travel_model_configuration'].has_key(this_end_year):
                travel_model_resources['years'] = (this_end_year, this_end_year)
                self.update_services_database(self.get_run_manager(), run_id, travel_model_resources)
                if not self.is_localhost(self.travelmodel_server_config['hostname']):
                    # --- travel model on a remote server ---
                    logger.start_block("Start Travel Model on %s from %s to %s" %
                                       (self.travelmodel_server_config['hostname'],
                                        this_start_year, this_end_year))
                    cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                        {'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['travelmodel_server'], self.travelmodel_server_config), 'opus_core.tools.restart_run'),
                         'run_id':run_id,
                         'start_year':this_end_year,
                         'services_hostname': self.services_db_config.host_name}
                    cmd += ' --skip-cache-cleanup --skip-urbansim >> ' + 'travelmodel_run_%s.log' % run_id
                    ## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
                    ## TODO: better handle the location of the travelmodel_remote_run.log
                    logger.log_status("Call " + cmd)
                    # NOTE(review): this fetches the *urbansim* server's ssh
                    # client although the command targets the travel-model
                    # server -- looks like a copy-paste slip; confirm intent.
                    ssh_client = self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config)
                    self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
                    logger.end_block()
                else:
                    # --- travel model on localhost ---
                    cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                        {'module':module_path_from_opus_path('opus_core.tools.restart_run'),
                         'run_id':run_id,
                         'start_year':this_end_year,
                         'services_hostname': self.services_db_config.host_name}
                    cmd += ' --skip-cache-cleanup --skip-urbansim'
                    logger.log_status("Call " + cmd)
                    os.system(cmd)

                # the travel model must produce the cache for the following year
                flt_directory_for_next_year = os.path.join(cache_directory, str(this_end_year+1))
                if not self.is_localhost(self.urbansim_server_config['hostname']):
                    if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
                            convertntslash(flt_directory_for_next_year)):
                        raise StandardError, "travel model didn't create any output for year %s in directory %s on %s; there may be problem with travel model run" % \
                              (this_end_year+1, cache_directory, self.urbansim_server_config['hostname'])
                elif not os.path.exists(flt_directory_for_next_year):
                    raise StandardError, "travel model didn't create any output for year %s in directory %s; there may be problem with travel model run" % \
                          (this_end_year+1, cache_directory)

        this_start_year = travel_model_year + 1  # next run starting from the next year of a travel model year
    return
def _do_run(self, run_id, config, start_year=None, end_year=None):
    """Run a simulation in alternating urbansim / travel-model segments.

    Splits [start_year, end_year] at each configured travel-model year,
    restarts the urbansim run for each segment (locally or over ssh), then
    runs the travel model for the segment's end year, verifying after each
    step that the expected cache-year directory was produced.

    'run_id' identifies the run in the services database; 'config' is the
    run configuration dict ('cache_directory', 'years', and optionally
    'travel_model_configuration'). start/end year default to config['years'].
    Raises StandardError when an expected cache directory is missing.
    """
    cache_directory = config['cache_directory']
    if start_year is None:
        start_year = config['years'][0]
    if end_year is None:
        end_year = config['years'][1]

    travel_model_resources = None
    travel_model_years = []
    if config.has_key('travel_model_configuration'):
        # deep copy: the travel-model resources get a rewritten
        # cache_directory while 'config' keeps the plain path
        travel_model_resources = copy.deepcopy(config)
        if not self.is_localhost(self.urbansim_server_config['hostname']):
            # urbansim runs remotely: point the travel model at the
            # urbansim server's cache via an sftp URL
            travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (
                self.urbansim_server_config['username'],
                self.urbansim_server_config['hostname'],
                cache_directory)
        elif not self.is_localhost(self.travelmodel_server_config['hostname']):
            ## urbansim runs on localhost, and travel model runs on travelmodel_server
            ## set sftp_flt_storage to the hostname of localhost
            try:
                # discover this host's outward-facing IP by opening a UDP
                # socket (no traffic is sent for SOCK_DGRAM connect)
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(('www.google.com', 80))
                urbansim_server = s.getsockname()[0]
                s.close()
            except:
                ## this won't work when the hostname cannot be converted to ip address
                urbansim_server = socket.gethostbyname(socket.gethostname())
            urbansim_user = self.urbansim_server_config.get('username')
            if urbansim_user is None or len(urbansim_user) == 0:
                urbansim_user = getuser()
            travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (
                urbansim_user, urbansim_server, cache_directory)

        # only keep sorted travel model years that fall into the years range
        for key in travel_model_resources['travel_model_configuration'].keys():
            if type(key) == int:
                if key >= start_year and key <= end_year:
                    travel_model_years.append(key)
        if end_year not in travel_model_years:
            travel_model_years.append(end_year)
            ## in the case end_year is not a travel_model year, appending it
            ## so we have 1 more iteration after the last travel_model_year
        travel_model_years.sort()

    this_start_year = start_year
    for travel_model_year in travel_model_years:
        if this_start_year > end_year:
            return  # run finished, should not be needed
        this_end_year = travel_model_year
        config['years'] = (this_start_year, this_end_year)
        ## since there is no --skip-travel-model switch for restart_run yet
        ## delete travel_model_configuration, so travel model won't run on urbansim_server
        if config.has_key('travel_model_configuration'):
            del config['travel_model_configuration']
        self.update_services_database(self.get_run_manager(), run_id, config)

        if not self.is_localhost(self.urbansim_server_config['hostname']):
            # --- urbansim segment on a remote server ---
            logger.start_block("Start UrbanSim Simulation on %s from %s to %s" %
                               (self.urbansim_server_config['hostname'],
                                this_start_year, this_end_year))
            cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                {'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config), 'opus_core.tools.restart_run'),
                 'run_id':run_id,
                 'start_year':this_start_year,
                 'services_hostname': self.services_db_config.host_name}
            cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists >> ' + 'urbansim_run_%s.log' % run_id
            ## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
            ## TODO: better handle the location of the urbansim_remote_run.log
            logger.log_status("Call " + cmd)
            ssh_client = self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config)
            self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
            logger.end_block()
            ##TODO: open_sftp may need to be closed
            # verify the remote run produced the end-year cache directory
            if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
                    convertntslash(os.path.join(cache_directory, str(this_end_year)))):
                raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
                      (this_end_year, cache_directory)
        else:
            # --- urbansim segment on localhost ---
            cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                {'module':module_path_from_opus_path('opus_core.tools.restart_run'),
                 'run_id':run_id,
                 'start_year':this_start_year,
                 'services_hostname': self.services_db_config.host_name}
            cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists'
            logger.log_status("Call " + cmd)
            os.system(cmd)
            if not os.path.exists(os.path.join(cache_directory, str(this_end_year))):
                raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
                      (this_end_year, cache_directory)

        if travel_model_resources is not None:
            # run the travel model only for configured travel-model years
            if travel_model_resources['travel_model_configuration'].has_key(this_end_year):
                travel_model_resources['years'] = (this_end_year, this_end_year)
                self.update_services_database(self.get_run_manager(), run_id, travel_model_resources)
                if not self.is_localhost(self.travelmodel_server_config['hostname']):
                    # --- travel model on a remote server ---
                    logger.start_block("Start Travel Model on %s from %s to %s" %
                                       (self.travelmodel_server_config['hostname'],
                                        this_start_year, this_end_year))
                    cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                        {'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['travelmodel_server'], self.travelmodel_server_config), 'opus_core.tools.restart_run'),
                         'run_id':run_id,
                         'start_year':this_end_year,
                         'services_hostname': self.services_db_config.host_name}
                    cmd += ' --skip-cache-cleanup --skip-urbansim >> ' + 'travelmodel_run_%s.log' % run_id
                    ## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
                    ## TODO: better handle the location of the travelmodel_remote_run.log
                    logger.log_status("Call " + cmd)
                    # NOTE(review): this fetches the *urbansim* server's ssh
                    # client although the command targets the travel-model
                    # server -- looks like a copy-paste slip; confirm intent.
                    ssh_client = self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config)
                    self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
                    logger.end_block()
                else:
                    # --- travel model on localhost ---
                    cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
                        {'module':module_path_from_opus_path('opus_core.tools.restart_run'),
                         'run_id':run_id,
                         'start_year':this_end_year,
                         'services_hostname': self.services_db_config.host_name}
                    cmd += ' --skip-cache-cleanup --skip-urbansim'
                    logger.log_status("Call " + cmd)
                    os.system(cmd)

                # the travel model must produce the cache for the following year
                flt_directory_for_next_year = os.path.join(cache_directory, str(this_end_year+1))
                if not self.is_localhost(self.urbansim_server_config['hostname']):
                    if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
                            convertntslash(flt_directory_for_next_year)):
                        raise StandardError, "travel model didn't create any output for year %s in directory %s on %s; there may be problem with travel model run" % \
                              (this_end_year+1, cache_directory, self.urbansim_server_config['hostname'])
                elif not os.path.exists(flt_directory_for_next_year):
                    raise StandardError, "travel model didn't create any output for year %s in directory %s; there may be problem with travel model run" % \
                          (this_end_year+1, cache_directory)

        this_start_year = travel_model_year + 1  # next run starting from the next year of a travel model year
    return