def updatebc_run(self, domain, j_id=None):
    '''
    Run da_update_bc.exe for the given domain, either through a slurm batch
    script (when one is configured) or locally.

    :param domain: domain number; selects the "d0<domain>" WRFDA workdir
    :param j_id: optional slurm job-id this job should depend on
                 (--dependency=afterok); previously this was hard-coded to
                 None, which made the dependency branch unreachable
    '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    if len(self.config['options_slurm']['slurm_updatebc.exe']):
        # run da_update_bc.exe via slurm
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            updatebc_command = ['sbatch', mid,
                                self.config['options_slurm']['slurm_updatebc.exe']]
        else:
            updatebc_command = ['sbatch',
                                self.config['options_slurm']['slurm_updatebc.exe']]
        try:
            res = subprocess.check_output(updatebc_command, cwd=wrfda_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Updatebc failed %s:' % updatebc_command)
            raise  # re-raise exception
        # poll until the slurm job has left the queue
        while True:
            time.sleep(0.5)
            if not utils.testjob(j_id):
                break
    else:
        # run locally (removed a leftover Python-2 debug `print` statement)
        subprocess.check_call(os.path.join(wrfda_workdir, 'da_update_bc.exe'),
                              cwd=wrfda_workdir, stdout=utils.devnull(),
                              stderr=utils.devnull())
def _run_unipost_step(self, wrfout, current_time, thours):
    '''
    Post-process a single wrfout file with unipost.

    Parameters:
    - wrfout: full path to a wrfout file (regular wrfout naming)
    - current_time: time to run unipost for, format YYYY-MM-DD_HH
    - thours: TODO add description

    Steps performed, in order: validate inputs, write the itag file,
    execute unipost, then archive and clean up the output.
    '''
    # validate that current_time is present in wrfout (also checks format)
    utils.validate_time_wrfout(wrfout, current_time)
    # the domain number is encoded in the wrfout filename
    # (assumes regular wrfout naming — the slice picks the "dXX" digits)
    domain = int(wrfout[-22:-20])
    # prepare the itag control file consumed by unipost
    self._write_itag(wrfout, current_time)
    # run unipost via the uni.exe wrapper script
    # TODO: call unipost.exe directly instead of the uni.exe wrapper
    # TODO: uni.exe loads ifort modules, calls unipost.exe, reloads gfort modules
    # TODO: should fix compilation of numpy/netCDF4 with intel compiler
    # NOTE(review): this reads module-level `config`, not `self.config` — verify
    uni_exe = os.path.join(config['upp_dir'], 'bin', 'uni.exe')
    subprocess.check_call(uni_exe, cwd=config['post_dir'],
                          stdout=utils.devnull(), stderr=utils.devnull())
    # rename + archive, then remove leftover files
    self._archive_output(current_time, thours, domain)
    self._cleanup_output_files()
def _run_metgrid(self, j_id=None):
    '''
    Run metgrid.exe, either through slurm (when a batch script is configured
    in config.json) or locally.

    :param j_id: optional slurm job-id to depend on (--dependency=afterok)
    :returns: the slurm job-id when submitted via sbatch, else None
              (previously the local branch fell off the end implicitly;
              now made explicit, matching obsproc_run)
    :raises subprocess.CalledProcessError: when metgrid fails
    '''
    if len(self.config['options_slurm']['slurm_metgrid.exe']):
        # submit via slurm, optionally chained after job j_id
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            metgrid_command = ['sbatch', mid,
                               self.config['options_slurm']['slurm_metgrid.exe']]
        else:
            metgrid_command = ['sbatch',
                               self.config['options_slurm']['slurm_metgrid.exe']]
        utils.check_file_exists(metgrid_command[-1])
        # refresh the metgrid.exe symlink in the workdir
        utils.silentremove(os.path.join(self.wps_workdir, 'metgrid', 'metgrid.exe'))
        os.symlink(os.path.join(self.config['filesystem']['wps_dir'],
                                'metgrid', 'metgrid.exe'),
                   os.path.join(self.wps_workdir, 'metgrid', 'metgrid.exe'))
        try:
            res = subprocess.check_output(metgrid_command, cwd=self.wps_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Metgrid failed %s:' % metgrid_command)
            raise  # re-raise exception
        return j_id  # return slurm job-id
    else:
        # run locally
        metgrid_command = os.path.join(self.config['filesystem']['wps_dir'],
                                       'metgrid', 'metgrid.exe')
        utils.check_file_exists(metgrid_command)
        try:
            subprocess.check_call(metgrid_command, cwd=self.wps_workdir,
                                  stdout=utils.devnull(), stderr=utils.devnull())
        except subprocess.CalledProcessError:
            logger.error('Metgrid failed %s:' % metgrid_command)
            raise  # re-raise exception
        return None  # no slurm job-id when running locally
def wrfvar_run(self, domain, j_id=None):
    '''
    Run da_wrfvar.exe for the given domain, either through slurm (when a
    batch script is configured) or locally.

    :param domain: domain number; selects the "d0<domain>" WRFDA workdir
    :param j_id: optional slurm job-id to depend on (--dependency=afterok);
                 previously hard-coded to None, making the branch unreachable
    '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    logfile = os.path.join(wrfda_workdir, 'log.wrfda_d' + str(domain))
    if len(self.config['options_slurm']['slurm_wrfvar.exe']):
        # submit via slurm, optionally chained after job j_id
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            wrfvar_command = ['sbatch', mid,
                              self.config['options_slurm']['slurm_wrfvar.exe']]
        else:
            wrfvar_command = ['sbatch',
                              self.config['options_slurm']['slurm_wrfvar.exe']]
        utils.check_file_exists(wrfvar_command[-1])
        try:
            res = subprocess.check_output(wrfvar_command, cwd=wrfda_workdir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Wrfvar failed %s:' % wrfvar_command)
            raise  # re-raise exception
        # poll until the slurm job has left the queue
        while True:
            time.sleep(1)
            if not utils.testjob(j_id):
                break
    else:
        # run locally, capturing output in the logfile.
        # BUGFIX: the old code passed csh redirection tokens ('>&!', logfile)
        # as argv to da_wrfvar.exe with no shell, so the log was never
        # written; redirect through subprocess instead.
        with open(logfile, 'w') as log:
            subprocess.check_call(os.path.join(wrfda_workdir, 'da_wrfvar.exe'),
                                  cwd=wrfda_workdir, stdout=log,
                                  stderr=subprocess.STDOUT)
def obsproc_run(self, j_id=None):
    '''
    Run obsproc.exe, either through slurm (when a batch script is
    configured) or locally.

    :param j_id: optional slurm job-id to depend on (--dependency=afterok);
                 previously hard-coded to None, making the branch unreachable
    :returns: the slurm job-id when submitted via sbatch, else None
    :raises subprocess.CalledProcessError: when obsproc fails
    '''
    obsproc_dir = os.path.join(self.config['filesystem']['wrfda_dir'],
                               'var/obsproc')
    # TODO: check if output is file is created and no errors have occurred
    if len(self.config['options_slurm']['slurm_obsproc.exe']):
        # run using slurm
        if j_id:
            mid = "--dependency=afterok:%d" % j_id
            obsproc_command = ['sbatch', mid,
                               self.config['options_slurm']['slurm_obsproc.exe']]
        else:
            obsproc_command = ['sbatch',
                               self.config['options_slurm']['slurm_obsproc.exe']]
        utils.check_file_exists(obsproc_command[-1])
        try:
            res = subprocess.check_output(obsproc_command, cwd=obsproc_dir,
                                          stderr=utils.devnull())
            j_id = int(res.split()[-1])  # slurm job-id
        except subprocess.CalledProcessError:
            logger.error('Obsproc failed %s:' % obsproc_command)
            raise  # re-raise exception
        return j_id  # return slurm job-id
    else:
        # run locally
        subprocess.check_call(os.path.join(obsproc_dir, 'obsproc.exe'),
                              cwd=obsproc_dir, stdout=utils.devnull(),
                              stderr=utils.devnull())
        return None
def test_glancing_metadata_cirros_import_no_cksum(self):
    '''Importing a ~12 MB cirros image whose metadata lacks a checksum
    succeeds when checksum verification is skipped (-k).'''
    md_path = get_local_path('..', 'stratuslab', 'cirros_no_cksum.json')
    with devnull('stderr'):
        ok = glancing.main(['-v', '-n', test_name(), md_path, '-k'])
    self.assertTrue(ok)
def test_utils_devnull_print_explicit(self):
    '''print() with an explicit file= target is swallowed while the
    devnull('stdout') context is active; stderr is untouched.'''
    print('samarche', file=sys.stdout)
    with utils.devnull('stdout'):
        print('sareum', file=sys.stdout)    # suppressed
        print('sonreup', file=sys.stderr)   # stderr passes through
    print('samarchedenouveau', file=sys.stdout)  # stdout restored
    self._commit()
    # BUGFIX: close the capture files deterministically instead of
    # leaking handles via open(...).read()
    with open('test_devnull_err.txt', 'rb') as errf:
        self.assertEqual(errf.read(), 'sonreup\n')
    with open('test_devnull_out.txt', 'rb') as outf:
        self.assertEqual(outf.read(), 'samarche\nsamarchedenouveau\n')
def test_utils_devnull_write(self):
    '''sys.stdout.write() is swallowed while the devnull('stdout')
    context is active; stderr writes pass through.'''
    sys.stdout.write('samarche')
    with utils.devnull('stdout'):
        sys.stdout.write('sareum')    # suppressed
        sys.stderr.write('sonreup')   # stderr passes through
    sys.stdout.write('samarchedenouveau')  # stdout restored
    self._commit()
    # BUGFIX: close the capture files deterministically instead of
    # leaking handles via open(...).read()
    with open('test_devnull_err.txt', 'rb') as errf:
        self.assertEqual(errf.read(), 'sonreup')
    with open('test_devnull_out.txt', 'rb') as outf:
        self.assertEqual(outf.read(), 'samarchesamarchedenouveau')
def test_utils_devnull_write(self):
    '''sys.stdout.write() is swallowed while the devnull('stdout')
    context is active; stderr writes pass through.'''
    sys.stdout.write('samarche')
    with utils.devnull('stdout'):
        sys.stdout.write('sareum')    # suppressed
        sys.stderr.write('sonreup')   # stderr passes through
    sys.stdout.write('samarchedenouveau')  # stdout restored
    self._commit()
    # BUGFIX: close the capture files deterministically instead of
    # leaking handles via open(...).read()
    with open('test_devnull_err.txt', 'rb') as errf:
        self.assertEqual(errf.read(), 'sonreup')
    with open('test_devnull_out.txt', 'rb') as outf:
        self.assertEqual(outf.read(), 'samarchesamarchedenouveau')
def test_utils_devnull_print_explicit(self):
    '''print() with an explicit file= target is swallowed while the
    devnull('stdout') context is active; stderr is untouched.'''
    print('samarche', file=sys.stdout)
    with utils.devnull('stdout'):
        print('sareum', file=sys.stdout)    # suppressed
        print('sonreup', file=sys.stderr)   # stderr passes through
    print('samarchedenouveau', file=sys.stdout)  # stdout restored
    self._commit()
    # BUGFIX: close the capture files deterministically instead of
    # leaking handles via open(...).read()
    with open('test_devnull_err.txt', 'rb') as errf:
        self.assertEqual(errf.read(), 'sonreup\n')
    with open('test_devnull_out.txt', 'rb') as outf:
        self.assertEqual(outf.read(), 'samarche\nsamarchedenouveau\n')
def test_glance_ok(self):
    '''glance_ok() fails while PATH is broken and recovers afterwards.'''
    self.assertTrue(glance.glance_ok())
    # break PATH so the glance client cannot be found, muting its stderr
    with utils.devnull('stderr'), utils.environ('PATH', 'not_the_one'):
        self.assertFalse(glance.glance_ok())
    # environment restored: the client is usable again
    self.assertTrue(glance.glance_ok())
import unittest from tutils import local_pythonpath, get_local_path # Setup project-local PYTHONPATH local_pythonpath('..', '..', 'src') import utils import glance import glance_manager utils.set_verbose(True) # Check we have a cloud ready to import images into... _GLANCE_OK = False with utils.devnull('stderr'): _GLANCE_OK = glance.glance_run('image-list') is not None _OLDGMF = glance_manager.get_meta_file class GlanceManagerBasicTest(unittest.TestCase): def test_glance_manager_nolist(self): ret = glance_manager.main([]) self.assertFalse(ret) def test_glance_manager_badlistpath(self): ret = glance_manager.main(['-l', '/nonexistent']) self.assertFalse(ret) def test_glance_manager_nolist_verbose(self):
def test_glancing_image_notenough_param(self):
    '''-s without a size value must abort with a usage error (SystemExit).'''
    with devnull('stderr'), self.assertRaises(SystemExit):
        glancing.main(['-d', os.devnull, '-s'])
def test_glance_main_fail_wrong_param_name(self):
    '''Passing the image name as a bare positional parameter must exit;
    the pre-existing image is left untouched.'''
    self.common_start()
    with self.assertRaises(SystemExit), utils.devnull('stderr'):
        glance.main([self._IMG_NAME])
    # the failed invocation must not have deleted the image
    self.assertTrue(glance.glance_exists(self._IMG_NAME))
def test_glancing_metadata_cirros_import_bad_size(self):
    '''A ~12 MB cirros image whose metadata declares a wrong size is
    rejected normally but accepted when forced (-f).'''
    md_path = get_local_path('..', 'stratuslab', 'cirros_bad_size.json')
    with devnull('stderr'):
        self.assertFalse(glancing.main(['-v', '-n', test_name(), md_path]))
        self.assertTrue(glancing.main(['-f', '-n', test_name(), md_path]))
def test_glancing_url_notenough_param(self):
    '''-s without a size value must abort with a usage error (SystemExit).'''
    bad_url = 'http://nulle.part.fr/nonexistent_file.txt'
    with devnull('stderr'), self.assertRaises(SystemExit):
        glancing.main(['-d', bad_url, '-s'])
from tutils import local_pythonpath, get_local_path # Setup project-local PYTHONPATH local_pythonpath('..', '..', 'src') import utils from utils import devnull, environ, test_name, run, cleanup import glance import glancing import multihash # Check we have a cloud ready to import images into... _GLANCE_OK = False with devnull('stderr'): _GLANCE_OK = run(['glance', 'image-list'])[0] # Avoid heavy image download tests _HEAVY_TESTS = False # < 500 MB images, ~2 min -> ~6 min... _HUGE_TESTS = False # 1 x 5 GB image def glance_cleanup(name=None): '''Decorator that automagically clean up after a test. An image that has the 'name' name is deleted upon test exit. If the 'name' parameter is not given, it defaults to the test method name itself. And it tests that the image does not exists before exiting the test. ''' def wrapper(f): @wraps(f) def wrapped(self, *f_args, **f_kwargs):
def test_glancing_metadata_cirros_import_no_cksum(self):
    '''Importing a ~12 MB cirros image with no checksum in its metadata
    works when verification is skipped via -k.'''
    metadata_file = get_local_path('..', 'stratuslab', 'cirros_no_cksum.json')
    with devnull('stderr'):
        result = glancing.main(
            ['-v', '-n', test_name(), metadata_file, '-k'])
    self.assertTrue(result)
def test_metadata_json_bad(self):
    '''os.devnull is not valid JSON, so parsing must raise ValueError.'''
    with devnull('stderr'), self.assertRaises(ValueError):
        metadata.MetaStratusLabJson(os.devnull)
from tutils import local_pythonpath, get_local_path # Setup project-local PYTHONPATH local_pythonpath('..', '..', 'src') import utils from utils import devnull, environ, test_name, run, cleanup import glance import glancing import multihash # Check we have a cloud ready to import images into... _GLANCE_OK = False with devnull('stderr'): _GLANCE_OK = run(['glance', 'image-list'])[0] # Avoid heavy image download tests _HEAVY_TESTS = False # < 500 MB images, ~2 min -> ~6 min... _HUGE_TESTS = False # 1 x 5 GB image def glance_cleanup(name=None): '''Decorator that automagically clean up after a test. An image that has the 'name' name is deleted upon test exit. If the 'name' parameter is not given, it defaults to the test method name itself. And it tests that the image does not exists before exiting the test. ''' def wrapper(f): @wraps(f)
def test_metadata_json_fixture_not_dict(self):
    '''A JSON document whose top level is a string (not an object)
    must be rejected with ValueError.'''
    with open(self.fn, 'wb') as fixture:
        fixture.write('""\n')
    with devnull('stderr'), self.assertRaises(ValueError):
        metadata.MetaStratusLabJson(self.fn)