Example #1
def transfer_to_web_dir(config):
    """ Transfers all plots in output folder to web folder"""

    logger = shared.get_logger()
    logger.debug('Transferring plot files to web dir')
    init_time = config['init_time']
    full_trace = config['full_trace']
    ncl_out_dir = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_web_dir = shared.sub_date(config['ncl_web_dir'], init_time=init_time)

    if not os.path.exists(ncl_web_dir):
        os.makedirs(ncl_web_dir)

    flist = glob.glob(ncl_out_dir + '/*')
    shared.transfer(flist, ncl_web_dir, mode='copy', debug_level='NONE')

    ncl_out_dir = shared.sub_date(config['ncl_ol_out_dir'],
                                  init_time=init_time)
    ncl_web_dir = shared.sub_date(config['ncl_ol_web_dir'],
                                  init_time=init_time)

    if not os.path.exists(ncl_web_dir):
        os.makedirs(ncl_web_dir)

    flist = glob.glob(ncl_out_dir + '/*')
    shared.transfer(flist, ncl_web_dir, mode='copy', debug_level='NONE')
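shared.sub_date appears throughout these examples to expand date placeholders in configured paths. A rough, purely illustrative sketch of the behaviour being relied on, assuming init-time placeholders are plain strftime codes (the real shared.sub_date also distinguishes init and valid times and may use its own placeholder syntax):

from datetime import datetime

def sub_date_sketch(path, init_time=None, valid_time=None):
    """Expand date placeholders in a configured path (illustrative only).

    Here the path is simply treated as an strftime pattern for init_time;
    valid_time is accepted but ignored in this sketch."""
    return init_time.strftime(path) if init_time is not None else path

print(sub_date_sketch('/plots/%Y-%m-%d_%H', init_time=datetime(2014, 1, 1, 12)))
# -> /plots/2014-01-01_12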
Example #2
def prepare_ndown(config):
    """Runs a one-way nested simulation using ndown.exe
    We assume the coarse resolution run has been done, 
    and we have wrfout_d01.date files.
    
    We only need to run metgrid for the initial forecast time.
    
    We have two options, either we force the user to do all the renaming themselves, 
    or we allow them to utilise the original namelist.input file, and add effectivley
    add a column onto that. This could be done via a bunch of smaller utility steps.
    e.g. shift_namelist namelist.input 3 > namelist.input
    
    Which would rotate the columns of a namelist.input file so that the n-th column 
    becomes the first column.
    
        
    Therefore we have to run ungrib, geogrid, metgrid
    Assume the geo_em files exist for both domains.
    What """

    logger = shared.get_logger()
    logger.info('*** PREPARING NDOWN ***')
    namelist_wps = config['namelist_wps']
    namelist_input = config['namelist_input']
    max_dom = config['max_dom']
    wrf_run_dir = config['wrf_run_dir']

    if max_dom != 2:
        raise ConfigError("max_dom must equal 2 when doing ndown runs")

    bdy_times = shared.get_bdy_times(config)
    ndown_fmt = config['ndown_fmt']

    wrfout_d01_files = [
        shared.sub_date(ndown_fmt, init_time=bdy_times[0], valid_time=t)
        for t in bdy_times
    ]
    for f in wrfout_d01_files:
        if not os.path.exists(f):
            raise MissingFile("File: %s missing" % f)
        cmd = 'ln -sf %s %s' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)

    # Check for wrfinput_d02
    wrfinput_d02 = '%s/wrfinput_d02' % wrf_run_dir
    if not os.path.exists(wrfinput_d02):
        raise MissingFile("wrfinput_d02 is missing")

    os.rename('%s/wrfinput_d02' % wrf_run_dir, '%s/wrfndi_d02' % wrf_run_dir)

    namelist = read_namelist(namelist_input)

    # History interval is in minutes
    history_interval = namelist.settings['history_interval']
    interval_seconds = history_interval[0] * 60
    namelist.update('interval_seconds', interval_seconds)
    namelist.insert('io_form_auxinput2', 2, 'time_control')
    namelist.to_file(namelist_input)

    logger.info('*** DONE PREPARE NDOWN ***')
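The shift_namelist utility mentioned in the docstring does not appear in these examples. A minimal sketch of the column rotation it describes, assuming each namelist value line holds comma-separated per-domain columns (an illustration only, not the wrftools implementation):

import sys

def shift_namelist(lines, n):
    """Rotate per-domain columns so that the n-th column (1-based) comes first.
    Lines without comma-separated values (section headers, '/') pass through
    unchanged. Illustrative sketch only."""
    shifted = []
    for line in lines:
        if '=' in line and ',' in line:
            key, value = line.split('=', 1)
            cols = [c.strip() for c in value.rstrip().rstrip(',').split(',')]
            cols = cols[n - 1:] + cols[:n - 1]   # rotate so column n leads
            shifted.append('%s= %s,' % (key, ', '.join(cols)))
        else:
            shifted.append(line.rstrip('\n'))
    return shifted

if __name__ == '__main__':
    with open(sys.argv[1]) as f:
        print('\n'.join(shift_namelist(f.readlines(), int(sys.argv[2]))))

Note that redirecting the output back onto the same file, as in the docstring example, would truncate the input before it is read; writing to a temporary file first is safer.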
Example #3
def prepare_ndown(config):
    """Runs a one-way nested simulation using ndown.exe
    We assume the coarse resolution run has been done, 
    and we have wrfout_d01.date files.
    
    We only need to run metgrid for the initial forecast time.
    
    We have two options, either we force the user to do all the renaming themselves, 
    or we allow them to utilise the original namelist.input file, and add effectivley
    add a column onto that. This could be done via a bunch of smaller utility steps.
    e.g. shift_namelist namelist.input 3 > namelist.input
    
    Which would rotate the columns of a namelist.input file so that the n-th column 
    becomes the first column.
    
        
    Therefore we have to run ungrib, geogrid, metgrid
    Assume the geo_em files exist for both domains.
    What """

    logger = shared.get_logger()
    logger.info('*** PREPARING NDOWN ***')
    namelist_wps   = config['namelist_wps']
    namelist_input = config['namelist_input']
    max_dom        = config['max_dom']
    wrf_run_dir    = config['wrf_run_dir']
    
    
    if max_dom != 2:
        raise ConfigError("max_dom must equal 2 when doing ndown runs")
    
    bdy_times = shared.get_bdy_times(config)
    ndown_fmt = config['ndown_fmt']
    
    wrfout_d01_files = [shared.sub_date(ndown_fmt, init_time=bdy_times[0], valid_time=t) for t in bdy_times]
    for f in wrfout_d01_files:
        if not os.path.exists(f):
            raise MissingFile("File: %s missing" % f)
        cmd = 'ln -sf %s %s' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)
    
    
    # Check for wrfinput_d02
    wrfinput_d02 = '%s/wrfinput_d02' % wrf_run_dir
    if not os.path.exists(wrfinput_d02):
        raise MissingFile("wrfinput_d02 is missing")
    
    os.rename('%s/wrfinput_d02' % wrf_run_dir, '%s/wrfndi_d02' % wrf_run_dir)
    
    
    namelist         = read_namelist(namelist_input)
    
    # History interval is in minutes
    history_interval = namelist.settings['history_interval']
    interval_seconds = history_interval[0] * 60
    namelist.update('interval_seconds', interval_seconds)
    namelist.insert('io_form_auxinput2', 2, 'time_control')
    namelist.to_file(namelist_input)
    
    logger.info('*** DONE PREPARE NDOWN ***')
Example #4
def update_namelist_wps(config):
    """ Updates the namelist.wps to reflect updated settings in config
    
    Arguments:
    config -- dictionary containing various configuration options
        
    """
    logger = shared.get_logger()
    logger.debug('*** UPDATING namelist.wps ***')

    #domain_dir = config['domain_dir']
    #model_run  = config['model_run']
    #wps_dir    = config['wps_dir']
    wps_run_dir = config['wps_run_dir']  # required for opt_geogrid_tbl_path
    #bdy_conditions = config['bdy_conditions']

    namelist_wps = config['namelist_wps']
    shutil.copyfile(namelist_wps, namelist_wps + '.backup')

    bdy_times = shared.get_bdy_times(config)

    max_dom = config['max_dom']
    init_time = config['init_time']

    met_em_dir = shared.sub_date(config['met_em_dir'], init_time=init_time)
    geo_em_dir = config['geo_em_dir']

    bdy_interval = config['bdy_interval']
    interval_seconds = bdy_interval * 60 * 60

    logger.debug('reading namelist.wps <--------- %s' % namelist_wps)
    namelist = shared.read_namelist(namelist_wps)

    #
    # Update some options based on the forecast config file
    #
    namelist.update('max_dom', max_dom)
    namelist.update('opt_output_from_geogrid_path',
                    geo_em_dir,
                    section='share')
    namelist.update('opt_geogrid_tbl_path', wps_run_dir, section='geogrid')
    namelist.update('opt_metgrid_tbl_path', wps_run_dir, section='metgrid')
    namelist.update('interval_seconds', [interval_seconds])

    #
    # Generate formatted strings for inclusion in the namelist.wps file
    #
    start_str = bdy_times[0].strftime("%Y-%m-%d_%H:%M:%S")
    end_str = bdy_times[-1].strftime("%Y-%m-%d_%H:%M:%S")
    logger.debug("Updating namelist.wps start and end")
    logger.debug(start_str)
    logger.debug(end_str)

    namelist.update('start_date', [start_str] * max_dom)
    namelist.update('end_date', [end_str] * max_dom)

    logger.debug('writing modified namelist.wps to file')
    namelist.to_file(namelist_wps)
    logger.debug('*** FINISHED UPDATING namelist.wps ***')
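For context, the bdy_times used above are the boundary-condition valid times spanning the forecast. A rough sketch of how such a list could be derived from the config, assuming the init_time, fcst_hours and bdy_interval keys used elsewhere in these examples (the real shared.get_bdy_times may differ):

from datetime import datetime, timedelta

def bdy_times_sketch(init_time, fcst_hours, bdy_interval):
    """Boundary times from init_time to init_time + fcst_hours,
    spaced bdy_interval hours apart (illustrative only)."""
    return [init_time + timedelta(hours=h)
            for h in range(0, fcst_hours + 1, bdy_interval)]

times = bdy_times_sketch(datetime(2014, 1, 1, 0), fcst_hours=24, bdy_interval=6)
print(times[0].strftime("%Y-%m-%d_%H:%M:%S"))   # start_date -> 2014-01-01_00:00:00
print(times[-1].strftime("%Y-%m-%d_%H:%M:%S"))  # end_date   -> 2014-01-02_00:00:00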
Example #5
def update_namelist_wps(config):
    """ Updates the namelist.wps to reflect updated settings in config
    
    Arguments:
    config -- dictionary containing various configuration options
        
    """    
    logger     = shared.get_logger()
    logger.debug('*** UPDATING namelist.wps ***')

    #domain_dir = config['domain_dir']
    #model_run  = config['model_run']
    #wps_dir    = config['wps_dir']
    wps_run_dir    = config['wps_run_dir']         # required for opt_geogrid_tbl_path
    #bdy_conditions = config['bdy_conditions'] 
    
    namelist_wps = config['namelist_wps']
    shutil.copyfile(namelist_wps, namelist_wps+'.backup')
    
    bdy_times  = shared.get_bdy_times(config)
    
    max_dom    = config['max_dom']
    init_time  = config['init_time']

    met_em_dir = shared.sub_date(config['met_em_dir'], init_time=init_time)
    geo_em_dir = config['geo_em_dir']

    bdy_interval = config['bdy_interval']
    interval_seconds = bdy_interval * 60 * 60

    logger.debug('reading namelist.wps <--------- %s' % namelist_wps)
    namelist = shared.read_namelist(namelist_wps)

    #
    # Update some options based on the forecast config file
    #
    namelist.update('max_dom', max_dom)
    namelist.update('opt_output_from_geogrid_path', geo_em_dir, section='share')
    namelist.update('opt_geogrid_tbl_path', wps_run_dir, section='geogrid')
    namelist.update('opt_metgrid_tbl_path', wps_run_dir, section='metgrid')
    namelist.update('interval_seconds', [interval_seconds])
    
    #
    # Generate formatted strings for inclusion in the namelist.wps file
    #
    start_str  = bdy_times[0].strftime("%Y-%m-%d_%H:%M:%S")
    end_str    = bdy_times[-1].strftime("%Y-%m-%d_%H:%M:%S")
    logger.debug("Updating namelist.wps start and end")
    logger.debug(start_str)
    logger.debug(end_str)

    namelist.update('start_date', [start_str]*max_dom)
    namelist.update('end_date',   [end_str]*max_dom)

        
    logger.debug('writing modified namelist.wps to file')
    namelist.to_file(namelist_wps)
    logger.debug('*** FINISHED UPDATING namelist.wps ***')
Example #6
def run_metgrid(config):
    """ Runs metgrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING METGRID ***")

    queue = config['queue']
    wps_run_dir = config['wps_run_dir']
    log_file = '%s/metgrid.log' % wps_run_dir
    bdy_conditions = config['bdy_conditions']
    namelist_wps = config['namelist_wps']
    namelist = shared.read_namelist(namelist_wps)

    met_em_dir = shared.sub_date(config['met_em_dir'],
                                 init_time=config['init_time'])

    #
    # vtable may be a dictionary to support running ungrib multiple
    # times. In which case, we need to put multiple prefixes into
    # the namelist.wps file
    #

    vtable = config['vtable']

    if isinstance(vtable, dict):
        prefixes = list(vtable.keys())
    else:
        prefixes = [bdy_conditions]

    namelist.update('fg_name', prefixes)
    namelist.update('opt_output_from_metgrid_path',
                    met_em_dir,
                    section='metgrid')
    if not config['sst']:
        namelist.remove('constants_name')

    namelist.to_file(namelist_wps)

    logger.debug('met_em_dir: %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        logger.debug('creating met_em_dir: %s ' % met_em_dir)
        os.makedirs(met_em_dir)

    os.chdir(wps_run_dir)
    cmd = "%s/metgrid.exe" % wps_run_dir

    shared.run(cmd, config, from_dir=wps_run_dir)

    cmd = 'grep "Successful completion" %s/metgrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('metgrid.exe did not complete')

    logger.info('*** SUCCESS METGRID ***\n')
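The vtable handling above supports two config shapes. A small, hypothetical illustration of what each branch yields as the fg_name prefixes (the vtable values and bdy_conditions name are made up):

# vtable as a plain string: fg_name falls back to the boundary-conditions name
config_a = {'vtable': 'Vtable.GFS', 'bdy_conditions': 'GFS'}

# vtable as a dictionary: each key becomes an ungrib prefix / fg_name entry
config_b = {'vtable': {'GFS': 'Vtable.GFS', 'SST': 'Vtable.SST'},
            'bdy_conditions': 'GFS'}

for config in (config_a, config_b):
    vtable = config['vtable']
    if isinstance(vtable, dict):
        prefixes = list(vtable.keys())
    else:
        prefixes = [config['bdy_conditions']]
    print(prefixes)   # ['GFS'], then ['GFS', 'SST']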
Example #7
def run_metgrid(config):
    """ Runs metgrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING METGRID ***")
    
    queue          = config['queue']
    wps_run_dir    = config['wps_run_dir']
    log_file       = '%s/metgrid.log' % wps_run_dir
    bdy_conditions = config['bdy_conditions']
    namelist_wps   = config['namelist_wps']
    namelist       = shared.read_namelist(namelist_wps)
    
    met_em_dir     = shared.sub_date(config['met_em_dir'], init_time=config['init_time'])
    
    #
    # vtable may be a dictionary to support running ungrib multiple
    # times. In which case, we need to put multiple prefixes into
    # the namelist.wps file
    #
    
    vtable = config['vtable']
    
    if isinstance(vtable, dict):
        prefixes = list(vtable.keys())
    else:
        prefixes = [bdy_conditions]

        
    namelist.update('fg_name', prefixes)
    namelist.update('opt_output_from_metgrid_path', met_em_dir, section='metgrid')
    if not config['sst']:
        namelist.remove('constants_name')
        
    namelist.to_file(namelist_wps)
    
    logger.debug('met_em_dir: %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        logger.debug('creating met_em_dir: %s ' % met_em_dir)
        os.makedirs(met_em_dir)

    os.chdir(wps_run_dir)
    cmd      =  "%s/metgrid.exe" % wps_run_dir
    
    shared.run(cmd, config, from_dir=wps_run_dir)

    cmd = 'grep "Successful completion" %s/metgrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('metgrid.exe did not complete')
    
    logger.info('*** SUCCESS METGRID ***\n')
Example #8
def prepare_wrf(config):
    """Checks that met_em files exist, and links into WRF/run directory. 
    
    Arguments:
    config -- a dictionary containing forecast options

    """
    logger = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WRF ***')

    met_em_format = "%Y-%m-%d_%H:%M:%S"

    max_dom = config['max_dom']
    domains = range(1, max_dom + 1)
    init_time = config['init_time']
    fcst_hours = config['fcst_hours']
    bdy_interval = config['bdy_interval']
    bdy_times = shared.get_bdy_times(config)
    met_em_dir = shared.sub_date(config['met_em_dir'], init_time=init_time)
    met_em_files = [
        '%s/met_em.d%02d.%s.nc' % (met_em_dir, d, t.strftime(met_em_format))
        for d in domains for t in bdy_times
    ]
    wrf_run_dir = config['wrf_run_dir']
    namelist_run = '%s/namelist.input' % wrf_run_dir
    namelist_input = config['namelist_input']

    logger.debug('linking met_em files:')

    #
    # Link met_em files. There are two options for error handling here.
    # The first is to abort if any of the met_em files are missing.
    # The second is just to run wrf and see how far it gets before
    # running out of files. This will allow a partial forecast to run,
    # even if later files are missing.
    #
    # To use the first approach, raise an exception when a missing
    # file is encountered, otherwise just print a warning message.
    #
    # Actually, the two are equivalent so long as the met_em files
    # are sorted.
    #
    for f in met_em_files:
        if not os.path.exists(f):
            raise IOError('met_em file missing : %s' % f)
        cmd = 'ln -sf %s %s/' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)

    logger.debug('linking namelist.input to wrf_run_dir')
    cmd = 'rm -f %s' % namelist_run
    shared.run_cmd(cmd, config)
    cmd = 'ln -sf %s %s' % (namelist_input, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('*** FINISHED PREPARING FILES FOR WRF ***')
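To make the met_em filename pattern above concrete, a small worked example with made-up dates and directory:

from datetime import datetime, timedelta

met_em_format = "%Y-%m-%d_%H:%M:%S"
met_em_dir = '/path/to/met_em/2014-01-01_00'   # hypothetical directory
domains = range(1, 3)                          # i.e. max_dom = 2
bdy_times = [datetime(2014, 1, 1, 0) + timedelta(hours=h) for h in (0, 6)]

met_em_files = ['%s/met_em.d%02d.%s.nc' % (met_em_dir, d, t.strftime(met_em_format))
                for d in domains for t in bdy_times]
for f in met_em_files:
    print(f)
# .../met_em.d01.2014-01-01_00:00:00.nc
# .../met_em.d01.2014-01-01_06:00:00.nc
# .../met_em.d02.2014-01-01_00:00:00.nc
# .../met_em.d02.2014-01-01_06:00:00.nc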
Example #9
def prepare_wrf(config):
    """Checks that met_em files exist, and links into WRF/run directory. 
    
    Arguments:
    config -- a dictionary containing forecast options

    """
    logger = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WRF ***')
    
    met_em_format      = "%Y-%m-%d_%H:%M:%S"
    

    max_dom      = config['max_dom']
    domains      = range(1,max_dom+1)
    init_time    = config['init_time']
    fcst_hours   = config['fcst_hours']
    bdy_interval = config['bdy_interval']
    bdy_times    = shared.get_bdy_times(config)
    met_em_dir   = shared.sub_date(config['met_em_dir'], init_time=init_time)
    met_em_files = ['%s/met_em.d%02d.%s.nc' % (met_em_dir,d, t.strftime(met_em_format)) for d in domains for t in bdy_times] 
    wrf_run_dir    = config['wrf_run_dir']
    namelist_run   = '%s/namelist.input' % wrf_run_dir
    namelist_input = config['namelist_input']
    
    
    logger.debug('linking met_em files:')
    
    #
    # Link met_em files. There are two options for error handling here.
    # The first is to abort if any of the met_em files are missing.
    # The second is just to run wrf and see how far it gets before
    # running out of files. This will allow a partial forecast to run, 
    # even if later files are missing.
    #    
    # To use the first approach, raise an exception when a missing
    # file is encountered, otherwise just print a warning message.
    #
    # Actually, the two are equivalent so long as the met_em files 
    # are sorted.
    #
    for f in met_em_files:
        if not os.path.exists(f):
            raise IOError('met_em file missing : %s' %f)
        cmd = 'ln -sf %s %s/'%(f, wrf_run_dir)
        shared.run_cmd(cmd, config)
    
    
    logger.debug('linking namelist.input to wrf_run_dir')
    cmd = 'rm -f %s' % namelist_run
    shared.run_cmd(cmd, config)
    cmd = 'ln -sf %s %s' %(namelist_input, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('*** FINISHED PREPARING FILES FOR WRF ***')
Example #10
def transfer_to_web_dir(config):
    """ Transfers all plots in output folder to web folder"""
    
    logger = shared.get_logger()    
    logger.debug('Transferring plot files to web dir')
    init_time      = config['init_time']    
    full_trace     = config['full_trace']
    ncl_out_dir    = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_web_dir    = shared.sub_date(config['ncl_web_dir'], init_time=init_time)
    
    if not os.path.exists(ncl_web_dir):
        os.makedirs(ncl_web_dir)
    
    flist = glob.glob(ncl_out_dir+'/*')
    shared.transfer(flist, ncl_web_dir, mode='copy', debug_level='NONE')

    ncl_out_dir    = shared.sub_date(config['ncl_ol_out_dir'], init_time=init_time)
    ncl_web_dir    = shared.sub_date(config['ncl_ol_web_dir'], init_time=init_time)
    
    if not os.path.exists(ncl_web_dir):
        os.makedirs(ncl_web_dir)
    
    flist = glob.glob(ncl_out_dir+'/*')
    shared.transfer(flist, ncl_web_dir, mode='copy', debug_level='NONE')
Example #11
def get_sst(config):
    """ Downloads SST fields from an ftp server.
    Whoever is running this must have the http_proxy environment variable set
    correctly to allow them to download files through the proxy.  Example:
    http_proxy = http://slha:[email protected]:8080"""
    logger = shared.get_logger()
    # create an lftpscript in model run dir

    logger.info('*** FETCHING SST ***')
    working_dir = config['working_dir']
    tmp_dir = config['tmp_dir']
    http_proxy = os.environ['http_proxy']
    home = os.environ['HOME']
    sst_server = config['sst_server']
    sst_server_dir = config['sst_server_dir']
    sst_local_dir = config['sst_local_dir']
    sst_time = shared.get_sst_time(config)
    sst_filename = shared.sub_date(shared.get_sst_filename(config),
                                   init_time=config['init_time'])

    if not os.path.exists(sst_local_dir):
        os.makedirs(sst_local_dir)

    if os.path.exists('%s/%s' % (sst_local_dir, sst_filename)):
        logger.info('*** SST ALREADY EXISTS LOCALLY, NOT DOWNLOADED ***')
        return

    lftpfilename = '%s/lftpscript' % working_dir
    logger.debug('Writing lftpscript to %s' % lftpfilename)
    lftpscript = open(lftpfilename, 'w')
    lftpscript.write('lcd %s\n' % sst_local_dir)
    lftpscript.write('set ftp:proxy %s\n' % http_proxy)
    lftpscript.write('set hftp:use-type no\n')
    lftpscript.write('open %s\n' % sst_server)
    lftpscript.write('get %s/%s\n' % (sst_server_dir, sst_filename))
    lftpscript.write('bye')
    lftpscript.close()

    cmd = '/usr/bin/lftp -f %s' % lftpfilename
    shared.run_cmd(cmd, config)
    # check if file downloaded

    if not os.path.exists('%s/%s' % (sst_local_dir, sst_filename)):
        raise IOError('SST file: %s not downloaded' % sst_filename)
    logger.info('*** SUCCESS SST DOWNLOADED ***')
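For reference, with purely illustrative values (proxy, server, remote directory and filename are all made up), the generated lftpscript would contain:

lcd /path/to/sst_local_dir
set ftp:proxy http://proxy.example.com:8080
set hftp:use-type no
open ftp.example.com
get /remote/sst/dir/sst_20140101.grb
bye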
Example #12
def get_sst(config):
    """ Downloads SST fields from an ftp server.
    Whoever is running this must have the http_proxy environment variable set
    correctly to allow them to download files through the proxy.  Example:
    http_proxy = http://slha:[email protected]:8080"""
    logger      = shared.get_logger()
    # create an lftpscript in model run dir
    
    logger.info('*** FETCHING SST ***')
    working_dir    = config['working_dir']
    tmp_dir        = config['tmp_dir']
    http_proxy     = os.environ['http_proxy']
    home           = os.environ['HOME']
    sst_server     = config['sst_server']
    sst_server_dir = config['sst_server_dir']
    sst_local_dir  = config['sst_local_dir']
    sst_time       = shared.get_sst_time(config)
    sst_filename   = shared.sub_date(shared.get_sst_filename(config), init_time=config['init_time'])
   
    if not os.path.exists(sst_local_dir):
        os.makedirs(sst_local_dir)
    
    if os.path.exists('%s/%s' %(sst_local_dir, sst_filename)):
        logger.info('*** SST ALREADY EXISTS LOCALLY, NOT DOWNLOADED ***')
        return
    
    lftpfilename = '%s/lftpscript' % working_dir
    logger.debug('Writing lftpscript to %s' % lftpfilename)
    lftpscript     = open(lftpfilename, 'w')    
    lftpscript.write('lcd %s\n' % sst_local_dir)    
    lftpscript.write('set ftp:proxy %s\n' % http_proxy) 
    lftpscript.write('set hftp:use-type no\n')
    lftpscript.write('open %s\n' % sst_server)
    lftpscript.write('get %s/%s\n' % (sst_server_dir,sst_filename))
    lftpscript.write('bye')
    lftpscript.close()
    
    cmd = '/usr/bin/lftp -f %s' % lftpfilename
    shared.run_cmd(cmd, config)
    # check if file downloaded

    if not os.path.exists('%s/%s' %(sst_local_dir, sst_filename)):
        raise IOError('SST file: %s not downloaded' % sst_filename)
    logger.info('*** SUCCESS SST DOWNLOADED ***')
Example #13
def produce_ncl_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    
    Arguments:
    config -- dictionary containing various configuration options """
    
    logger = shared.get_logger()

    domain         = config['domain']
    model_run      = config['model_run']
    working_dir    = config['working_dir']
    ncl_code_dir   = config['ncl_code_dir']
    ncl_files      = config['ncl_code']
    ncl_code       =  ncl_files
    ncl_log        = config['ncl_log']
    wrfout_dir     = config['wrfout_dir']
    init_time      = config['init_time']
    dom            = config['dom']
    fcst_file      = '%s/wrfout_d%02d_%s:00:00.nc' %(wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_in_file    = fcst_file
    ncl_loc_file   = config['locations_file']
    ncl_out_dir    = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type   = config['ncl_out_type']
    nest_id        =  '%02d' % dom
    ncl_opt_template = config['ncl_opt_template']
    ncl_opt_file = config['ncl_opt_file']
    extract_hgts = config['extract_hgts']
    logger.info('*** RUNNING NCL SCRIPTS FOR DOMAIN d%02d***' % dom)
    
    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    if not ncl_in_file.endswith('.nc'):
        ncl_in_file = ncl_in_file + '.nc' 

    ncl_hgts = '(/%s/)' % ','.join(map(str,extract_hgts))        
        
    replacements = {'<ncl_in_file>'  : ncl_in_file, 
                    '<ncl_out_dir>'  : ncl_out_dir, 
                    '<ncl_out_type>' : ncl_out_type,
                    '<ncl_loc_file>' : ncl_loc_file,
                    '<extract_heights>' : ncl_hgts} 
        

    fill_template(ncl_opt_template, ncl_opt_file, replacements)
        
    logger.debug('ncl_opt_template: %s' % ncl_opt_template)
    logger.debug('    ncl_in_file  ----> %s' % ncl_in_file)
    logger.debug('    ncl_out_dir  ----> %s' % ncl_out_dir)
    logger.debug('    ncl_out_type ----> %s' % ncl_out_type)
    logger.debug('    ncl_loc_file ----> %s' % ncl_loc_file)
    logger.debug('ncl_opt_file: %s' % ncl_opt_file)

    for script in ncl_code:
        #
        # mem_total forces the use postprocessing node
        #
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd
        logger.debug(script)
        
        queue = config['queue']
        cmd = "ncl %s" % script
        
        
        #if queue['ncl']:
            # works on Schumi
        
            #cmd  = """ncl ncl_in_file="%s" ncl_out_dir="%s" ncl_out_type="%s" ncl_loc_file="%s" %s""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script)
            # works on maestro
            #cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s """ % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script)
        #else:
            #cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script, working_dir)
        env_vars = {'NCL_OPT_FILE': ncl_opt_file}
        ret = shared.run(cmd, config, env_vars=env_vars)
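fill_template is not defined in these examples. A minimal sketch of a placeholder-substitution helper compatible with the replacements dictionary above (the wrftools version may differ):

def fill_template(template_file, out_file, replacements):
    """Copy template_file to out_file, replacing each placeholder key
    (e.g. '<ncl_in_file>') with its value. Illustrative sketch only."""
    with open(template_file) as fin:
        text = fin.read()
    for placeholder, value in replacements.items():
        text = text.replace(placeholder, str(value))
    with open(out_file, 'w') as fout:
        fout.write(text)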
Example #14
def produce_ncl_ol_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    Updated: Some plots are much easier to produce using the original 
    wrfout netcdf files, rather than use the UPP post-processed grib files. 
    Howeverm in future we should stick with one or the other.
    
    
    Arguments:
    config -- dictionary containing various configuration options """
    
    logger = shared.get_logger()    
    logger.info('*** RUNNING NCL SCRIPTS ***')
     
    working_dir  = config['working_dir']

    ncl_code_dir   = config['ncl_code_dir']
    ncl_files      = config['ncl_ol_code']
    #ncl_code       = ['%s/%s' % (ncl_code_dir, f) for f in ncl_files]
    ncl_code       = ncl_files
    ncl_log        = config['ncl_log']
    wrftools_dir   = config['wrftools_dir']
    wrfout_dir     = config['wrfout_dir']
    init_time      = config['init_time']
    dom            = config['dom']
    fcst_file      = '%s/wrfout_d%02d_%s:00:00.nc' %(wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_out_dir    = shared.sub_date(config['ncl_ol_out_dir'], init_time=init_time)
    ncl_out_type   = config['ncl_out_type']
    nest_id        =  '%02d' % dom
    

    ncl_in_file    = fcst_file
    ncl_loc_file   = config['locations_file']
    # ncl_out_dir and ncl_out_type are already set above from the overlay (ol) config


    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    #
    # Communicate to NCL via environment variables
    # NCL expects the following to be set
    #File    = getenv("FCST_FILE")
    #type    = getenv("NCL_OUT_TYPE")
    #diro    = getenv("NCL_OUT_DIR")
    #;web_dir = getenv("WEB_DIR")
    #domain  = getenv("NEST_ID")    
    #run_hour = getenv("RUN_HOUR")

    #
    # Try escaping : in fcst_file
    #
    #fcst_file = fcst_file.replace(':', r'\:')
    #os.environ['FCST_FILE']      = fcst_file
    #os.environ['NCL_OUT_DIR']    = ncl_out_dir
    #os.environ['NCL_OUT_TYPE']   = ncl_out_type
    #os.environ['NEST_ID']        = nest_id
    #os.environ['DOMAIN']         = domain
    #os.environ['MODEL_RUN']      = model_run



    #logger.debug('Setting environment variables')
    logger.debug('FCST_FILE    ----> %s' % fcst_file)
    logger.debug('NCL_OUT_DIR  ----> %s' % ncl_out_dir)
    logger.debug('NCL_OUT_TYPE ----> %s' % ncl_out_type)
    logger.debug('NEST_ID      ----> %s' % nest_id)
    logger.debug('PATH')
    logger.debug(os.environ['PATH'])
    #logger.debug('DOMAIN       ----> %s' % domain)
    #logger.debug('MODEL_RUN    ----> %s' % model_run)


    for script in ncl_code:
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        cmd  = "ncl %s " % script
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd
        
        logger.warn("NCL to produce GEOTIFFs does not work on the post-processing queue, running on head node")

        

        cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script, working_dir)

        ret = shared.run_cmd(cmd, config)
        
        
        gwarp = config['gwarp']
        os.chdir(ncl_out_dir)
        
        cmd = "%s %s/*.tiff" %(gwarp, ncl_out_dir)
        logger.debug(cmd)
        shared.run_cmd(cmd, config)
Example #15
def produce_ncl_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    Updated: Some plots are much easier to produce using the original 
    wrfout netcdf files, rather than use the UPP post-processed grib files. 
    Howeverm in future we should stick with one or the other.
    
    
    Arguments:
    config -- dictionary containing various configuration options """

    logger = shared.get_logger()

    domain = config['domain']
    model_run = config['model_run']
    working_dir = config['working_dir']
    ncl_code_dir = config['ncl_code_dir']
    ncl_files = config['ncl_code']
    #ncl_code      = ['%s/%s' % (ncl_code_dir, f) for f in ncl_files]
    ncl_code = ncl_files
    ncl_log = config['ncl_log']
    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    dom = config['dom']
    fcst_file = '%s/wrfout_d%02d_%s:00:00.nc' % (
        wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_in_file = fcst_file
    ncl_loc_file = config['locations_file']
    ncl_out_dir = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type = config['ncl_out_type']
    nest_id = '%02d' % dom

    logger.info('*** RUNNING NCL SCRIPTS FOR DOMAIN d%02d***' % dom)

    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    #
    # Communicate to NCL via environment variables
    # NCL expects the following to be set
    #File    = getenv("FCST_FILE")
    #type    = getenv("NCL_OUT_TYPE")
    #diro    = getenv("NCL_OUT_DIR")
    #;web_dir = getenv("WEB_DIR")
    #domain  = getenv("NEST_ID")
    #run_hour = getenv("RUN_HOUR")

    #
    # Try escaping : in fcst_file
    #
    #fcst_file = fcst_file.replace(':', r'\:')
    #os.environ['FCST_FILE']      = fcst_file
    #os.environ['LOCATIONS_FILE'] = loc_file
    #os.environ['NCL_OUT_DIR']    = ncl_out_dir
    #os.environ['NCL_OUT_TYPE']   = ncl_out_type
    #os.environ['NEST_ID']        = nest_id
    #os.environ['DOMAIN']         = domain
    #os.environ['MODEL_RUN']      = model_run

    logger.debug('ncl_in_file  ----> %s' % ncl_in_file)
    logger.debug('ncl_out_dir  ----> %s' % ncl_out_dir)
    logger.debug('ncl_out_type ----> %s' % ncl_out_type)
    logger.debug('ncl_loc_file ----> %s' % ncl_loc_file)

    if not ncl_in_file.endswith('.nc'):
        ncl_in_file = ncl_in_file + '.nc'

    for script in ncl_code:
        #
        # mem_total forces the use postprocessing node
        #
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd
        logger.debug(script)

        queue = config['queue']
        if queue['ncl']:
            cmd = """ncl ncl_in_file="%s" ncl_out_dir="%s" ncl_out_type="%s" ncl_loc_file="%s" %s""" % (
                ncl_in_file, ncl_out_dir, ncl_out_type, ncl_loc_file, script)
        else:
            cmd = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (
                ncl_in_file, ncl_out_dir, ncl_out_type, ncl_loc_file, script,
                working_dir)

        ret = shared.run(cmd, config)
Example #16
def produce_ncl_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    
    Arguments:
    config -- dictionary containing various configuration options """

    logger = shared.get_logger()

    domain = config['domain']
    model_run = config['model_run']
    working_dir = config['working_dir']
    ncl_code_dir = config['ncl_code_dir']
    ncl_files = config['ncl_code']
    ncl_code = ncl_files
    ncl_log = config['ncl_log']
    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    dom = config['dom']
    fcst_file = '%s/wrfout_d%02d_%s:00:00.nc' % (
        wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_in_file = fcst_file
    ncl_loc_file = config['locations_file']
    ncl_out_dir = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type = config['ncl_out_type']
    nest_id = '%02d' % dom
    ncl_opt_template = config['ncl_opt_template']
    ncl_opt_file = config['ncl_opt_file']
    extract_hgts = config['extract_hgts']
    logger.info('*** RUNNING NCL SCRIPTS FOR DOMAIN d%02d***' % dom)

    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    if not ncl_in_file.endswith('.nc'):
        ncl_in_file = ncl_in_file + '.nc'

    ncl_hgts = '(/%s/)' % ','.join(map(str, extract_hgts))

    replacements = {
        '<ncl_in_file>': ncl_in_file,
        '<ncl_out_dir>': ncl_out_dir,
        '<ncl_out_type>': ncl_out_type,
        '<ncl_loc_file>': ncl_loc_file,
        '<extract_heights>': ncl_hgts
    }

    fill_template(ncl_opt_template, ncl_opt_file, replacements)

    logger.debug('ncl_opt_template: %s' % ncl_opt_template)
    logger.debug('    ncl_in_file  ----> %s' % ncl_in_file)
    logger.debug('    ncl_out_dir  ----> %s' % ncl_out_dir)
    logger.debug('    ncl_out_type ----> %s' % ncl_out_type)
    logger.debug('    ncl_loc_file ----> %s' % ncl_loc_file)
    logger.debug('ncl_opt_file: %s' % ncl_opt_file)

    for script in ncl_code:
        #
        # mem_total forces the use postprocessing node
        #
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd
        logger.debug(script)

        queue = config['queue']
        cmd = "ncl %s" % script

        #if queue['ncl']:
        # works on Schumi

        #cmd  = """ncl ncl_in_file="%s" ncl_out_dir="%s" ncl_out_type="%s" ncl_loc_file="%s" %s""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script)
        # works on maestro
        #cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s """ % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script)
        #else:
        #cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script, working_dir)
        env_vars = {'NCL_OPT_FILE': ncl_opt_file}
        ret = shared.run(cmd, config, env_vars=env_vars)