Example #1
File: post.py Project: keenmisty/wrftools
def add_metadata(config):
    """ Adds metadata tags into the wrfout files. Expects there to be one 
    wrfout file per init_time. If there are more, they will not have metadata added."""

    logger = shared.get_logger()
    logger.info("*** Adding metadata to wrfout files ***")


    wrfout_dir = config['wrfout_dir']
    init_time  = config['init_time']
    max_dom    = config['max_dom']
    
    metadata = config['metadata']
    logger.debug(metadata)
    wrfout_files = ['%s/wrfout_d%02d_%s' %(wrfout_dir, d, init_time.strftime('%Y-%m-%d_%H:%M:%S')) for d in range(1,max_dom+1)]
    
    for f in wrfout_files:
        logger.debug("compressing %s" % f)
        if not os.path.exists(f):
            raise MissingFile("could not find %s" % f)
        
        # create attribute description for ncatted 
        # note that we make the attribute names uppercase for consistency with WRF output
        att_defs = ' '.join(['-a %s,global,c,c,"%s"' %(s.upper(), config[s]) for s in metadata])
        logger.debug(att_defs)
        cmd = 'ncatted -O -h %s %s' % (att_defs, f)
        logger.debug(cmd)
        shared.run_cmd(cmd, config)
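
For concreteness, a minimal sketch of the command this builds, using hypothetical metadata keys and values (the real keys come from config['metadata']):

# Hypothetical illustration of the ncatted invocation add_metadata constructs.
metadata = ['institution', 'contact']
config = {'institution': 'ACME Met Office', 'contact': 'ops@example.com'}
att_defs = ' '.join('-a %s,global,c,c,"%s"' % (s.upper(), config[s]) for s in metadata)
print('ncatted -O -h %s %s' % (att_defs, 'wrfout_d01_2013-04-24_00:00:00'))
# prints: ncatted -O -h -a INSTITUTION,global,c,c,"ACME Met Office" -a CONTACT,global,c,c,"ops@example.com" wrfout_d01_2013-04-24_00:00:00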
Example #2
File: main.py Project: crazyideas21/dev
def main():
    """ Starts the experiment. """

    # shared.sync_clocks()
    shared.error_log('New run: ' + str(datetime.datetime.now()))

    # Initialize output directories.
    shared.run_cmd('mkdir -p ../out-pickle')
    shared.run_cmd('mkdir -p ../out-graph')

    for flow_count in [100, 500, 1000, 1500]:

        for forward_bw in [1200]:

#            reset_state()            
#            try:
#                switch.reset_flow_table()
#                switch.add_rules(flow_count)
#            except Exception, err:
#                print 'Cannot add rules:', repr(err)
#                continue

            time.sleep(2)

            config.pkt_size = 1400
            config.flow_count = flow_count
            config.max_time = 60
            config.target_bw_Mbps = forward_bw

            shared.safe_run(run_and_save, 'direct')
Example #3
def prepare_ndown(config):
    """Runs a one-way nested simulation using ndown.exe
    We assume the coarse resolution run has been done, 
    and we have wrfout_d01.date files.
    
    We only need to run metgrid for the initial forecast time.
    
    We have two options: either we force the user to do all the renaming
    themselves, or we allow them to utilise the original namelist.input file
    and effectively add a column onto that. This could be done via a bunch of
    smaller utility steps, e.g.

        shift_namelist namelist.input 3 > namelist.input

    which would rotate the columns of a namelist.input file so that the n-th
    column becomes the first column.

    Therefore we have to run ungrib, geogrid and metgrid.
    Assume the geo_em files exist for both domains."""

    logger = shared.get_logger()
    logger.info('*** PREPARING NDOWN ***')
    namelist_wps = config['namelist_wps']
    namelist_input = config['namelist_input']
    max_dom = config['max_dom']
    wrf_run_dir = config['wrf_run_dir']

    if max_dom != 2:
        raise ConfigError("max_dom must equal 2 when doing ndown runs")

    bdy_times = shared.get_bdy_times(config)
    ndown_fmt = config['ndown_fmt']

    wrfout_d01_files = [
        shared.sub_date(ndown_fmt, init_time=bdy_times[0], valid_time=t)
        for t in bdy_times
    ]
    for f in wrfout_d01_files:
        if not os.path.exists(f):
            raise MissingFile("File: %s missing" % f)
        cmd = 'ln -sf %s %s' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)

    # Check for wrfinput_d02
    wrfinput_d02 = '%s/wrfinput_d02' % wrf_run_dir
    if not os.path.exists(wrfinput_d02):
        raise MissingFile("wrfinput_d02 is missing")

    os.rename('%s/wrfinput_d02' % wrf_run_dir, '%s/wrfndi_d02' % wrf_run_dir)

    namelist = read_namelist(namelist_input)

    # History interval is in minutes
    history_interval = namelist.settings['history_interval']
    interval_seconds = history_interval[0] * 60
    namelist.update('interval_seconds', interval_seconds)
    namelist.insert('io_form_auxinput2', 2, 'time_control')
    namelist.to_file(namelist_input)

    logger.info('*** DONE PREPARE NDOWN ***')
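
The shift_namelist utility mentioned in the docstring is not part of these examples; a minimal sketch of the column rotation it describes, assuming a namelist entry is held as a list of per-domain values (hypothetical helper):

def rotate_columns(values, n):
    # Rotate so the n-th column (0-based) becomes the first column.
    # Hypothetical helper illustrating the docstring's shift_namelist idea.
    return values[n:] + values[:n]

print(rotate_columns(['d01', 'd02', 'd03'], 1))   # ['d02', 'd03', 'd01']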
Example #4
File: post.py Project: zhangylang/wrftools
def add_metadata(config):
    """ Adds metadata tags into the wrfout files. Expects there to be one 
    wrfout file per init_time. If there are more, they will not have metadata added."""

    logger = shared.get_logger()
    logger.info("*** Adding metadata to wrfout files ***")

    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    max_dom = config['max_dom']

    metadata = config['metadata']
    logger.debug(metadata)
    wrfout_files = [
        '%s/wrfout_d%02d_%s' %
        (wrfout_dir, d, init_time.strftime('%Y-%m-%d_%H:%M:%S'))
        for d in range(1, max_dom + 1)
    ]

    for f in wrfout_files:
        logger.debug("compressing %s" % f)
        if not os.path.exists(f):
            raise MissingFile("could not find %s" % f)

        # create attribute description for ncatted
        # note that we make the attribute names uppercase for consistency with WRF output
        att_defs = ' '.join([
            '-a %s,global,c,c,"%s"' % (s.upper(), config[s]) for s in metadata
        ])
        logger.debug(att_defs)
        cmd = 'ncatted -O -h %s %s' % (att_defs, f)
        logger.debug(cmd)
        shared.run_cmd(cmd, config)
Example #5
def prepare_ndown(config):
    """Runs a one-way nested simulation using ndown.exe
    We assume the coarse resolution run has been done, 
    and we have wrfout_d01.date files.
    
    We only need to run metgrid for the initial forecast time.
    
    We have two options: either we force the user to do all the renaming
    themselves, or we allow them to utilise the original namelist.input file
    and effectively add a column onto that. This could be done via a bunch of
    smaller utility steps, e.g.

        shift_namelist namelist.input 3 > namelist.input

    which would rotate the columns of a namelist.input file so that the n-th
    column becomes the first column.

    Therefore we have to run ungrib, geogrid and metgrid.
    Assume the geo_em files exist for both domains."""

    logger = shared.get_logger()
    logger.info('*** PREPARING NDOWN ***')
    namelist_wps   = config['namelist_wps']
    namelist_input = config['namelist_input']
    max_dom        = config['max_dom']
    wrf_run_dir    = config['wrf_run_dir']
    
    
    if max_dom != 2:
        raise ConfigError("max_dom must equal 2 when doing ndown runs")
    
    bdy_times = shared.get_bdy_times(config)
    ndown_fmt = config['ndown_fmt']
    
    wrfout_d01_files = [shared.sub_date(ndown_fmt, init_time=bdy_times[0], valid_time=t) for t in bdy_times]
    for f in wrfout_d01_files:
        if not os.path.exists(f):
            raise MissingFile("File: %s missing" % f)
        cmd = 'ln -sf %s %s' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)
    
    
    # Check for wrfinput_d02
    wrfinput_d02 = '%s/wrfinput_d02' % wrf_run_dir
    if not os.path.exists(wrfinput_d02):
        raise MissingFile("wrfinput_d02 is missing")
    
    os.rename('%s/wrfinput_d02' % wrf_run_dir, '%s/wrfndi_d02' % wrf_run_dir)
    
    
    namelist         = read_namelist(namelist_input)
    
    # History interval is in minutes
    history_interval = namelist.settings['history_interval']
    interval_seconds = history_interval[0] * 60
    namelist.update('interval_seconds', interval_seconds)
    namelist.insert('io_form_auxinput2', 2, 'time_control')
    namelist.to_file(namelist_input)
    
    logger.info('*** DONE PREPARE NDOWN ***')
Example #6
File: build.py Project: jimmydo/cotton
    def _generate_deploy_files(self):
        app_type = detection.detect_app_type(self)
        procfile = _parse_procfile()
        shared.run_cmd('mkdir -p {0}'.format(shared.DEPLOY_DIR))
        with shared.chdir(shared.DEPLOY_DIR):
            _apply_template2(
                app_type.init_template,
                'initialize',
                app_type.init_template_context,
                mode=0700
            )
            _apply_template2(
                'update',
                'update',
                {
                    'update_command': app_type.update_command
                },
                mode=0700
            )
            _apply_template2(
                'run-command',
                'run-command',
                {
                    'env_vars_file': shared.ENV_VARS_FILE
                },
                mode=0700
            )

            self._create_process_type_run_scripts(procfile)
            self._create_supervisor_configs(procfile.keys())
Example #7
def extract_tseries(config):

    logger = shared.get_logger()
    logger.info("\n*** EXTRACTING TIME SERIES ***")

    wrfout_dir = config["wrfout_dir"]
    tseries_dir = config["tseries_dir"]
    json_dir = config["json_dir"]
    init_time = config["init_time"]
    dom = config["dom"]
    fcst_file = "%s/wrfout_d%02d_%s:00:00.nc" % (
        wrfout_dir,
        dom,
        init_time.strftime("%Y-%m-%d_%H"),
    )  # note we add on the nc extension here
    ncl_loc_file = config["locations_file"]
    ncl_code = config["tseries_code"]
    extract_hgts = config["extract_hgts"]
    # tseries_fmt    = config['tseries_fmt']
    ncl_log = config["ncl_log"]
    ncl_opt_template = config["ncl_opt_template"]
    ncl_opt_file = config["ncl_opt_file"]

    if not os.path.exists(tseries_dir):
        os.makedirs(tseries_dir)

    # Always go via the netcdf file
    tseries_file = "%s/tseries_d%02d_%s.nc" % (tseries_dir, dom, init_time.strftime("%Y-%m-%d_%H"))

    ncl_hgts = "(/%s/)" % ",".join(map(str, extract_hgts))
    replacements = {
        "<ncl_in_file>": fcst_file,
        "<ncl_out_file>": tseries_file,
        "<ncl_out_dir>": tseries_dir,
        "<ncl_out_type>": "nc",
        "<ncl_loc_file>": ncl_loc_file,
        "<extract_heights>": ncl_hgts,
    }

    shared.fill_template(ncl_opt_template, ncl_opt_file, replacements)

    logger.debug("ncl_opt_template: %s" % ncl_opt_template)
    logger.debug("    ncl_in_file  ----> %s" % fcst_file)
    logger.debug("    ncl_out_dir  ----> %s" % tseries_dir)
    logger.debug("    ncl_out_type ----> %s" % "nc")
    logger.debug("    ncl_loc_file ----> %s" % ncl_loc_file)
    logger.debug("ncl_opt_file: %s" % ncl_opt_file)

    for script in ncl_code:
        cmd = "NCL_OPT_FILE=%s ncl %s >> %s 2>&1" % (ncl_opt_file, script, ncl_log)
        shared.run_cmd(cmd, config)

    logger.info("*** DONE EXTRACTING TIME SERIES ***\n")
Example #8
File: extract.py Project: wxguy/wrftools-1
def extract_tseries(config):

    logger = shared.get_logger()
    logger.info('\n*** EXTRACTING TIME SERIES ***')
     
    wrfout_dir     = config['wrfout_dir']
    tseries_dir    = config['tseries_dir']
    json_dir       = config['json_dir']
    init_time      = config['init_time']
    dom            = config['dom']
    fcst_file      = '%s/wrfout_d%02d_%s:00:00.nc' %(wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H")) # note we add on the nc extension here
    ncl_loc_file   = config['locations_file']
    ncl_code       = config['tseries_code']
    extract_hgts   = config['extract_hgts']
    #tseries_fmt    = config['tseries_fmt']
    ncl_log        = config['ncl_log']
    ncl_opt_template = config['ncl_opt_template']
    ncl_opt_file     = config['ncl_opt_file']

    if not os.path.exists(tseries_dir):
        os.makedirs(tseries_dir)
    
    # Always go via the netcdf file
    tseries_file = '%s/tseries_d%02d_%s.nc' % (tseries_dir, dom,init_time.strftime("%Y-%m-%d_%H"))

    ncl_hgts = '(/%s/)' % ','.join(map(str,extract_hgts))
    replacements = {'<ncl_in_file>'  : fcst_file, 
                    '<ncl_out_file>' : tseries_file,
                    '<ncl_out_dir>'  : tseries_dir, 
                    '<ncl_out_type>' : "nc",
                    '<ncl_loc_file>' : ncl_loc_file,
                    '<extract_heights>': ncl_hgts}
        

    shared.fill_template(ncl_opt_template, ncl_opt_file, replacements)
        
    logger.debug('ncl_opt_template: %s' % ncl_opt_template)
    logger.debug('    ncl_in_file  ----> %s' % fcst_file)
    logger.debug('    ncl_out_dir  ----> %s' % tseries_dir)
    logger.debug('    ncl_out_type ----> %s' % "nc")
    logger.debug('    ncl_loc_file ----> %s' % ncl_loc_file)
    logger.debug('ncl_opt_file: %s' % ncl_opt_file)
    
    for script in ncl_code:
        cmd  = "NCL_OPT_FILE=%s ncl %s >> %s 2>&1" % (ncl_opt_file,script, ncl_log)
        shared.run_cmd(cmd, config)

    logger.info("*** DONE EXTRACTING TIME SERIES ***\n")
Example #9
def extract_tseries(config):

    logger = shared.get_logger()
    logger.info('*** EXTRACTING TIME SERIES ***')

    wrfout_dir = config['wrfout_dir']
    tseries_dir = config['tseries_dir']
    json_dir = config['json_dir']
    init_time = config['init_time']
    dom = config['dom']
    fcst_file = '%s/wrfout_d%02d_%s:00:00.nc' % (
        wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H")
    )  # note we add on the nc extension here
    loc_file = config['locations_file']
    ncl_code = config['tseries_code']
    extract_hgts = config['extract_hgts']
    tseries_fmt = config['tseries_fmt']
    ncl_opt_file = config['ncl_opt_file']

    ncl_log = config['ncl_log']
    if not os.path.exists(tseries_dir):
        os.makedirs(tseries_dir)

    # Always go via the netcdf file
    tseries_file = '%s/tseries_d%02d_%s.nc' % (
        tseries_dir, dom, init_time.strftime("%Y-%m-%d_%H"))

    os.environ['FCST_FILE'] = fcst_file
    os.environ['LOCATIONS_FILE'] = loc_file
    os.environ['NCL_OUT_DIR'] = tseries_dir
    os.environ['NCL_OUT_FILE'] = tseries_file
    os.environ['NCL_OPT_FILE'] = ncl_opt_file

    logger.debug('Setting environment variables')
    logger.debug('FCST_FILE    ----> %s' % fcst_file)
    logger.debug('NCL_OUT_DIR  ----> %s' % tseries_dir)
    logger.debug('NCL_OUT_FILE  ----> %s' % tseries_file)
    logger.debug('LOCATIONS_FILE ----> %s' % loc_file)
    logger.debug('NCL_OPT_FILE   ----> %s' % ncl_opt_file)
    logger.debug(extract_hgts)

    ncl_hgts = '(/%s/)' % ','.join(map(str, extract_hgts))

    for script in ncl_code:
        cmd = "ncl 'extract_heights=%s'  %s >> %s 2>&1" % (ncl_hgts, script,
                                                           ncl_log)
        shared.run_cmd(cmd, config)

    ncdump(config)
Example #10
File: tcpdump.py Project: crazyideas21/dev
def parse_pkt(pkt_func):
    """
    Loops to parse output from tcpdump. An example would be:

    [recvd_time     ]                 [   ] <- (flow_id + 10000)
    1329098408.055825 IP 192.168.1.20.10007 > 192.168.1.1.9: UDP, length 22
          0x0000:    4500 0032 066e 0000 2011 10e8 c0a8 0114 <- ignore
          0x0010:    c0a8 0101 2717 0009 001e 0000 be9b e955 <- ignore
          0x0020:    0000 066f 4f38 6ea6 000e 4402 0000 0000 
                     [seq_num] [tvsec  ] [tvusec ]
                    ... the rest of the lines can be ignored

    Each time a new packet arrives, invokes the pkt_func callback function. The
    pkt_func should have arguments (flow_id, seq_number, sent_time, recvd_time).
    This allows users to handle incoming packets, based on these four
    parameters, accordingly.
    
    Returns None.

    """    
    # Initialize fields to extract.
    recvd_time = flow_id = seq_num = tvsec = tvusec = None

    # Regex applied on udp header to extract recvd_time and flow_id.
    regex_udp = re.compile('(\d+\.\d+) IP .*\.(\d+) >')

    # Regex applied on the pktgen payload.
    regex_pktgen = re.compile('0x0020:\s+(.{10})(.{10})(.{10})')

    # Parse with tcpdump -r
    p_tcpdump = shared.run_cmd('tcpdump -nnxStt -r ', config.tmp_pcap_file,
                               stdout=subprocess.PIPE)

    for line in p_tcpdump.stdout:

        re_udp = regex_udp.search(line)
        if re_udp:
            recvd_time = float(re_udp.group(1))
            flow_id = int(re_udp.group(2)) - 10000
            continue

        re_pktgen = regex_pktgen.search(line)
        if re_pktgen:

            # Here, the seq_num is a global value. We need to convert it to a
            # per-flow sequence number.
            seq_num = _hex_to_int(re_pktgen.group(1))
            seq_num = seq_num / config.flow_count

            # Convert the recvd timestamp to float.
            tvsec = _hex_to_int(re_pktgen.group(2))
            tvusec = _hex_to_int(re_pktgen.group(3))
            sent_time = tvsec + tvusec / 1000000.0

            # We should have obtained all necessary fields to form a packet.
            assert None not in (recvd_time, flow_id)
            pkt_func(flow_id, seq_num, sent_time, recvd_time)

            # Reset all fields.
            recvd_time = flow_id = seq_num = tvsec = tvusec = None
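
Applied to the sample lines in the docstring, the two regular expressions recover the fields like this (self-contained check):

import re

regex_udp = re.compile(r'(\d+\.\d+) IP .*\.(\d+) >')
regex_pktgen = re.compile(r'0x0020:\s+(.{10})(.{10})(.{10})')

udp_line = '1329098408.055825 IP 192.168.1.20.10007 > 192.168.1.1.9: UDP, length 22'
m = regex_udp.search(udp_line)
print(float(m.group(1)), int(m.group(2)) - 10000)    # 1329098408.055825 7

payload_line = '      0x0020:    0000 066f 4f38 6ea6 000e 4402 0000 0000'
m = regex_pktgen.search(payload_line)
print([g.replace(' ', '') for g in m.groups()])      # ['0000066f', '4f386ea6', '000e4402']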
Example #11
def run_geogrid(config):
    """ Runs geogrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNINING GEOGRID ***")
    wps_run_dir = config['wps_run_dir']
    os.chdir(wps_run_dir)

    queue = config['queue']
    log_file = '%s/geogrid.log' % wps_run_dir

    geogrid_wps = '%(wps_run_dir)s/GEOGRID.TBL' % config

    if not os.path.exists(geogrid_wps):
        raise IOError("Could not find GEOGRID.TBL at: %s " % geogrid_wps)

    cmd = '%s/geogrid.exe' % wps_run_dir

    shared.run(cmd, config, wps_run_dir)

    cmd = 'grep "Successful completion" %s/geogrid.log*' % (wps_run_dir)
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('geogrid.exe did not complete')

    logger.info('*** SUCCESS GEOGRID ***\n')
Example #12
def run_geogrid(config):
    """ Runs geogrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING GEOGRID ***")
    wps_run_dir    = config['wps_run_dir']
    os.chdir(wps_run_dir)

    queue          = config['queue']
    log_file       = '%s/geogrid.log' % wps_run_dir
    
    geogrid_wps = '%(wps_run_dir)s/GEOGRID.TBL' % config

    if not os.path.exists(geogrid_wps):
        raise IOError("Could not find GEOGRID.TBL at: %s " % geogrid_wps)
    

    cmd       =  '%s/geogrid.exe' % wps_run_dir
    
    shared.run(cmd, config, wps_run_dir)
    
    cmd = 'grep "Successful completion" %s/geogrid.log*' %(wps_run_dir)
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('geogrid.exe did not complete')

    logger.info('*** SUCCESS GEOGRID ***\n')
Example #13
def run_wrf(config):
    """ Run wrf.exe and check output was sucessful
    
    Arguments:
    config -- dictionary containing various configuration options
    
    """
    logger = shared.get_logger()
    logger.info('\n*** RUNNING WRF ***')
    queue         = config['queue']
    wrf_run_dir   = config['wrf_run_dir']
    log_file      = '%s/wrf.log' % wrf_run_dir
    
    executable  = '%s/wrf.exe' % wrf_run_dir
    shared.run(executable, config, from_dir=wrf_run_dir)
    

    #
    # Check for success
    #    
    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('wrf.exe did not complete')
    
    logger.info('*** SUCCESS WRF ***\n')
Example #14
def run_wrf(config):
    """ Run wrf.exe and check output was sucessful
    
    Arguments:
    config -- dictionary containing various configuration options
    
    """
    logger = shared.get_logger()
    logger.info('\n*** RUNNING WRF ***')
    queue = config['queue']
    wrf_run_dir = config['wrf_run_dir']
    log_file = '%s/wrf.log' % wrf_run_dir

    executable = '%s/wrf.exe' % wrf_run_dir
    shared.run(executable, config, from_dir=wrf_run_dir)

    #
    # Check for success
    #
    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('wrf.exe did not complete')

    logger.info('*** SUCCESS WRF ***\n')
Example #15
File: extract.py Project: qingu/wrftools
def extract_tseries(config):

    logger = shared.get_logger()
    logger.info('*** EXTRACTING TIME SERIES ***')
     
    wrfout_dir     = config['wrfout_dir']
    tseries_dir    = config['tseries_dir']
    json_dir       = config['json_dir']
    init_time      = config['init_time']
    dom            = config['dom']
    fcst_file      = '%s/wrfout_d%02d_%s:00:00.nc' %(wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H")) # note we add on the nc extension here
    loc_file       = config['locations_file']
    ncl_code       = config['tseries_code']
    extract_hgts   = config['extract_hgts']
    tseries_fmt    = config['tseries_fmt']
    ncl_opt_file   = config['ncl_opt_file']
    
    
    ncl_log        = config['ncl_log']
    if not os.path.exists(tseries_dir):
        os.makedirs(tseries_dir)
    
    # Always go via the netcdf file
    tseries_file = '%s/tseries_d%02d_%s.nc' % (tseries_dir, dom,init_time.strftime("%Y-%m-%d_%H"))

    os.environ['FCST_FILE']      = fcst_file
    os.environ['LOCATIONS_FILE'] = loc_file
    os.environ['NCL_OUT_DIR']    = tseries_dir
    os.environ['NCL_OUT_FILE']   = tseries_file
    os.environ['NCL_OPT_FILE']   = ncl_opt_file
    
    
    logger.debug('Setting environment variables')
    logger.debug('FCST_FILE    ----> %s'  % fcst_file)
    logger.debug('NCL_OUT_DIR  ----> %s'  % tseries_dir)
    logger.debug('NCL_OUT_FILE  ----> %s' % tseries_file)
    logger.debug('LOCATIONS_FILE ----> %s' % loc_file)
    logger.debug('NCL_OPT_FILE   ----> %s' % ncl_opt_file)
    logger.debug(extract_hgts)

    ncl_hgts = '(/%s/)' % ','.join(map(str,extract_hgts))
    
    for script in ncl_code:
        cmd  = "ncl 'extract_heights=%s'  %s >> %s 2>&1" % (ncl_hgts,script, ncl_log)
        shared.run_cmd(cmd, config)

    ncdump(config)
Example #16
File: fetch.py Project: dchichkov/wrftools
def get_sst(config):
    """ Downloads SST fields from an ftp server.
    Whoever is running this must have the http_proxy environment variable set
    correctly to allow them to download files through the proxy.  Example:
    http_proxy = http://slha:[email protected]:8080"""
    logger = shared.get_logger()
    # create an lftpscript in model run dir

    logger.info('*** FETCHING SST ***')
    working_dir = config['working_dir']
    tmp_dir = config['tmp_dir']
    http_proxy = os.environ['http_proxy']
    home = os.environ['HOME']
    sst_server = config['sst_server']
    sst_server_dir = config['sst_server_dir']
    sst_local_dir = config['sst_local_dir']
    sst_time = shared.get_sst_time(config)
    sst_filename = shared.sub_date(shared.get_sst_filename(config),
                                   init_time=config['init_time'])

    if not os.path.exists(sst_local_dir):
        os.makedirs(sst_local_dir)

    if os.path.exists('%s/%s' % (sst_local_dir, sst_filename)):
        logger.info('*** SST ALREADY EXISTS LOCALLY, NOT DOWNLOADED ***')
        return

    lftpfilename = '%s/lftpscript' % working_dir
    logger.debug('Writing lftpscript to %s' % lftpfilename)
    lftpscript = open(lftpfilename, 'w')
    lftpscript.write('lcd %s\n' % sst_local_dir)
    lftpscript.write('set ftp:proxy %s\n' % http_proxy)
    lftpscript.write('set hftp:use-type no\n')
    lftpscript.write('open %s\n' % sst_server)
    lftpscript.write('get %s/%s\n' % (sst_server_dir, sst_filename))
    lftpscript.write('bye')
    lftpscript.close()

    cmd = '/usr/bin/lftp -f %s' % lftpfilename
    shared.run_cmd(cmd, config)
    # check if file downloaded

    if not os.path.exists('%s/%s' % (sst_local_dir, sst_filename)):
        raise IOError('SST file: %s not downloaded' % sst_filename)
    logger.info('*** SUCCESS SST DOWNLOADED ***')
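
For reference, the generated lftpscript would contain something like the following (server, directory and file names hypothetical):

lcd /data/sst
set ftp:proxy http://user:pass@proxy.example.com:8080
set hftp:use-type no
open ftp.example.com
get /pub/sst/sst_20130424.grb
bye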
Example #17
def run_metgrid(config):
    """ Runs metgrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING METGRID ***")

    queue = config['queue']
    wps_run_dir = config['wps_run_dir']
    log_file = '%s/metgrid.log' % wps_run_dir
    bdy_conditions = config['bdy_conditions']
    namelist_wps = config['namelist_wps']
    namelist = shared.read_namelist(namelist_wps)

    met_em_dir = shared.sub_date(config['met_em_dir'], config['init_time'])

    #
    # vtable may be a dictionary to support running ungrib multiple
    # times. In which case, we need to put multiple prefixes into
    # the namelist.wps file
    #

    vtable = config['vtable']

    if type(vtable) == type({}):
        prefixes = vtable.keys()
    else:
        prefixes = [bdy_conditions]

    namelist.update('fg_name', prefixes)
    namelist.update('opt_output_from_metgrid_path',
                    met_em_dir,
                    section='metgrid')
    if not config['sst']:
        namelist.remove('constants_name')

    namelist.to_file(namelist_wps)

    logger.debug('met_em_dir: %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        logger.debug('creating met_em_dir: %s ' % met_em_dir)
        os.makedirs(met_em_dir)

    os.chdir(wps_run_dir)
    cmd = "%s/metgrid.exe" % wps_run_dir

    shared.run(cmd, config, from_dir=wps_run_dir)

    cmd = 'grep "Successful completion" %s/metgrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('metgrid.exe did not complete')

    logger.info('*** SUCCESS METGRID ***\n')
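
The two config shapes accepted for vtable, grounded in the branch above (file names hypothetical):

# Scalar: a single boundary-condition source; the prefix falls back to bdy_conditions
vtable = 'Vtable.GFS'

# Dictionary: one ungrib pass per key; the keys become the fg_name prefixes
vtable = {'GFS': 'Vtable.GFS', 'SST': 'Vtable.SST'}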
Example #18
File: fetch.py Project: qingu/wrftools
def get_sst(config):
    """ Downloads SST fields from an ftp server.
    Whoever is running this must have the http_proxy environment variable set
    correctly to allow them to download files through the proxy.  Example:
    http_proxy = http://slha:[email protected]:8080"""
    logger      = shared.get_logger()
    # create an lftpscript in model run dir
    
    logger.info('*** FETCHING SST ***')
    working_dir    = config['working_dir']
    tmp_dir        = config['tmp_dir']
    http_proxy     = os.environ['http_proxy']
    home           = os.environ['HOME']
    sst_server     = config['sst_server']
    sst_server_dir = config['sst_server_dir']
    sst_local_dir  = config['sst_local_dir']
    sst_time       = shared.get_sst_time(config)
    sst_filename   = shared.sub_date(shared.get_sst_filename(config), init_time=config['init_time'])
   
    if not os.path.exists(sst_local_dir):
        os.makedirs(sst_local_dir)
    
    if os.path.exists('%s/%s' %(sst_local_dir, sst_filename)):
        logger.info('*** SST ALREADY EXISTS LOCALLY, NOT DOWNLOADED ***')
        return
    
    lftpfilename = '%s/lftpscript' % working_dir
    logger.debug('Writing lftpscript to %s' % lftpfilename)
    lftpscript     = open(lftpfilename, 'w')    
    lftpscript.write('lcd %s\n' % sst_local_dir)    
    lftpscript.write('set ftp:proxy %s\n' % http_proxy) 
    lftpscript.write('set hftp:use-type no\n')
    lftpscript.write('open %s\n' % sst_server)
    lftpscript.write('get %s/%s\n' % (sst_server_dir,sst_filename))
    lftpscript.write('bye')
    lftpscript.close()
    
    cmd = '/usr/bin/lftp -f %s' % lftpfilename
    shared.run_cmd(cmd, config)
    # check if file downloaded

    if not os.path.exists('%s/%s' %(sst_local_dir, sst_filename)):
        raise IOError('SST file: %s not downloaded' % sst_filename)
    logger.info('*** SUCCESS SST DOWNLOADED ***')
Example #19
def run_metgrid(config):
    """ Runs metgrid.exe and checks output was sucessful
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    logger.info("\n*** RUNNING METGRID ***")
    
    queue          = config['queue']
    wps_run_dir    = config['wps_run_dir']
    log_file       = '%s/metgrid.log' % wps_run_dir
    bdy_conditions = config['bdy_conditions']
    namelist_wps   = config['namelist_wps']
    namelist       = shared.read_namelist(namelist_wps)
    
    met_em_dir     = shared.sub_date(config['met_em_dir'], config['init_time'])        
    
    #
    # vtable may be a dictionary to support running ungrib multiple
    # times. In which case, we need to put multiple prefixes into
    # the namelist.wps file
    #
    
    vtable = config['vtable']
    
    if type(vtable)==type({}):
        prefixes = vtable.keys()
    else:
        prefixes = [bdy_conditions]    

        
    namelist.update('fg_name', prefixes)
    namelist.update('opt_output_from_metgrid_path', met_em_dir, section='metgrid')
    if not config['sst']:
        namelist.remove('constants_name')
        
    namelist.to_file(namelist_wps)
    
    logger.debug('met_em_dir: %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        logger.debug('creating met_em_dir: %s ' % met_em_dir)
        os.makedirs(met_em_dir)

    os.chdir(wps_run_dir)
    cmd      =  "%s/metgrid.exe" % wps_run_dir
    
    shared.run(cmd, config, from_dir=wps_run_dir)

    cmd = 'grep "Successful completion" %s/metgrid.log*' % wps_run_dir
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('metgrid.exe did not complete')
    
    logger.info('*** SUCCESS METGRID ***\n')
Example #20
def run_real(config):
    """ Run real.exe and check output was sucessful
    Arguments:
    config -- dictionary containing various configuration options """
    
    logger = shared.get_logger()
    logger.info('\n*** RUNNING REAL ***')
    
    queue           = config['queue']
    working_dir   = config['working_dir']
    wrf_run_dir     = config['wrf_run_dir']
    wps_dir         = config['wps_dir']
    domain          = config['domain']
    model_run       = config['model_run']
    init_time       = config['init_time']
    log_file        = '%s/real.log' % wrf_run_dir


    # Log files from real appear in the current directory, 
    # so we need to change directory first.
    os.chdir(wrf_run_dir)
    cmd     =  "%s/real.exe" % wrf_run_dir
    shared.run(cmd, config, from_dir=wrf_run_dir)
    
    
    rsl = '%s/rsl.error.0000' % wrf_run_dir
    if not os.path.exists(rsl):
        raise IOError('No log file found for real.exe')

    # now copy rsl file to a log directory
    cmd = 'cp %s %s/rsl/rsl.error.%s.%s.%s' % (rsl, working_dir, domain, model_run, init_time.strftime('%y-%m-%d_%H') )
    shared.run_cmd(cmd, config)

    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)

    if ret != 0:
        raise IOError('real.exe did not complete')


    logger.info('*** SUCCESS REAL ***\n')
Example #21
def run_real(config):
    """ Run real.exe and check output was sucessful
    Arguments:
    config -- dictionary containing various configuration options """
    
    logger = shared.get_logger()
    logger.info('*** RUNNING REAL ***')
    
    queue           = config['queue']
    working_dir   = config['working_dir']
    wrf_run_dir     = config['wrf_run_dir']
    wps_dir         = config['wps_dir']
    domain          = config['domain']
    model_run       = config['model_run']
    init_time       = config['init_time']
    log_file        = '%s/real.log' % wrf_run_dir


    # Log files from real appear in the current directory, 
    # so we need to change directory first.
    os.chdir(wrf_run_dir)
    cmd     =  "%s/real.exe" % wrf_run_dir
    shared.run(cmd, config, wrf_run_dir)
    
    
    rsl = '%s/rsl.error.0000' % wrf_run_dir
    if not os.path.exists(rsl):
        raise IOError('No log file found for real.exe')

    # now copy rsl file to a log directory
    cmd = 'cp %s %s/rsl/rsl.error.%s.%s.%s' % (rsl, working_dir, domain, model_run, init_time.strftime('%y-%m-%d_%H') )
    shared.run_cmd(cmd, config)

    cmd = 'grep "SUCCESS COMPLETE" %s/rsl.error.0000' % wrf_run_dir
    ret = shared.run_cmd(cmd, config)

    if ret != 0:
        raise IOError('real.exe did not complete')


    logger.info('*** SUCCESS REAL ***')
Example #22
File: post.py Project: zhangylang/wrftools
def convert_grib(config):
    """Converts the grib1 outputs of UPP to grib2 format, mainly so the wgrib2 tool 
    can be used to extract csv time series from it.
    
    
    Should not rely on globbing directories here. Could have nasty consequences,
    e.g. conversion of too many files etc """

    logger = shared.get_logger()
    logger.debug('*** CONVERTING GRIB1 TO GRIB2 ***')
    domain_dir = config['domain_dir']
    model_run = config['model_run']
    init_time = config['init_time']
    dom = config['dom']

    f1 = '%s/%s/archive/wrfpost_d%02d_%s.grb' % (
        domain_dir, model_run, dom, init_time.strftime('%Y-%m-%d_%H'))
    f2 = f1.replace('.grb', '.grib2')
    cmd = 'cnvgrib -g12 %s %s' % (f1, f2)
    shared.run_cmd(cmd, config)
Example #23
File: build.py Project: jimmydo/cotton
    def build(self):
        """Generate support scripts.

        Should be run from within the root of the snapshot repo.

        """

        if os.path.exists(COTTON_BUILD_FILE):
            shared.run_cmd('./' + COTTON_BUILD_FILE)

        self._generate_deploy_files()
        build_repo_dir = self._build_repo.ensure()

        # rsync from snapshot repo to build repo
        rsync_command = 'rsync -av --delete . {0} --exclude=.git --exclude=env-vars'.format(
            build_repo_dir
        )
        shared.run_cmd(rsync_command)

        self._build_repo.commit()
Example #24
File: fetch.py Project: dchichkov/wrftools
def run_gribmaster(config):
    """Runs the gribmaster programme to download the most recent boundary conditions """
    logger = shared.get_logger()
    gm_dir = config['gm_dir']
    gm_transfer = config['gm_transfer']
    gm_dataset = config['gm_dataset']
    start = config['init_time']
    fcst_hours = config['fcst_hours']
    gm_log = config['gm_log']
    gm_sleep = config['gm_sleep']  # this is in minutes
    gm_max_attempts = int(config['gm_max_attempts'])

    log_dir = '/home/slha/forecasting'

    cmd = '%s/gribmaster --verbose --%s --dset %s --date %s --cycle %s --length %s > %s' % (
        gm_dir, gm_transfer, gm_dataset, start.strftime('%Y%m%d'),
        start.strftime('%H'), fcst_hours, gm_log)

    for attempt in range(gm_max_attempts):
        logger.info('*** RUNNING GRIBMASTER, attempt %s ***' % (attempt + 1))
        shared.run_cmd(cmd, config)

        cmd = 'grep "BUMMER" %s' % gm_log  # check for failure
        ret = subprocess.call(cmd, shell=True)
        # if we positively find the string BUMMER, we know we have failed
        if ret == 0:
            logger.error('*** FAIL GRIBMASTER: Attempt %d of %d ***' %
                         (attempt + 1, gm_max_attempts))
            logger.info('Sleeping for %s minutes' % gm_sleep)
            time.sleep(gm_sleep * 60)

        # else we check for definite success
        else:
            cmd = 'grep "ENJOY" %s' % gm_log  # check for success
            ret = subprocess.call(cmd, shell=True)
            if ret == 0:
                logger.info('*** SUCCESS GRIBMASTER ***')
                return

    raise IOError('gribmaster did not find files after %d attempts' %
                  gm_max_attempts)
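
The retry loop hinges on two log markers; a condensed sketch of that check as a standalone helper (marker strings taken from the code above):

import subprocess

def gribmaster_status(log_file):
    # Return False if the failure marker is present, True on the success
    # marker, None if neither has appeared yet. Sketch of the grep logic above.
    if subprocess.call('grep "BUMMER" %s' % log_file, shell=True) == 0:
        return False
    if subprocess.call('grep "ENJOY" %s' % log_file, shell=True) == 0:
        return True
    return None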
Example #25
File: fetch.py Project: qingu/wrftools
def run_gribmaster(config):
    """Runs the gribmaster programme to download the most recent boundary conditions """
    logger      = shared.get_logger()
    gm_dir      = config['gm_dir']
    gm_transfer = config['gm_transfer']
    gm_dataset  = config['gm_dataset']
    start       = config['init_time']
    fcst_hours  = config['fcst_hours']
    gm_log      = config['gm_log']
    gm_sleep    = config['gm_sleep'] # this is in minutes
    gm_max_attempts = int(config['gm_max_attempts'])

    log_dir = '/home/slha/forecasting'
       
    cmd     = '%s/gribmaster --verbose --%s --dset %s --date %s --cycle %s --length %s > %s' %(gm_dir, gm_transfer, gm_dataset, start.strftime('%Y%m%d'), start.strftime('%H'), fcst_hours, gm_log )

    for attempt in range(gm_max_attempts):
        logger.info('*** RUNNING GRIBMASTER, attempt %s ***' % (attempt+1))
        shared.run_cmd(cmd, config)
        
        cmd = 'grep "BUMMER" %s' % gm_log # check for failure
        ret = subprocess.call(cmd, shell=True)
        # if we positively find the string BUMMER, we know we have failed
        if ret == 0:
            logger.error('*** FAIL GRIBMASTER: Attempt %d of %d ***' % (attempt+1, gm_max_attempts))
            logger.info('Sleeping for %s minutes' % gm_sleep) 
            time.sleep(gm_sleep*60)
        
        # else we check for definite success
        else:
            cmd = 'grep "ENJOY" %s' % gm_log # check for success
            ret = subprocess.call(cmd, shell=True)
            if ret == 0:
                logger.info('*** SUCCESS GRIBMASTER ***')
                return
        
        
    raise IOError('gribmaster did not find files after %d attempts' % gm_max_attempts)
Example #26
File: post.py Project: keenmisty/wrftools
def convert_grib(config):
    """Converts the grib1 outputs of UPP to grib2 format, mainly so the wgrib2 tool 
    can be used to extract csv time series from it.
    
    
    Should not rely on globbing directories here. Could have nasty consequences,
    e.g. conversion of too many files etc """

    logger = shared.get_logger()
    logger.debug('*** CONVERTING GRIB1 TO GRIB2 ***')
    domain_dir = config['domain_dir']
    model_run  = config['model_run']
    init_time  = config['init_time']
    dom        = config['dom']
    
    f1 = '%s/%s/archive/wrfpost_d%02d_%s.grb' % (domain_dir, model_run, dom, init_time.strftime('%Y-%m-%d_%H'))
    f2 =  f1.replace('.grb', '.grib2')
    cmd = 'cnvgrib -g12 %s %s' %(f1, f2)
    shared.run_cmd(cmd, config)
Example #27
def prepare_wrf(config):
    """Checks that met_em files exist, and links into WRF/run directory. 
    
    Arguments:
    config -- a dictionary containing forecast options

    """
    logger = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WRF ***')
    
    met_em_format      = "%Y-%m-%d_%H:%M:%S"
    

    max_dom      = config['max_dom']
    domains      = range(1,max_dom+1)
    init_time    = config['init_time']
    fcst_hours   = config['fcst_hours']
    bdy_interval = config['bdy_interval']
    bdy_times    = shared.get_bdy_times(config)
    met_em_dir   = shared.sub_date(config['met_em_dir'], init_time=init_time)
    met_em_files = ['%s/met_em.d%02d.%s.nc' % (met_em_dir,d, t.strftime(met_em_format)) for d in domains for t in bdy_times] 
    wrf_run_dir    = config['wrf_run_dir']
    namelist_run   = '%s/namelist.input' % wrf_run_dir
    namelist_input = config['namelist_input']
    
    
    logger.debug('linking met_em files:')
    
    #
    # Link met_em files. There are two options for error handling here.
    # The first is to abort if any of the met_em files are missing.
    # The second is just to run wrf and see how far it gets before
    # running out of files. This will allow a partial forecast to run, 
    # even if later files are missing.
    #    
    # To use the first approach, raise an exception when a missing
    # file is encountered, otherwise just print a warning message.
    #
    # Actually, the two are equivalent so long as the met_em files 
    # are sorted.
    #
    for f in met_em_files:
        if not os.path.exists(f):
            raise IOError('met_em file missing : %s' % f)
        cmd = 'ln -sf %s %s/' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)
    
    
    logger.debug('linking namelist.input to wrf_run_dir')
    cmd = 'rm -f %s' % namelist_run
    shared.run_cmd(cmd, config)
    cmd = 'ln -sf %s %s' % (namelist_input, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('*** FINISHED PREPARING FILES FOR WRF ***')
Example #28
def prepare_wrf(config):
    """Checks that met_em files exist, and links into WRF/run directory. 
    
    Arguments:
    config -- a dictionary containing forecast options

    """
    logger = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WRF ***')

    met_em_format = "%Y-%m-%d_%H:%M:%S"

    max_dom = config['max_dom']
    domains = range(1, max_dom + 1)
    init_time = config['init_time']
    fcst_hours = config['fcst_hours']
    bdy_interval = config['bdy_interval']
    bdy_times = shared.get_bdy_times(config)
    met_em_dir = shared.sub_date(config['met_em_dir'], init_time=init_time)
    met_em_files = [
        '%s/met_em.d%02d.%s.nc' % (met_em_dir, d, t.strftime(met_em_format))
        for d in domains for t in bdy_times
    ]
    wrf_run_dir = config['wrf_run_dir']
    namelist_run = '%s/namelist.input' % wrf_run_dir
    namelist_input = config['namelist_input']

    logger.debug('linking met_em files:')

    #
    # Link met_em files. There are two options for error handling here.
    # The first is to abort if any of the met_em files are missing.
    # The second is just to run wrf and see how far it gets before
    # running out of files. This will allow a partial forecast to run,
    # even if later files are missing.
    #
    # To use the first approach, raise an exception when a missing
    # file is encountered, otherwise just print a warning message.
    #
    # Actually, the two are equivalent so long as the met_em files
    # are sorted.
    #
    for f in met_em_files:
        if not os.path.exists(f):
            raise IOError('met_em file missing : %s' % f)
        cmd = 'ln -sf %s %s/' % (f, wrf_run_dir)
        shared.run_cmd(cmd, config)

    logger.debug('linking namelist.input to wrf_run_dir')
    cmd = 'rm -f %s' % namelist_run
    shared.run_cmd(cmd, config)
    cmd = 'ln -sf %s %s' % (namelist_input, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('*** FINISHED PREPARING FILES FOR WRF ***')
Example #29
File: pktgen.py Project: crazyideas21/dev
def send(pkt_count=56, pkt_size=1400, delay=0, flow_count=1):
    """
    Sends packets. Returns a Popen handle.
    
    """        
    f = open('../script/pktgen_wrapper_template.sh')
    pktgen_script = f.read()
    f.close()

    # Replace the place-holders in pktgen_wrapper.sh with actual parameters.
    
    replacement_dict = {'[PKTGEN_PROC]': config.pktgen_proc,
                        '[PKTGEN_IFACE]': config.pktgen_iface,
                        '[PKT_COUNT]': str(pkt_count),
                        '[PKT_SIZE]': str(pkt_size),
                        '[DELAY]': str(delay),
                        '[MAX_PORT]': str((flow_count + 10000)),
                        '[SRC_IP]': config.source_ip_fake,
                        '[DST_IP]': config.dest_ip,
                        '[DST_MAC]': config.dest_mac
                        }
    pktgen_script = _replace_string_with_dict(pktgen_script, replacement_dict)
    
    f = open('/tmp/pktgen_wrapper.sh', 'w')
    f.write(pktgen_script)
    f.close()

    # Copy the file to pktgen host's tmp.
    
    p = shared.run_cmd('scp -q /tmp/pktgen_wrapper.sh ',
                       'root@', config.pktgen_host, ':/tmp; ',
                       'rm /tmp/pktgen_wrapper.sh')
    p.wait()

    # Execute the script remotely.

    state.pktgen_run_time = state.pktgen_sent_pkt_count = None
    
    return shared.run_ssh('chmod +x /tmp/pktgen_wrapper.sh; ', 
                          '/tmp/pktgen_wrapper.sh', 
                          hostname=config.pktgen_host)
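
_replace_string_with_dict is not shown in these examples; a minimal sketch consistent with how send() uses it:

def _replace_string_with_dict(text, replacement_dict):
    # Substitute each '[PLACEHOLDER]' key in text with its value.
    # Sketch only; the project's helper may differ.
    for key, value in replacement_dict.items():
        text = text.replace(key, value)
    return text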
Example #30
def move_wrfout_files(config):
    """ Moves output files from run directory to wrfout 
    director"""
    logger = shared.get_logger()
    logger.debug('\n*** MOVING WRFOUT FILES AND NAMELIST SETTINGS ***')

    domain = config['domain']
    model_run = config['model_run']
    working_dir = config['working_dir']
    wrf_run_dir = config['wrf_run_dir']
    init_time = config['init_time']
    init_str = init_time.strftime('%Y-%m-%d_%H')

    namelist_input = config['namelist_input']
    namelist_wps = config['namelist_wps']

    wrfout_dir = '%s/wrfout' % (working_dir)
    log_dir = '%s/log' % working_dir
    rsl_dir = '%s/rsl' % working_dir
    namelist_dir = '%s/namelist' % working_dir
    run_key = '%s.%s' % (domain, model_run)  # composite key

    logger.debug('Moving wrfout files from %s ----> %s' %
                 (wrf_run_dir, wrfout_dir))

    # Move WRF output files to new directory
    flist = glob.glob(wrf_run_dir + '/wrfout*')
    shared.transfer(flist, wrfout_dir, mode='move', debug_level='debug')

    # Move log files to new directory
    #flist = glob.glob(wrf_run_dir+'/rsl.*')
    #transfer(flist, rsl_dir, mode='move', debug_level='debug')

    cmd = 'cp %s %s/namelist.input.%s.%s' % (namelist_input, namelist_dir,
                                             run_key, init_str)
    shared.run_cmd(cmd, config)

    cmd = 'cp %s/namelist.wps %s/namelist.wps.%s.%s' % (
        working_dir, namelist_dir, run_key, init_str)
    shared.run_cmd(cmd, config)

    #
    # Archive log files
    #
    logger.debug('moving rsl files ----> %s' % rsl_dir)
    cmd = 'cp %s/rsl.out.0000 %s/rsl.out.%s' % (wrf_run_dir, rsl_dir, run_key)
    shared.run_cmd(cmd, config)
    logger.debug("*** FINISHED MOVING WRFOUT FILES ***")
Example #31
def finalise(config):
    """Removes files, transfers etc."""

    logger = shared.get_logger()

    logger.info('*** FINALISING ***')
    
    working_dir    = config['working_dir']
    
    
    links       = [shared.expand(x, config) for x in config['finalise.link']]
    remove      = [shared.expand(x, config) for x in config['finalise.remove']]
    subdirs     = [shared.expand(x, config) for x in config['finalise.create']]
    copy        = [shared.expand(x, config) for x in config['finalise.copy']]
    move        = [shared.expand(x, config) for x in config['finalise.move']]
    run         = [shared.expand(x, config) for x in config['finalise.run']]
    
        
    fulldirs  = subdirs 
    for d in fulldirs:
        if not os.path.exists(d):
            logger.debug('creating directory %s ' % d)
            os.mkdir(d) 
   
    
    for arg in move:
        cmd = "mv %s" % arg
        shared.run_cmd(cmd, config)

    for arg in copy:
        cmd = "cp %s" % arg
        shared.run_cmd(cmd, config)        
        
    for pattern in links:
        shared.link(pattern)
    
    for cmd in run:
        shared.run_cmd(cmd, config)
    
    for pattern in remove:
        flist = glob.glob(pattern)
        for f in flist:
            if os.path.exists(f):
                os.remove(f)
    
    logger.info('*** DONE FINALISE ***')    
Example #32
def move_wrfout_files(config):
    """ Moves output files from run directory to wrfout 
    director"""
    logger = shared.get_logger()
    logger.debug('\n*** MOVING WRFOUT FILES AND NAMELIST SETTINGS ***')
    
    domain        = config['domain']
    model_run     = config['model_run']
    working_dir = config['working_dir']
    wrf_run_dir   = config['wrf_run_dir']
    init_time     = config['init_time']
    init_str      = init_time.strftime('%Y-%m-%d_%H')

    namelist_input = config['namelist_input']
    namelist_wps   = config['namelist_wps']
    
    wrfout_dir    = '%s/wrfout'   %(working_dir)
    log_dir       = '%s/log'      % working_dir    
    rsl_dir       = '%s/rsl'      % working_dir
    namelist_dir  = '%s/namelist' % working_dir
    run_key       = '%s.%s'       %(domain, model_run)    # composite key  

    logger.debug('Moving wrfout files from %s ----> %s' %(wrf_run_dir, wrfout_dir) )

    # Move WRF output files to new directory
    flist = glob.glob(wrf_run_dir+'/wrfout*')
    shared.transfer(flist, wrfout_dir, mode='move', debug_level='debug')

    # Move log files to new directory
    #flist = glob.glob(wrf_run_dir+'/rsl.*')
    #transfer(flist, rsl_dir, mode='move', debug_level='debug')

    cmd = 'cp %s %s/namelist.input.%s.%s' % (namelist_input, namelist_dir, run_key, init_str)
    shared.run_cmd(cmd, config)
    
    cmd = 'cp %s/namelist.wps %s/namelist.wps.%s.%s' % (working_dir, namelist_dir, run_key, init_str)
    shared.run_cmd(cmd, config)


    #
    # Archive log files
    # 
    logger.debug('moving rsl files ----> %s' % rsl_dir )
    cmd = 'cp %s/rsl.out.0000 %s/rsl.out.%s' %(wrf_run_dir, rsl_dir, run_key)
    shared.run_cmd(cmd, config)
    logger.debug("*** FINISHED MOVING WRFOUT FILES ***")
Example #33
def run_ndown(config):
    logger = shared.get_logger()
    logger.info('*** RUNNING NDOWN ***')

    wrf_run_dir = config['wrf_run_dir']
    queue = config['queue']
    log_file = '%s/ndown.log' % wrf_run_dir

    cmd = '%s/ndown.exe' % wrf_run_dir

    nprocs = config['num_procs']
    poll_interval = config['poll_interval']
    logger.debug(poll_interval)
    logger.debug(nprocs)
    logger.debug(nprocs['ndown.exe'])

    shared.run(cmd, config, wrf_run_dir)

    cmd = 'grep "Successful completion" %s' % log_file  # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('ndown.exe did not complete')

    logger.info('*** SUCCESS NDOWN ***')
Example #34
def run_ndown(config):
    logger = shared.get_logger()
    logger.info('*** RUNNING NDOWN ***')
    
    wrf_run_dir = config['wrf_run_dir']
    queue       = config['queue']
    log_file    = '%s/ndown.log' % wrf_run_dir
    
    cmd = '%s/ndown.exe' % wrf_run_dir
    
    nprocs = config['num_procs']
    poll_interval = config['poll_interval']
    logger.debug(poll_interval)
    logger.debug(nprocs)
    logger.debug(nprocs['ndown.exe'])
    
    shared.run(cmd, config, wrf_run_dir)
        
    cmd = 'grep "Successful completion" %s' % log_file # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('ndown.exe did not complete')
    
    logger.info('*** SUCCESS NDOWN ***')
Example #35
def finalise(config):
    """Removes files, transfers etc."""

    logger = shared.get_logger()

    logger.info('*** FINALISING ***')

    working_dir = config['working_dir']

    links = [shared.expand(x, config) for x in config['finalise.link']]
    remove = [shared.expand(x, config) for x in config['finalise.remove']]
    subdirs = [shared.expand(x, config) for x in config['finalise.create']]
    copy = [shared.expand(x, config) for x in config['finalise.copy']]
    move = [shared.expand(x, config) for x in config['finalise.move']]
    run = [shared.expand(x, config) for x in config['finalise.run']]

    fulldirs = subdirs
    for d in fulldirs:
        if not os.path.exists(d):
            logger.debug('creating directory %s ' % d)
            os.mkdir(d)

    for arg in move:
        cmd = "mv %s" % arg
        shared.run_cmd(cmd, config)

    for arg in copy:
        cmd = "cp %s" % arg
        shared.run_cmd(cmd, config)

    for pattern in links:
        shared.link(pattern)

    for cmd in run:
        shared.run_cmd(cmd, config)

    for pattern in remove:
        flist = glob.glob(pattern)
        for f in flist:
            if os.path.exists(f):
                os.remove(f)

    logger.info('*** DONE FINALISE ***')
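
A hypothetical config fragment exercising each finalise.* list consumed above; the placeholder syntax handled by shared.expand is assumed to be %(key)s-style and may differ in the real wrftools:

config = {
    'working_dir': '/forecasts/run1',
    'finalise.create': ['%(working_dir)s/archive'],                              # directories to make
    'finalise.move':   ['%(working_dir)s/wrfout_* %(working_dir)s/archive'],     # arguments to mv
    'finalise.copy':   ['%(working_dir)s/namelist.input %(working_dir)s/archive'],  # arguments to cp
    'finalise.link':   [],                                                       # patterns to link
    'finalise.run':    ['echo run %(working_dir)s complete'],                    # arbitrary commands
    'finalise.remove': ['%(working_dir)s/met_em*'],                              # glob patterns to delete
}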
Example #36
def ungrib_sst(config):
    """ Runs ungrib.exe for SST fields, makes and modifies a copy of namelist.wps,
    then restores the original namelist.wps"""
    logger = shared.get_logger()
    
    wps_dir      = config['wps_dir']
    wps_run_dir  = config['wps_run_dir']
    tmp_dir      = config['tmp_dir']
    working_dir  = config['working_dir']
    init_time    = config['init_time']
    max_dom      = config['max_dom']
    sst_local_dir = config['sst_local_dir']
    sst_time     = shared.get_sst_time(config)
    sst_filename = shared.get_sst_filename(config)
    vtable_sst   = config['sst_vtable']
    vtable       = wps_run_dir+'/Vtable'
    queue        = config['queue']
    log_file     = '%s/ungrib.sst.log' % wps_run_dir
    namelist_wps  = config['namelist_wps']
    namelist_sst  = '%s/namelist.sst' % working_dir

    namelist      = shared.read_namelist(namelist_wps)

    #
    # update one line to point to the new SST field
    # ungrib.exe will name SST field as e.g.
    # SST:2013-04-24_00
    #
    constants_name = '%s/SST:%s' %(wps_run_dir, sst_time.strftime('%Y-%m-%d_%H'))
    logger.debug('Updating constants_name ----> %s' % constants_name)
    namelist.update('constants_name', constants_name, section='metgrid')

    # Write the changes into the original
    namelist.to_file(namelist_wps)

    #
    # Update start and end time to process SST
    #
    start_str  = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    end_str    = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    logger.debug("Updating namelist.sst")
    logger.debug('PREFIX ------> SST')
    logger.debug('start_date---> ' +start_str)
    logger.debug('end_date-----> '+ end_str)

    namelist.update('prefix', 'SST')
    namelist.update('start_date', [start_str]*max_dom)
    namelist.update('end_date',   [end_str]*max_dom)
    logger.debug('writing modified namelist.sst to file -------> %s' % namelist_sst)
    namelist.to_file(namelist_sst)

    #remove any linked namelist.wps 
    logger.debug('removing namelist.wps')
    namelist_run = '%s/namelist.wps' % wps_run_dir
    if os.path.exists(namelist_run):
        os.remove(namelist_run)

    # link namelist.sst to namelist.wps in WPS run dir
    logger.debug('linking namelist.sst -----> namelist.wps')
    cmd = 'ln -sf %s %s' %(namelist_sst, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('removing Vtable')
    if os.path.exists(vtable):
        os.remove(vtable)
    logger.debug('linking Vtable.SST ----> Vtable')
    cmd = 'ln -sf %s %s' %(vtable_sst, vtable)
    shared.run_cmd(cmd, config)

    # run link_grib to link SST gribs files
    logger.debug('Linking SST GRIB files')
    cmd = '%s/link_grib.csh %s/%s' %(wps_dir, sst_local_dir, sst_filename)
    shared.run_cmd(cmd, config)


    logger.info('\n*** RUNNING UNGRIB FOR SST ***')
    cmd = '%s/ungrib.exe' % wps_run_dir
    shared.run_cmd(cmd, config)

    cmd = 'grep "Successful completion" ./ungrib.log*' # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('Ungrib failed for SST')
    
    logger.info('*** SUCCESS UNGRIB SST ***\n')
    logger.debug('Removing namelist.wps')
    if os.path.exists(namelist_run): 
        os.remove(namelist_run)
    # link in original (unmodified) namelist.wps
    cmd = 'ln -sf %s %s' %(namelist_wps, namelist_run)    
    shared.run_cmd(cmd, config)
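
Roughly, the temporary namelist.sst written above differs from the original namelist.wps in these entries (illustrative values for a two-domain run and a 2013-04-24 00Z SST field; path hypothetical, assuming the standard WPS sections):

&share
 start_date = '2013-04-24_00:00:00', '2013-04-24_00:00:00',
 end_date   = '2013-04-24_00:00:00', '2013-04-24_00:00:00',
/
&ungrib
 prefix = 'SST',
/
&metgrid
 constants_name = '/path/to/wps_run_dir/SST:2013-04-24_00',
/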
Example #37
def run_ungrib(config):
    """ Runs ungrib.exe and checks output was sucessfull
    If vtable and gbr_input_fmt are NOT dictionaries, 
    then dictionarius will be constructed from them using 
    the key bdy_conditions from the metadata
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger = shared.get_logger()
    wps_dir = config['wps_dir']
    wps_run_dir = config['wps_run_dir']
    namelist_wps = config['namelist_wps']
    working_dir = config['working_dir']
    met_em_dir = config['met_em_dir']
    init_time = config['init_time']
    log_file = '%s/ungrib.log' % wps_run_dir
    vtable = config['vtable']
    grb_input_fmt = config['grb_input_fmt']
    grb_input_delay = config.get(
        "grb_input_delay")  # this allows None to be returned

    bdy_conditions = config['bdy_conditions']

    logger.info("\n*** RUNNING UNGRIB ***")

    namelist = shared.read_namelist(namelist_wps)

    bdy_times = shared.get_bdy_times(config)

    if type(grb_input_fmt) != type({}):
        grb_input_fmt = {bdy_conditions: grb_input_fmt}

    if type(vtable) != type({}):
        vtable = {bdy_conditions: vtable}

    #
    # Check that boundary conditions exist
    #
    for key in vtable.keys():

        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key] * 60 * 60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times

        fmt = grb_input_fmt[key]
        #
        # Generate filelist based on the initial time, and the forecast hour
        #
        filelist = list(
            OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exists')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' % f)

    logger.debug('all boundary conditions files exist')

    #
    # Now process boundary conditions
    #
    for key in vtable.keys():

        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key] * 60 * 60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times

        fmt = grb_input_fmt[key]
        #
        # Generate filelist based on the initial time, and the forecast hour
        #
        filelist = list(
            OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        logger.debug(
            'running link_grib.csh script to link grib files to GRIBFILE.AAA etc'
        )

        os.chdir(wps_run_dir)
        args = ' '.join(filelist)
        cmd = '%s/link_grib.csh %s' % (wps_run_dir, args)
        shared.run_cmd(cmd, config)

        vtab_path = vtable[key]
        namelist.update('prefix', key)
        namelist.to_file(namelist_wps)
        link_namelist_wps(config)
        vtab_wps = wps_run_dir + '/Vtable'

        if os.path.exists(vtab_wps):
            os.remove(vtab_wps)
        cmd = 'ln -sf %s %s' % (vtab_path, vtab_wps)
        logger.debug(cmd)
        subprocess.call(cmd, shell=True)
        #logger.debug("changing directory to %s" % wps_run_dir)
        #os.chdir(wps_run_dir)
        cmd = '%s/ungrib.exe' % wps_run_dir

        logger.debug(cmd)
        shared.run(cmd, config, wps_run_dir)

        cmd = 'grep "Successful completion" %s/ungrib.log*' % wps_run_dir  # check for success
        ret = shared.run_cmd(cmd, config)
        if ret != 0:
            raise IOError('ungrib.exe did not complete')

    logger.info('*** SUCCESS UNGRIB ***\n')
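
Two small transformations carry most of the logic in run_ungrib: wrapping scalar config values into one-entry dicts, and shifting boundary times back when input files arrive with a lag. A standalone sketch of both (function names are ours, for illustration only):

import datetime

def normalise_bdy_config(vtable, grb_input_fmt, bdy_conditions):
    # Scalars become one-entry dicts keyed by bdy_conditions, so the
    # rest of run_ungrib can always iterate over vtable.keys().
    if not isinstance(grb_input_fmt, dict):
        grb_input_fmt = {bdy_conditions: grb_input_fmt}
    if not isinstance(vtable, dict):
        vtable = {bdy_conditions: vtable}
    return vtable, grb_input_fmt

def apply_delay(bdy_times, delay_hours):
    # Shift each boundary time back by delay_hours, e.g. to request an
    # earlier model cycle when input GRIB files are published with a lag.
    delay = datetime.timedelta(hours=delay_hours)
    return [t - delay for t in bdy_times]

# e.g. apply_delay([datetime.datetime(2013, 4, 24, 6)], 6)
#      -> [datetime.datetime(2013, 4, 24, 0)]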
Example #38
0
def ungrib_sst(config):
    """ Runs ungrib.exe for SST fields, makes and modifies a copy of namelist.wps,
    then restores the original namelist.wps"""
    logger = shared.get_logger()

    wps_dir = config['wps_dir']
    wps_run_dir = config['wps_run_dir']
    tmp_dir = config['tmp_dir']
    working_dir = config['working_dir']
    init_time = config['init_time']
    max_dom = config['max_dom']
    sst_local_dir = config['sst_local_dir']
    sst_time = shared.get_sst_time(config)
    sst_filename = shared.get_sst_filename(config)
    vtable_sst = config['sst_vtable']
    vtable = wps_run_dir + '/Vtable'
    queue = config['queue']
    log_file = '%s/ungrib.sst.log' % wps_run_dir
    namelist_wps = config['namelist_wps']
    namelist_sst = '%s/namelist.sst' % working_dir

    namelist = shared.read_namelist(namelist_wps)

    #
    # update one line to point to the new SST field
    # ungrib.exe will name SST field as e.g.
    # SST:2013-04-24_00
    #
    constants_name = '%s/SST:%s' % (wps_run_dir,
                                    sst_time.strftime('%Y-%m-%d_%H'))
    logger.debug('Updating constants_name ----> %s' % constants_name)
    namelist.update('constants_name', constants_name, section='metgrid')

    # Write the changes into the original
    namelist.to_file(namelist_wps)

    #
    # Update start and end time to process SST
    #
    start_str = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    end_str = sst_time.strftime("%Y-%m-%d_%H:%M:%S")
    logger.debug("Updating namelist.sst")
    logger.debug('PREFIX ------> SST')
    logger.debug('start_date---> ' + start_str)
    logger.debug('end_date-----> ' + end_str)

    namelist.update('prefix', 'SST')
    namelist.update('start_date', [start_str] * max_dom)
    namelist.update('end_date', [end_str] * max_dom)
    logger.debug('writing modified namelist.sst to file -------> %s' %
                 namelist_sst)
    namelist.to_file(namelist_sst)

    # remove any linked namelist.wps
    logger.debug('removing namelist.wps')
    namelist_run = '%s/namelist.wps' % wps_run_dir
    if os.path.exists(namelist_run):
        os.remove(namelist_run)

    # link namelist.sst to namelist.wps in WPS run dir
    logger.debug('linking namelist.sst -----> namelist.wps')
    cmd = 'ln -sf %s %s' % (namelist_sst, namelist_run)
    shared.run_cmd(cmd, config)

    logger.debug('removing Vtable')
    if os.path.exists(vtable):
        os.remove(vtable)
    logger.debug('linking Vtable.SST ----> Vtable')
    cmd = 'ln -sf %s %s' % (vtable_sst, vtable)
    shared.run_cmd(cmd, config)

    # run link_grib to link SST GRIB files
    logger.debug('Linking SST GRIB files')
    cmd = '%s/link_grib.csh %s/%s' % (wps_dir, sst_local_dir, sst_filename)
    shared.run_cmd(cmd, config)

    logger.info('\n*** RUNNING UNGRIB FOR SST ***')
    cmd = '%s/ungrib.exe' % wps_run_dir
    shared.run_cmd(cmd, config)

    cmd = 'grep "Successful completion" ./ungrib.log*'  # check for success
    ret = shared.run_cmd(cmd, config)
    if ret != 0:
        raise IOError('Ungrib failed for SST')

    logger.info('*** SUCCESS UNGRIB SST ***\n')
    logger.debug('Removing namelist.wps')
    if os.path.exists(namelist_run):
        os.remove(namelist_run)
    # link in original (unmodified) namelist.wps
    cmd = 'ln -sf %s %s' % (namelist_wps, namelist_run)
    shared.run_cmd(cmd, config)
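
ungrib_sst performs the same remove-then-relink step several times by shelling out to ln -sf. The step can also be done natively; a minimal sketch (the helper name relink is ours, not wrftools'):

import os

def relink(target, link):
    # Remove any existing file or symlink at `link`, then create a fresh
    # symlink pointing at `target` -- the native equivalent of `ln -sf`.
    if os.path.lexists(link):
        os.remove(link)
    os.symlink(target, link)

# e.g. relink(namelist_sst, namelist_run) before running ungrib, then
#      relink(namelist_wps, namelist_run) to restore the original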
Example #39
0
def run_ungrib(config):
    """ Runs ungrib.exe and checks output was sucessfull
    If vtable and gbr_input_fmt are NOT dictionaries, 
    then dictionarius will be constructed from them using 
    the key bdy_conditions from the metadata
    
    Arguments:
    config -- dictionary specifying configuration options
    
    """
    logger        = shared.get_logger()
    wps_dir       = config['wps_dir']
    wps_run_dir   = config['wps_run_dir']
    namelist_wps  = config['namelist_wps']
    working_dir   = config['working_dir']    
    met_em_dir    = config['met_em_dir']
    init_time     = config['init_time']
    log_file      = '%s/ungrib.log' % wps_run_dir
    vtable        = config['vtable']
    grb_input_fmt  = config['grb_input_fmt']
    grb_input_delay = config.get("grb_input_delay")  # this allows None to be returned 
    
    bdy_conditions = config['bdy_conditions']

    logger.info("\n*** RUNNING UNGRIB ***")
    
    namelist = shared.read_namelist(namelist_wps)
    
    bdy_times     = shared.get_bdy_times(config)

    if not isinstance(grb_input_fmt, dict):
        grb_input_fmt = {bdy_conditions: grb_input_fmt}

    if not isinstance(vtable, dict):
        vtable = {bdy_conditions: vtable}


    #
    # Check that boundary conditions exist
    #     
    for key in vtable.keys():

        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key]*60*60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times
        
        fmt = grb_input_fmt[key]
        #
        # Generate filelist based on the initial time, and the forecast hour
        #        
        filelist = list(OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exist')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' % f)

    logger.debug('all boundary conditions files exist')
    
    #
    # Now process boundary conditions
    #
    for key in vtable.keys():

        if grb_input_delay and key in grb_input_delay:
            logger.debug("applying delay")
            delay = datetime.timedelta(0, grb_input_delay[key]*60*60)
            new_bdy_times = [b - delay for b in bdy_times]
        else:
            logger.debug("no delay applied")
            new_bdy_times = bdy_times
        
        fmt = grb_input_fmt[key]
        #
        # Generate filelist based on the initial time, and the forecast hour
        #        
        filelist = list(OrderedDict.fromkeys(shared.get_bdy_filenames(fmt, new_bdy_times)))

        
        logger.debug('running link_grib.csh script to link grib files to GRIBFILE.AAA etc')
        
        os.chdir(wps_run_dir)
        args = ' '.join(filelist)
        cmd = '%s/link_grib.csh %s' % (wps_run_dir, args)
        shared.run_cmd(cmd, config)
  
        vtab_path = vtable[key]
        namelist.update('prefix', key)
        namelist.to_file(namelist_wps)
        link_namelist_wps(config)
        vtab_wps = wps_run_dir + '/Vtable'

        if os.path.exists(vtab_wps):
            os.remove(vtab_wps)
        cmd = 'ln -sf %s %s' %(vtab_path, vtab_wps)
        logger.debug(cmd)
        subprocess.call(cmd, shell=True)    
        #logger.debug("changing directory to %s" % wps_run_dir)
        #os.chdir(wps_run_dir)
        cmd = '%s/ungrib.exe' % wps_run_dir
        
        logger.debug(cmd)
        shared.run(cmd, config, wps_run_dir)

        cmd = 'grep "Successful completion" %s/ungrib.log*' % wps_run_dir # check for success
        ret =shared.run_cmd(cmd,config)
        if ret!=0:
            raise IOError('ungrib.exe did not complete')
    
    logger.info('*** SUCESS UNGRIB ***\n')
Example #40
0
File: build.py Project: jimmydo/cotton
def _create_supervisor_configs(self, process_types):
    SUPERVISOR_DIR = 'supervisor'
    shared.run_cmd('mkdir -p {0}'.format(SUPERVISOR_DIR))
    with shared.chdir(SUPERVISOR_DIR):
        for process_type in process_types:
            self._create_supervisor_config(process_type)
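
shared.chdir is used above as a context manager; its implementation is not shown on this page, but a minimal sketch of such a helper might look like this (an assumption, not the cotton source):

import contextlib
import os

@contextlib.contextmanager
def chdir(path):
    # Enter `path` for the duration of the with-block, then restore the
    # previous working directory even if the block raises.
    prev = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(prev)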
Example #41
0
def produce_ncl_ol_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    Updated: Some plots are much easier to produce using the original
    wrfout netCDF files, rather than the UPP post-processed grib files.
    However, in future we should stick with one or the other.
    
    
    Arguments:
    config -- dictionary containing various configuration options """

    logger = shared.get_logger()
    logger.info('*** RUNNING NCL SCRIPTS ***')

    working_dir = config['working_dir']

    ncl_code_dir = config['ncl_code_dir']
    ncl_files = config['ncl_ol_code']
    #ncl_code       = ['%s/%s' % (ncl_code_dir, f) for f in ncl_files]
    ncl_code = ncl_files
    ncl_log = config['ncl_log']
    wrftools_dir = config['wrftools_dir']
    wrfout_dir = config['wrfout_dir']
    init_time = config['init_time']
    dom = config['dom']
    fcst_file = '%s/wrfout_d%02d_%s:00:00.nc' % (
        wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_out_dir = shared.sub_date(config['ncl_ol_out_dir'],
                                  init_time=init_time)
    ncl_out_type = config['ncl_out_type']
    nest_id = '%02d' % dom

    ncl_in_file = fcst_file
    ncl_loc_file = config['locations_file']
    ncl_out_dir = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type = config['ncl_out_type']

    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    #
    # Communicate to NCL via environment variables
    # NCL expects the following to be set
    #File    = getenv("FCST_FILE")
    #type    = getenv("NCL_OUT_TYPE")
    #diro    = getenv("NCL_OUT_DIR")
    #;web_dir = getenv("WEB_DIR")
    #domain  = getenv("NEST_ID")
    #run_hour = getenv("RUN_HOUR")

    #
    # Try escaping : in fcst_file
    #
    #fcst_file = fcst_file.replace(':', r'\:')
    #os.environ['FCST_FILE']      = fcst_file
    #os.environ['NCL_OUT_DIR']    = ncl_out_dir
    #os.environ['NCL_OUT_TYPE']   = ncl_out_type
    #os.environ['NEST_ID']        = nest_id
    #os.environ['DOMAIN']         = domain
    #os.environ['MODEL_RUN']      = model_run

    #logger.debug('Setting environment variables')
    logger.debug('FCST_FILE    ----> %s' % fcst_file)
    logger.debug('NCL_OUT_DIR  ----> %s' % ncl_out_dir)
    logger.debug('NCL_OUT_TYPE ----> %s' % ncl_out_type)
    logger.debug('NEST_ID      ----> %s' % nest_id)
    logger.debug('PATH')
    logger.debug(os.environ['PATH'])
    #logger.debug('DOMAIN       ----> %s' % domain)
    #logger.debug('MODEL_RUN    ----> %s' % model_run)

    for script in ncl_code:
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        cmd = "ncl %s " % script
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd

        logger.warn(
            "NCL to produce GEOTIFFS does not work on post-processing queue, running on head node"
        )

        cmd = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (
            ncl_in_file, ncl_out_dir, ncl_out_type, ncl_loc_file, script,
            working_dir)

        ret = shared.run_cmd(cmd, config)

        gwarp = config['gwarp']
        os.chdir(ncl_out_dir)

        cmd = "%s %s/*.tiff" % (gwarp, ncl_out_dir)
        logger.debug(cmd)
        shared.run_cmd(cmd, config)
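
The command string above injects string variables into NCL via 'name="value"' command-line assignments, which NCL evaluates before the script body runs. A small sketch that builds such a command and appends both output streams to a log (the helper name build_ncl_cmd is ours):

def build_ncl_cmd(script, log_path, **ncl_vars):
    # Each keyword becomes an NCL command-line assignment of the form
    # 'name="value"'; the script sees them as predefined string variables.
    assigns = ' '.join('\'%s="%s"\'' % (k, v) for k, v in ncl_vars.items())
    return 'ncl %s %s >> %s 2>&1' % (assigns, script, log_path)

# e.g. build_ncl_cmd('wrf_basic_plots.ncl', '/tmp/ncl.log',
#                    ncl_in_file='wrfout_d01.nc', ncl_out_type='png')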
Example #42
0
def produce_ncl_ol_plots(config):
    """ Calls a series of ncl scripts to produce visualisations.
    
    Need to think about how to define a flexible visualisation framework
    Currently communication with NCL is via environment variables
    Perhaps in future we should move to PyNGL for easier (direct) integration
    as then we could simply pass in the config dictionary, use the same logging 
    framework, and make use of a vtable like mapping to forecast vars.
    
    However, for the time being we design each ncl script to expect 
    certain environment variables.  Then, the list of ncl scripts to 
    run can simply be specified somewhere in the config file
    e.g. wrf_basic_plots.ncl, wrf_vertical_plots.ncl etc.
    
    Updated: Some plots are much easier to produce using the original
    wrfout netCDF files, rather than the UPP post-processed grib files.
    However, in future we should stick with one or the other.
    
    
    Arguments:
    config -- dictionary containing various configuration options """
    
    logger = shared.get_logger()    
    logger.info('*** RUNNING NCL SCRIPTS ***')
     
    working_dir  = config['working_dir']

    ncl_code_dir   = config['ncl_code_dir']
    ncl_files      = config['ncl_ol_code']
    #ncl_code       = ['%s/%s' % (ncl_code_dir, f) for f in ncl_files]
    ncl_code       = ncl_files
    ncl_log        = config['ncl_log']
    wrftools_dir   = config['wrftools_dir']
    wrfout_dir     = config['wrfout_dir']
    init_time      = config['init_time']
    dom            = config['dom']
    fcst_file      = '%s/wrfout_d%02d_%s:00:00.nc' %(wrfout_dir, dom, init_time.strftime("%Y-%m-%d_%H"))
    ncl_out_dir    = shared.sub_date(config['ncl_ol_out_dir'], init_time=init_time)
    ncl_out_type   = config['ncl_out_type']
    nest_id        =  '%02d' % dom
    

    ncl_in_file    = fcst_file
    ncl_loc_file   = config['locations_file']
    ncl_out_dir    = shared.sub_date(config['ncl_out_dir'], init_time=init_time)
    ncl_out_type   = config['ncl_out_type']


    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    #
    # Communicate to NCL via environment variables
    # NCL expects the following to be set
    #File    = getenv("FCST_FILE")
    #type    = getenv("NCL_OUT_TYPE")
    #diro    = getenv("NCL_OUT_DIR")
    #;web_dir = getenv("WEB_DIR")
    #domain  = getenv("NEST_ID")    
    #run_hour = getenv("RUN_HOUR")

    #
    # Try escaping : in fcst_file
    #
    #fcst_file = fcst_file.replace(':', r'\:')
    #os.environ['FCST_FILE']      = fcst_file
    #os.environ['NCL_OUT_DIR']    = ncl_out_dir
    #os.environ['NCL_OUT_TYPE']   = ncl_out_type
    #os.environ['NEST_ID']        = nest_id
    #os.environ['DOMAIN']         = domain
    #os.environ['MODEL_RUN']      = model_run



    #logger.debug('Setting environment variables')
    logger.debug('FCST_FILE    ----> %s' % fcst_file)
    logger.debug('NCL_OUT_DIR  ----> %s' % ncl_out_dir)
    logger.debug('NCL_OUT_TYPE ----> %s' % ncl_out_type)
    logger.debug('NEST_ID      ----> %s' % nest_id)
    logger.debug('PATH')
    logger.debug(os.environ['PATH'])
    #logger.debug('DOMAIN       ----> %s' % domain)
    #logger.debug('MODEL_RUN    ----> %s' % model_run)


    for script in ncl_code:
        #cmd  = "ncl %s >> %s 2>&1" % (script, ncl_log)
        cmd  = "ncl %s " % script
        #qcmd = 'qrsh -cwd -l mem_total=36G "%s"' % cmd
        
        logger.warn("NCL to produce GEOTIFFS does not work on post-processing queue, runnign on head node")

        

        cmd  = """ncl 'ncl_in_file="%s"' 'ncl_out_dir="%s"' 'ncl_out_type="%s"' 'ncl_loc_file="%s"' %s 2>&1 >> %s/ncl.log""" % (ncl_in_file,ncl_out_dir, ncl_out_type, ncl_loc_file, script, working_dir)

        ret = shared.run_cmd(cmd, config)
        
        
        gwarp = config['gwarp']
        os.chdir(ncl_out_dir)
        
        cmd = "%s %s/*.tiff" %(gwarp, ncl_out_dir)
        logger.debug(cmd)
        shared.run_cmd(cmd, config)
Example #43
0
def prepare_wps(config):
    """ Runs all the pre-processing steps necessary for running WPS.
    
    Reads the current value of init_time from config, and links 
    boundary condition files into correct directory. Creates an output
    directory for the met_em files.
    
    Arguments:
    config -- dictionary containing various configuration options"""
    
    logger       = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WPS ***')
    
    wps_dir       = config['wps_dir']          # the base installation of WPS
    wps_run_dir   = config['wps_run_dir']      # the directory to run WPS from
    working_dir = config['working_dir']    # model run directory 
    met_em_dir    = config['met_em_dir']
    init_time     = config['init_time']

    
    grb_input_fmt = config['grb_input_fmt']
    vtable        = config['vtable']
    bdy_times     = shared.get_bdy_times(config)

    if isinstance(grb_input_fmt, dict):
        logger.debug(grb_input_fmt)
        fmts = grb_input_fmt.values()
        
    else:
        fmts = [grb_input_fmt]
    
    
    for fmt in fmts:
        #
        # Generate filelist based on the initial time, and the forecast hour
        #        
        filelist = shared.get_bdy_filenames(fmt, bdy_times)

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exist')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' %f)
        
    logger.debug('all boundary conditions files exist')
    
    #
    # Run the link_grib script to link the FNL files
    #
    logger.debug('running link_grib.csh script to link grib files to GRIBFILE.AAA etc')
    os.chdir(wps_run_dir)
    args = ' '.join(filelist)
    cmd = '%s/link_grib.csh %s' %(wps_run_dir,args)
    shared.run_cmd(cmd, config)

    logger.debug('Path for met_em files is %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        os.makedirs(met_em_dir)

   
    logger.debug('*** FINISHED PREPARING FILES FOR WPS ***')    
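
shared.get_bdy_filenames is not shown on this page; given how it is called, a plausible sketch is a strftime-style expansion of the configured pattern for each boundary time (an assumption about the pattern syntax, not the wrftools source):

import datetime

def bdy_filenames(fmt, bdy_times):
    # Expand a strftime-style pattern once per boundary time; duplicates
    # can arise if the pattern is coarser than the boundary time step.
    return [t.strftime(fmt) for t in bdy_times]

# e.g. bdy_filenames('/data/gfs/GFS_%Y%m%d_%H.grb',
#                    [datetime.datetime(2013, 4, 24, h) for h in (0, 6, 12)])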
Example #44
0
File: simulate.py Project: qingu/wrftools
def prepare_wps(config):
    """ Runs all the pre-processing steps necessary for running WPS.
    
    Reads the current value of init_time from config, and links 
    boundary condition files into correct directory. Creates an output
    directory for the met_em files.
    
    Arguments:
    config -- dictionary containing various configuration options"""
    
    logger       = shared.get_logger()
    logger.debug('*** PREPARING FILES FOR WPS ***')
    
    wps_dir       = config['wps_dir']          # the base installation of WPS
    wps_run_dir   = config['wps_run_dir']      # the directory to run WPS from
    working_dir = config['working_dir']    # model run directory 
    met_em_dir    = config['met_em_dir']
    init_time     = config['init_time']

    
    grb_input_fmt = config['grb_input_fmt']
    vtable        = config['vtable']
    bdy_times     = shared.get_bdy_times(config)

    if isinstance(grb_input_fmt, dict):
        logger.debug(grb_input_fmt)
        fmts = grb_input_fmt.values()
        
    else:
        fmts = [grb_input_fmt]
    
    
    for fmt in fmts:
        #
        # Generate filelist based on the initial time, and the forecast hour
        #        
        filelist = shared.get_bdy_filenames(fmt, bdy_times)

        #
        # Check the boundary files exist
        #
        logger.debug('checking boundary condition files exist')
        for f in filelist:
            if not os.path.exists(f):
                raise IOError('cannot find file: %s' %f)
        
    logger.debug('all boundary conditions files exist')
    
    #
    # Run the link_grib script to link the FNL files
    #
    logger.debug('running link_grib.csh script to link grib files to GRIBFILE.AAA etc')
    os.chdir(wps_run_dir)
    args = ' '.join(filelist)
    cmd = '%s/link_grib.csh %s' %(wps_run_dir,args)
    shared.run_cmd(cmd, config)

    logger.debug('Path for met_em files is %s' % met_em_dir)
    if not os.path.exists(met_em_dir):
        os.makedirs(met_em_dir)

   
    logger.debug('*** FINISHED PREPARING FILES FOR WPS ***')    
Example #45
0
File: post.py Project: keenmisty/wrftools
def run_unipost(config):
    """ Runs the Universal Post Processor for each forecast time. 
    Translated from the run_unipost_frames shell script. A post-processing
    directory should exist, specified by the post_dir entry in config, and
    the UPP control file wrf_cntrl should exist within this directory.
    
    TODO: tidy up some of the hangovers from the shell script version
    
    Arguments:
    config -- dictionary containing various configuration options
    
    """
    logger = shared.get_logger()    
    logger.info('*** RUNNING UNIVERSAL POST PROCESSOR ***')
    
    domain_dir    = config['domain_dir']
    max_dom       = config['max_dom']
    dom           = config['dom'] # current domain number
    model_run     = config['model_run']
    wrfout_dir    = '%s/%s/wrfout' %(domain_dir, model_run)    


    post_dir      = '%s/%s/postprd' % (domain_dir, model_run)
    wrf_cntrl     = post_dir+'/wrf_cntrl.parm'
    upp_dir       = config['upp_dir']
    wrf_working_dir   = config['wrf_dir']+'/run'
    namelist      = read_namelist(wrf_working_dir+'/namelist.input')

    fcst_times    = get_fcst_times(config)    
    init_time     = fcst_times[0]
    history_interval = config['history_interval']
    grb_fmt       = config['grb_fmt']


    #----CREATE DIRECTORIES-----------------------------------------------
    # Create archive directories to store data and settings
    #---------------------------------------------------------------------


    wrfpost_dir    = '%s/%s/wrfpost' %(domain_dir,model_run)

    if not os.path.exists(wrfpost_dir):
        os.makedirs(wrfpost_dir)

    #----PREPARATION-------------------------------------------------------
    # Link all the relevant files needed to compute various diagnostics
    #---------------------------------------------------------------------
    
    #
    # Everything is done within the postprd directory
    #
    logger.debug('Going into postprd directory: %s' %post_dir)
    #os.chdir(post_dir)

    #
    # Clean up old output files
    #
    #logger.debug('Removing old output files')
    cmd = 'rm -f %s/*.out' % post_dir
    shared.run_cmd(cmd, config)
    cmd = 'rm -f %s/*.tm00' % post_dir
    shared.run_cmd(cmd, config)
    
    
    # Link Ferrier's microphysics table and the Unipost control file
    cmd = 'ln -sf %s/ETAMPNEW_DATA ./eta_micro_lookup.dat' % wrf_working_dir
    shared.run_cmd(cmd, config)
    
    #
    # Get local copy of parm file
    # no - let's force the user to manually ensure a copy is placed
    # in the postprd directory first
    # os.system('ln -sf ../parm/wrf_cntrl.parm .')
    
    #
    # Check wrf_cntrl file exists
    #
    if not os.path.exists(wrf_cntrl):
        raise IOError('could not find control file: %s'% wrf_cntrl)
    
    
    #
    # link coefficients for crtm2 (simulated GOES)
    # Jeez - these should really get called via run_cmd for 
    # consistency, but I can't be chewed right now
    #
    CRTMDIR  = upp_dir+'/src/lib/crtm2/coefficients'
    os.system('ln -fs %s/EmisCoeff/Big_Endian/EmisCoeff.bin           ./' %CRTMDIR)
    os.system('ln -fs %s/AerosolCoeff/Big_Endian/AerosolCoeff.bin     ./' %CRTMDIR)
    os.system('ln -fs %s/CloudCoeff/Big_Endian/CloudCoeff.bin         ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/imgr_g12.SpcCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/imgr_g12.TauCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/imgr_g11.SpcCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/imgr_g11.TauCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/amsre_aqua.SpcCoeff.bin  ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/amsre_aqua.TauCoeff.bin  ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/tmi_trmm.SpcCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/tmi_trmm.TauCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/ssmi_f15.SpcCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/ssmi_f15.TauCoeff.bin    ./' %CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/ssmis_f20.SpcCoeff.bin   ./' %CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/ssmis_f20.TauCoeff.bin   ./' %CRTMDIR)
    
    os.putenv('MP_SHARED_MEMORY', 'yes')
    os.putenv('MP_LABELIO', 'yes')
    os.putenv('tmmark', 'tm00')
    
    
    #
    # Run unipost for each time in the output file
    # Note that unipost names the intermediate files
    # WRFPRShhh.tm00 where hhh is the forecast hour
    #
    for n,t in enumerate(fcst_times):
    
        current_time = t.strftime('%Y-%m-%d_%H:%M:%S')
        fhr          = n*history_interval/60
        #logger.debug('post processing time %s, fhr %d ' % (current_time, fhr))

        #
        # Assume the forecast is contained in one wrfout file 
        # named according to the forecast initial time
        #
        wrfout = '%s/wrfout_d%02d_%s' %(wrfout_dir,dom, init_time.strftime('%Y-%m-%d_%H:%M:%S'))
        #logger.debug('looking for file: %s' % wrfout)
        
        #--- itag file --------------------------------------------------------
        #   Create input file for Unipost
        #   First line is where your wrfout data is
        #   Second line is the format
        #   Third line is the time for this process file
        #   Fourth line is a tag identifying the model (WRF, GFS etc)
        #----------------------------------------------------------------------
        #logger.debug('writing itag file')
        #logger.debug('time in itag file: %s' %current_time)
        itag = open('itag', 'w')
        itag.write('%s\n'%wrfout)
        itag.write('netcdf\n')
        itag.write('%s\n'%current_time)
        itag.write('NCAR\n')
        itag.close()
        
        #-----------------------------------------------------------------------
        #  Check wrf_cntrl.parm file exists
        #-----------------------------------------------------------------------            
        
        
        
        #-----------------------------------------------------------------------
        #   Run unipost.
        #-----------------------------------------------------------------------            
        os.system('rm -f fort.*')
        os.system('ln -sf wrf_cntrl.parm fort.14')
        os.system('ln -sf griddef.out fort.110')
        cmd = '%s/bin/unipost.exe < itag > unipost_d%02d.%s.out 2>&1' %(upp_dir, dom,current_time)
        shared.run_cmd(cmd, config)
        
        tmp_name = 'WRFPRS%03d.tm00' % fhr
        grb_name = 'wrfpost_d%02d_%s.tm00' %(dom,current_time)
        
        #
        # If keeping same format, just move output file
        #
        cmd = 'mv %s %s' %(tmp_name, grb_name)
        shared.run_cmd(cmd, config)
        
        #
        # Convert to grib2 format if required
        #            
        #if grb_fmt=='grib2':
        #    cmd = 'cnvgrib -g12 %s %s' %(tmp_name, grb_name) 
        #    shared.run_cmd(cmd, config)
            
    logger.debug('concatenating grib records into a single file for domain d%02d...' % dom)
    outname = 'wrfpost_d%02d_%s.grb'%(dom,init_time.strftime('%Y-%m-%d_%H'))
    cmd     = 'cat wrfpost_d%02d_*.tm00 > %s' %(dom, outname)
    shared.run_cmd(cmd, config)

    
    #-----------------------------------------------------------------------
    # Archive
    #-----------------------------------------------------------------------
    cmd = 'mv %s %s' %(outname, wrfpost_dir)

    ret = shared.run_cmd(cmd, config)
    
    if ret!=0:
        raise IOError('could not move post-processed output')
  
  
    logger.info("*** SUCESS UPP ***")
Example #46
0
File: tcpdump.py Project: crazyideas21/dev
def sniff_and_send(pkt_count_arg=None, pkt_size_arg=None, 
                     gap_ns_arg=None, flow_count_arg=None):
    """
    Main entry point. Starts sniff for traffic while generating packets on the
    remote host. 
    
    Returns None.
    
    """
    ignore_global_params = None not in (pkt_count_arg, pkt_size_arg, 
                                        gap_ns_arg, flow_count_arg)
    
    # Sniff. Save the text output to check for kernel-dropped packets.    
    
    shared.run_cmd('tcpdump -i ', config.sniff_iface,
                   ' -vnnxStt -s 96 -w ', config.tmp_pcap_file, 
                   ' udp > /tmp/tcpdump.log 2>&1')
    time.sleep(2)

    # Send!
    
    if ignore_global_params:

        # Use the function arguments.
                        
        p_pktgen = pktgen.send(pkt_count_arg, pkt_size_arg, 
                               gap_ns_arg, flow_count_arg)

    else:

        # Calculate the necessary parameters. Work in bits.
        
        target_bw_bps = config.target_bw_Mbps * 1000 * 1000
        pkt_size_b = config.pkt_size * 8
        pkt_count = target_bw_bps * config.max_time / pkt_size_b
        gap_ns = pkt_size_b * (10**9) / target_bw_bps # nanoseconds

        p_pktgen = pktgen.send(pkt_count, config.pkt_size, 
                               gap_ns, config.flow_count)
       
    if ignore_global_params:
        
        print 'Waiting for pktgen to complete.'
        p_pktgen.wait()

    else:

        # Make sure pktgen runs no more than max_time.
        
        pktgen_start_time = time.time()
        elapsed_time = 0
        
        while elapsed_time <= config.max_time and p_pktgen.poll() is None:    
            
            elapsed_time = time.time() - pktgen_start_time
            time.sleep(1)    
            
            sys.stdout.write('\r%s sec left' % int(config.max_time - elapsed_time))
            sys.stdout.flush()
        
        print ''
        pktgen.stop_and_parse_result()

    # Wait a bit before terminating tcpdump. It's probably not getting new
    # packets any more. pkill sends SIGTERM to tcpdump.
    
    time.sleep(2)
    shared.run_cmd('pkill tcpdump').wait()
    
    # Parse the number of packets dropped by the kernel.
    
    global last_captured_pkt_count, last_dropped_pkt_count
    last_captured_pkt_count = last_dropped_pkt_count = None
    
    logf = open('/tmp/tcpdump.log')
        
    for line in logf:
                
        r = re.search(r'(\d+) packets received by filter', line)
        if r: state.tcpdump_recvd_pkt_count = int(r.group(1))
            
        r = re.search(r'(\d+) packets dropped by kernel', line)
        if r: state.tcpdump_dropped_pkt_count = int(r.group(1))
    
    logf.close()

    # Display the result of tcpdump
    assert None not in (state.tcpdump_recvd_pkt_count,
                        state.tcpdump_dropped_pkt_count)
    if config.verbose:
        print 'TCPDUMP - received packets:',
        print state.tcpdump_recvd_pkt_count,
        print 'dropped packets:',
        print state.tcpdump_dropped_pkt_count
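
To make the rate arithmetic concrete, here is a worked example with illustrative values (1400-byte packets, a 1200 Mbps target, a 60-second run; these numbers are hypothetical, not taken from any particular config):

# Work in bits throughout, as sniff_and_send does.
target_bw_bps = 1200 * 1000 * 1000                    # 1200 Mbps target
pkt_size_b = 1400 * 8                                 # 1400-byte packets = 11200 bits
max_time = 60                                         # seconds
pkt_count = target_bw_bps * max_time // pkt_size_b    # ~6.43 million packets
gap_ns = pkt_size_b * (10**9) // target_bw_bps        # ~9333 ns between packets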
Example #47
0
File: post.py Project: zhangylang/wrftools
def run_unipost(config):
    """ Runs the Universal Post Processor for each forecast time. 
    Translated from the run_unipost_frames shell script. A post-processing
    directory should exist, specified by the post_dir entry in config, and
    the UPP control file wrf_cntrl should exist within this directory.
    
    TODO: tidy up some of the hangovers from the shell script version
    
    Arguments:
    config -- dictionary containing various configuration options
    
    """
    logger = shared.get_logger()
    logger.info('*** RUNNING UNIVERSAL POST PROCESSOR ***')

    domain_dir = config['domain_dir']
    max_dom = config['max_dom']
    dom = config['dom']  # current domain number
    model_run = config['model_run']
    wrfout_dir = '%s/%s/wrfout' % (domain_dir, model_run)

    post_dir = '%s/%s/postprd' % (domain_dir, model_run)
    wrf_cntrl = post_dir + '/wrf_cntrl.parm'
    upp_dir = config['upp_dir']
    wrf_working_dir = config['wrf_dir'] + '/run'
    namelist = read_namelist(wrf_working_dir + '/namelist.input')

    fcst_times = get_fcst_times(config)
    init_time = fcst_times[0]
    history_interval = config['history_interval']
    grb_fmt = config['grb_fmt']

    #----CREATE DIRECTORIES-----------------------------------------------
    # Create archive directories to store data and settings
    #---------------------------------------------------------------------

    wrfpost_dir = '%s/%s/wrfpost' % (domain_dir, model_run)

    if not os.path.exists(wrfpost_dir):
        os.makedirs(wrfpost_dir)

    #----PREPARATION-------------------------------------------------------
    # Link all the relevant files needed to compute various diagnostics
    #---------------------------------------------------------------------

    #
    # Everything is done within the postprd directory
    #
    logger.debug('Going into postprd directory: %s' % post_dir)
    #os.chdir(post_dir)

    #
    # Clean up old output files
    #
    #logger.debug('Removing old output files')
    cmd = 'rm -f %s/*.out' % post_dir
    shared.run_cmd(cmd, config)
    cmd = 'rm -f %s/*.tm00' % post_dir
    shared.run_cmd(cmd, config)

    # Link Ferrier's microphysics table and the Unipost control file
    cmd = 'ln -sf %s/ETAMPNEW_DATA ./eta_micro_lookup.dat' % wrf_working_dir
    shared.run_cmd(cmd, config)

    #
    # Get local copy of parm file
    # no - let's force the user to manually ensure a copy is placed
    # in the postprd directory first
    # os.system('ln -sf ../parm/wrf_cntrl.parm .')

    #
    # Check wrf_cntrl file exists
    #
    if not os.path.exists(wrf_cntrl):
        raise IOError('could not find control file: %s' % wrf_cntrl)

    #
    # link coefficients for crtm2 (simulated GOES)
    # Jeez - these should really get called via run_cmd for
    # consistency, but I can't be chewed right now
    #
    CRTMDIR = upp_dir + '/src/lib/crtm2/coefficients'
    os.system('ln -fs %s/EmisCoeff/Big_Endian/EmisCoeff.bin           ./' %
              CRTMDIR)
    os.system('ln -fs %s/AerosolCoeff/Big_Endian/AerosolCoeff.bin     ./' %
              CRTMDIR)
    os.system('ln -fs %s/CloudCoeff/Big_Endian/CloudCoeff.bin         ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/imgr_g12.SpcCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/imgr_g12.TauCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/imgr_g11.SpcCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/imgr_g11.TauCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/amsre_aqua.SpcCoeff.bin  ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/amsre_aqua.TauCoeff.bin  ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/tmi_trmm.SpcCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/tmi_trmm.TauCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/ssmi_f15.SpcCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/ssmi_f15.TauCoeff.bin    ./' %
              CRTMDIR)
    os.system('ln -fs %s/SpcCoeff/Big_Endian/ssmis_f20.SpcCoeff.bin   ./' %
              CRTMDIR)
    os.system('ln -fs %s/TauCoeff/Big_Endian/ssmis_f20.TauCoeff.bin   ./' %
              CRTMDIR)

    os.putenv('MP_SHARED_MEMORY', 'yes')
    os.putenv('MP_LABELIO', 'yes')
    os.putenv('tmmark', 'tm00')

    #
    # Run unipost for each time in the output file
    # Note that unipost names the intermediate files
    # WRFPRShhh.tm00 where hhh is the forecast hour
    #
    for n, t in enumerate(fcst_times):

        current_time = t.strftime('%Y-%m-%d_%H:%M:%S')
        fhr = n * history_interval / 60
        #logger.debug('post processing time %s, fhr %d ' % (current_time, fhr))

        #
        # Assume the forecast is contained in one wrfout file
        # named according to the forecast initial time
        #
        wrfout = '%s/wrfout_d%02d_%s' % (
            wrfout_dir, dom, init_time.strftime('%Y-%m-%d_%H:%M:%S'))
        #logger.debug('looking for file: %s' % wrfout)

        #--- itag file --------------------------------------------------------
        #   Create input file for Unipost
        #   First line is where your wrfout data is
        #   Second line is the format
        #   Third line is the time for this process file
        #   Fourth line is a tag identifying the model (WRF, GFS etc)
        #----------------------------------------------------------------------
        #logger.debug('writing itag file')
        #logger.debug('time in itag file: %s' %current_time)
        itag = open('itag', 'w')
        itag.write('%s\n' % wrfout)
        itag.write('netcdf\n')
        itag.write('%s\n' % current_time)
        itag.write('NCAR\n')
        itag.close()

        #-----------------------------------------------------------------------
        #  Check wrf_cntrl.parm file exists
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        #   Run unipost.
        #-----------------------------------------------------------------------
        os.system('rm -f fort.*')
        os.system('ln -sf wrf_cntrl.parm fort.14')
        os.system('ln -sf griddef.out fort.110')
        cmd = '%s/bin/unipost.exe < itag > unipost_d%02d.%s.out 2>&1' % (
            upp_dir, dom, current_time)
        shared.run_cmd(cmd, config)

        tmp_name = 'WRFPRS%03d.tm00' % fhr
        grb_name = 'wrfpost_d%02d_%s.tm00' % (dom, current_time)

        #
        # If keeping same format, just move output file
        #
        cmd = 'mv %s %s' % (tmp_name, grb_name)
        shared.run_cmd(cmd, config)

        #
        # Convert to grib2 format if required
        #
        #if grb_fmt=='grib2':
        #    cmd = 'cnvgrib -g12 %s %s' %(tmp_name, grb_name)
        #    shared.run_cmd(cmd, config)

    logger.debug(
        'concatenating grib records into a single file for domain d%02d...' %
        dom)
    outname = 'wrfpost_d%02d_%s.grb' % (dom, init_time.strftime('%Y-%m-%d_%H'))
    cmd = 'cat wrfpost_d%02d_*.tm00 > %s' % (dom, outname)
    shared.run_cmd(cmd, config)

    #-----------------------------------------------------------------------
    # Archive
    #-----------------------------------------------------------------------
    cmd = 'mv %s %s' % (outname, wrfpost_dir)

    ret = shared.run_cmd(cmd, config)

    if ret != 0:
        raise IOError('could not move post-processed output')

    logger.info("*** SUCESS UPP ***")