def test_file_exists(self):

        print("\nRunning unittest: file_exists")

        # Start with a "clean" directory and non-existent file
        # expect the file_exists function to return False
        test_dir_base = self.p.getdir('TEST_DIR')
        test_file = self.p.getstr('config', 'TEST_FILENAME')
        full_test_file = os.path.join(test_dir_base, test_file)

        # Don't do this test if the TEST_DIR exists.
        # We do not want to remove a directory unless this test created it.
        if os.path.exists(test_dir_base):
            print("Remove your TEST_DIR: %s" % test_dir_base)
            self.assertTrue(False)
        else:
            # The directory doesn't exist, so neither does the file;
            # expect the file_exists function to return False
            self.assertFalse(util.file_exists(full_test_file))

            # Create a file, expect the file_exists function to
            # return True
            util.mkdir_p(test_dir_base)
            touch_cmd = ' '.join(["/usr/bin/touch", full_test_file])
            #print("full_test_file: %s" % full_test_file)
            os.system(touch_cmd)
            self.assertTrue(util.file_exists(full_test_file))

            # clean up
            util.rmtree(test_dir_base)
    def test_file_exists(self):
        p = P.Params()
        p.init(__doc__)

        # Start with a "clean" directory and non-existent file
        # expect the file_exists function to return False
        rm_exe = p.opt["RM_EXE"]
        test_dir_base = p.opt["TEST_DIR"]
        rm_file_cmd = ' '.join([rm_exe, test_dir_base])
        test_file = p.opt["TEST_FILENAME"]
        full_test_file = os.path.join(test_dir_base, test_file)

        # Remove any leftover test directory so we start "clean"
        # and the test file cannot already exist.
        os.system(rm_file_cmd)
        self.assertFalse(util.file_exists(full_test_file))

        # Create a file, expect the file_exists function to
        # return True
        util.mkdir_p(test_dir_base)
        touch_cmd = "/usr/bin/touch full_test_file"
        print("full_test_file: %s", full_test_file)
        os.system(touch_cmd)
Example #3
    def run_at_time(self, cur_init):
        """!Get TC-paris data then regrid tiles centered on the storm.

        Get TC-pairs track data and GFS model data, do any necessary
        processing then regrid the forecast and analysis files to a
        30 x 30 degree tile centered on the storm.
        Args:

        Returns:

            None: invokes regrid_data_plane to create a netCDF file from two
                    extratropical storm track files.
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.
        # Used in logging
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # get the process id to be used to identify the output
        # amongst different users and runs.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
        msg = ("INFO|[" + cur_filename + ":" + cur_function + "]"
               "|Begin extract tiles")
        self.logger.info(msg)

        # Check that there are tc_pairs data which are used as input
        if util.is_dir_empty(self.tc_pairs_dir):
            msg = ("ERROR|[" + cur_filename + ":" + cur_function + "]"
                   "|No tc pairs data found at " + self.tc_pairs_dir +
                   "Exiting...")
            self.logger.error(msg)
            sys.exit(1)

        # Logging output format:
        #   TIME UTC|TYPE (DEBUG, INFO, WARNING, etc.)|[File : function]|Message
        # e.g. logger.info("INFO|[" + cur_filename + ":" + cur_function +
        #                  "]|BEGIN extract_tiles")
        # Process TC pairs by initialization time.
        # Begin processing for initialization time, cur_init
        year_month = util.extract_year_month(cur_init, self.logger)

        # Create the name of the filter file we need to find.  If
        # the file doesn't exist, then run TC_STAT
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(self.filtered_out_dir, cur_init,
                                   filter_filename)

        if util.file_exists(filter_name) and not self.overwrite_flag:
            msg = ("DEBUG| [" + cur_filename + ":" + cur_function +
                   " ] | Filter file exists, using Track data file: " +
                   filter_name)
            self.logger.debug(msg)
        else:
            # Create the storm track by applying the
            # filter options defined in the config/param file.
            tile_dir = os.path.join(self.tc_pairs_dir, year_month)
            # Use TcStatWrapper to build up the tc_stat command and invoke
            # the MET tool tc_stat to perform the filtering.
            tcs = TcStatWrapper(self.config)
            tcs.build_tc_stat(self.filtered_out_dir, cur_init,
                              tile_dir, self.addl_filter_opts)

            # Remove any empty files and directories that can occur
            # from filtering.
            util.prune_empty(filter_name, self.logger)

        # Now get unique storm ids from the filter file,
        # filter_yyyymmdd_hh.tcst
        sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

        # Check for empty sorted_storm_ids, if empty,
        # continue to the next time.
        if not sorted_storm_ids:
            # No storms found for init time, cur_init
            msg = ("DEBUG|[" + cur_filename + ":" + cur_function + " ]|" +
                   "No storms were found for " + cur_init +
                   "...continue to next in list")
            self.logger.debug(msg)
            return

        # Process each storm in the sorted_storm_ids list
        # Iterate over each filter file in the output directory and
        # search for the presence of the storm id.  Store this
        # corresponding row of data into a temporary file in the
        # /tmp/<pid> directory.
        for cur_storm in sorted_storm_ids:
            storm_output_dir = os.path.join(self.filtered_out_dir,
                                            cur_init, cur_storm)
            with open(filter_name, "r") as filter_fh:
                header = filter_fh.readline()
            util.mkdir_p(storm_output_dir)
            util.mkdir_p(tmp_dir)
            tmp_filename = "filter_" + cur_init + "_" + cur_storm
            full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

            storm_match_list = util.grep(cur_storm, filter_name)
            with open(full_tmp_filename, "a+") as tmp_file:
                # copy over header information
                tmp_file.write(header)
                for storm_match in storm_match_list:
                    tmp_file.write(storm_match)

            # Perform regridding of the forecast and analysis files
            # to an n X n degree tile centered on the storm (dimensions
            # are indicated in the config/param file).
            util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                     cur_storm, self.filtered_out_dir,
                                     self.logger, self.config)

        # end of for cur_storm

        # Remove any empty files and directories in the extract_tiles output
        # directory
        util.prune_empty(self.filtered_out_dir, self.logger)

        # Clean up the tmp directory if it exists
        if os.path.isdir(tmp_dir):
            util.rmtree(tmp_dir)
            msg = ("INFO|[" + cur_function + ":" + cur_filename + "]"
                   "| Finished extract tiles")
            self.logger.info(msg)
Example #4
    def run_at_time(self, input_dict):
        """!Get TC-paris data then regrid tiles centered on the storm.

        Get TC-pairs track data and GFS model data, do any necessary
        processing then regrid the forecast and analysis files to a
        30 x 30 degree tile centered on the storm.
        Args:
            input_dict:  Time dictionary
        Returns:

            None: invokes regrid_data_plane to create a netCDF file from two
                    extratropical storm track files.
        """
        time_info = time_util.ti_calculate(input_dict)
        init_time = time_info['init_fmt']

        # get the process id to be used to identify the output
        # amongst different users and runs.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
        self.logger.info("Begin extract tiles")

        cur_init = init_time[0:8] + "_" + init_time[8:10]
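
        # init_fmt begins with YYYYMMDDHH; the slice above reformats it to
        # YYYYMMDD_HH, the form used in the filter filenames below.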

        # Check that there are tc_pairs data which are used as input
        if util.is_dir_empty(self.tc_pairs_dir):
            self.logger.error("No tc pairs data found at {}"\
                              .format(self.tc_pairs_dir))
            sys.exit(1)

        # Create the name of the filter file we need to find.  If
        # the file doesn't exist, then run TC_STAT
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(self.filtered_out_dir, cur_init,
                                   filter_filename)

        if util.file_exists(filter_name) and not self.overwrite_flag:
            self.logger.debug("Filter file exists, using Track data file: {}"\
                              .format(filter_name))
        else:
            # Create the storm track by applying the
            # filter options defined in the config/param file.
            # Use TcStatWrapper to build up the tc_stat command and invoke
            # the MET tool tc_stat to perform the filtering.
            tiles_list = util.get_files(self.tc_pairs_dir, ".*tcst",
                                        self.logger)
            tiles_list_str = ' '.join(tiles_list)

            tcs = TcStatWrapper(self.config, self.logger)
            tcs.build_tc_stat(self.filtered_out_dir, cur_init, tiles_list_str,
                              self.addl_filter_opts)

            # Remove any empty files and directories that can occur
            # from filtering.
            util.prune_empty(filter_name, self.logger)

        # Now get unique storm ids from the filter file,
        # filter_yyyymmdd_hh.tcst
        sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

        # Check for empty sorted_storm_ids, if empty,
        # continue to the next time.
        if not sorted_storm_ids:
            # No storms found for init time, cur_init
            msg = "No storms were found for {} ...continue to next in list"\
              .format(cur_init)
            self.logger.debug(msg)
            return

        # Process each storm in the sorted_storm_ids list
        # Iterate over each filter file in the output directory and
        # search for the presence of the storm id.  Store this
        # corresponding row of data into a temporary file in the
        # /tmp/<pid> directory.
        for cur_storm in sorted_storm_ids:
            storm_output_dir = os.path.join(self.filtered_out_dir, cur_init,
                                            cur_storm)
            with open(filter_name, "r") as filter_fh:
                header = filter_fh.readline()
            util.mkdir_p(storm_output_dir)
            util.mkdir_p(tmp_dir)
            tmp_filename = "filter_" + cur_init + "_" + cur_storm
            full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

            storm_match_list = util.grep(cur_storm, filter_name)
            with open(full_tmp_filename, "a+") as tmp_file:
                # copy over header information
                tmp_file.write(header)
                for storm_match in storm_match_list:
                    tmp_file.write(storm_match)

            # Perform regridding of the forecast and analysis files
            # to an n X n degree tile centered on the storm (dimensions
            # are indicated in the config/param file).
            feature_util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                             cur_storm, self.filtered_out_dir,
                                             self.config)

        # end of for cur_storm

        # Remove any empty files and directories in the extract_tiles output
        # directory
        util.prune_empty(self.filtered_out_dir, self.logger)

        # Clean up the tmp directory if it exists
        if os.path.isdir(tmp_dir):
            util.rmtree(tmp_dir)
Example #5
    def build_pb2nc_command(self, relevant_pb_files):
        """! Build the command to MET pb2nc

             Args:
                 @param relevant_pb_files - a list containing the relevant
                                            prepbufr files after applying a
                                            filtering by valid time or by
                                            init time.  Each item is a
                                            named tuple with the full filepath,
                                            date, and cycle hour of the file.
             Returns:
                 None - builds the command and invokes MET pb2nc
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Used for logging.
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name
        self.logger.info("INFO|:" + cur_function + '|' + cur_filename + '| ' +
                         "Building MET pb2nc command...")

        # Create the call to MET pb2nc with the following format:
        #   pb2nc \
        #     /path/to/input/prepbufr_file (first file in relevant_pb_files) \
        #     /path/to/output/netCDF_file \
        #     /path/to/MET_pb2nc_config_file \
        #     -pbfile <list of remaining prepbufr files in relevant_pb_files>
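        # A hypothetical assembled command (all paths illustrative only,
        # not taken from any real run):
        #   pb2nc /d1/data/prepbufr.gdas.2017060100 \
        #         /d1/out/prepbufr.gdas.2017060100.nc \
        #         /path/to/PB2NCConfig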
        # pylint:disable=simplifiable-if-statement
        # Expecting 'yes' or 'no' from user in config file.
        if self.pb_dict['OVERWRITE_NC_OUTPUT'] == 'yes':
            overwrite_flag = True
        else:
            overwrite_flag = False

        for relevant_pb_file in relevant_pb_files:
            pb_file = relevant_pb_file

            # Start building the pieces of the argument to invoke MET pb2nc if
            # the input file is within the start and end init times specified.
            # Input file with full path.
            # input_file = os.path.join(self.pb_dict['PREPBUFR_DATA_DIR'],
            #                         pb_file)
            input_file = pb_file
            self.add_arg(input_file)

            # Generate the output filename (with full path) based on the
            # file template specified in the config file
            pb_output_file_to_name = self.extract_prepbufr_file_info(
                relevant_pb_file)

            output_full_filename = self.generate_output_nc_filename(
                pb_output_file_to_name)

            # Run if overwrite_flag is True, or if the output
            # file doesn't already exist.
            if overwrite_flag or \
                    not util.file_exists(output_full_filename):
                self.pb_dict['OUTPUT_DIR_STRUCTURE'] = output_full_filename
                self.add_arg(output_full_filename)

                # Config file location
                self.set_param_file(self.pb_dict['PB2NC_CONFIG_FILE'])

                # For developer debugging
                # self.add_arg(' -index -v 4 -log /tmp/pb2nc.log')

                # Invoke MET pb2nc
                cmd = self.get_command()
                self.logger.debug('DEBUG|:' + cur_function + '|' +
                                  cur_filename + '| pb2nc called with: ' + cmd)
                self.build()
                self.logger.debug('DEBUG|:' + cur_function + '|' +
                                  cur_filename + ' Finished running pb2nc...')
                self.clear()

            else:
                self.logger.debug("DEBUG|:" + cur_function + '|' +
                                  cur_filename + ' Not overwriting existing '
                                  'files, continue')
    def _init_tcmpr_script(self):
        """! Called by the constructor to set up the environment variables
        used by the plot_tcmpr.R script and to set the self.tcmpr_script
        variable."""

        # User environment variable settings take precedence over
        # configuration files.

        # The purpose of this method is to support MET 6.0 and later,
        # and to not throw a superfluous error due to a missing env variable
        # that is version specific.
        # For example,
        # MET_INSTALL_DIR is required starting with met-6.1, so we don't
        # want to throw an error if it is not defined and we are running
        # with an earlier version of met.
        #
        # Ultimately, the plot_tcmpr.R script will throw an error
        # indicating any missing required environment variables.
        # So if all else fails, we defer to plot_tcmpr.R;
        # we are being nice and trying to catch/prevent it here.

        # The logic in this method is not perfect. So it is entirely
        # possible for a scenario to exist that may cause this to
        # break. It would be much easier if there was a way to check
        # for the version of met. Hopefully it covers 99% of the cases.

        # Environment variables and MET versions required by plot_tcmpr.R:
        # met-6.1 and later: MET_INSTALL_DIR, MET_BASE
        # met-6.0: MET_BUILD_BASE, RSCRIPTS_BASE

        # At some point in the future MET_BUILD_BASE and RSCRIPTS_BASE
        # should go away from all METplus references. When we no longer
        # need to support MET 6.0, this method can be simplified.

        # MET_INSTALL_DIR introduced in METplus conf file, for met-6.1 and later
        if 'MET_INSTALL_DIR' in os.environ:
            self.logger.info('Using MET_INSTALL_DIR setting from user '
                             'environment instead of metplus configuration '
                             'file. Using: %s' % os.environ['MET_INSTALL_DIR'])
        else:

            # If MET_BUILD_BASE is defined in the conf file, assume we are
            # running with met-6.0 or earlier, which means MET_INSTALL_DIR
            # is NOT required, so we don't want to throw an error if it is
            # not defined.
            if self.p.has_option('dir', 'MET_BUILD_BASE'):
                if self.p.has_option('dir', 'MET_INSTALL_DIR'):
                    os.environ['MET_INSTALL_DIR'] = self.p.getdir(
                        'MET_INSTALL_DIR')
            else:
                os.environ['MET_INSTALL_DIR'] = self.p.getdir(
                    'MET_INSTALL_DIR')

        # MET_BASE has always been defined in METplus, so it 'should'
        # exist and we will throw an error if it is not defined,
        # even though it is not required when running METplus against
        # met-6.0 and earlier.
        if 'MET_BASE' in os.environ:
            self.logger.info('Using MET_BASE setting from user '
                             'environment instead of metplus configuration '
                             'file. Using: %s' % os.environ['MET_BASE'])
            met_base_tcmpr_script = \
                os.path.join(os.environ['MET_BASE'],'Rscripts/plot_tcmpr.R')
        else:
            os.environ['MET_BASE'] = self.p.getdir('MET_BASE')
            met_base_tcmpr_script = \
                os.path.join(self.p.getdir('MET_BASE'),'Rscripts/plot_tcmpr.R')

        # RSCRIPTS_BASE introduced and used ONLY in met-6.0 release.
        # Will go away when we no longer support met-6.0 and earlier.
        # RSCRIPTS_BASE /path/to/scripts/Rscripts
        if 'RSCRIPTS_BASE' in os.environ:
            self.logger.info('Using RSCRIPTS_BASE setting from user '
                             'environment instead of metplus configuration '
                             'file. Using: %s' % os.environ['RSCRIPTS_BASE'])
        else:
            # If MET_BUILD_BASE is defined in the conf file, assume we are
            # running with met-6.0 or earlier, which means RSCRIPTS_BASE
            # is required, so throw an error if it is not defined.
            if self.p.has_option('dir', 'MET_BUILD_BASE'):
                os.environ['RSCRIPTS_BASE'] = self.p.getdir('RSCRIPTS_BASE')

        # MET_BUILD_BASE has always been defined in METplus.
        # Will go away when we no longer support met-6.0 and earlier.
        if 'MET_BUILD_BASE' in os.environ:
            self.logger.info('Using MET_BUILD_BASE setting from user '
                             'environment instead of metplus configuration '
                             'file. Using: %s' % os.environ['MET_BUILD_BASE'])
            met_build_base_tcmpr_script = \
                os.path.join(os.environ['MET_BUILD_BASE'],
                             'scripts/Rscripts/plot_tcmpr.R')
        else:
            if self.p.has_option('dir', 'MET_BUILD_BASE'):
                os.environ['MET_BUILD_BASE'] = self.p.getdir('MET_BUILD_BASE')
                met_build_base_tcmpr_script = os.path.join(
                    self.p.getdir('MET_BUILD_BASE'),
                    'scripts/Rscripts/plot_tcmpr.R')
            else:
                #Set to empty string since we test it later.
                met_build_base_tcmpr_script = ''

        if util.file_exists(met_base_tcmpr_script):
            self.tcmpr_script = met_base_tcmpr_script
            self.logger.info('Using MET_BASE plot_tcmpr script: %s ' %
                             met_base_tcmpr_script)
        elif util.file_exists(met_build_base_tcmpr_script):
            self.tcmpr_script = met_build_base_tcmpr_script
            self.logger.info('Using MET_BUILD_BASE plot_tcmpr script: %s ' %
                             met_build_base_tcmpr_script)
        else:
            self.logger.error(
                'No plot_tcmpr.R script could be found. '
                'Check your MET_BASE or MET_BUILD_BASE paths in the conf file.')
            sys.exit(1)
Example #7
    def apply_series_filters(self, tile_dir, init_times, series_output_dir,
                             filter_opts, temporary_dir):
        """! Apply filter options, as specified in the
            param/config file.
            Args:
               @param tile_dir:  Directory where the input data files reside,
                                 i.e. the data to which the filter criteria
                                 will be applied.
               @param init_times:  List of init times that define the
                                   input data.
               @param series_output_dir:  The directory where the filter results
                                          will be stored.
               @param filter_opts:  The filter options to apply
               @param temporary_dir:  The temporary directory where intermediate
                                      files are saved.
            Returns:
                None
        """
        # pylint: disable=too-many-arguments
        # Six arguments (including self) are needed to perform filtering.

        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Useful for logging
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # Create temporary directory where intermediate files are saved.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(temporary_dir, cur_pid)
        self.logger.debug("creating tmp dir: " + tmp_dir)

        for cur_init in init_times:
            # Call the tc_stat wrapper to build up the command and invoke
            # the MET tool tc_stat.
            filter_file = "filter_" + cur_init + ".tcst"
            filter_filename = os.path.join(series_output_dir, cur_init,
                                           filter_file)

            tcs = TcStatWrapper(self.config, self.logger)
            tcs.build_tc_stat(series_output_dir, cur_init, tile_dir,
                              filter_opts)

            # Check that the filter.tcst file isn't empty. If
            # it is, then use the files from extract_tiles as
            # input (tile_dir = extract_out_dir)
            if not util.file_exists(filter_filename):
                msg = ("Non-existent filter file " + filter_filename +
                       "; never created by the MET tool tc_stat.")
                self.logger.debug(msg)
                continue
            elif os.stat(filter_filename).st_size == 0:
                msg = ("Empty filter file " + filter_filename +
                       "; the filter options yield nothing.")
                self.logger.debug(msg)
                continue
            else:
                # Now retrieve the files corresponding to these
                # storm ids that resulted from filtering.
                sorted_storm_ids = util.get_storm_ids(filter_filename,
                                                      self.logger)

                # Retrieve the header from filter_filename to be used in
                # creating the temporary files.
                with open(filter_filename, 'r') as filter_fh:
                    header = filter_fh.readline()

                for cur_storm in sorted_storm_ids:
                    msg = ("Processing storm: " + cur_storm + " for file: " +
                           filter_filename)
                    self.logger.debug(msg)
                    storm_output_dir = os.path.join(series_output_dir,
                                                    cur_init, cur_storm)
                    util.mkdir_p(storm_output_dir)
                    util.mkdir_p(tmp_dir)
                    tmp_file = "filter_" + cur_init + "_" + cur_storm
                    tmp_filename = os.path.join(tmp_dir, tmp_file)
                    storm_match_list = util.grep(cur_storm, filter_filename)
                    with open(tmp_filename, "a+") as tmp_file:
                        tmp_file.write(header)
                        for storm_match in storm_match_list:
                            tmp_file.write(storm_match)

                    # Create the analysis and forecast files based
                    # on the storms (defined in the tmp_filename created above)
                    # Store the analysis and forecast files in the
                    # series_output_dir.
                    feature_util.retrieve_and_regrid(tmp_filename, cur_init,
                                                     cur_storm,
                                                     series_output_dir,
                                                     self.config)

        # Check for any empty files and directories and remove them to avoid
        # any errors or performance degradation when performing
        # series analysis.
        util.prune_empty(series_output_dir, self.logger)

        # Clean up the tmp dir
        util.rmtree(tmp_dir)
Example #8
def retrieve_and_regrid(tmp_filename, cur_init, cur_storm, out_dir, config):
    """! Retrieves the data from the MODEL_DATA_DIR (defined in metplus.conf)
         that corresponds to the storms defined in the tmp_filename:
        1) create the analysis tile and forecast file names from the
           tmp_filename file.
        2) perform regridding via the MET tool regrid_data_plane (storing
           netCDF results in the out_dir) or via wgrib2, regridding the
           forecast and analysis files using a latlon string with the
           following format:
                latlon Nx Ny lat_ll lon_ll delta_lat delta_lon
                NOTE:  these values are defined in the extract_tiles_parm
                parameter/config file as NLAT, NLON.
        ***NOTE:  This is used by both extract_tiles_wrapper.py and
               series_by_lead_wrapper.py
        Args:
        @param tmp_filename:   Filename of the temporary filter file in
                               the /tmp directory. Contains rows
                               of data corresponding to a storm id of varying
                               times.
        @param cur_init:       The current init time
        @param cur_storm:      The current storm
        @param out_dir:  The directory where regridded netCDF or grib2 output
                         is saved depending on which regridding methodology is
                         requested.  If the MET tool regrid_data_plane is
                         requested, then netCDF data is produced.  If wgrib2
                         is requested, then grib2 data is produced.
        @param config:  config instance
        Returns:
           None
    """

    # pylint: disable=protected-access
    # Need to call sys._getframe() to get current function and file for
    # logging information.
    # pylint: disable=too-many-arguments
    # all input is needed to perform task

    # rdp (a RegridDataPlaneWrapper) was added when logging capability
    # was added to capture all MET output to log files. It is a temporary
    # work-around to get logging up and running as needed.
    # It is being used to call the run_cmd method, which runs the cmd
    # and redirects logging based on the conf settings.
    # Instantiate a RegridDataPlaneWrapper
    logger = config.logger
    rdp = RegridDataPlaneWrapper(config, logger)

    # For logging
    cur_filename = sys._getframe().f_code.co_filename
    cur_function = sys._getframe().f_code.co_name

    # Get variables, etc. from param/config file.
    model_data_dir = config.getdir('MODEL_DATA_DIR')
    met_install_dir = config.getdir('MET_INSTALL_DIR')
    regrid_data_plane_exe = os.path.join(met_install_dir,
                                         'bin/regrid_data_plane')

    # regrid_data_plane_exe = config.getexe('REGRID_DATA_PLANE')
    wgrib2_exe = config.getexe('WGRIB2')
    egrep_exe = config.getexe('EGREP')
    regrid_with_met_tool = config.getbool('config', 'REGRID_USING_MET_TOOL')
    overwrite_flag = config.getbool('config', 'OVERWRITE_TRACK')
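
    # regrid_with_met_tool selects regrid_data_plane (True) or wgrib2
    # (False) below; overwrite_flag forces regridding of existing tiles.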

    # Extract the columns of interest: init time, lead time,
    # valid time, and the lat and lon of both tropical cyclone tracks, etc.
    # Then calculate the forecast hour and other things.
    with open(tmp_filename, "r") as tf:
        # read header
        header = tf.readline().split()
        # get column numbers for the columns of interest
        # print('header{}:'.format(header))
        header_colnum_init, header_colnum_lead, header_colnum_valid = \
            header.index('INIT'), header.index('LEAD'), header.index(
                'VALID')
        header_colnum_alat, header_colnum_alon = \
            header.index('ALAT'), header.index('ALON')
        header_colnum_blat, header_colnum_blon = \
            header.index('BLAT'), header.index('BLON')
        for line in tf:
            col = line.split()
            init, lead, valid, alat, alon, blat, blon = \
                col[header_colnum_init], col[header_colnum_lead], \
                col[header_colnum_valid], col[header_colnum_alat], \
                col[header_colnum_alon], col[header_colnum_blat], \
                col[header_colnum_blon]

            # integer division for both Python 2 and 3
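            # LEAD values in the .tcst file have the form HHMMSS
            # (e.g. 060000 for a 6-hour lead), so integer division
            # by 10000 keeps only the hours.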
            lead_time = int(lead)
            fcst_hr = lead_time // 10000

            init_ymd_match = re.match(r'[0-9]{8}', init)
            if init_ymd_match:
                init_ymd = init_ymd_match.group(0)
            else:
                logger.warning("RuntimeError raised")
                raise RuntimeError('init time has unexpected format for YMD')

            init_ymdh_match = re.match(r'[0-9_]{11}', init)
            if init_ymdh_match:
                init_ymdh = init_ymdh_match.group(0)
            else:
                logger.warning("RuntimeError raised")
                raise RuntimeError('init time has unexpected format for YMDH')

            valid_ymd_match = re.match(r'[0-9]{8}', valid)
            if valid_ymd_match:
                valid_ymd = valid_ymd_match.group(0)
            else:
                logger.warning("RuntimeError raised")
                raise RuntimeError('valid time has unexpected format for YMD')

            valid_ymdh_match = re.match(r'[0-9_]{11}', valid)
            if valid_ymdh_match:
                valid_ymdh = valid_ymdh_match.group(0)
            else:
                logger.warning("RuntimeError raised")
                raise RuntimeError('valid time has unexpected format for YMDH')

            lead_str = str(fcst_hr).zfill(3)
            fcst_dir = os.path.join(model_data_dir, init_ymd)
            init_ymdh_split = init_ymdh.split("_")
            init_yyyymmddhh = "".join(init_ymdh_split)
            anly_dir = os.path.join(model_data_dir, valid_ymd)
            valid_ymdh_split = valid_ymdh.split("_")
            valid_yyyymmddhh = "".join(valid_ymdh_split)

            init_dt = datetime.datetime.strptime(init_yyyymmddhh, '%Y%m%d%H')
            valid_dt = datetime.datetime.strptime(valid_yyyymmddhh, '%Y%m%d%H')
            lead_seconds = int(fcst_hr * 3600)
            # Create the forecast and analysis input filenames used for
            # regridding. When wgrib2 is used to regrid, the regridded
            # file is a grib2 file.
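            # StringSub fills the filename templates (from the
            # filename_templates config section) with the init/valid
            # datetimes and the lead time in seconds.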
            fcst_sts = \
                StringSub(logger, config.getraw('filename_templates',
                                            'GFS_FCST_FILE_TMPL'),
                          init=init_dt, lead=lead_seconds)

            anly_sts = \
                StringSub(logger, config.getraw('filename_templates',
                                            'GFS_ANLY_FILE_TMPL'),
                          valid=valid_dt, lead=lead_seconds)

            fcst_file = fcst_sts.do_string_sub()
            fcst_filename = os.path.join(fcst_dir, fcst_file)
            anly_file = anly_sts.do_string_sub()
            anly_filename = os.path.join(anly_dir, anly_file)

            # Check if the forecast input file exists. If it doesn't
            # exist, just log it
            if util.file_exists(fcst_filename):
                logger.debug("Forecast file: {}".format(fcst_filename))
            else:
                logger.warning("Can't find forecast file {}, continuing"\
                               .format(fcst_filename))
                continue

            # Check if the analysis input file exists. If it doesn't
            # exist, just log it.
            if util.file_exists(anly_filename):
                logger.debug("Analysis file: {}".format(anly_filename))

            else:
                logger.warning("Can't find analysis file {}, continuing"\
                       .format(anly_filename))
                continue

            # Create the arguments used to perform regridding.
            # NOTE: the base name
            # is the same for both the fcst and anly filenames,
            # so use either one to derive the base name that will
            # be used to create the fcst_regridded_filename and
            # anly_regridded_filename.
            fcst_anly_base = os.path.basename(fcst_filename)

            fcst_grid_spec = \
                util.create_grid_specification_string(alat, alon,
                                                      logger,
                                                      config)
            anly_grid_spec = \
                util.create_grid_specification_string(blat, blon,
                                                      logger,
                                                      config)
            if regrid_with_met_tool:
                nc_fcst_anly_base = re.sub("grb2", "nc", fcst_anly_base)
                fcst_anly_base = nc_fcst_anly_base

            tile_dir = os.path.join(out_dir, cur_init, cur_storm)
            fcst_hr_str = str(fcst_hr).zfill(3)

            fcst_regridded_filename = \
                config.getstr('regex_pattern', 'FCST_TILE_PREFIX') + \
                fcst_hr_str + "_" + fcst_anly_base
            fcst_regridded_file = os.path.join(tile_dir,
                                               fcst_regridded_filename)
            anly_regridded_filename = \
                config.getstr('regex_pattern', 'ANLY_TILE_PREFIX') + \
                fcst_hr_str + "_" + fcst_anly_base
            anly_regridded_file = os.path.join(tile_dir,
                                               anly_regridded_filename)

            # Regrid the fcst file only if a fcst tile
            # file does NOT already exist or if the overwrite flag is True.
            # Create new gridded file for fcst tile
            if util.file_exists(fcst_regridded_file) and not overwrite_flag:
                msg = "Forecast tile file {} exists, skip regridding"\
                  .format(fcst_regridded_file)
                logger.debug(msg)
            else:
                # Perform fcst regridding on the records of interest
                var_level_string = retrieve_var_info(config)
                if regrid_with_met_tool:
                    # Perform regridding using MET Tool regrid_data_plane
                    fcst_cmd_list = [
                        regrid_data_plane_exe, ' ', fcst_filename, ' ',
                        fcst_grid_spec, ' ', fcst_regridded_file, ' ',
                        var_level_string, ' -method NEAREST '
                    ]
                    regrid_cmd_fcst = ''.join(fcst_cmd_list)

                    # Since not using the CommandBuilder to build the cmd,
                    # add the met verbosity level to the
                    # MET cmd created before we run the command.
                    regrid_cmd_fcst = rdp.cmdrunner.insert_metverbosity_opt(
                        regrid_cmd_fcst)
                    (ret, regrid_cmd_fcst) = rdp.cmdrunner.run_cmd(
                        regrid_cmd_fcst, env=None, app_name=rdp.app_name)
                else:
                    # Perform regridding via wgrib2
                    requested_records = retrieve_var_info(config)
                    fcst_cmd_list = [
                        wgrib2_exe, ' ', fcst_filename, ' | ', egrep_exe, ' ',
                        requested_records, '|', wgrib2_exe, ' -i ',
                        fcst_filename, ' -new_grid ', fcst_grid_spec, ' ',
                        fcst_regridded_file
                    ]
                    wgrb_cmd_fcst = ''.join(fcst_cmd_list)

                    (ret,
                     wgrb_cmd_fcst) = rdp.cmdrunner.run_cmd(wgrb_cmd_fcst,
                                                            env=None,
                                                            ismetcmd=False)

            # Create new gridded file for anly tile
            if util.file_exists(anly_regridded_file) and not overwrite_flag:
                logger.debug("Analysis tile file: " + anly_regridded_file +
                             " exists, skip regridding")
            else:
                # Perform anly regridding on the records of interest
                var_level_string = retrieve_var_info(config)
                if regrid_with_met_tool:
                    anly_cmd_list = [
                        regrid_data_plane_exe, ' ', anly_filename, ' ',
                        anly_grid_spec, ' ', anly_regridded_file, ' ',
                        var_level_string, ' -method NEAREST '
                    ]
                    regrid_cmd_anly = ''.join(anly_cmd_list)

                    # Since not using the CommandBuilder to build the cmd,
                    # add the met verbosity level to the MET cmd
                    # created before we run the command.
                    regrid_cmd_anly = rdp.cmdrunner.insert_metverbosity_opt(
                        regrid_cmd_anly)
                    (ret, regrid_cmd_anly) = rdp.cmdrunner.run_cmd(
                        regrid_cmd_anly, env=None, app_name=rdp.app_name)
                    msg = ("on anly file:" + anly_regridded_file)
                    logger.debug(msg)
                else:
                    # Regridding via wgrib2.
                    requested_records = retrieve_var_info(config)
                    anly_cmd_list = [
                        wgrib2_exe, ' ', anly_filename, ' | ', egrep_exe, ' ',
                        requested_records, '|', wgrib2_exe, ' -i ',
                        anly_filename, ' -new_grid ', anly_grid_spec, ' ',
                        anly_regridded_file
                    ]
                    wgrb_cmd_anly = ''.join(anly_cmd_list)

                    (ret,
                     wgrb_cmd_anly) = rdp.cmdrunner.run_cmd(wgrb_cmd_anly,
                                                            env=None,
                                                            ismetcmd=False)