def test_file_exists(self):

        print("\nRunning unittest: file_exists")

        # Start with a "clean" directory and non-existent file
        # expect the file_exists function to return False
        test_dir_base = self.p.getdir('TEST_DIR')
        test_file = self.p.getstr('config', 'TEST_FILENAME')
        full_test_file = os.path.join(test_dir_base, test_file)

        # Don't do this test if the TEST_DIR exists.
        # We do not want to remove a directory unless this test created it.
        if os.path.exists(test_dir_base):
            print("Remove your TEST_DIR: %s" % test_dir_base)
            self.fail()
        else:
            # Create a file, expect the file_exists function to
            # return True
            util.mkdir_p(test_dir_base)
            touch_cmd = ' '.join(["/usr/bin/touch", full_test_file])
            #print("full_test_file: %s" % full_test_file)
            os.system(touch_cmd)
            self.assertTrue(util.file_exists(full_test_file))

            # clean up
            util.rmtree(test_dir_base)
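
Every example on this page funnels through util.mkdir_p. As a point of reference, here is a minimal stand-in, assuming the helper mirrors "mkdir -p" semantics (create missing parents, tolerate an existing directory) as its name suggests:

import errno
import os

def mkdir_p(path):
    """Create path and any missing parents; an existing directory
    is not an error, mirroring the behavior of mkdir -p."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything other than "directory already exists".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise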
Example #2
    def test_mkdir(self):

        # Make sure the test directory doesn't exist
        # before starting, and remove it when testing is complete

        # Gather test parameters from the extract_tiles_params.py file
        p = P.Params()
        p.init(__doc__)
        rm_exe = p.opt["RM_EXE"]
        test_dir_base = p.opt["TEST_DIR"]
        rm_file = [rm_exe, " ", test_dir_base]
        rm_file_cmd = ''.join(rm_file)

        # Make sure we have a clean directory
        # before trying to create the directory
        os.system(rm_file_cmd)
        full_test_dir = os.path.join(test_dir_base, "extract_tiles_test")
        util.mkdir_p(full_test_dir)
        self.assertTrue(os.path.exists(full_test_dir))

        # clean up
        clean_up = [rm_exe, " ", test_dir_base]
        clean_up_cmd = "".join(clean_up)
        print("clean up cmd: %s", clean_up_cmd)
        os.system(clean_up_cmd)
Example #3
    def test_mkdir_rmtree(self):

        print("\nRunning unittest: mkdir_rmtree")

        # Make sure the test directory doesn't exist
        # before starting, and remove it when testing is complete

        # Gather test parameters
        test_dir_base = self.p.getdir('TEST_DIR')

        # Don't do this test if the TEST_DIR exists.
        # We do not want to remove a directory unless this test created it.

        # Make sure we have a clean directory
        # before trying to create the directory
        if os.path.exists(test_dir_base):
            print("Remove your TEST_DIR: %s" % test_dir_base)
            self.fail()
        else:
            full_test_dir = os.path.join(test_dir_base, 'extract_tiles_test')
            util.mkdir_p(full_test_dir)
            self.assertTrue(os.path.exists(full_test_dir))

            # clean up
            util.rmtree(test_dir_base)
            self.assertFalse(os.path.exists(test_dir_base))
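
The util.rmtree call above is presumably a thin wrapper over shutil.rmtree; a sketch under that assumption:

import shutil

def rmtree(path):
    """Remove the directory tree rooted at path; ignore_errors keeps
    cleanup from failing if the tree is already gone."""
    shutil.rmtree(path, ignore_errors=True)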
Example #4
    def build_tc_stat(self, series_output_dir, cur_init, tile_dir,
                      filter_opts):
        """! Create the call to MET tool TC-STAT to subset tc-pairs output
            based on the criteria specified in the parameter/config file.
            Args:
            @param series_output_dir:  The output directory where filtered
                                       results are saved.
            @param cur_init:  The initialization time
            @param tile_dir:  The input data directory (tc pair data to be
                              filtered)
            @param filter_opts:  The list of filter options to apply

            Returns:
                None: on success, invokes the MET tool TC-STAT to subset
                    the tc-pairs data, creating a filter.tcst file.

                Raises ExitStatusException on a non-zero exit status.
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Useful for logging
        # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |
        # [File : function]| Message
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        util.mkdir_p(series_output_dir)
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(series_output_dir, cur_init,
                                   filter_filename)
        filter_path = os.path.join(series_output_dir, cur_init)
        util.mkdir_p(filter_path)

        tc_cmd_list = [
            self.tc_exe, " -job filter ", " -lookin ", tile_dir,
            " -match_points true ", " -init_inc ", cur_init, " -dump_row ",
            filter_name, " ", filter_opts
        ]

        tc_cmd_str = ''.join(tc_cmd_list)

        # Since this wrapper is not using the CommandBuilder to build the cmd,
        # we need to add the met verbosity level to the MET cmd created before
        # we run the command.
        tc_cmd_str = self.cmdrunner.insert_metverbosity_opt(tc_cmd_str)

        # Run tc_stat
        try:
            #tc_cmd = batchexe('sh')['-c', tc_cmd_str].err2out()
            #checkrun(tc_cmd)
            (ret, cmd) = self.cmdrunner.run_cmd(tc_cmd_str,
                                                app_name=self.app_name)
            if not ret == 0:
                raise ExitStatusException(
                    '%s: non-zero exit status' % (repr(cmd), ), ret)
        except ExitStatusException as ese:
            self.logger.error(ese)
Example #5
    def build_tc_stat(self, series_output_dir, cur_init, tile_dir,
                      filter_opts):
        """! Create the call to MET tool TC-STAT to subset tc-pairs output
            based on the criteria specified in the parameter/config file.
            Args:
            @param series_output_dir:  The output directory where filtered
                                       results are saved.
            @param cur_init:  The initialization time
            @param tile_dir:  The input data directory (tc pair data to be
                              filtered)
            @param filter_opts:  The list of filter options to apply

            Returns:
                None: on success, invokes the MET tool TC-STAT to subset
                    the tc-pairs data, creating a filter.tcst file.

                Raises ExitStatusException on a non-zero exit status.
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Useful for logging
        # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |
        # [File : function]| Message
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        util.mkdir_p(series_output_dir)
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(series_output_dir, cur_init,
                                   filter_filename)
        filter_path = os.path.join(series_output_dir, cur_init)
        util.mkdir_p(filter_path)

        tc_cmd_list = [
            self.tc_exe, " -job filter ", " -lookin ", tile_dir,
            " -match_points true ", " -init_inc ", cur_init, " -dump_row ",
            filter_name, " ", filter_opts
        ]

        tc_cmd_str = ''.join(tc_cmd_list)

        # Make call to tc_stat, capturing any stderr and stdout to the MET
        #  Plus log.
        try:
            tc_cmd = batchexe('sh')['-c', tc_cmd_str].err2out()
            checkrun(tc_cmd)
        except produtil.run.ExitStatusException as ese:
            msg = ("ERROR| " + cur_filename + ":" + cur_function +
                   " from calling MET TC-STAT with command:" +
                   tc_cmd.to_shell())
            self.logger.error(msg)
            self.logger.error(ese)
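
For readers without produtil installed, the batchexe/checkrun pair above behaves roughly like the following standard-library call (a sketch of the pattern, not the produtil API):

import subprocess

# tc_cmd_str is the TC-STAT command string assembled above.
# Run it through a shell with stderr merged into stdout; check=True
# raises CalledProcessError on a non-zero exit status, much as
# checkrun raises ExitStatusException.
subprocess.run(tc_cmd_str, shell=True,
               stderr=subprocess.STDOUT, check=True)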
Example #6
    def create_out_arg(self, cur_storm, cur_init, name, level):
        """! Create/build the -out portion of the series_analysis command and
             creates the output directory.
            Args:
                @param cur_storm: The storm of interest.

                @param cur_init:  The initialization time of interest.

                @param name:  The variable name of interest.

                @param level:  The level of interest.

            Returns:
        """

        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.
        # For logging
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # create the output dir
        self.outdir = os.path.join(self.series_out_dir, cur_init, cur_storm)
        util.mkdir_p(self.outdir)
        # Set the NAME and LEVEL environment variables, this
        # is required by the MET series_analysis binary.
        os.environ['NAME'] = name
        os.environ['LEVEL'] = level
        self.add_env_var('NAME', name)
        self.add_env_var('LEVEL', level)

        # Set the NAME to name_level if regrid_data_plane
        # was used to regrid.
        if self.regrid_with_met_tool:
            name_level = name + "_" + level
            os.environ['NAME'] = name_level
            self.add_env_var('NAME', name_level)
        series_anly_output_parts = [
            self.outdir, '/', 'series_', name, '_', level, '.nc'
        ]
        # Set the sbi_out_dir for this instance, this will be
        # used for generating the plot.
        self.sbi_plotting_out_dir = ''.join(series_anly_output_parts)
        self.outfile = self.sbi_plotting_out_dir

        self.logger.debug("DEBUG|" + cur_function + '|' + cur_filename +
                          '| output arg/output dir for series_analysis: ' +
                          self.get_output_path())
        self.set_output_dir(self.outdir)
        self.set_output_filename(self.outfile)
Example #7
    def run_all_times(self):
        """! Runs MET Point_ for all times indicated in the configuration
             file"""

        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Used for logging.
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name
        self.logger.info("INFO|:" + cur_function + '|' + cur_filename + '| ' +
                         "Running point-stat for all initialization times...")

        # Get a list of all the files in the model/fcst and obs directories
        # Determine if we are performing point_stat based on init times or
        # valid times.
        pairs_by_time_method = self.select_fcst_obs_pairs()

        # Build up the commands to run MET point_stat
        self.set_environment_variables()

        # Call point_stat for each matched pair of fcst/model and obs file
        # within the specified time window (by init time or by valid time).
        for pairs in pairs_by_time_method:
            # fcst file
            self.add_arg(pairs[0])
            # obs file
            self.add_arg(pairs[1])

            # MET point_stat config file
            self.set_param_file(self.ps_dict['POINT_STAT_CONFIG_FILE'])

            # Output directory
            self.set_output_dir(self.ps_dict['POINT_STAT_OUTPUT_DIR'])
            util.mkdir_p(self.outdir)

            cmd = self.get_command()
            self.logger.debug("DEBUG:|" + cur_function + "|" + cur_filename +
                              "| Command to run MET point_stat: " + cmd)
            self.build()
            self.clear()
Example #8
    def create_output_subdir(self, tcst_file):
        """! Extract the base portion of the tcst filename:
            eg amlqYYYYMMDDhh.gfso.nnnn in
            /d1/username/tc_pairs/YYYYMM/amlqYYYYMMDDhh.gfso.nnnn and use this
            as the subdirectory (gets appended to the TCMPR output directory).
            This allows the user to determine which plots correspond to the
            input track file.

            Args:
                @param tcst_file:  The input tc-pairs file.
        """
        subdir_match = re.match(r'.*/(.*).tcst', tcst_file)
        subdir = subdir_match.group(1)
        dated_output_dir = os.path.join(self.output_base_dir, subdir)
        self.logger.debug("DEBUG: " + dated_output_dir + " for " + tcst_file)

        # Create the subdir
        util.mkdir_p(dated_output_dir)

        return dated_output_dir
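
With a hypothetical input path, the regular expression above pulls out the base of the tcst filename like so:

import re

# Hypothetical tc-pairs output file, following the naming pattern
# described in the docstring.
tcst_file = '/d1/username/tc_pairs/201708/amlq2017082112.gfso.0001.tcst'
subdir_match = re.match(r'.*/(.*).tcst', tcst_file)
print(subdir_match.group(1))  # amlq2017082112.gfso.0001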
Example #9
    def test_file_exists(self):
        p = P.Params()
        p.init(__doc__)

        # Start with a "clean" directory and non-existent file
        # expect the file_exists function to return False
        rm_exe = p.opt["RM_EXE"]
        test_dir_base = p.opt["TEST_DIR"]
        rm_file = [rm_exe, " ", test_dir_base]
        rm_file_cmd = ''.join(rm_file)
        os.system(rm_file_cmd)
        test_file = p.opt["TEST_FILENAME"]
        full_test_file = os.path.join(test_dir_base, test_file)
        self.assertFalse(util.file_exists(full_test_file))

        # Create a file, expect the file_exists function to
        # return True
        util.mkdir_p(test_dir_base)
        touch_cmd = "/usr/bin/touch full_test_file"
        print("full_test_file: %s", full_test_file)
        os.system(touch_cmd)
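
The shell call to /usr/bin/touch can also be replaced with a portable pure-Python sketch:

import os

def touch(path):
    """Create an empty file, or refresh its timestamps if it already
    exists, without shelling out to /usr/bin/touch."""
    with open(path, 'a'):
        os.utime(path, None)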
Example #10
    def run_all_times(self):
        """! Builds the command for invoking tcmpr.R plot script.

             Args:

             Returns:

        """
        base_cmds_list = [' Rscript ', self.tcmpr_script, ' -lookin ']
        base_cmds = ''.join(base_cmds_list)
        self.logger.debug("base_cmds " + base_cmds)
        cmds_list = []

        self.logger.debug("DEBUG: TCMPR input " + self.input_data)
        self.logger.debug("DEBUG: TCMPR config file " + self.plot_config_file)
        self.logger.debug("DEBUG: output " + self.output_base_dir)

        # Create a list of all the "optional" options and flags.
        optionals_list = self.retrieve_optionals()

        # Create the output base directory
        util.mkdir_p(self.output_base_dir)

        # If input data is a file, create a single command and invoke R script.
        if os.path.isfile(self.input_data):
            self.logger.debug("Currently plotting " + self.input_data)
            cmds_list.append(base_cmds)
            cmds_list.append(self.input_data)

            # Special treatment of the "optional" output_base_dir option
            # because we are supporting the plotting of multiple tcst files
            # in a directory.
            if self.output_base_dir:
                # dated_output_dir = self.create_output_subdir(self.input_data)
                optionals_list.append(' -outdir ')
                # optionals_list.append(dated_output_dir)
                optionals_list.append(self.output_base_dir)
                optionals = ''.join(optionals_list)

            if optionals:
                cmds_list.append(optionals)
                # Due to the way cmds_list was created, join it all into
                # one string and then split that into a list, so element [0]
                # is 'Rscript', instead of 'Rscript self.tcmpr_script -lookin'
                cmds_list = ''.join(cmds_list).split()
                # cmd = batchexe('sh')['-c',''.join(cmds_list)] > '/dev/null'
                cmd = batchexe(cmds_list[0])[cmds_list[1:]] > '/dev/null'
                self.logger.debug("DEBUG: Command run " + cmd.to_shell())
                self.logger.info("INFO: Generating requested plots for " +
                                 self.input_data)

                # pylint:disable=unnecessary-pass
                # If a tc file is empty, continue to the next, thus the pass
                # isn't unnecessary.
                try:
                    checkrun(cmd)
                except produtil.run.ExitStatusException as ese:
                    self.logger.warn("WARN: plot_tcmpr.R returned non-zero"
                                     " exit status, "
                                     "tcst file may be missing data, "
                                     "continuing: " + ese)

                    # Remove the empty directory
                    if not os.listdir(self.output_base_dir):
                        os.rmdir(self.output_base_dir)
                    pass

        # If the input data is a directory, create a list of all the
        # files in the directory and invoke the R script for this list
        # of files.
        if os.path.isdir(self.input_data):
            self.logger.debug("plot all files in directory " + self.input_data)
            cmds_list = []
            all_tcst_files_list = util.get_files(self.input_data, ".*.tcst",
                                                 self.logger)
            all_tcst_files = ' '.join(all_tcst_files_list)
            self.logger.debug("num of files " + str(len(all_tcst_files)))
            # Append the mandatory -lookin option to the base command.
            cmds_list.append(base_cmds)
            cmds_list.append(all_tcst_files)
            # dated_output_dir = self.create_output_subdir(self.output_plot)
            dated_output_dir = self.output_base_dir
            if self.output_base_dir:
                cmds_list.append(' -outdir ')
                util.mkdir_p(self.output_base_dir)
                cmds_list.append(self.output_base_dir)
                self.logger.debug("DEBUG: Creating dated output dir " +
                                  dated_output_dir)

            if optionals_list:
                remaining_options = ''.join(optionals_list)
                cmds_list.append(remaining_options)

            # Due to the way cmds_list was created, join it all into
            # one string and then split that into a list, so element [0]
            # is 'Rscript', instead of 'Rscript self.tcmpr_script -lookin'
            cmds_list = ''.join(cmds_list).split()
            cmd = batchexe(cmds_list[0])[cmds_list[1:]] > '/dev/null'
            self.logger.debug("DEBUG:  Command run " + cmd.to_shell())

            # pylint:disable=unnecessary-pass
            # If a tc file is empty, continue to the next, thus the pass
            # isn't unnecessary.
            try:
                checkrun(cmd)
            except produtil.run.ExitStatusException as ese:
                # If the tcst file is empty (with the exception of the
                #  header), or there is some other problem, then
                # plot_tcmpr.R will return with a non-zero exit status of 1
                self.logger.warn("WARN: plot_tcmpr.R returned non-zero"
                                 " exit status, tcst file may be missing"
                                 " data... continuing: " + str(ese))
                # Remove the empty directory
                if not os.listdir(dated_output_dir):
                    os.rmdir(dated_output_dir)

                pass
            # Reset empty cmds_list to prepare for next tcst file.
            cmds_list = []

        self.logger.info("INFO: Plotting complete")
    def create_plot(self):
        """ Create the plot, with a Basemap of the projection type
            requested in the metplus.conf file."""
        #pylint:disable=redefined-builtin
        #pylint:disable=unused-variable
        map, proj_type, extent = self.get_basemap()

        # Make sure the output directory exists, and create it if it doesn't.
        util.mkdir_p(self.output_dir)

        # For the legend box
        #pylint:disable=invalid-name
        ax = plt.subplot(111)
        box = ax.get_position()
        ax.set_position(
            [box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])

        # Draw coastlines
        map.drawcoastlines()

        # Draw latitude lines
        parallels = np.arange(-90., 91., 20.)
        map.drawparallels(parallels, labels=[False, True, True, False])

        # Draw meridians and labels
        meridians = np.arange(-180., 181., 40.)
        map.drawmeridians(meridians, labels=[True, False, False, True])

        plt.title(self.title + "\nFor forecast with initial time = " +
                  self.init_date)

        # Create the NCAR watermark with a timestamp
        # This will appear in the bottom left corner of the plot, below
        # the legend.  NOTE: The timestamp is in the user's local time zone
        # and not in UTC time.
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        watermark = 'DTC METplus\nplot created at: ' + st
        plt.text(1, -180, watermark, fontsize=8, alpha=0.25)

        # Iterate over each unique storm id in self.storm_id_dict and
        # set the marker, marker size, and annotation
        # before drawing the line and scatter plots.

        # Use counters to set the labels for the legend. Since we don't
        # want repetitions in the legend, do this for a select number
        # of points.
        circle_counter = 0
        plus_counter = 0
        dummy_counter = 0

        # If requested, create an ASCII file with the tracks that are going to
        # be plotted.  This is useful to debug or verify that what you
        # see on the plot is what is expected.
        ascii_track_file = None
        if self.gen_ascii:
            ascii_track_parts = [self.init_date, '_', self.projection, '.txt']
            ascii_track_output_name = ''.join(ascii_track_parts)
            plot_filename = os.path.join(self.output_dir,
                                         ascii_track_output_name)
            ascii_track_file = open(plot_filename, 'w')

        for cur_storm_id in self.unique_storm_id:
            # Lists used in creating each storm track.
            lon = []
            lat = []
            marker_list = []
            size_list = []
            anno_list = []

            # For this storm id, get a list of all data (corresponding
            # to lines/rows in the tcst data file).
            track_info_list = self.storm_id_dict[cur_storm_id]
            #pylint:disable=len-as-condition
            # if len(track_info_list) == 0:
            if not track_info_list:
                self.logger.error("Empty track list, no data extracted " +
                                  "from track files, exiting.")
                sys.exit(1)

            for track in track_info_list:
                # For now, all the marker symbols will be one color.
                color_list = ['red' for _ in range(0, len(track_info_list))]

                # Determine which marker symbol to use based on the
                # lead group and create the annotation text used to
                # annotate the first point in the storm track.
                # Adjust the longitude to the appropriate scale if necessary:
                if extent > 180.0:
                    # Rescale each lon
                    curr_lon = self.rescale_lon(float(track['lon']))
                else:
                    curr_lon = float(track['lon'])

                lon.append(curr_lon)
                lat.append(float(track['lat']))

                # Differentiate between the forecast lead "groups",
                # i.e. 0/12 vs 6/18 hr and
                # assign the marker symbol and size.
                if track['lead_group'] == '0':
                    marker = 'o'
                    marker_list.append(marker)
                    marker_size = self.circle_marker
                    size_list.append(marker_size)

                elif track['lead_group'] == '6':
                    marker = '+'
                    marker_list.append(marker)
                    marker_size = self.cross_marker
                    size_list.append(marker_size)

                # Determine the first point, needed later to annotate.
                #pylint:disable=invalid-name
                dd = track['valid_dd']
                hh = track['valid_hh']
                if dd and hh:
                    date_hr_str = dd + '/' + hh + 'z'
                    anno_list.append(date_hr_str)
                else:
                    date_hr_str = ''
                    anno_list.append(date_hr_str)

                # Write to the ASCII track file, if requested
                if self.gen_ascii:
                    line_parts = [
                        'model_name: ', track['model_name'], '   ',
                        'storm_id: ', track['storm_id'], '   ', 'init_time: ',
                        track['init_time'], '   ', 'valid_time: ',
                        track['valid_time'], '   ', 'lat: ',
                        str(track['lat']), '   ', 'lon: ',
                        str(track['lon']), '   ', 'lead_group: ',
                        track['lead_group'], '   ', 'first_point:',
                        str(track['first_point'])
                    ]
                    line = ''.join(line_parts)
                    ascii_track_file.write(line)
                    ascii_track_file.write('\n')

            # Generate the Basemap first, then add a scatter plot to add
            # the appropriate marker symbol to the forecast
            # hours corresponding to 6/18 hours.
            x, y = map(lon, lat)
            # Deliberately set this to a small value, as this will be
            # overwriting the points

            map.plot(x, y, color='red', linestyle='-')

            # Annotate the first point of the storm track
            for anno, adj_lon, adj_lat in zip(anno_list, lon, lat):
                x, y = map(adj_lon, adj_lat)
                # Annotate the first point of the storm track by
                # overlaying the annotation text over all points (all but
                # one will have text).
                plt.annotate(anno,
                             xy=(x, y),
                             xytext=(2, 2),
                             textcoords='offset points',
                             fontsize=11,
                             color='red')

            # Generate the scatterplot, where the 6/18 Z forecast times
            # are labelled with a '+'
            for adj_lon, adj_lat, symbol, sz, colours in zip(
                    lon, lat, marker_list, size_list, color_list):
                x, y = map(adj_lon, adj_lat)
                # red line, red +, red o, marker sizes are recognized,
                # no outline color of black for 'o'
                # plt.scatter(x, y, s=sz, c=colours, edgecolors=colours,
                # facecolors='none', marker=symbol, zorder=2)
                # Solid circle, just like the EMC NCEP plots
                # Separate the first two points so we can generate the legend
                if circle_counter == 0 or plus_counter == 0:
                    if symbol == 'o':
                        plt.scatter(x,
                                    y,
                                    s=sz,
                                    c=colours,
                                    edgecolors=colours,
                                    facecolors=colours,
                                    marker='o',
                                    zorder=2,
                                    label="Indicates a position " +
                                    "at 00 or 12 UTC")
                        circle_counter += 1
                    elif symbol == '+':
                        plt.scatter(x,
                                    y,
                                    s=sz,
                                    c=colours,
                                    edgecolors=colours,
                                    facecolors=colours,
                                    marker='+',
                                    zorder=2,
                                    label="\nIndicates a position at 06 or " +
                                    "18 UTC\n")
                        plus_counter += 1

                else:
                    # Set the legend for additional text using a
                    # dummy scatter point
                    if dummy_counter == 0:
                        plt.scatter(0,
                                    0,
                                    zorder=2,
                                    marker=None,
                                    c='',
                                    label="Date (dd/hhz) is the first " +
                                    "time storm was able to be tracked " +
                                    "in model")
                        dummy_counter += 1
                    plt.scatter(x,
                                y,
                                s=sz,
                                c=colours,
                                edgecolors=colours,
                                facecolors=colours,
                                marker=symbol,
                                zorder=2)

        # Draw the legend on the plot
        # If you wish to have the legend within the plot:
        # plt.legend(loc='lower left', prop={'size':5}, scatterpoints=1)
        # The legend is outside the plot, below the x-axis to
        # avoid obscuring any storm tracks in the Southern
        # Hemisphere.
        # ax.legend(loc='lower left', bbox_to_anchor=(-0.03, -0.5),
        #           fancybox=True, shadow=True, scatterpoints=1,
        #           prop={'size': 6})
        ax.legend(loc='lower left',
                  bbox_to_anchor=(-0.01, -0.4),
                  fancybox=True,
                  shadow=True,
                  scatterpoints=1,
                  prop={'size': 6})

        # Write the plot to the output directory
        out_filename_parts = [self.init_date, '_', self.projection, '.png']
        output_plot_name = ''.join(out_filename_parts)
        plot_filename = os.path.join(self.output_dir, output_plot_name)
        plt.savefig(plot_filename)

        # Close the ASCII track file, if generated
        if self.gen_ascii:
            ascii_track_file.close()

        # Plot data onto axes
        plt.show()
Example #12
 def create_plots(self, verif_case, verif_type):
     """! Read in metplus_final.conf variables and call function
          for the specific verification plots to run
         
          Args:
              verif_case - string of the verification case to make
                           plots for
              verif_type - string of the verification type to make
                           plots for
            
          Returns:
     """
     self.logger.info("Running plots for VERIF_CASE = " + verif_case +
                      ", VERIF_TYPE = " + verif_type)
     #read config
     plot_time = self.config.getstr('config', 'PLOT_TIME')
     valid_beg_YYYYmmdd = self.config.getstr('config', 'VALID_BEG', "")
     valid_end_YYYYmmdd = self.config.getstr('config', 'VALID_END', "")
     valid_hour_method = self.config.getstr('config', 'VALID_HOUR_METHOD')
     valid_hour_beg = self.config.getstr('config', 'VALID_HOUR_BEG')
     valid_hour_end = self.config.getstr('config', 'VALID_HOUR_END')
     valid_hour_increment = self.config.getstr('config',
                                               'VALID_HOUR_INCREMENT')
     init_beg_YYYYmmdd = self.config.getstr('config', 'INIT_BEG', "")
     init_end_YYYYmmdd = self.config.getstr('config', 'INIT_END', "")
     init_hour_method = self.config.getstr('config', 'INIT_HOUR_METHOD')
     init_hour_beg = self.config.getstr('config', 'INIT_HOUR_BEG')
     init_hour_end = self.config.getstr('config', 'INIT_HOUR_END')
     init_hour_increment = self.config.getstr('config',
                                              'INIT_HOUR_INCREMENT')
     stat_files_input_dir = self.config.getdir('STAT_FILES_INPUT_DIR')
     plotting_out_dir = self.config.getdir('PLOTTING_OUTPUT_DIR')
     plotting_scripts_dir = self.config.getdir('PLOTTING_SCRIPTS_DIR')
     plot_stats_list = self.config.getstr('config', 'PLOT_STATS_LIST')
     ci_method = self.config.getstr('config', 'CI_METHOD')
     verif_grid = self.config.getstr('config', 'VERIF_GRID')
     event_equalization = self.config.getstr('config', 'EVENT_EQUALIZATION',
                                             "True")
     var_list = self.parse_vars_with_level_thresh_list()
     fourier_decom_list = self.parse_var_fourier_decomp()
     region_list = util.getlist(self.config.getstr('config', 'REGION_LIST'))
     lead_list = util.getlist(self.config.getstr('config', 'LEAD_LIST'))
     model_name_str_list, model_plot_name_str_list = self.parse_model_list()
     logging_filename = self.config.getstr('config', 'LOG_METPLUS')
     logging_level = self.config.getstr('config', 'LOG_LEVEL')
     met_base = self.config.getstr('dir', 'MET_BASE')
     #set envir vars based on config
     self.add_env_var('PLOT_TIME', plot_time)
     if plot_time == 'valid':
         self.add_env_var('START_DATE_YYYYmmdd', valid_beg_YYYYmmdd)
         self.add_env_var('END_DATE_YYYYmmdd', valid_end_YYYYmmdd)
     elif plot_time == 'init':
         self.add_env_var('START_DATE_YYYYmmdd', init_beg_YYYYmmdd)
         self.add_env_var('END_DATE_YYYYmmdd', init_end_YYYYmmdd)
     else:
         self.logger.error(
             "Invalid entry for PLOT_TIME, use 'valid' or 'init'")
         exit(1)
     self.add_env_var('STAT_FILES_INPUT_DIR', stat_files_input_dir)
     self.add_env_var('PLOTTING_OUT_DIR', plotting_out_dir)
     self.add_env_var('PLOT_STATS_LIST', plot_stats_list)
     self.add_env_var('MODEL_NAME_LIST', model_name_str_list)
     self.add_env_var('MODEL_PLOT_NAME_LIST', model_plot_name_str_list)
     self.add_env_var('CI_METHOD', ci_method)
     self.add_env_var('VERIF_GRID', verif_grid)
     self.add_env_var('EVENT_EQUALIZATION', event_equalization)
     self.add_env_var('LOGGING_FILENAME', logging_filename)
     self.add_env_var('LOGGING_LEVEL', logging_level)
     plotting_out_dir_full = os.path.join(plotting_out_dir, verif_case,
                                          verif_type)
     if os.path.exists(plotting_out_dir_full):
         self.logger.info(plotting_out_dir_full + " exists, removing")
         util.rmtree(plotting_out_dir_full)
     util.mkdir_p(os.path.join(plotting_out_dir_full, "imgs"))
     util.mkdir_p(os.path.join(plotting_out_dir_full, "data"))
     self.add_env_var('PLOTTING_OUT_DIR_FULL', plotting_out_dir_full)
     with open(met_base + '/version.txt') as met_version_txt:
         met_version_line = met_version_txt.readline()
         met_version = float(
             met_version_line.strip('\n').partition('/met-')[2].partition(
                 '_')[0])
     self.add_env_var('MET_VERSION', str(met_version))
     if met_version < 6.0:
         self.logger.error("Please run with MET version >= 6.0")
         exit(1)
     #build valid and init hour information
     valid_beg_HHMMSS = calendar.timegm(
         time.strptime(valid_hour_beg, "%H%M"))
     valid_end_HHMMSS = calendar.timegm(
         time.strptime(valid_hour_end, "%H%M"))
     init_beg_HHMMSS = calendar.timegm(time.strptime(init_hour_beg, "%H%M"))
     init_end_HHMMSS = calendar.timegm(time.strptime(init_hour_end, "%H%M"))
     valid_hour_list = self.create_hour_group_list(
         valid_beg_HHMMSS, valid_end_HHMMSS, int(valid_hour_increment))
     init_hour_list = self.create_hour_group_list(init_beg_HHMMSS,
                                                  init_end_HHMMSS,
                                                  int(init_hour_increment))
     valid_init_time_pairs = self.pair_valid_init_times(
         valid_hour_list, valid_hour_method, init_hour_list,
         init_hour_method)
     #loop through time information
     for valid_init_time_pair in valid_init_time_pairs:
         self.add_env_var('VALID_TIME_INFO', valid_init_time_pair.valid)
         self.add_env_var('INIT_TIME_INFO', valid_init_time_pair.init)
         #loop through variable information
         for var_info in var_list:
             self.add_env_var('FCST_VAR_NAME', var_info.fcst_name)
             self.add_env_var('OBS_VAR_NAME', var_info.obs_name)
             fcst_var_level_list = var_info.fcst_level
             obs_var_level_list = var_info.obs_level
             if len(var_info.fcst_extra) == 0:
                 self.add_env_var('FCST_VAR_EXTRA', "None")
             else:
                 self.add_env_var('FCST_VAR_EXTRA', var_info.fcst_extra)
             if len(var_info.obs_extra) == 0:
                 self.add_env_var('OBS_VAR_EXTRA', "None")
             else:
                 self.add_env_var('OBS_VAR_EXTRA', var_info.obs_extra)
             if len(var_info.fcst_thresh) == 0 or len(
                     var_info.obs_thresh) == 0:
                 fcst_var_thresh_list = ["None"]
                 obs_var_thresh_list = ["None"]
             else:
                 fcst_var_thresh_list = var_info.fcst_thresh
                 obs_var_thresh_list = var_info.obs_thresh
             #check for fourier decomposition for variable, add to interp list
             interp_list = util.getlist(
                 self.config.getstr('config', 'INTERP', ""))
             var_fourier_decomp_info = fourier_decom_list[var_list.index(
                 var_info)]
             if var_fourier_decomp_info.run_fourier:
                 for pair in var_fourier_decomp_info.wave_num_pairings:
                     interp_list.append("WV1_" + pair)
             #loop through interpolation information
             for interp in interp_list:
                 self.add_env_var('INTERP', interp)
                 #loop through region information
                 for region in region_list:
                     self.add_env_var('REGION', region)
                     #call specific plot definitions to make plots
                     if verif_case == "precip":
                         self.create_plots_precip(fcst_var_level_list,
                                                  obs_var_level_list,
                                                  fcst_var_thresh_list,
                                                  obs_var_thresh_list,
                                                  lead_list,
                                                  plotting_scripts_dir)
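
Assuming MET_BASE/version.txt holds a line like the hypothetical one below, the partition chain above extracts the numeric MET version:

# Hypothetical contents of version.txt
met_version_line = '/usr/local/met-8.1_bugfix\n'
met_version = float(
    met_version_line.strip('\n').partition('/met-')[2].partition('_')[0])
print(met_version)  # 8.1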
Example #13
    def create_fcst_anly_to_ascii_file(self, fcst_anly_grid_files, cur_init,
                                       cur_storm, fcst_anly_filename_base):
        """! Create ASCII file for either the FCST or ANLY files that are
             aggregated based on init time and storm id.

        Args:
            fcst_anly_grid_files:       A list of the FCST or ANLY gridded
                                        files under consideration.

            cur_init:                  The initialization time of interest

            cur_storm:                 The storm id of interest

            fcst_anly_filename_base:   The base name of the ASCII file
                                        (either ANLY_ASCII_FILES_ or
                                        FCST_ASCII_FILES_), which will be
                                        appended with the storm id.

        Returns:
            None:                      Creates an ASCII file containing a list
                                        of either FCST or ANLY files based on
                                        init time and storm id.
        """

        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # For logging
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # Create an ASCII file containing a list of all
        # the fcst or analysis tiles.
        fcst_anly_ascii_fname_parts = [fcst_anly_filename_base, cur_storm]
        fcst_anly_ascii_fname = ''.join(fcst_anly_ascii_fname_parts)
        fcst_anly_ascii_dir = os.path.join(self.series_out_dir, cur_init,
                                           cur_storm)
        util.mkdir_p(fcst_anly_ascii_dir)
        fcst_anly_ascii = os.path.join(fcst_anly_ascii_dir,
                                       fcst_anly_ascii_fname)

        # Sort the files in the fcst_anly_grid_files list.
        sorted_fcst_anly_grid_files = sorted(fcst_anly_grid_files)
        tmp_param = ''
        for cur_fcst_anly in sorted_fcst_anly_grid_files:
            # Write out the files that pertain to this storm and
            # don't write if already in tmp_param.
            if cur_fcst_anly not in tmp_param and cur_storm in cur_fcst_anly:
                tmp_param += cur_fcst_anly
                tmp_param += '\n'
        # Now create the fcst or analysis ASCII file
        try:
            with open(fcst_anly_ascii, 'a') as filehandle:
                filehandle.write(tmp_param)
        except IOError:
            msg = ("Could not create requested ASCII file:  " +
                   fcst_anly_ascii)
            self.logger.error(msg)

        if os.stat(fcst_anly_ascii).st_size == 0:
            # Just in case there are any empty fcst ASCII or anly ASCII files
            # at this point,
            # explicitly remove them (and any resulting empty directories)
            #  so they don't cause any problems with further processing
            # steps.
            util.prune_empty(fcst_anly_ascii_dir, self.logger)
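
Note that the substring test against tmp_param can mis-fire when one file path is a prefix of another. A set-based sketch that deduplicates exactly while keeping the sorted order (using the same fcst_anly_grid_files and cur_storm as above):

seen = set()
kept = []
for cur_fcst_anly in sorted(fcst_anly_grid_files):
    # Keep each file that pertains to this storm exactly once.
    if cur_storm in cur_fcst_anly and cur_fcst_anly not in seen:
        seen.add(cur_fcst_anly)
        kept.append(cur_fcst_anly)
tmp_param = ''.join(line + '\n' for line in kept)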
Example #14
    def generate_plots(self, sorted_filter_init, tile_dir):
        """! Generate the plots from the series_analysis output.
           Args:
               @param sorted_filter_init:  A list of the sorted directories
                                        corresponding to the init times (that
                                        are the result of filtering).  If
                                        filtering produced no results, this
                                        is the list of files created from
                                        running extract_tiles.

               @param tile_dir:  The directory where input data resides.
           Returns:
        """
        convert_exe = self.config.getexe('CONVERT')
        background_map = self.config.getbool('config', 'BACKGROUND_MAP')
        plot_data_plane_exe = os.path.join(
            self.config.getdir('MET_INSTALL_DIR'), 'bin/plot_data_plane')
        for cur_var in self.var_list:
            name, level = util.get_name_level(cur_var, self.logger)
            for cur_init in sorted_filter_init:
                storm_list = self.get_storms_for_init(cur_init, tile_dir)
                for cur_storm in storm_list:
                    # create the output directory where the finished
                    # plots will reside
                    output_dir = os.path.join(self.series_out_dir, cur_init,
                                              cur_storm)
                    util.mkdir_p(output_dir)

                    # Now we need to invoke the MET tool
                    # plot_data_plane to generate plots that are
                    # recognized by the MET viewer.
                    # Get the number of forecast tile files,
                    # the name of the first and last in the list
                    # to be used by the -title option.
                    if tile_dir == self.extract_tiles_dir:
                        # Since filtering was not requested, or
                        # the additional filtering doesn't yield results,
                        # search the series_out_dir
                        num, beg, end = \
                            self.get_fcst_file_info(self.series_out_dir,
                                                    cur_init, cur_storm)
                    else:
                        # Search the series_filtered_out_dir for
                        # the filtered files.
                        num, beg, end = self.get_fcst_file_info(
                            self.series_filtered_out_dir, cur_init, cur_storm)

                    # Assemble the input file, output file, field string,
                    # and title
                    plot_data_plane_input_fname = self.sbi_plotting_out_dir
                    for cur_stat in self.stat_list:
                        plot_data_plane_output = [
                            output_dir, '/series_', name, '_', level, '_',
                            cur_stat, '.ps'
                        ]
                        plot_data_plane_output_fname = ''.join(
                            plot_data_plane_output)
                        os.environ['CUR_STAT'] = cur_stat
                        self.add_env_var('CUR_STAT', cur_stat)

                        # Create versions of the arg based on
                        # whether the background map is requested
                        # in param file.
                        map_data = ' map_data={ source=[];}'

                        if background_map:
                            # Flag set to True, draw background map.
                            field_string_parts = [
                                "'name=", '"series_cnt_', cur_stat, '";',
                                'level="', level, '";', "'"
                            ]
                        else:
                            field_string_parts = [
                                "'name=", '"series_cnt_', cur_stat, '";',
                                'level="', level, '";', map_data, "'"
                            ]

                        field_string = ''.join(field_string_parts)
                        title_parts = [
                            ' -title "GFS Init ', cur_init, ' Storm ',
                            cur_storm, ' ',
                            str(num), ' Forecasts (',
                            str(beg), ' to ',
                            str(end), '),', cur_stat, ' for ', cur_var, '"'
                        ]
                        title = ''.join(title_parts)

                        # Now assemble the entire plot data plane command
                        data_plane_command_parts = \
                            [plot_data_plane_exe, ' ',
                             plot_data_plane_input_fname, ' ',
                             plot_data_plane_output_fname, ' ',
                             field_string, ' ', title]

                        data_plane_command = ''.join(data_plane_command_parts)

                        # Since this wrapper is not using the CommandBuilder
                        # to build the cmd, we need to add the met verbosity
                        # level to the MET cmd created before we run
                        # the command.
                        data_plane_command = (
                            self.cmdrunner.insert_metverbosity_opt(
                                data_plane_command))
                        (ret, cmd) = self.cmdrunner.run_cmd(
                            data_plane_command, env=None,
                            app_name=self.app_name)

                        # Now assemble the command to convert the
                        # postscript file to png
                        png_fname = plot_data_plane_output_fname.replace(
                            '.ps', '.png')
                        convert_parts = [
                            convert_exe, ' -rotate 90',
                            ' -background white -flatten ',
                            plot_data_plane_output_fname, ' ', png_fname
                        ]
                        convert_command = ''.join(convert_parts)

                        (ret, cmd) = self.cmdrunner.run_cmd(convert_command,
                                                            ismetcmd=False)
Example #15
    def apply_series_filters(self, tile_dir, init_times, series_output_dir,
                             filter_opts, temporary_dir):
        """! Apply filter options, as specified in the
            param/config file.
            Args:
               @param tile_dir:  Directory where input data files reside.
                                 e.g. data which we will be applying our filter
                                 criteria.
               @param init_times:  List of init times that define the
                                   input data.
               @param series_output_dir:  The directory where the filter results
                                          will be stored.
               @param filter_opts:  The filter options to apply
               @param temporary_dir:  The temporary directory where intermediate
                                      files are saved.
            Returns:
                None
        """
        # pylint: disable=too-many-arguments
        # Six arguments (including self) are needed to perform filtering.

        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Useful for logging
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # Create temporary directory where intermediate files are saved.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(temporary_dir, cur_pid)
        self.logger.debug("creating tmp dir: " + tmp_dir)

        for cur_init in init_times:
            # Call the tc_stat wrapper to build up the command and invoke
            # the MET tool tc_stat.
            filter_file = "filter_" + cur_init + ".tcst"
            filter_filename = os.path.join(series_output_dir, cur_init,
                                           filter_file)

            tcs = TcStatWrapper(self.config, self.logger)
            tcs.build_tc_stat(series_output_dir, cur_init, tile_dir,
                              filter_opts)

            # Check that the filter.tcst file isn't empty. If
            # it is, then use the files from extract_tiles as
            # input (tile_dir = extract_out_dir)
            if not util.file_exists(filter_filename):
                msg = ("Non-existent filter file, filter " +
                       " Never created by MET Tool tc_stat.")
                self.logger.debug(msg)
                continue
            elif os.stat(filter_filename).st_size == 0:
                msg = ("Empty filter file, filter " +
                       " options yield nothing.")
                self.logger.debug(msg)
                continue
            else:
                # Now retrieve the files corresponding to these
                # storm ids that resulted from filtering.
                sorted_storm_ids = util.get_storm_ids(filter_filename,
                                                      self.logger)

                # Retrieve the header from filter_filename to be used in
                # creating the temporary files.
                with open(filter_filename, 'r') as filter_handle:
                    header = filter_handle.readline()

                for cur_storm in sorted_storm_ids:
                    msg = ("Processing storm: " + cur_storm + " for file: " +
                           filter_filename)
                    self.logger.debug(msg)
                    storm_output_dir = os.path.join(series_output_dir,
                                                    cur_init, cur_storm)
                    util.mkdir_p(storm_output_dir)
                    util.mkdir_p(tmp_dir)
                    tmp_file = "filter_" + cur_init + "_" + cur_storm
                    tmp_filename = os.path.join(tmp_dir, tmp_file)
                    storm_match_list = util.grep(cur_storm, filter_filename)
                    with open(tmp_filename, "a+") as tmp_file_handle:
                        tmp_file_handle.write(header)
                        for storm_match in storm_match_list:
                            tmp_file_handle.write(storm_match)

                    # Create the analysis and forecast files based
                    # on the storms (defined in the tmp_filename created above)
                    # Store the analysis and forecast files in the
                    # series_output_dir.
                    feature_util.retrieve_and_regrid(tmp_filename, cur_init,
                                                     cur_storm,
                                                     series_output_dir,
                                                     self.config)

        # Check for any empty files and directories and remove them to avoid
        # any errors or performance degradation when performing
        # series analysis.
        util.prune_empty(series_output_dir, self.logger)

        # Clean up the tmp dir
        util.rmtree(tmp_dir)
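
The util.grep helper used above presumably returns the lines of a file that contain a pattern; a minimal stand-in under that assumption:

def grep(pattern, filename):
    """Return every line of filename containing pattern, analogous
    to the grep command-line tool."""
    with open(filename, 'r') as file_handle:
        return [line for line in file_handle if pattern in line]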
Example #16
    def run_all_times(self):
        """! Builds the call to the MET tool TC-STAT for all requested
             initialization times (init or valid).  Called from master_metplus

             Args:

             Returns:
                0; errors from running the MET tc_stat tool are
                logged rather than returned.
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and method/func
        # for logging information.

        # Useful for logging
        # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |
        # [File : function]| Message
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        self.logger.info(cur_function + '|' + cur_filename +
                         ':   Starting tc_stat_wrapper...')
        if self.by_config:
            self.set_envs()
            if not self.config_lists_ok():
                self.logger.error('There is at least one <>_VAL/<>_NAME pair '
                                  'requested in the MET tc-stat config '
                                  'file where the lists are not the same '
                                  'length.  Please check your MET '
                                  'tc-stat config file.')
                sys.exit(1)

        # Don't forget to create the output directory, as MET tc_stat will
        # not do this.
        util.mkdir_p(self.c_dict['OUTPUT_DIR'])

        # Since this is different from the other MET tools, we will build
        # the commands rather than use command builder's methods.
        match_points = str(self.c_dict['MATCH_POINTS'])
        if self.by_config:
            # Running with config file

            tc_cmd_list = [
                self.tc_exe, " -lookin", self.c_dict['INPUT_DIR'], " -config ",
                self.c_dict['CONFIG_FILE'], self.c_dict['JOBS_LIST']
            ]
        else:
            # Run single job from command line
            tc_cmd_list = [
                self.tc_exe, " -lookin", self.c_dict['INPUT_DIR'],
                self.c_dict['CMD_LINE_JOB'], "-match_points", match_points
            ]

        tc_cmd_str = ' '.join(tc_cmd_list)

        # Since this wrapper is not using the CommandBuilder to build the cmd,
        # we need to add the met verbosity level to the MET cmd created before
        # we run the command.
        tc_cmd_str = self.cmdrunner.insert_metverbosity_opt(tc_cmd_str)

        # Run tc_stat
        try:
            (ret, cmd) = \
                self.cmdrunner.run_cmd(tc_cmd_str, self.env, app_name=self.app_name)
            if not ret == 0:
                raise ExitStatusException(
                    '%s: non-zero exit status' % (repr(cmd), ), ret)
        except ExitStatusException as ese:
            self.logger.error(ese)

        return 0
Example #17
    def run_at_time(self, input_dict):
        """!Get TC-paris data then regrid tiles centered on the storm.

        Get TC-pairs track data and GFS model data, do any necessary
        processing then regrid the forecast and analysis files to a
        30 x 30 degree tile centered on the storm.
        Args:
            input_dict:  Time dictionary
        Returns:

            None: invokes regrid_data_plane to create a netCDF file from two
                    extratropical storm track files.
        """
        time_info = time_util.ti_calculate(input_dict)
        init_time = time_info['init_fmt']

        # get the process id to be used to identify the output
        # amongst different users and runs.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
        self.logger.info("Begin extract tiles")

        cur_init = init_time[0:8] + "_" + init_time[8:10]

        # Check that there are tc_pairs data which are used as input
        if util.is_dir_empty(self.tc_pairs_dir):
            self.logger.error("No tc pairs data found at {}"\
                              .format(self.tc_pairs_dir))
            sys.exit(1)

        # Create the name of the filter file we need to find.  If
        # the file doesn't exist, then run TC_STAT
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(self.filtered_out_dir, cur_init,
                                   filter_filename)

        if util.file_exists(filter_name) and not self.overwrite_flag:
            self.logger.debug("Filter file exists, using Track data file: {}"\
                              .format(filter_name))
        else:
            # Create the storm track by applying the
            # filter options defined in the config/param file.
            # Use TcStatWrapper to build up the tc_stat command and invoke
            # the MET tool tc_stat to perform the filtering.
            tiles_list = util.get_files(self.tc_pairs_dir, ".*tcst",
                                        self.logger)
            tiles_list_str = ' '.join(tiles_list)

            tcs = TcStatWrapper(self.config, self.logger)
            tcs.build_tc_stat(self.filtered_out_dir, cur_init, tiles_list_str,
                              self.addl_filter_opts)

            # Remove any empty files and directories that can occur
            # from filtering.
            util.prune_empty(filter_name, self.logger)

        # Now get unique storm ids from the filter file,
        # filter_yyyymmdd_hh.tcst
        sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

        # Check for empty sorted_storm_ids, if empty,
        # continue to the next time.
        if not sorted_storm_ids:
            # No storms found for init time, cur_init
            msg = "No storms were found for {} ...continue to next in list"\
              .format(cur_init)
            self.logger.debug(msg)
            return

        # Process each storm in the sorted_storm_ids list
        # Iterate over each filter file in the output directory and
        # search for the presence of the storm id.  Store this
        # corresponding row of data into a temporary file in the
        # /tmp/<pid> directory.
        for cur_storm in sorted_storm_ids:
            storm_output_dir = os.path.join(self.filtered_out_dir, cur_init,
                                            cur_storm)
            with open(filter_name, "r") as filter_file:
                header = filter_file.readline()
            util.mkdir_p(storm_output_dir)
            util.mkdir_p(tmp_dir)
            tmp_filename = "filter_" + cur_init + "_" + cur_storm
            full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

            storm_match_list = util.grep(cur_storm, filter_name)
            with open(full_tmp_filename, "a+") as tmp_file:
                # copy over header information
                tmp_file.write(header)
                for storm_match in storm_match_list:
                    tmp_file.write(storm_match)
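            # (Assumption, for illustration: util.grep returns the lines of
            #  filter_name containing cur_storm, roughly
            #      [line for line in open(filter_name) if cur_storm in line]
            #  so the temporary file holds the header plus that storm's rows.)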

            # Perform regridding of the forecast and analysis files
            # to an n X n degree tile centered on the storm (dimensions
            # are indicated in the config/param file).
            feature_util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                             cur_storm, self.filtered_out_dir,
                                             self.config)

        # end of for cur_storm

        # Remove any empty files and directories in the extract_tiles output
        # directory
        util.prune_empty(self.filtered_out_dir, self.logger)

        # Clean up the tmp directory if it exists
        if os.path.isdir(tmp_dir):
            util.rmtree(tmp_dir)
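
# A rough, standalone sketch of what a prune_empty helper could look like
# (assumption: util.prune_empty removes zero-length files and then empty
# directories under the given path; this is a stand-in, not METplus's
# actual implementation).
import os


def prune_empty(output_dir):
    for root, _dirs, files in os.walk(output_dir, topdown=False):
        for name in files:
            path = os.path.join(root, name)
            if os.path.getsize(path) == 0:
                os.remove(path)   # drop empty files left by filtering
        if not os.listdir(root):
            os.rmdir(root)        # drop directories emptied above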
Example #18
    def run_all_times(self):
        """! Builds the command for invoking tcmpr.R plot script.

             Args:

             Returns:

        """

        self.logger.debug("TCMPR input " + self.input_data)
        self.logger.debug("TCMPR config file " + self.plot_config_file)
        self.logger.debug("output " + self.output_base_dir)

        # Create a dictionary of all the "optional" options and flags.
        cmds_dict = self.retrieve_optionals()

        # Create the TCMPR output base directory, where the final plots
        # will be saved.
        util.mkdir_p(self.output_base_dir)

        # If input data is a file, create a single command and invoke R script.
        if os.path.isfile(self.input_data):
            self.logger.debug("Currently plotting " + self.input_data)
            cmds_dict[' -lookin '] = self.input_data

            # Special treatment of the "optional" output_base_dir option
            # because we are supporting the plotting of multiple tcst files
            # in a directory.
            if self.output_base_dir:
                # dated_output_dir = self.create_output_subdir(self.input_data)
                cmds_dict[' -outdir '] = self.output_base_dir

            # Generate the list, where the -args are separated by their
            # values.
            full_cmd_list = ['Rscript', self.tcmpr_script]
            for key, value in cmds_dict.items():
                full_cmd_list.append(key)
                full_cmd_list.append(value)

            # Separate the 'Rscript' portion from the args, to conform to
            # produtil's exe syntax.
            cmd = exe(full_cmd_list[0])[full_cmd_list[1:]] > '/dev/null'
            self.logger.debug("Command run " + cmd.to_shell())
            self.logger.info("Generating requested plots for " +
                             self.input_data)
            # pylint:disable=unnecessary-pass
            # If a tc file is empty, continue to the next, thus the pass
            # isn't unnecessary.
            try:
                checkrun(cmd)
            except produtil.run.ExitStatusException as ese:
                self.logger.warn("plot_tcmpr.R returned non-zero"
                                 " exit status, "
                                 "tcst file may be missing data, "
                                 "continuing: " + repr(ese))

        # If the input data is a directory, create a list of all the
        # files in the directory and invoke the R script for this list
        # of files.
        elif os.path.isdir(self.input_data):
            self.logger.debug("plot all files in directory " + self.input_data)
            cmds_dict = self.retrieve_optionals()
            all_tcst_files_list = util.get_files(self.input_data, ".*.tcst",
                                                 self.logger)
            all_tcst_files = ' '.join(all_tcst_files_list)
            self.logger.debug("num of files " + str(len(all_tcst_files)))
            # Append the mandatory -lookin option to the base command.
            cmds_dict['-lookin'] = all_tcst_files
            if self.output_base_dir:
                cmds_dict['-outdir'] = self.output_base_dir
                self.logger.debug("Creating dated output dir " +
                                  self.output_base_dir)

            # Create the full_cmd_list from the keys and values of the
            # cmds_dict and then form one command list.
            full_cmd_list = ["Rscript", self.tcmpr_script]
            for key, value in cmds_dict.items():
                full_cmd_list.append(key)
                if key == '-lookin':
                    # treat the list of dirs in -lookin differently,
                    # append each individual directory to replicate original
                    # implementation's behavior of splitting the commands
                    # by whitespace and assigning each command to an item
                    # in a list.
                    for tcst_file in all_tcst_files_list:
                        full_cmd_list.append(tcst_file)
                elif key in ('-plot', '-dep'):
                    # The plot types and dependent variables lists are
                    # each appended as a single string, delimited by ','.
                    full_cmd_list.append(','.join(value))
                else:
                    full_cmd_list.append(value)

            # Separate the 'Rscript' portion from the args, to conform to
            # produtil's exe syntax.
            cmd = exe(full_cmd_list[0])[full_cmd_list[1:]] > '/dev/null'

            # This can be a very long command if the user has
            # indicated a directory, so only log it when necessary:
            # self.logger.debug("Command run " + cmd.to_shell())

            # Now run the command via produtil
            try:
                checkrun(cmd)
            except produtil.run.ExitStatusException as ese:
                # If the tcst file is empty (with the exception of the
                # header), or there is some other problem, then
                # plot_tcmpr.R will return a non-zero exit status of 1.
                self.logger.error("plot_tcmpr.R returned non-zero"
                                  " exit status, tcst file may be missing"
                                  " data... exiting: " + str(ese))
                sys.exit(1)
        else:
            self.logger.error("Expected input is neither a file nor directory,"
                              "exiting...")
            sys.exit(1)

        self.logger.info("Plotting complete")
Example #19
    def run_at_time(self, cur_init):
        """!Get TC-paris data then regrid tiles centered on the storm.

        Get TC-pairs track data and GFS model data, do any necessary
        processing then regrid the forecast and analysis files to a
        30 x 30 degree tile centered on the storm.
        Args:
            cur_init:  The initialization time
        Returns:

            None: invokes regrid_data_plane to create a netCDF file from two
                    extratropical storm track files.
        """
        # pylint:disable=protected-access
        # Need to call sys._getframe() to get the filename and
        # method/function name for logging information.
        cur_filename = sys._getframe().f_code.co_filename
        cur_function = sys._getframe().f_code.co_name

        # get the process id to be used to identify the output
        # amongst different users and runs.
        cur_pid = str(os.getpid())
        tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
        msg = ("INFO|[" + cur_filename + ":" + cur_function + "]"
               "|Begin extract tiles")
        self.logger.info(msg)

        # Check that there are tc_pairs data which are used as input
        if util.is_dir_empty(self.tc_pairs_dir):
            msg = ("ERROR|[" + cur_filename + ":" + cur_function + "]"
                   "|No tc pairs data found at " + self.tc_pairs_dir +
                   "Exiting...")
            self.logger.error(msg)
            sys.exit(1)

        # Logging output format:
        #   TIME UTC | TYPE (DEBUG, INFO, WARNING, etc.) | [File:function] | Message
        # Process TC pairs by initialization time
        # Begin processing for initialization time, cur_init
        year_month = util.extract_year_month(cur_init, self.logger)
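        # (Illustrative only: a hypothetical cur_init of '20170704_12' would
        #  give a year_month of '201707', so tile_dir below becomes
        #  <tc_pairs_dir>/201707.)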

        # Create the name of the filter file we need to find.  If
        # the file doesn't exist, then run TC_STAT
        filter_filename = "filter_" + cur_init + ".tcst"
        filter_name = os.path.join(self.filtered_out_dir, cur_init,
                                   filter_filename)

        if util.file_exists(filter_name) and not self.overwrite_flag:
            msg = ("DEBUG| [" + cur_filename + ":" + cur_function +
                   " ] | Filter file exists, using Track data file: " +
                   filter_name)
            self.logger.debug(msg)
        else:
            # Create the storm track by applying the
            # filter options defined in the config/param file.
            tile_dir_parts = [self.tc_pairs_dir, "/", year_month]
            tile_dir = ''.join(tile_dir_parts)
            # Use TcStatWrapper to build up the tc_stat command and invoke
            # the MET tool tc_stat to perform the filtering.
            tcs = TcStatWrapper(self.config)
            tcs.build_tc_stat(self.filtered_out_dir, cur_init,
                              tile_dir, self.addl_filter_opts)

            # Remove any empty files and directories that can occur
            # from filtering.
            util.prune_empty(filter_name, self.logger)

        # Now get unique storm ids from the filter file,
        # filter_yyyymmdd_hh.tcst
        sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

        # Check for empty sorted_storm_ids, if empty,
        # continue to the next time.
        if not sorted_storm_ids:
            # No storms found for init time, cur_init
            msg = ("DEBUG|[" + cur_filename + ":" + cur_function + " ]|" +
                   "No storms were found for " + cur_init +
                   "...continue to next in list")
            self.logger.debug(msg)
            return

        # Process each storm in the sorted_storm_ids list
        # Iterate over each filter file in the output directory and
        # search for the presence of the storm id.  Store this
        # corresponding row of data into a temporary file in the
        # /tmp/<pid> directory.
        for cur_storm in sorted_storm_ids:
            storm_output_dir = os.path.join(self.filtered_out_dir,
                                            cur_init, cur_storm)
            with open(filter_name, "r") as filter_file:
                header = filter_file.readline()
            util.mkdir_p(storm_output_dir)
            util.mkdir_p(tmp_dir)
            tmp_filename = "filter_" + cur_init + "_" + cur_storm
            full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

            storm_match_list = util.grep(cur_storm, filter_name)
            with open(full_tmp_filename, "a+") as tmp_file:
                # copy over header information
                tmp_file.write(header)
                for storm_match in storm_match_list:
                    tmp_file.write(storm_match)

            # Perform regridding of the forecast and analysis files
            # to an n X n degree tile centered on the storm (dimensions
            # are indicated in the config/param file).
            util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                     cur_storm, self.filtered_out_dir,
                                     self.logger, self.config)

        # end of for cur_storm

        # Remove any empty files and directories in the extract_tiles output
        # directory
        util.prune_empty(self.filtered_out_dir, self.logger)

        # Clean up the tmp directory if it exists
        if os.path.isdir(tmp_dir):
            util.rmtree(tmp_dir)

        msg = ("INFO|[" + cur_filename + ":" + cur_function + "]"
               "| Finished extract tiles")
        self.logger.info(msg)

    def create_plot(self):
        """! Create the plot, using Cartopy.

        """

        # Use the PlateCarree projection for now, with the central
        # meridian (180 degrees) as the central longitude.
        cm_lon = 180
        ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=cm_lon))
        # ax = plt.axes(projection=ccrs.LambertCylindrical(central_longitude=0.0))

        # Add land, coastlines, and ocean
        ax.add_feature(cfeature.LAND)
        ax.coastlines()
        ax.add_feature(cfeature.OCEAN)

        # Add grid lines for longitude and latitude
        ax.gridlines(draw_labels=False, xlocs=[180, -180])
        gl = ax.gridlines(crs=ccrs.PlateCarree(central_longitude=0.0),
                          draw_labels=True, linewidth=1, color='gray',
                          alpha=0.5, linestyle='--')
        gl.xlabels_top = False
        gl.ylabels_left = False
        gl.xlines = True
        gl.xlocator = mticker.FixedLocator(
            [ -180,-140, -100, -60, -20, 20, 60, 100, 140, 180])
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlabel_style = {'size': 9, 'color': 'blue'}
        gl.ylabel_style = {'color': 'black', 'weight': 'normal'}

        # Plot title
        plt.title(self.title + "\nFor forecast with initial time = " +
                  self.init_date)

        # Create the NCAR watermark with a timestamp
        # This will appear in the bottom left corner of the plot, below
        # the legend.  NOTE: The timestamp is in the user's local time zone
        # and not in UTC time.
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime(
            '%Y-%m-%d %H:%M:%S')
        watermark = 'DTC METplus\nplot created at: ' + st
        # plt.text(1, -180, watermark, fontsize=8, alpha=0.25)
        plt.text(-180, -170, watermark, fontsize=5, alpha=0.25)

        # Make sure the output directory exists, and create it if it doesn't.
        util.mkdir_p(self.output_dir)

        # Iterate over each unique storm id in self.storm_id_dict and
        # set the marker, marker size, and annotation
        # before drawing the line and scatter plots.

        # If requested, create an ASCII file with the tracks that are going
        # to be plotted.  This is useful to debug or verify that what you
        # see on the plot is what is expected.
        ascii_track_file = None
        if self.gen_ascii:
            ascii_track_parts = [self.init_date, '.txt']
            ascii_track_output_name = ''.join(ascii_track_parts)
            ascii_track_filename = os.path.join(self.output_dir,
                                                ascii_track_output_name)
            ascii_track_file = open(ascii_track_filename, 'w')

        # Use counters to set the labels for the legend. Since we don't
        # want repetitions in the legend, do this for a select number
        # of points.
        circle_counter = 0
        plus_counter = 0
        dummy_counter = 0

        for cur_storm_id in self.unique_storm_id:
            # Lists used in creating each storm track.
            cyclone_points = []
            lon = []
            lat = []
            marker_list = []
            size_list = []
            anno_list = []

            # For this storm id, get a list of all data (corresponding
            # to lines/rows in the tcst data file).
            track_info_list = self.storm_id_dict[cur_storm_id]
            # pylint:disable=len-as-condition
            # if len(track_info_list) == 0:
            if not track_info_list:
                self.logger.error("Empty track list, no data extracted " +
                                  "from track files, exiting.")
                sys.exit(1)

            # For now, all the marker symbols will be one color.
            color_list = ['red'] * len(track_info_list)

            for track in track_info_list:

                lon.append(float(track['lon']))
                lat.append(float(track['lat']))

                # Differentiate between the forecast lead "groups",
                # i.e. 0/12 vs 6/18 hr and
                # assign the marker symbol and size.
                if track['lead_group'] == '0':
                    marker = 'o'
                    marker_list.append(marker)
                    marker_size = self.circle_marker
                    size_list.append(marker_size)
                    label = "Indicates a position at 00 or 12 UTC"

                elif track['lead_group'] == '6':
                    marker = '+'
                    marker_list.append(marker)
                    marker_size = self.cross_marker
                    size_list.append(marker_size)
                    label = "\nIndicates a position at 06 or 18 UTC\n"

                # Determine the first point, needed later to annotate.
                # pylint:disable=invalid-name
                dd = track['valid_dd']
                hh = track['valid_hh']
                if dd and hh:
                    date_hr_str = dd + '/' + hh + 'z'
                    anno_list.append(date_hr_str)
                else:
                    date_hr_str = ''
                    anno_list.append(date_hr_str)

                # Write to the ASCII track file, if requested
                if self.gen_ascii:
                    line_parts = ['model_name: ', track['model_name'], '   ',
                                  'storm_id: ', track['storm_id'], '   ',
                                  'init_time: ', track['init_time'], '   ',
                                  'valid_time: ', track['valid_time'], '   ',
                                  'lat: ', str(track['lat']), '   ',
                                  'lon: ', str(track['lon']), '   ',
                                  'lead_group: ', track['lead_group'], '   ',
                                  'first_point:', str(track['first_point'])]
                    line = ''.join(line_parts)
                    ascii_track_file.write(line)
                    ascii_track_file.write('\n')

            # Create a scatter plot to add the appropriate marker
            # symbol to the forecast hours corresponding to 6/18 hours.

            # map.plot(x, y, color='red', linestyle='-')

            # Annotate the first point of the storm track
            for anno, adj_lon, adj_lat in zip(anno_list, lon, lat):
                # x, y = map(adj_lon, adj_lat)
                # Annotate the first point of the storm track by
                # overlaying the annotation text over all points (all but
                # one will have text).
                plt.annotate(anno, xy=(adj_lon, adj_lat), xytext=(2, 2),
                             textcoords='offset points', fontsize=11,
                             color='red')

            # Generate the scatterplot, where the 6/18 Z forecast times
            # are labelled with a '+'
            for adj_lon, adj_lat, symbol, sz, colours in zip(lon, lat,
                                                             marker_list,
                                                             size_list,
                                                             color_list):
                # red line, red +, red o, marker sizes are recognized,
                # no outline color of black for 'o'
                # plt.scatter(x, y, s=sz, c=colours, edgecolors=colours,
                # facecolors='none', marker=symbol, zorder=2)
                # Solid circle, just like the EMC NCEP plots
                # Separate the first two points so we can generate the legend
                if circle_counter == 0 or plus_counter == 0:
                    if symbol == 'o':
                        plt.scatter(adj_lon, adj_lat, s=sz, c=colours,
                                    edgecolors=colours, facecolors=colours,
                                    marker='o', zorder=2,
                                    label="Indicates a position " +
                                    "at 00 or 12 UTC")
                        plt.plot(adj_lon, adj_lat, linestyle='-')
                        circle_counter += 1
                    elif symbol == '+':
                        plt.scatter(adj_lon, adj_lat, s=sz, c=colours,
                                    edgecolors=colours, facecolors=colours,
                                    marker='+', zorder=2,
                                    label="\nIndicates a position at 06 or " +
                                    "18 UTC\n")
                        plus_counter += 1

                else:
                    # Set the legend for additional text using a
                    # dummy scatter point
                    if dummy_counter == 0:
                        plt.scatter(0, 0, zorder=2, marker=None, c='',
                                    label="Date (dd/hhz) is the first " +
                                    "time storm was able to be tracked " +
                                    "in model")
                        dummy_counter += 1
                    plt.scatter(adj_lon, adj_lat, s=sz, c=colours, edgecolors=colours,
                                facecolors=colours, marker=symbol, zorder=2)


        # Draw the legend on the plot
        # If you wish to have the legend within the plot:
        # plt.legend(loc='lower left', prop={'size':5}, scatterpoints=1)
        # The legend is outside the plot, below the x-axis to
        # avoid obscuring any storm tracks in the Southern
        # Hemisphere.
        # ax.legend(loc='lower left', bbox_to_anchor=(-0.03, -0.5),
        #           fancybox=True, shadow=True, scatterpoints=1,
        #           prop={'size': 6})
        ax.legend(loc='lower left', bbox_to_anchor=(-0.01, -0.4),
                  fancybox=True, shadow=True, scatterpoints=1,
                  prop={'size': 6})

        # Write the plot to the output directory
        out_filename_parts = [self.init_date, '.png']
        output_plot_name = ''.join(out_filename_parts)
        plot_filename = os.path.join(self.output_dir, output_plot_name)
        plt.savefig(plot_filename)

        # Close the ASCII track file, if generated
        if self.gen_ascii:
            ascii_track_file.close()

        # Plot data onto axes
        plt.show()
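
# A minimal, self-contained Cartopy sketch (hypothetical track points) of
# the plotting pattern above: PlateCarree axes, land/coastlines, a
# storm-track scatter, and a legend anchored below the map.
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature

lon = [-80.0, -78.5, -76.0]   # hypothetical track longitudes
lat = [25.0, 27.0, 29.5]      # hypothetical track latitudes

ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
ax.add_feature(cfeature.LAND)
ax.coastlines()
ax.add_feature(cfeature.OCEAN)
ax.scatter(lon, lat, c='red', marker='o', zorder=2,
           transform=ccrs.PlateCarree(),
           label='Indicates a position at 00 or 12 UTC')
ax.legend(loc='lower left', bbox_to_anchor=(-0.01, -0.4),
          fancybox=True, shadow=True, scatterpoints=1, prop={'size': 6})
plt.savefig('track_example.png')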