Code Example #1
def main(argv=None):
    """Load in arguments for estimating coefficients for Ensemble Model Output
       Statistics (EMOS), otherwise known as Non-homogeneous Gaussian
       Regression (NGR). Two sources of input data must be provided: historical
       forecasts and historical truth data (to use in calibration). The
       estimated coefficients are written to a netCDF file.
    """
    parser = ArgParser(
        description='Estimate coefficients for Ensemble Model Output '
                    'Statistics (EMOS), otherwise known as Non-homogeneous '
                    'Gaussian Regression (NGR). There are two methods for '
                    'inputting data into this CLI, either by providing the '
                    'historic forecasts and truth separately, or by providing '
                    'a combined list of historic forecasts and truths along '
                    'with historic_forecast_identifier and truth_identifier '
                    'arguments to provide metadata that distinguishes between '
                    'them.')
    parser.add_argument('distribution', metavar='DISTRIBUTION',
                        choices=['gaussian', 'truncated_gaussian'],
                        help='The distribution that will be used for '
                             'calibration. This will be dependent upon the '
                             'input phenomenon. This has to be supported by '
                             'the minimisation functions in '
                             'ContinuousRankedProbabilityScoreMinimisers.')
    parser.add_argument('cycletime', metavar='CYCLETIME', type=str,
                        help='This denotes the cycle at which forecasts '
                             'will be calibrated using the calculated '
                             'EMOS coefficients. The validity time in the '
                             'output coefficients cube will be calculated '
                             'relative to this cycletime. '
                             'This cycletime is in the format '
                             'YYYYMMDDTHHMMZ.')

    # Historic forecast and truth filepaths
    parser.add_argument(
        '--historic_filepath', metavar='HISTORIC_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic forecast(s) used for calibration. '
             'This must be supplied with an associated truth filepath. '
             'Specification of any of the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')
    parser.add_argument(
        '--truth_filepath', metavar='TRUTH_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic truth analyses used for calibration. '
             'This must be supplied with an associated historic filepath. '
             'Specification of any of the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')

    # Input filepaths
    parser.add_argument(
        '--combined_filepath', metavar='COMBINED_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing '
             'both the historic forecast(s) and truth '
             'analyses used for calibration. '
             'This must be supplied with both the '
             'historic_forecast_identifier and the truth_identifier. '
             'Specification of either the historic_filepath or the '
             'truth_filepath is invalid with this argument.')
    parser.add_argument(
        "--historic_forecast_identifier",
        metavar='HISTORIC_FORECAST_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the historic forecast. '
             'This must be supplied with both the combined_filepath and the '
             'truth_identifier. Specification of either the historic_filepath '
             'or the truth_filepath is invalid with this argument. '
             'The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')
    parser.add_argument(
        "--truth_identifier", metavar='TRUTH_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the truth. '
             'This must be supplied with both the combined_filepath and the '
             'historic_forecast_identifier. Specification of either the '
             'historic_filepath or the truth_filepath is invalid with this '
             'argument. The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')

    # Output filepath
    parser.add_argument('output_filepath', metavar='OUTPUT_FILEPATH',
                        help='The output path for the processed NetCDF')
    # Optional arguments.
    parser.add_argument('--units', metavar='UNITS',
                        help='The units that calibration should be undertaken '
                             'in. The historical forecast and truth will be '
                             'converted as required.')
    parser.add_argument('--predictor_of_mean', metavar='PREDICTOR_OF_MEAN',
                        choices=['mean', 'realizations'], default='mean',
                        help='String to specify the predictor used to '
                             'calibrate the forecast mean. Currently the '
                             'ensemble mean ("mean") and the ensemble '
                             'realizations ("realizations") are supported as '
                             'options. Default: "mean".')
    parser.add_argument('--max_iterations', metavar='MAX_ITERATIONS',
                        type=np.int32, default=1000,
                        help='The maximum number of iterations allowed '
                             'until the minimisation has converged to a '
                             'stable solution. If the maximum number '
                             'of iterations is reached, but the '
                             'minimisation has not yet converged to a '
                             'stable solution, then the available solution '
                             'is used anyway, and a warning is raised. '
                             'This may be modified for testing purposes '
                             'but otherwise kept fixed. If the '
                             'predictor_of_mean is "realizations", '
                             'then the number of iterations may require '
                             'increasing, as there will be more coefficients '
                             'to solve for.')
    args = parser.parse_args(args=argv)

    # Load Cubes
    historic_forecast = load_cube(args.historic_filepath, allow_none=True)
    truth = load_cube(args.truth_filepath, allow_none=True)

    combined = (load_cubelist(args.combined_filepath)
                if args.combined_filepath else None)
    historic_forecast_dict = (
        load_json_or_none(args.historic_forecast_identifier))
    truth_dict = load_json_or_none(args.truth_identifier)

    # Process Cube
    coefficients = process(historic_forecast, truth, combined,
                           historic_forecast_dict, truth_dict,
                           args.distribution, args.cycletime, args.units,
                           args.predictor_of_mean, args.max_iterations)
    # Save Cube
    # Check whether a coefficients cube has been created. If the historic
    # forecasts and truths provided did not match in validity time, then
    # no coefficients would have been calculated.
    if coefficients:
        save_netcdf(coefficients, args.output_filepath)
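
The two input modes described above are mutually exclusive: either historic_filepath plus truth_filepath, or combined_filepath plus both identifier JSON files. A minimal invocation sketch of the separate-inputs mode, using main's argv parameter directly (all file names are illustrative, not real data):

main(["gaussian", "20200101T0000Z", "coefficients.nc",
      "--historic_filepath", "historic.nc",
      "--truth_filepath", "truth.nc"])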
Code Example #2
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description=('Reads input orography and landmask fields. Creates '
                     'a series of topographic zone weights to indicate '
                     'where an orography point sits within the defined '
                     'topographic bands. If the orography point is in the '
                     'centre of a topographic band, then a single band will '
                     'have a weight of 1.0. If the orography point is at the '
                     'edge of a topographic band, then the upper band will '
                     'have a 0.5 weight whilst the lower band will also have '
                     'a 0.5 weight. Otherwise, the weight will vary linearly '
                     'between the centre of a topographic band and the edge.'))
    parser.add_argument('input_filepath_standard_orography',
                        metavar='INPUT_FILE_STANDARD_OROGRAPHY',
                        help=('A path to an input NetCDF orography file to '
                              'be processed'))
    parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument('--input_filepath_landmask', metavar='INPUT_FILE_LAND',
                        help=('A path to an input NetCDF land mask file to be '
                              'processed. If provided, sea points will be '
                              'masked and set to the default fill value. If '
                              'no land mask is provided, weights will be '
                              'generated for sea points as well as land, '
                              'included in the appropriate topographic band.'))

    parser.add_argument('--force', dest='force', default=False,
                        action='store_true',
                        help=('If keyword is set (i.e. True), ancillaries '
                              'will be generated even if doing so will '
                              'overwrite existing files'))
    parser.add_argument('--thresholds_filepath',
                        metavar='THRESHOLDS_FILEPATH',
                        default=None,
                        help=("The path to a json file which can be used "
                              "to set the number and size of topographic "
                              "bounds. If unset a default bounds dictionary"
                              " will be used:"
                              "{'bounds': [[-500., 50.], [50., 100.], "
                              "[100., 150.],[150., 200.], [200., 250.], "
                              "[250., 300.], [300., 400.], [400., 500.], "
                              "[500., 650.],[650., 800.], [800., 950.], "
                              "[950., 6000.]], 'units': 'm'}"))
    args = parser.parse_args(args=argv)

    thresholds_dict = load_json_or_none(args.thresholds_filepath)

    if not os.path.exists(args.output_filepath) or args.force:
        orography = load_cube(args.input_filepath_standard_orography)
        landmask = None
        if args.input_filepath_landmask:
            try:
                landmask = load_cube(args.input_filepath_landmask)
            except IOError as err:
                msg = ("Loading land mask has been unsuccessful: {}. "
                       "This may be because the land mask could not be "
                       "located in {}; run "
                       'improver-generate-landmask-ancillary first.').format(
                           err, args.input_filepath_landmask)
                raise IOError(msg)

        result = process(landmask, orography, thresholds_dict)
        # Save Cube
        save_netcdf(result, args.output_filepath)
    else:
        print('File already exists here: ', args.output_filepath)
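
The thresholds file is plain JSON in the format quoted in the help text. A sketch of preparing one and invoking the CLI (file names illustrative; note that without --force an existing output file is left untouched):

import json

# A truncated version of the documented default bounds dictionary.
with open("thresholds.json", "w") as f:
    json.dump({"bounds": [[-500.0, 50.0], [50.0, 100.0]], "units": "m"}, f)

main(["orography.nc", "weights.nc",
      "--input_filepath_landmask", "landmask.nc",
      "--thresholds_filepath", "thresholds.json"])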
Code Example #3
 def test_none(self, m):
     """Tests if called with None returns None."""
     file_path = None
     dict_read = load_json_or_none(file_path)
     self.assertIsNone(dict_read)
     m.assert_not_called()
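
Together with the loading test in Code Example #6, this test pins down the contract of load_json_or_none: None passes straight through, anything else is opened and parsed as JSON. A minimal sketch consistent with these tests (inferred from them, not necessarily the actual improver implementation):

import json

def load_json_or_none(file_path):
    """Return the parsed JSON dictionary, or None if no path is given."""
    if file_path is None:
        return None
    with open(file_path, "r") as f:
        return json.load(f)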
Code Example #4
File: combine.py Project: TomekTrzeciak/improver
def main(argv=None):
    """Load in arguments for the cube combiner plugin.
    """
    parser = ArgParser(
        description="Combine the input files into a single file using "
                    "the requested operation e.g. + - min max etc.")
    parser.add_argument("input_filenames", metavar="INPUT_FILENAMES",
                        nargs="+", type=str,
                        help="Paths to the input NetCDF files. Each input"
                        " file should be able to be loaded as a single "
                        " iris.cube.Cube instance. The resulting file"
                        " metadata will be based on the first file but"
                        " its metadata can be overwritten via"
                        " the metadata_jsonfile option.")
    parser.add_argument("output_filepath", metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF.")
    parser.add_argument("--operation", metavar="OPERATION",
                        default="+",
                        choices=["+", "-", "*",
                                 "add", "subtract", "multiply",
                                 "min", "max", "mean"],
                        help="Operation to use in combining NetCDF datasets"
                        " Default=+ i.e. add ", type=str)
    parser.add_argument("--new-name", metavar="NEW_NAME",
                        default=None,
                        help="New name for the resulting dataset. Will"
                        " default to the name of the first dataset if "
                        "not set.", type=str)
    parser.add_argument("--metadata_jsonfile", metavar="METADATA_JSONFILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. "
                        " default=None", type=str)
    parser.add_argument('--warnings_on', action='store_true',
                        help="If warnings_on is set (i.e. True), "
                        "Warning messages where metadata do not match "
                        "will be given. Default=False", default=False)

    args = parser.parse_args(args=argv)

    new_metadata = load_json_or_none(args.metadata_jsonfile)
    # Load cubes
    cubelist = iris.cube.CubeList([])
    new_cube_name = args.new_name
    for filename in args.input_filenames:
        new_cube = load_cube(filename)
        cubelist.append(new_cube)
        if new_cube_name is None:
            new_cube_name = new_cube.name()
        if args.warnings_on:
            if (args.new_name is None and
                    new_cube_name != new_cube.name()):
                msg = ("Defaulting to first cube name, {} but combining with"
                       "a cube with name, {}.".format(
                            new_cube_name, new_cube.name()))
                warnings.warn(msg)

    # Process Cube
    result = process(cubelist, args.operation, new_cube_name,
                     new_metadata, args.warnings_on)

    # Save Cube
    save_netcdf(result, args.output_filepath)
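
A minimal invocation sketch via main's argv parameter (file names illustrative); the first cube's name is kept for the result unless --new-name is given:

main(["rain_accumulation_a.nc", "rain_accumulation_b.nc", "combined.nc",
      "--operation", "max"])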
Code Example #5
def main(argv=None):
    """Load in arguments and ensure they are set correctly.
       Then load in the data to blend and calculate default weights
       using the method chosen before carrying out the blending."""
    parser = ArgParser(
        description='Calculate the default weights to apply in weighted '
        'blending plugins using the ChooseDefaultWeightsLinear or '
        'ChooseDefaultWeightsNonLinear plugins. Then apply these '
        'weights to the dataset using the BasicWeightedAverage plugin.'
        ' Required for ChooseDefaultWeightsLinear: y0val and ynval.'
        ' Required for ChooseDefaultWeightsNonLinear: cval.'
        ' Required for ChooseWeightsLinear with dict: wts_dict.')

    parser.add_argument('--wts_calc_method',
                        metavar='WEIGHTS_CALCULATION_METHOD',
                        choices=['linear', 'nonlinear', 'dict'],
                        default='linear',
                        help='Method to use to calculate '
                        'weights used in blending. "linear" (default): '
                        'calculate linearly varying blending weights. '
                        '"nonlinear": calculate blending weights that decrease'
                        ' exponentially with increasing blending coordinate. '
                        '"dict": calculate weights using a dictionary passed '
                        'in as a command line argument.')

    parser.add_argument('coordinate',
                        type=str,
                        metavar='COORDINATE_TO_AVERAGE_OVER',
                        help='The coordinate over which the blending '
                        'will be applied.')
    parser.add_argument('--cycletime',
                        metavar='CYCLETIME',
                        type=str,
                        help='The forecast reference time to be used after '
                        'blending has been applied, in the format '
                        'YYYYMMDDTHHMMZ. If not provided, the blended file '
                        'will take the latest available forecast reference '
                        'time from the input cubes supplied.')
    parser.add_argument('--model_id_attr',
                        metavar='MODEL_ID_ATTR',
                        type=str,
                        default=None,
                        help='The name of the netCDF file attribute to be '
                        'used to identify the source model for '
                        'multi-model blends. Default is None. '
                        'Must be present on all input '
                        'files if blending over models.')
    parser.add_argument('--spatial_weights_from_mask',
                        action='store_true',
                        default=False,
                        help='If set this option will result in the generation'
                        ' of spatially varying weights based on the'
                        ' masks of the data we are blending. The'
                        ' one dimensional weights are first calculated'
                        ' using the chosen weights calculation method,'
                        ' but the weights will then be adjusted spatially'
                        ' based on where there is masked data in the data'
                        ' we are blending. The spatial weights are'
                        ' calculated using the'
                        ' SpatiallyVaryingWeightsFromMask plugin.')

    parser.add_argument('input_filepaths',
                        metavar='INPUT_FILES',
                        nargs="+",
                        help='Paths to input files to be blended.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    spatial = parser.add_argument_group(
        'Spatial weights from mask options',
        'Options for calculating the spatial weights using the '
        'SpatiallyVaryingWeightsFromMask plugin.')
    spatial.add_argument('--fuzzy_length',
                         metavar='FUZZY_LENGTH',
                         type=float,
                         default=20000,
                         help='When calculating spatially varying weights we'
                         ' can smooth the weights so that areas close to'
                         ' areas that are masked have lower weights than'
                         ' those further away. This fuzzy length controls'
                         ' the scale over which the weights are smoothed.'
                         ' The fuzzy length is in metres; the'
                         ' default is 20000 m (20 km). This distance is then'
                         ' converted into a number of grid squares,'
                         ' which does not have to be an integer. Assumes'
                         ' the grid spacing is the same in the x and y'
                         ' directions, and raises an error if this is not'
                         ' true. See SpatiallyVaryingWeightsFromMask for'
                         ' more detail.')

    linear = parser.add_argument_group(
        'linear weights options', 'Options for the linear weights '
        'calculation in '
        'ChooseDefaultWeightsLinear')
    linear.add_argument('--y0val',
                        metavar='LINEAR_STARTING_POINT',
                        type=float,
                        help='The relative value of the weighting start point '
                        '(lowest value of blend coord) for choosing default '
                        'linear weights. This must be a positive float or 0.')
    linear.add_argument('--ynval',
                        metavar='LINEAR_END_POINT',
                        type=float,
                        help='The relative value of the weighting '
                        'end point (highest value of blend coord) for choosing'
                        ' default linear weights. This must be a positive '
                        'float or 0.  Note that if blending over forecast '
                        'reference time, ynval >= y0val would normally be '
                        'expected (to give greater weight to the more recent '
                        'forecast).')

    nonlinear = parser.add_argument_group(
        'nonlinear weights options', 'Options for the non-linear '
        'weights calculation in '
        'ChooseDefaultWeightsNonLinear')
    nonlinear.add_argument('--cval',
                           metavar='NON_LINEAR_FACTOR',
                           type=float,
                           help='Factor used to determine how skewed the '
                           'non linear weights will be. A value of 1 '
                           'implies equal weighting.')

    wts_dict = parser.add_argument_group(
        'dict weights options', 'Options for linear weights to be '
        'calculated based on parameters '
        'read from a json file dict')
    wts_dict.add_argument('--wts_dict',
                          metavar='WEIGHTS_DICTIONARY',
                          help='Path to json file containing dictionary from '
                          'which to calculate blending weights. Dictionary '
                          'format is as specified in the improver.blending.'
                          'weights.ChooseWeightsLinear plugin.')
    wts_dict.add_argument('--weighting_coord',
                          metavar='WEIGHTING_COORD',
                          default='forecast_period',
                          help='Name of '
                          'coordinate over which linear weights should be '
                          'scaled. This coordinate must be available in the '
                          'weights dictionary.')

    args = parser.parse_args(args=argv)

    # reject incorrect argument combinations
    if (args.wts_calc_method == "linear") and args.cval:
        parser.wrong_args_error('cval', 'linear')
    if ((args.wts_calc_method == "nonlinear")
            and np.any([args.y0val, args.ynval])):
        parser.wrong_args_error('y0val, ynval', 'non-linear')

    if (args.wts_calc_method == "dict") and not args.wts_dict:
        parser.error('Dictionary is required if --wts_calc_method="dict"')

    weights_dict = load_json_or_none(args.wts_dict)

    # Load cubes to be blended.
    cubelist = load_cubelist(args.input_filepaths)

    result = process(cubelist, args.wts_calc_method, args.coordinate,
                     args.cycletime, args.weighting_coord, weights_dict,
                     args.y0val, args.ynval, args.cval, args.model_id_attr,
                     args.spatial_weights_from_mask, args.fuzzy_length)

    save_netcdf(result, args.output_filepath)
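
The argument checks above give each weights method its own required options: y0val and ynval for "linear", cval for "nonlinear", and --wts_dict for "dict". A sketch of a linear blend over forecast reference time (file names illustrative):

main(["forecast_reference_time", "model_run_a.nc", "model_run_b.nc",
      "blended.nc", "--y0val", "1.0", "--ynval", "4.0"])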
Code Example #6
 def test_loading_file(self, m):
     """Tests if called with a filepath, loads a dict."""
     file_path = "filename"
     dict_read = load_json_or_none(file_path)
     self.assertEqual(dict_read, {"k": "v"})
     m.assert_called_with("filename", "r")
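
The injected argument m implies a mock.patch decorator replacing the built-in open. A self-contained sketch of the harness these two tests suggest, assuming the load_json_or_none sketch shown after Code Example #3 (the patch target and import path used in improver itself may differ):

import unittest
from unittest import mock

class Test_load_json_or_none(unittest.TestCase):

    @mock.patch("builtins.open", new_callable=mock.mock_open,
                read_data='{"k": "v"}')
    def test_loading_file(self, m):
        """Tests if called with a filepath, loads a dict."""
        dict_read = load_json_or_none("filename")
        self.assertEqual(dict_read, {"k": "v"})
        m.assert_called_with("filename", "r")

if __name__ == "__main__":
    unittest.main()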
Code Example #7
def main(argv=None):
    """Calculate optical flow advection velocities"""

    parser = ArgParser(
        description="Calculate optical flow components from input fields.")

    parser.add_argument("input_filepaths",
                        metavar="INPUT_FILEPATHS",
                        nargs=3,
                        type=str,
                        help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILEPATH",
                        help="The output path for the resulting NetCDF")

    parser.add_argument("--nowcast_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file",
                        metavar="JSON_FILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to attributes. "
                        "Every output file will have the attributes_dict "
                        "applied. Defaults to None.",
                        type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size",
                        type=int,
                        default=30,
                        help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations",
                        type=int,
                        default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    args = parser.parse_args(args=argv)

    # Load Cubes and JSON
    attributes_dict = load_json_or_none(args.json_file)
    original_cube_list = load_cubelist(args.input_filepaths)
    oe_cube = load_cube(args.orographic_enhancement_filepaths, allow_none=True)

    # Process
    result = process(original_cube_list, oe_cube, attributes_dict,
                     args.ofc_box_size, args.smart_smoothing_iterations)

    # Save Cubes
    save_netcdf(result, args.output_filepath)
Code Example #8
File: nowcast_optical_flow.py Project: 15b3/improver
def main(argv=None):
    """Calculate optical flow advection velocities and (optionally)
    extrapolate data."""

    parser = ArgParser(
        description="Calculate optical flow components from input fields "
        "and (optionally) extrapolate to required lead times.")

    parser.add_argument("input_filepaths",
                        metavar="INPUT_FILEPATHS",
                        nargs=3,
                        type=str,
                        help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("--output_dir",
                        metavar="OUTPUT_DIR",
                        type=str,
                        default='',
                        help="Directory to write all output files,"
                        " or only advection velocity components if "
                        "NOWCAST_FILEPATHS is specified.")
    parser.add_argument("--nowcast_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file",
                        metavar="JSON_FILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.",
                        type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size",
                        type=int,
                        default=30,
                        help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations",
                        type=int,
                        default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    # AdvectField options
    parser.add_argument("--extrapolate",
                        action="store_true",
                        default=False,
                        help="Optional flag to advect current data forward to "
                        "specified lead times.")
    parser.add_argument("--max_lead_time",
                        type=int,
                        default=360,
                        help="Maximum lead time required (mins).  Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--lead_time_interval",
                        type=int,
                        default=15,
                        help="Interval between required lead times (mins). "
                        "Ignored unless '--extrapolate' is set.")

    args = parser.parse_args(args=argv)

    # Load Cubes and JSON.
    metadata_dict = load_json_or_none(args.json_file)
    original_cube_list = load_cubelist(args.input_filepaths)
    oe_cube = load_cube(args.orographic_enhancement_filepaths, allow_none=True)

    # Process
    forecast_cubes, u_and_v_mean = process(original_cube_list, oe_cube,
                                           metadata_dict, args.ofc_box_size,
                                           args.smart_smoothing_iterations,
                                           args.extrapolate,
                                           args.max_lead_time,
                                           args.lead_time_interval)

    # Save Cubes
    for wind_cube in u_and_v_mean:
        file_name = generate_file_name(wind_cube)
        save_netcdf(wind_cube, os.path.join(args.output_dir, file_name))

    # advect latest input data to the required lead times
    if args.extrapolate:
        if args.nowcast_filepaths:
            if len(args.nowcast_filepaths) != len(forecast_cubes):
                raise ValueError("Require exactly one output file name for "
                                 "each forecast lead time")

        for i, cube in enumerate(forecast_cubes):
            # save to a suitably-named output file
            if args.nowcast_filepaths:
                file_name = args.nowcast_filepaths[i]
            else:
                file_name = os.path.join(args.output_dir,
                                         generate_file_name(cube))
            save_netcdf(cube, file_name)
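
A sketch of an extrapolating invocation (paths illustrative): the three radar inputs may be listed in any order, and nowcast output falls back to generated file names under --output_dir when --nowcast_filepaths is omitted:

main(["radar_t-2.nc", "radar_t-1.nc", "radar_t.nc",
      "--output_dir", "/tmp/nowcast",
      "--orographic_enhancement_filepaths", "orographic_enhancement.nc",
      "--extrapolate", "--max_lead_time", "90", "--lead_time_interval", "15"])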
Code Example #9
def main(argv=None):
    """
    Standardise a source cube. Available options are regridding (bilinear or
    nearest-neighbour, optionally with land-mask awareness), updating metadata
    and converting float64 data to float32. A check for float64 data compliance
    can be made by specifying only a source NetCDF file with no other arguments.
    """
    parser = ArgParser(
        description='Standardise a source data cube. Three main options are '
                    'available; fixing float64 data, regridding and updating '
                    'metadata. If regridding then additional options are '
                    'available to use bilinear or nearest-neighbour '
                    '(optionally with land-mask awareness) modes. If only a '
                    'source file is specified with no other arguments, then '
                    'an exception will be raised if float64 data are found on '
                    'the source.')

    parser.add_argument('source_data_filepath', metavar='SOURCE_DATA',
                        help='A cube of data that is to be standardised and '
                             'optionally fixed for float64 data, regridded '
                             'and metadata changed.')

    parser.add_argument("--output_filepath", metavar="OUTPUT_FILE",
                        default=None,
                        help="The output path for the processed NetCDF. "
                             "If only a source file is specified and no "
                             "output file, then the source will be checked"
                             "for float64 data.")

    regrid_group = parser.add_argument_group("Regridding options")
    regrid_group.add_argument(
        "--target_grid_filepath", metavar="TARGET_GRID",
        help=('If specified then regridding of the source '
              'against the target grid is enabled. If also using '
              'landmask-aware regridding, then this must be land_binary_mask '
              'data.'))

    regrid_group.add_argument(
        "--regrid_mode", default='bilinear',
        choices=['bilinear', 'nearest', 'nearest-with-mask'],
        help=('Selects which regridding technique to use. Default uses '
              'iris.analysis.Linear(); "nearest" uses Nearest() (Use for less '
              'continuous fields, e.g. precipitation.); "nearest-with-mask" '
              'ensures that target data are sourced from points with the same '
              'mask value (Use for coast-line-dependent variables like '
              'temperature).'))

    regrid_group.add_argument(
        "--extrapolation_mode", default='nanmask',
        help='Mode to use for extrapolating data into regions '
             'beyond the limits of the source_data domain. '
             'Refer to online documentation for iris.analysis. '
             'Modes are: '
             'extrapolate - The extrapolation points will '
             'take their value from the nearest source point. '
             'nan - The extrapolation points will be '
             'set to NaN. '
             'error - A ValueError exception will be raised, '
             'notifying an attempt to extrapolate. '
             'mask - The extrapolation points will always be '
             'masked, even if the source data is not a '
             'MaskedArray. '
             'nanmask - If the source data is a MaskedArray '
             'the extrapolation points will be masked. '
             'Otherwise they will be set to NaN. '
             'Defaults to nanmask.')

    regrid_group.add_argument(
        "--input_landmask_filepath", metavar="INPUT_LANDMASK_FILE",
        help=("A path to a NetCDF file describing the land_binary_mask on "
              "the source-grid if coastline-aware regridding is required."))

    regrid_group.add_argument(
        "--landmask_vicinity", metavar="LANDMASK_VICINITY",
        default=25000., type=float,
        help=("Radius of vicinity to search for a coastline, in metres. "
              "Default value; 25000 m"))

    parser.add_argument("--fix_float64", action='store_true', default=False,
                        help="Check and fix cube for float64 data. Without "
                             "this option an exception will be raised if "
                             "float64 data is found but no fix applied.")

    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help='Filename for the json file containing required '
                             'changes that will be applied '
                             'to the metadata. Defaults to None.')

    args = parser.parse_args(args=argv)

    if args.target_grid_filepath or args.json_file or args.fix_float64:
        if not args.output_filepath:
            msg = ("An argument has been specified that requires an output "
                   "filepath but none has been provided")
            raise ValueError(msg)

    if (args.input_landmask_filepath and
            "nearest-with-mask" not in args.regrid_mode):
        msg = ("Land-mask file supplied without appropriate regrid_mode. "
               "Use --regrid_mode=nearest-with-mask.")
        raise ValueError(msg)

    if args.input_landmask_filepath and not args.target_grid_filepath:
        msg = ("Cannot specify input_landmask_filepath without "
               "target_grid_filepath")
        raise ValueError(msg)

    # Load Cube and json
    metadata_dict = load_json_or_none(args.json_file)
    # source file data path is a mandatory argument
    output_data = load_cube(args.source_data_filepath)
    target_grid = None
    source_landsea = None
    if args.target_grid_filepath:
        target_grid = load_cube(args.target_grid_filepath)
        if args.regrid_mode in ["nearest-with-mask"]:
            if not args.input_landmask_filepath:
                msg = ("An argument has been specified that requires an input "
                       "landmask filepath but none has been provided")
                raise ValueError(msg)
            source_landsea = load_cube(args.input_landmask_filepath)

    # Process Cube
    output_data = process(output_data, target_grid, source_landsea,
                          metadata_dict, args.regrid_mode,
                          args.extrapolation_mode, args.landmask_vicinity,
                          args.fix_float64)

    # Save Cube
    if args.output_filepath:
        save_netcdf(output_data, args.output_filepath)
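
The validation above encodes the coastline-aware regridding requirements: nearest-with-mask needs a target grid, a source land mask and an output filepath. A sketch (file names illustrative):

main(["source.nc", "--output_filepath", "standardised.nc",
      "--target_grid_filepath", "target_grid.nc",
      "--regrid_mode", "nearest-with-mask",
      "--input_landmask_filepath", "source_landmask.nc"])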
Code Example #10
def main(argv=None):
    """Load in arguments and start spotdata extraction process."""
    parser = ArgParser(
        description="Extract diagnostic data from gridded fields for spot data"
        " sites. It is possible to apply a temperature lapse rate adjustment"
        " to temperature data that helps to account for differences between"
        " the spot sites real altitude and that of the grid point from which"
        " the temperature data is extracted.")

    # Input and output files required.
    parser.add_argument("neighbour_filepath",
                        metavar="NEIGHBOUR_FILEPATH",
                        help="Path to a NetCDF file of spot-data neighbours. "
                        "This file also contains the spot site information.")
    parser.add_argument("diagnostic_filepath",
                        metavar="DIAGNOSTIC_FILEPATH",
                        help="Path to a NetCDF file containing the diagnostic "
                        "data to be extracted.")
    parser.add_argument("temperature_lapse_rate_filepath",
                        metavar="LAPSE_RATE_FILEPATH",
                        nargs='?',
                        help="(Optional) Filepath to a NetCDF file containing"
                        " temperature lapse rates. If this cube is provided,"
                        " and a screen temperature cube is being processed,"
                        " the lapse rates will be used to adjust the"
                        " temperatures to better represent each spot's"
                        " site-altitude.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILEPATH",
                        help="The output path for the resulting NetCDF")

    parser.add_argument(
        "--apply_lapse_rate_correction",
        default=False,
        action="store_true",
        help="If the option is set and a lapse rate cube has been "
        "provided, extracted screen temperatures will be adjusted to "
        "better match the altitude of the spot site for which they have "
        "been extracted.")

    method_group = parser.add_argument_group(
        title="Neighbour finding method",
        description="If none of these options are set, the nearest grid point "
        "to a spot site will be used without any other constraints.")
    method_group.add_argument(
        "--land_constraint",
        default=False,
        action='store_true',
        help="If set the neighbour cube will be interrogated for grid point"
        " neighbours that were identified using a land constraint. This means"
        " that the grid points should be land points except for sites where"
        " none were found within the search radius when the neighbour cube was"
        " created. May be used with minimum_dz.")
    method_group.add_argument(
        "--minimum_dz",
        default=False,
        action='store_true',
        help="If set the neighbour cube will be interrogated for grid point"
        " neighbours that were identified using a minimum height difference"
        " constraint. These are grid points that were found to be the closest"
        " in altitude to the spot site within the search radius defined when"
        " the neighbour cube was created. May be used with land_constraint.")

    percentile_group = parser.add_argument_group(
        title="Extract percentiles",
        description="Extract particular percentiles from probabilistic, "
        "percentile, or realization inputs. If deterministic input is "
        "provided a warning is raised and all leading dimensions are included "
        "in the returned spot-data cube.")
    percentile_group.add_argument(
        "--extract_percentiles",
        default=None,
        nargs='+',
        type=float,
        help="If set to a percentile value or a list of percentile values, "
        "data corresponding to those percentiles will be returned. For "
        "example setting '--extract_percentiles 25 50 75' will result in the "
        "25th, 50th, and 75th percentiles being returned from a cube of "
        "probabilities, percentiles, or realizations. Note that for "
        "percentile inputs, the desired percentile(s) must exist in the input "
        "cube.")
    parser.add_argument(
        "--ecc_bounds_warning",
        default=False,
        action="store_true",
        help="If True, where calculated percentiles are outside the ECC "
        "bounds range, raise a warning rather than an exception.")

    meta_group = parser.add_argument_group("Metadata")
    meta_group.add_argument(
        "--metadata_json",
        metavar="METADATA_JSON",
        default=None,
        help="If provided, this JSON file can be used to modify the metadata "
        "of the returned netCDF file. Defaults to None.")

    output_group = parser.add_argument_group("Suppress Verbose output")
    # This CLI may be used to prepare data for verification without knowing the
    # form of the input, be it deterministic, realizations or probabilistic.
    # A warning is normally raised when attempting to extract a percentile from
    # deterministic data as this is not possible; the spot-extraction of the
    # entire cube is returned. When preparing data for verification we know
    # that we will produce a large number of these warnings when passing in
    # deterministic data. This option to suppress warnings is provided to
    # reduce the amount of unneeded logging information that is written out.

    output_group.add_argument(
        "--suppress_warnings",
        default=False,
        action="store_true",
        help="Suppress warning output. This option should only be used if "
        "it is known that warnings will be generated but they are not "
        "required.")

    args = parser.parse_args(args=argv)

    # Load Cube and JSON.
    neighbour_cube = load_cube(args.neighbour_filepath)
    diagnostic_cube = load_cube(args.diagnostic_filepath)
    lapse_rate_cube = load_cube(args.temperature_lapse_rate_filepath,
                                allow_none=True)
    metadata_dict = load_json_or_none(args.metadata_json)

    # Process Cube
    result = process(neighbour_cube, diagnostic_cube, lapse_rate_cube,
                     args.apply_lapse_rate_correction, args.land_constraint,
                     args.minimum_dz, args.extract_percentiles,
                     args.ecc_bounds_warning, metadata_dict,
                     args.suppress_warnings)

    # Save Cube
    save_netcdf(result, args.output_filepath)
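
A sketch of a lapse-rate-adjusted percentile extraction (file names illustrative); the optional third positional argument is the lapse rate file, and --extract_percentiles takes one or more values as in the help example:

main(["neighbours.nc", "temperature.nc", "lapse_rate.nc", "spot.nc",
      "--apply_lapse_rate_correction",
      "--extract_percentiles", "25", "50", "75"])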
Code Example #11
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description=('Reads input orography and landmask fields. Creates a '
                     'series of masks, where each mask excludes data below or'
                     ' equal to the lower threshold, and excludes data above '
                     'the upper threshold.'))
    parser.add_argument('input_filepath_standard_orography',
                        metavar='INPUT_FILE_STANDARD_OROGRAPHY',
                        help=('A path to an input NetCDF orography file to '
                              'be processed'))
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument('--input_filepath_landmask',
                        metavar='INPUT_FILE_LAND',
                        help=('A path to an input NetCDF land mask file to be '
                              'processed. If provided, sea points will be '
                              'set to zero in every band. If '
                              'no land mask is provided, sea points will be '
                              'included in the appropriate topographic band.'))
    parser.add_argument('--force',
                        dest='force',
                        default=False,
                        action='store_true',
                        help=('If keyword is set (i.e. True), ancillaries '
                              'will be generated even if doing so will '
                              'overwrite existing files'))
    parser.add_argument('--thresholds_filepath',
                        metavar='THRESHOLDS_FILEPATH',
                        default=None,
                        help=("The path to a json file which can be used "
                              "to set the number and size of topographic "
                              "bounds. If unset a default bounds dictionary"
                              " will be used."
                              "The dictionary has the following form: "
                              "{'bounds': [[-500., 50.], [50., 100.], "
                              "[100., 150.],[150., 200.], [200., 250.], "
                              "[250., 300.], [300., 400.], [400., 500.], "
                              "[500., 650.],[650., 800.], [800., 950.], "
                              "[950., 6000.]], 'units': 'm'}"))
    args = parser.parse_args(args=argv)

    thresholds_dict = load_json_or_none(args.thresholds_filepath)
    if thresholds_dict is None:
        thresholds_dict = THRESHOLDS_DICT

    if not os.path.exists(args.output_filepath) or args.force:
        orography = load_cube(args.input_filepath_standard_orography)
        landmask = None
        if args.input_filepath_landmask:
            try:
                landmask = load_cube(args.input_filepath_landmask)
            except IOError as err:
                msg = ("Loading land mask has been unsuccessful: {}. "
                       "This may be because the land mask could not be "
                       "located in {}; run "
                       'improver-generate-landmask-ancillary first.').format(
                           err, args.input_filepath_landmask)
                raise IOError(msg)
        # Process Cube
        result = process(orography, landmask, thresholds_dict)

        # Save Cube
        save_netcdf(result, args.output_filepath)
    else:
        print('File already exists here: ', args.output_filepath)
Code Example #12
File: nowcast_extrapolate.py Project: 15b3/improver
def main(argv=None):
    """Extrapolate data forward in time."""

    parser = ArgParser(
        description="Extrapolate input data to required lead times.")
    parser.add_argument("input_filepath",
                        metavar="INPUT_FILEPATH",
                        type=str,
                        help="Path to input NetCDF file.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output_dir",
                       metavar="OUTPUT_DIR",
                       type=str,
                       default="",
                       help="Directory to write output files.")
    group.add_argument("--output_filepaths",
                       nargs="+",
                       type=str,
                       help="List of full paths to output nowcast files, in "
                       "order of increasing lead time.")

    optflw = parser.add_argument_group('Advect using files containing the x'
                                       ' and y components of the velocity')
    optflw.add_argument("--eastward_advection_filepath",
                        type=str,
                        help="Path"
                        " to input file containing Eastward advection "
                        "velocities.")
    optflw.add_argument("--northward_advection_filepath",
                        type=str,
                        help="Path"
                        " to input file containing Northward advection "
                        "velocities.")

    speed = parser.add_argument_group('Advect using files containing speed and'
                                      ' direction')
    speed.add_argument("--advection_speed_filepath",
                       type=str,
                       help="Path"
                       " to input file containing advection speeds,"
                       " usually wind speeds, on multiple pressure levels.")
    speed.add_argument("--advection_direction_filepath",
                       type=str,
                       help="Path to input file containing the directions from"
                       " which advection speeds are coming (180 degrees from"
                       " the direction in which the speed is directed). The"
                       " directions should be on the same grid as the input"
                       " speeds, including the same vertical levels.")
    speed.add_argument("--pressure_level",
                       type=int,
                       default=75000,
                       help="The"
                       " pressure level in Pa to extract from the multi-level"
                       " advection_speed and advection_direction files. The"
                       " velocities at this level are used for advection.")
    parser.add_argument("--orographic_enhancement_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file",
                        metavar="JSON_FILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.",
                        type=str)
    parser.add_argument("--max_lead_time",
                        type=int,
                        default=360,
                        help="Maximum lead time required (mins).")
    parser.add_argument("--lead_time_interval",
                        type=int,
                        default=15,
                        help="Interval between required lead times (mins).")

    accumulation_args = parser.add_argument_group(
        'Calculate accumulations from advected fields')
    accumulation_args.add_argument(
        "--accumulation_fidelity",
        type=int,
        default=0,
        help="If set, this CLI will additionally return accumulations"
        " calculated from the advected fields. This fidelity specifies the"
        " time interval in minutes between advected fields that is used to"
        " calculate these accumulations. This interval must be a factor of"
        " the lead_time_interval.")
    accumulation_args.add_argument(
        "--accumulation_period",
        type=int,
        default=15,
        help="The period over which the accumulation is calculated (mins). "
        "Only full accumulation periods will be computed. At lead times "
        "that are shorter than the accumulation period, no accumulation "
        "output will be produced.")
    accumulation_args.add_argument(
        "--accumulation_units",
        type=str,
        default='m',
        help="Desired units in which the accumulations should be expressed,"
        "e.g. mm")

    # Parse arguments
    args = parser.parse_args(args=argv)

    metadata_dict = load_json_or_none(args.json_file)

    upath, vpath = (args.eastward_advection_filepath,
                    args.northward_advection_filepath)
    spath, dpath = (args.advection_speed_filepath,
                    args.advection_direction_filepath)

    # load files and initialise advection plugin
    input_cube = load_cube(args.input_filepath)
    orographic_enhancement_cube = load_cube(
        args.orographic_enhancement_filepaths, allow_none=True)

    speed_cube = direction_cube = ucube = vcube = None
    if (upath and vpath) and not (spath or dpath):
        ucube = load_cube(upath)
        vcube = load_cube(vpath)
    elif (spath and dpath) and not (upath or vpath):
        level_constraint = Constraint(pressure=args.pressure_level)
        try:
            speed_cube = load_cube(spath, constraints=level_constraint)
            direction_cube = load_cube(dpath, constraints=level_constraint)
        except ValueError as err:
            raise ValueError(
                '{} Unable to extract specified pressure level from given '
                'speed and direction files.'.format(err))
    else:
        raise ValueError('Cannot mix advection component velocities with speed'
                         ' and direction')

    # Process Cubes
    accumulation_cubes, forecast_to_return = process(
        input_cube, ucube, vcube, speed_cube, direction_cube,
        orographic_enhancement_cube, metadata_dict, args.max_lead_time,
        args.lead_time_interval, args.accumulation_fidelity,
        args.accumulation_period, args.accumulation_units)

    # Save Cube
    if args.output_filepaths and \
            len(args.output_filepaths) != len(forecast_to_return):
        raise ValueError("Require exactly one output file name for each "
                         "forecast lead time")
    for i, cube in enumerate(forecast_to_return):
        # save to a suitably-named output file
        if args.output_filepaths:
            file_name = args.output_filepaths[i]
        else:
            file_name = os.path.join(args.output_dir, generate_file_name(cube))
        save_netcdf(cube, file_name)

    if args.accumulation_fidelity > 0:
        # return accumulation cubes
        for i, cube in enumerate(accumulation_cubes):
            file_name = os.path.join(args.output_dir, generate_file_name(cube))
            save_netcdf(cube, file_name)
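
The loading logic above accepts exactly one advection mode: either u/v component files or speed/direction files, never a mixture. A sketch of the component mode (file names illustrative):

main(["radar_t.nc",
      "--eastward_advection_filepath", "u_component.nc",
      "--northward_advection_filepath", "v_component.nc",
      "--orographic_enhancement_filepaths", "orographic_enhancement.nc",
      "--output_dir", "/tmp/nowcast"])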
Code Example #13
def main(argv=None):
    """Load in arguments and get going."""
    description = (
        "Determine grid point coordinates within the provided cubes that "
        "neighbour spot data sites defined within the provided JSON "
        "file. If no options are set the returned netCDF file will contain the"
        " nearest neighbour found for each site. Other constrained neighbour "
        "finding methods can be set with options below.")
    options = ("\n\nThese methods are:\n\n"
               " 1. nearest neighbour\n"
               " 2. nearest land point neighbour\n"
               " 3. nearest neighbour with minimum height difference\n"
               " 4. nearest land point neighbour with minimum height "
               "difference")

    parser = ArgParser(
        description=('\n'.join(wrap(description, width=79)) + options),
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("site_list_filepath", metavar="SITE_LIST_FILEPATH",
                        help="Path to a JSON file that contains the spot sites"
                        " for which neighbouring grid points are to be found.")
    parser.add_argument("orography_filepath", metavar="OROGRAPHY_FILEPATH",
                        help="Path to a NetCDF file of model orography for the"
                        " model grid on which neighbours are being found.")
    parser.add_argument("landmask_filepath", metavar="LANDMASK_FILEPATH",
                        help="Path to a NetCDF file of model land mask for the"
                        " model grid on which neighbours are being found.")
    parser.add_argument("output_filepath", metavar="OUTPUT_FILEPATH",
                        help="The output path for the resulting NetCDF")

    parser.add_argument(
        "--all_methods", default=False, action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbours to spot sites as defined by each possible combination of"
        " constraints.")

    group = parser.add_argument_group('Apply constraints to neighbour choice')
    group.add_argument(
        "--land_constraint", default=False, action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbours to spot sites that are also land points. May be used with"
        " the minimum_dz option.")
    group.add_argument(
        "--minimum_dz", default=False, action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbour to each spot site that is found, within a given search"
        " radius, to minimise the height difference between the two. May be"
        " used with the land_constraint option.")
    group.add_argument(
        "--search_radius", metavar="SEARCH_RADIUS", type=float,
        help="The radius in metres about a spot site within which to search"
        " for a grid point neighbour that is land or which has a smaller "
        " height difference than the nearest. The default value is 10000m "
        "(10km).")
    group.add_argument(
        "--node_limit", metavar="NODE_LIMIT", type=int,
        help="When searching within the defined search_radius for suitable "
        "neighbours, a KDTree is constructed. This node_limit prevents the "
        "tree from becoming too large for large search radii. A default of 36"
        " is set, which is to say the nearest 36 grid points will be "
        "considered. If the search_radius is likely to contain more than 36 "
        "points, this value should be increased to ensure all points are "
        "considered.")

    s_group = parser.add_argument_group('Site list options')
    s_group.add_argument(
        "--site_coordinate_system", metavar="SITE_COORDINATE_SYSTEM",
        help="The coordinate system in which the site coordinates are provided"
        " within the site list. This must be provided as the name of a cartopy"
        " coordinate system. The default is a PlateCarree system, with site"
        " coordinates given by latitude/longitude pairs. This can be a"
        " complete definition, including parameters required to modify a"
        " default system, e.g. Miller(central_longitude=90). If a globe is"
        " required this can be specified as e.g."
        " Globe(semimajor_axis=100, semiminor_axis=100).")
    s_group.add_argument(
        "--site_coordinate_options", metavar="SITE_COORDINATE_OPTIONS",
        help="JSON formatted string of options passed to the cartopy"
        " coordinate system given in site_coordinate_system. \"globe\""
        " is handled as a special case for options to construct a cartopy"
        " Globe object.")
    s_group.add_argument(
        "--site_x_coordinate", metavar="SITE_X_COORDINATE",
        help="The x coordinate key within the JSON file. The plugin default is"
        " 'longitude', but can be changed using this option if required.")
    s_group.add_argument(
        "--site_y_coordinate", metavar="SITE_Y_COORDINATE",
        help="The y coordinate key within the JSON file. The plugin default is"
        " 'latitude', but can be changed using this option if required.")

    args = parser.parse_args(args=argv)

    # Load Cubes and JSON.
    site_list = load_json_or_none(args.site_list_filepath)
    orography = load_cube(args.orography_filepath)
    landmask = load_cube(args.landmask_filepath)

    # Process Cube
    result = process(orography, landmask, site_list,
                     args.all_methods, args.land_constraint, args.minimum_dz,
                     args.search_radius, args.node_limit,
                     args.site_coordinate_system, args.site_coordinate_options,
                     args.site_x_coordinate, args.site_y_coordinate)

    # Save Cube
    save_netcdf(result, args.output_filepath)
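
A sketch combining both constraints, corresponding to method 4 in the description (file names illustrative; search_radius is in metres):

main(["site_list.json", "orography.nc", "landmask.nc", "neighbours.nc",
      "--land_constraint", "--minimum_dz", "--search_radius", "10000"])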