def _minimal_valid_ff(num_cols, num_rows, num_levels,
                      start_lon, start_lat, col_spacing, row_spacing,
                      grid_stagger):
    """
    Build and return a skeleton field-file object, populating only the
    minimum set of header values needed for it to pass validation.

    """
    new_ff = mule.FieldsFile()

    # Fixed-length header: mark the file as a fieldsfile (dataset type 3)
    # and record the requested grid staggering
    new_ff.fixed_length_header.dataset_type = 3
    new_ff.fixed_length_header.grid_staggering = grid_stagger

    # Integer constants: the grid dimensions
    new_ff.integer_constants = mule.ff.FF_IntegerConstants.empty()
    new_ff.integer_constants.num_cols = num_cols
    new_ff.integer_constants.num_rows = num_rows
    new_ff.integer_constants.num_p_levels = num_levels

    # Real constants: the grid origin and spacings
    new_ff.real_constants = mule.ff.FF_RealConstants.empty()
    new_ff.real_constants.start_lon = start_lon
    new_ff.real_constants.start_lat = start_lat
    new_ff.real_constants.col_spacing = col_spacing
    new_ff.real_constants.row_spacing = row_spacing

    # Level-dependent constants: fill each dimension with a simple ramp
    # scaled by the (1-based) dimension index
    new_ff.level_dependent_constants = (
        mule.ff.FF_LevelDependentConstants.empty(num_levels + 1))
    ramp = np.arange(num_levels + 1)
    n_dims = new_ff.level_dependent_constants.shape[1]
    for idim in range(1, n_dims + 1):
        new_ff.level_dependent_constants.raw[:, idim] = idim * ramp

    # Attach the sample STASHmaster to the file
    new_ff.stashmaster = STASHmaster.from_file(SAMPLE_STASHMASTER)

    return new_ff
Exemple #2
0
def _main():
    """
    Main function; accepts command line argument for paths to
    the input and output files.

    Raises:
        ValueError: if the input file does not exist.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] input_filename output_filename

    This script will write a new file where all WGDOS packed fields in the
    original are replaced by unpacked fields.
    """
    title = _banner(
        "UNPACK - Unpacks WGDOS packed fields in a UM FieldsFile "
        "(Using the Mule API)",
        banner_char="=")

    # Supply usage/description directly so argparse does not generate its
    # own usage line on top of the custom help text
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input files (it's obvious)
    parser.add_argument("input_filename", help=argparse.SUPPRESS)
    parser.add_argument("output_filename", help=argparse.SUPPRESS)

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    # (fixed: the original had a stray trailing comma after this call - a
    # Python 2 print-statement leftover which built and discarded a 1-tuple)
    print(_banner("(UNPACK) Module Information"))
    report_modules()
    print("")

    input_filename = args.input_filename
    if not os.path.exists(input_filename):
        msg = "File not found: {0}".format(input_filename)
        raise ValueError(msg)

    output_filename = args.output_filename

    # Check if the file is a pp file
    pp_mode = mule.pp.file_is_pp_file(input_filename)
    if pp_mode:
        # Make an empty fieldsfile object and attach the pp file's
        # field objects to it
        origfile = mule.FieldsFile()
        origfile.fields = mule.pp.fields_from_pp_file(input_filename)
        origfile._source_path = input_filename
    else:
        # Read in file as a FieldsFile
        origfile = mule.FieldsFile.from_file(input_filename)

    # Unpack fieldsfile
    unpackedfile = unpack(origfile)

    # Write file - pp input is written back out as a pp file
    if pp_mode:
        mule.pp.fields_to_pp_file(output_filename, unpackedfile.fields)
    else:
        unpackedfile.to_file(output_filename)
Exemple #3
0
                    if key.startswith("lb"):
                        new_vals.append(int(val))
                    else:
                        new_vals.append(float(val))
                entry[inc_exc][key] = new_vals
        if entry:
            case_dicts.append(entry)

    # Load the input files
    selected_fields = []
    pp_mode = False
    for ifile, input_file in enumerate(input_files):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(input_file):
            pp_mode = True
            umf = mule.FieldsFile()
            umf.fields = mule.pp.fields_from_pp_file(input_file)
            umf._source_path = input_file
        elif not pp_mode:
            umf = mule.load_umfile(input_file)
        else:
            msg = "Cannot mix and match UM files and pp files"
            raise ValueError(msg)

        # Iterate through the cases - each returns a list of matching fields
        for case in case_dicts:
            selected_fields += (select(umf,
                                       include=case.get("include", None),
                                       exclude=case.get("exclude", None)))

        # Copy first file to use for writing output
Exemple #4
0
def gen_pert_file(umf_clim, alpha, ens_member, date):
    """
    Generate an SST perturbation file from an input file containing a
    set of climatological fields and some values to setup a random
    number generator.

    Args:
        * umf_clim:
            A :class:`mule.UMFile` object containing the 12 field objects
            giving the SST (lbuser4=24) for each month of the year.
        * alpha:
            Factor used by algorithm (higher values lead to more extreme
            perturbations).
        * ens_member:
            Ensemble member number - used in random generator.
        * date:
            Datetime object giving the desired date for the perturbed field.

    Returns:
        * umf_pert:
            A :class:`mule.FieldsFile` object containing the perturbed
            SST field.

    """

    # Generate the perturbations; assume that the first 12 fields
    # in the file are the SST fields
    pert_field = gen_pert_field(umf_clim.fields[:12], alpha, ens_member, date)

    # Create a FieldsFile object for the output file
    ff = mule.FieldsFile()

    # Copy the fixed length header from the input, changing the type
    # to match a FieldsFile
    # NOTE(review): this binds the *same* header object as the input file
    # (no copy is made), so the dataset_type/time/grid_staggering
    # assignments below also mutate umf_clim's header - confirm this
    # side effect is intended by callers
    ff.fixed_length_header = umf_clim.fixed_length_header
    ff.fixed_length_header.dataset_type = 3

    # Populate the time entries from the date given; t1/t2 take the
    # requested date while t3 is stamped with the current wall-clock time
    now = datetime.now()
    ff.fixed_length_header.t1_year = date.year
    ff.fixed_length_header.t2_year = date.year
    ff.fixed_length_header.t3_year = now.year
    ff.fixed_length_header.t1_month = date.month
    ff.fixed_length_header.t2_month = date.month
    ff.fixed_length_header.t3_month = now.month
    ff.fixed_length_header.t1_day = date.day
    ff.fixed_length_header.t2_day = date.day
    ff.fixed_length_header.t3_day = now.day
    ff.fixed_length_header.t1_hour = date.hour
    ff.fixed_length_header.t2_hour = date.hour
    ff.fixed_length_header.t3_hour = now.hour
    ff.fixed_length_header.t1_minute = date.minute
    ff.fixed_length_header.t2_minute = date.minute
    ff.fixed_length_header.t3_minute = now.minute
    ff.fixed_length_header.t1_second = date.second
    ff.fixed_length_header.t2_second = date.second
    ff.fixed_length_header.t3_second = now.second

    # Copy the integer headers that are populated
    # (the input may carry fewer header words than a FieldsFile, so only
    # the leading slice of the empty template is overwritten)
    ff.integer_constants = mule.ff.FF_IntegerConstants.empty()
    ff.integer_constants.raw[:len(umf_clim.integer_constants.raw)] = (
        umf_clim.integer_constants.raw)

    # Copy the real headers that are populated
    ff.real_constants = mule.ff.FF_RealConstants.empty()
    ff.real_constants.raw[:len(umf_clim.real_constants.raw)] = (
        umf_clim.real_constants.raw)

    # We can't be sure that the input file's grid was setup sensibly, so
    # assume that the copied field header is right and setup the file
    # headers from that
    # (start lat/lon are the zeroth point plus half a grid spacing)
    ff.fixed_length_header.grid_staggering = 6
    ff.real_constants.start_lat = pert_field.bzy + 0.5 * pert_field.bdy
    ff.real_constants.start_lon = pert_field.bzx + 0.5 * pert_field.bdx

    # The ancil is likely missing the level dependent constants, so
    # put some in which are the minimum size for a FieldsFile
    if umf_clim.level_dependent_constants is None:
        ff.integer_constants.num_p_levels = 1
        ff.level_dependent_constants = (
            mule.ff.FF_LevelDependentConstants.empty(2))

    # Copy the row/column headers if they were set
    # (both must be present together for a variable-resolution grid)
    if (umf_clim.row_dependent_constants is not None
            and umf_clim.column_dependent_constants is not None):
        ff.row_dependent_constants = umf_clim.row_dependent_constants
        ff.column_dependent_constants = umf_clim.column_dependent_constants

    # Add the perturbation field and output
    ff.fields = [pert_field]

    return ff
Exemple #5
0
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to summarise.

    Raises:
        ValueError: if the input file does not exist, a filter option is
            malformed, or a user-supplied STASHmaster cannot be loaded.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [options] input_file

    This script will output a summary table of the lookup headers in a UM
    file, with the columns selected by the user.
    """
    title = _banner(
        "SUMMARY - Print a summary of the fields in a UM File "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the lookup names as they appear in Mule
    # (header-3 names first, then any header-2 names not already present)
    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [
        name for name, _ in mule._LOOKUP_HEADER_2 if name not in lookup_names
    ]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible lookup names for the column-names option:
    {0}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(
        textwrap.fill(lookup_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--column-names",
        metavar="--column-names name1[,name2][...]",
        help="set the names of the lookup header items to print, in the \n"
        "order the columns should appear as a comma separated list. A \n"
        "special entry of \"stash_name\" will put in the field's name \n"
        "according to the STASHmaster, \"index\" will give the field's \n"
        "index number in the file, and \"t1\" or \"t2\" will give the first \n"
        "and second time from the lookup (nicely formatted)\n ")

    parser.add_argument(
        "--heading-frequency",
        metavar="N",
        type=int,
        help="repeat the column heading block every N lines (to avoid \n"
        "having to scroll too far to identify columns in the output) A \n"
        "value of 0 means do not repeat the heading block\n ")

    parser.add_argument(
        "--field-index",
        metavar="i1[,i2][,i3:i5][...]",
        help="limit the output to specific fields by index (comma-separated \n"
        "list of single indices, or ranges of indices separated by a single \n"
        "colon-character)\n ")

    parser.add_argument(
        "--field-property",
        metavar="key1=value1[,key2=value2][...]",
        help="limit the output to specific fields using a property string \n"
        "(comma-separated list of key=value pairs where key is the name of \n"
        "a lookup property and value is what it must be set to)\n ")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number summary will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is : \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    # (fixed: removed a stray trailing comma after this call - a Python 2
    # print-statement leftover which built and discarded a 1-tuple)
    print(_banner("(SUMMARY) Module Information"))
    report_modules()
    print("")

    # Process column names
    if args.column_names is not None:
        properties = args.column_names.split(",")
        PRINT_SETTINGS["column_names"] = properties

    # Process field filtering by index argument; accepts single indices
    # or "start:stop" ranges (stop exclusive, as with Python's range)
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument (key=value pairs with
    # integer values only)
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster; a version number like "10.2"
    # selects the standard path, anything else is treated as a file path
    # (fixed: the "." in the version pattern is now escaped - previously it
    # matched any character, so e.g. "10x2" was treated as a version number)
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process heading lines
    if args.heading_frequency is not None:
        PRINT_SETTINGS["heading_frequency"] = args.heading_frequency

    # Get the filename and load it using Mule
    filename = args.input_file

    if os.path.exists(filename):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(filename):
            # Make an empty fieldsfile object and attach the pp file's
            # field objects to it
            um_file = mule.FieldsFile()
            um_file.fields = mule.pp.fields_from_pp_file(filename)
            um_file._source_path = filename
            if stashm is not None:
                um_file.attach_stashmaster_info(stashm)
            # Override the component filter as only the lookup is
            # available in a pp file
            PRINT_SETTINGS["component_filter"] = ["lookup"]
        else:
            um_file = mule.load_umfile(filename, stashmaster=stashm)

        # Now print the object to stdout, if a SIGPIPE is received handle
        # it appropriately
        try:
            field_summary(um_file)
        except IOError as error:
            if error.errno != errno.EPIPE:
                raise
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)
Exemple #6
0
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to print.

    Raises:
        ValueError: if the input file does not exist, a filter option is
            malformed, or a user-supplied STASHmaster cannot be loaded.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [options] input_filename

    This script will output the contents of the headers from a UM file to
    stdout.  The default output may be customised with a variety of options
    (see below).
    """
    title = _banner(
        "PUMF-II - Pretty Printer for UM Files, version II "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the component names as they appear in Mule
    component_names = ", ".join(
        (["fixed_length_header"] +
         [name for name, _ in mule.UMFile.COMPONENTS] + ["lookup"]))

    # Lookup names: header-3 names first, then any header-2 names not
    # already present
    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [
        name for name, _ in mule._LOOKUP_HEADER_2 if name not in lookup_names
    ]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible component names for the component option:
    {0}

    possible lookup names for the field-property option:
    {1}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(
        textwrap.fill(component_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "),
        textwrap.fill(lookup_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--include-missing",
        help="include header values which are set to MDI and entries for \n"
        "components which are not present in the file (by default this will \n"
        "be hidden)\n ",
        action="store_true")
    parser.add_argument(
        "--use-indices",
        help="list headers by their indices (instead of only listing named \n"
        "headers)\n ",
        action="store_true")
    parser.add_argument(
        "--headers-only",
        help="only list headers (do not read data and calculate any derived \n"
        "statistics)\n",
        action="store_true")
    parser.add_argument(
        "--components",
        help="limit the header output to specific components \n"
        "(comma-separated list of component names, with no spaces)\n ",
        metavar="component1[,component2][...]")
    parser.add_argument(
        "--field-index",
        help="limit the lookup output to specific fields by index \n"
        "(comma-separated list of single indices, or ranges of indices \n"
        "separated by a single colon-character)\n ",
        metavar="i1[,i2][,i3:i5][...]")
    parser.add_argument(
        "--field-property",
        help="limit the lookup output to specific field using a property \n"
        "string (comma-separated list of key=value pairs where the key is \n"
        "the name of a lookup property and the value is the value it must \n"
        "take)\n ",
        metavar="key1=value1[,key2=value2][...]")
    parser.add_argument("--print-columns",
                        help="how many columns should be printed\n ",
                        metavar="N")
    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number pumf will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is : \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    # (fixed: removed a stray trailing comma after this call - a Python 2
    # print-statement leftover which built and discarded a 1-tuple)
    print(_banner("(PUMF-II) Module Information"))
    report_modules()
    print("")

    # Process component filtering argument
    if args.components is not None:
        PRINT_SETTINGS["component_filter"] = (args.components.split(","))

    # Process field filtering by index argument; accepts single indices
    # or "start:stop" ranges (stop exclusive, as with Python's range)
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field-index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument (key=value pairs with
    # integer values only)
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field-property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster; a version number like "10.2"
    # selects the standard path, anything else is treated as a file path
    # (fixed: the "." in the version pattern is now escaped - previously it
    # matched any character, so e.g. "10x2" was treated as a version number)
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process remaining options
    if args.print_columns is not None:
        PRINT_SETTINGS["print_columns"] = int(args.print_columns)
    if args.include_missing:
        PRINT_SETTINGS["include_missing"] = True
    if args.use_indices:
        PRINT_SETTINGS["use_indices"] = True
    if args.headers_only:
        PRINT_SETTINGS["headers_only"] = True

    # Get the filename and load it using Mule
    filename = args.input_file
    if os.path.exists(filename):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(filename):
            # Make an empty fieldsfile object and attach the pp file's
            # field objects to it
            um_file = mule.FieldsFile()
            um_file.fields = mule.pp.fields_from_pp_file(filename)
            um_file._source_path = filename
            if stashm is not None:
                um_file.attach_stashmaster_info(stashm)
            # Override the component filter as only the lookup is
            # available in a pp file
            PRINT_SETTINGS["component_filter"] = ["lookup"]
        else:
            um_file = mule.load_umfile(filename, stashmaster=stashm)
        # Now print the object to stdout, if a SIGPIPE is received handle
        # it appropriately
        try:
            pprint(um_file)
        except IOError as error:
            if error.errno != errno.EPIPE:
                raise
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)