Exemplo n.º 1
0
def _main():
    """
    Main function; accepts command line argument for paths to
    the input and output files.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] input_filename output_filename

    This script will write a new file where all WGDOS packed fields in the
    original are replaced by unpacked fields.
    """
    title = _banner(
        "UNPACK - Unpacks WGDOS packed fields in a UM FieldsFile "
        "(Using the Mule API)",
        banner_char="=")

    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input files (it's obvious)
    parser.add_argument("input_filename", help=argparse.SUPPRESS)
    parser.add_argument("output_filename", help=argparse.SUPPRESS)

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(UNPACK) Module Information"))
    report_modules()
    print("")

    input_filename = args.input_filename
    if not os.path.exists(input_filename):
        msg = "File not found: {0}".format(input_filename)
        raise ValueError(msg)

    output_filename = args.output_filename

    # Check if the file is a pp file
    pp_mode = mule.pp.file_is_pp_file(input_filename)
    if pp_mode:
        # Make an empty fieldsfile object and attach the pp file's
        # field objects to it
        origfile = mule.FieldsFile()
        origfile.fields = mule.pp.fields_from_pp_file(input_filename)
        origfile._source_path = input_filename
    else:
        # Read in file as a FieldsFile
        origfile = mule.FieldsFile.from_file(input_filename)

    # Unpack fieldsfile
    unpackedfile = unpack(origfile)

    # Write file (pp fields must go back out via the pp writer)
    if pp_mode:
        mule.pp.fields_to_pp_file(output_filename, unpackedfile.fields)
    else:
        unpackedfile.to_file(output_filename)
Exemplo n.º 2
0
def _main():
    """
    Main function; accepts command line arguments and implements or logic by
    calling the select function multiple times.
    """

    help_prolog = """    usage:
      %(prog)s input_filename [input_filename2 [input_filename3]] \
                                            output_filename [filter arguments]

    This script will select or exclude fields from one or more UM file based
    on the values set in the lookup headers of each field.
    """
    title = _banner(
        "SELECT - Field filtering tool for UM files "
        "(using the Mule API)",
        banner_char="=")

    help_epilog = """
    examples:
      Select U and V wind components (lbfc 56 57) at model level 1
      mule-select ifile ofile --include lbfc=56,57 lblev=1

      Select U and V wind components (lbfc 56 57) at model level 1 from
      two input files
      mule-select ifile1 ifile2 ofile --include lbfc=56,57 lblev=1

      Select pressure (lbfc  8) but not at surface (lblev 9999)
      mule-select ifile ofile --include lbfc=8 --exclude blev=9999

      Select all fields which match U wind at model level 2 or
      fields which are at model level 1
      mule-select ifile ofile --include lblev=2 lbfc=56 --or --include lblev=1

    often used codes:
      lbfc    field code
      lbft    forecast period in hours
      lblev   fieldsfile level code
      lbuser4 stash section and item number (section x 1000 + item)
      lbproc  processing code
      lbpack  packing method
      lbvc    vertical co-ordinate type
      lbyr    year (validity time / start of processing period)
      lbmon   month (validity time / start of processing period)
      lbdat   day (validity time / start of processing period)
      lbhr    hour (validity time / start of processing period)
      lbmin   minute (validity time / start of processing period)
      lbsec   second (validity time / start of processing period)

    for other codes please see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """

    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=textwrap.dedent(help_epilog))

    pg = parser.add_argument_group('filter arguments')

    pg.add_argument(
        "--include",
        nargs="+",
        metavar="name=val1[,val2]",
        help="specify lookup headers to include, the names should \n"
        "correspond to lookup entry names and the values to the \n"
        "desired values which must match \n ")
    pg.add_argument(
        "--exclude",
        nargs="+",
        metavar="name=val1[,val2]",
        help="specify lookup headers to exclude, the names should \n"
        "correspond to lookup entry names and the values to the \n"
        "desired values which must not match \n ")
    pg.add_argument(
        "--or",
        metavar="",
        help="specify the separation of two criteria sections that \n"
        "should be \"or\"d together; this is a positional argument")

    # There should be at least 4 arguments (i.e. the two filename plus one
    # include/exclude flag and a value for it)
    if len(sys.argv) < 4:
        parser.print_help()
        parser.exit(1)

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(SELECT) Module Information"))
    report_modules()
    print("")

    # The files must be the first arguments, with the output file being
    # the last one.  Note that we don't include these in the parser
    # explicitly, because the way we wish to call the parser below is a
    # little odd (once per --or to separate the cases) and including
    # the files would interfere with this

    input_files = []
    while len(sys.argv) > 3:
        # Once the argument 2 positions ahead is a flag, exit
        if sys.argv[2].startswith("--"):
            break
        # Otherwise gather the input files up
        input_files.append(sys.argv.pop(1))

    # Whatever positional remains just before the flags is the output file
    output_file = sys.argv.pop(1)

    for input_file in input_files:
        if not os.path.isfile(input_file):
            msg = "File not found: " + input_file
            raise ValueError(msg)

    # Pickup the "--or" argument, splitting the arguments up and then pass them
    # through to the parser as if they were separate arguments
    cases = " ".join(sys.argv[1:]).split("--or")
    arglist = [parser.parse_args(case.split()) for case in cases]
Exemplo n.º 3
0
def _main():
    """
    Main function; accepts command line arguments and provides the cutout
    specification, input and output files to be cutout.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [--stashmaster STASHMASTER] {indices,coords} ...

    This script will extract a sub-region from a UM FieldsFile, producing
    a new file.
    """
    title = _banner(
        "CUTOUT-II - Cutout tool for UM Files, version II "
        "(using the Mule API)",
        banner_char="=")

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number cutout will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is : \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # The cutout command has 2 forms; the user may describe the region using
    # a series of indices or using the co-ordinates of two opposing corners
    subparsers = parser.add_subparsers()

    # Options for indices
    sub_prolog = """    usage:
      {0} indices [-h] input_file output_file zx zy nx ny

    The index based version of the script will extract a domain
    of whole points defined by the given start indices and lengths
               """.format(os.path.basename(sys.argv[0]))

    parser_index = subparsers.add_parser(
        "indices",
        formatter_class=argparse.RawTextHelpFormatter,
        description=title + textwrap.dedent(sub_prolog),
        usage=argparse.SUPPRESS,
        help="cutout by indices (run \"%(prog)s indices --help\" \n"
        "for specific help on this command)\n ")

    parser_index.add_argument("input_file", help="File containing source\n ")
    parser_index.add_argument("output_file", help="File for output\n ")

    parser_index.add_argument(
        "zx",
        type=int,
        help="the starting x (column) index of the region to cutout from \n"
        "the source file\n ")

    parser_index.add_argument(
        "zy",
        type=int,
        help="the starting y (row) index of the region to cutout from \n"
        "the source file\n ")

    parser_index.add_argument(
        "nx",
        type=int,
        help="the number of x (column) points to cutout from the source "
        "file\n ")

    parser_index.add_argument(
        "ny",
        type=int,
        help="the number of y (row) points to cutout from the source file\n")

    # Options for co-ordinates
    sub_prolog = """    usage:
      {0} coords [-h] [--native-grid]
               input_file output_file SW_lon SW_lat NE_lon NE_lat

    The co-ordinate based version of the script will extract a domain
    of whole points which fit within the given corner points
               """.format(os.path.basename(sys.argv[0]))

    parser_coords = subparsers.add_parser(
        "coords",
        formatter_class=argparse.RawTextHelpFormatter,
        description=title + textwrap.dedent(sub_prolog),
        usage=argparse.SUPPRESS,
        help="cutout by coordinates (run \"%(prog)s coords --help\" \n"
        "for specific help on this command)\n")

    parser_coords.add_argument("input_file", help="File containing source\n ")
    parser_coords.add_argument("output_file", help="File for output\n ")

    parser_coords.add_argument(
        "--native-grid",
        action="store_true",
        help="if set, cutout will take the provided co-ordinates to be on \n"
        "the file's native grid (otherwise it will assume they are regular \n"
        "co-ordinates and apply any needed rotations automatically). \n"
        "Therefore it does nothing for non-rotated grids\n ")

    parser_coords.add_argument(
        "SW_lon",
        type=float,
        help="the longitude of the South-West corner point of the region \n"
        "to cutout from the source file\n ")

    parser_coords.add_argument(
        "SW_lat",
        type=float,
        help="the latitude of the South-West corner point of the region \n"
        "to cutout from the source file\n ")

    parser_coords.add_argument(
        "NE_lon",
        type=float,
        help="the longitude of the North-East corner point of the region \n"
        "to cutout from the source file\n ")

    parser_coords.add_argument(
        "NE_lat",
        type=float,
        help="the latitude of the North-East corner point of the region \n"
        "to cutout from the source file\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(CUTOUT-II) Module Information"))
    report_modules()
    print("")

    # Guard clause: fail early if the input file does not exist
    filename = args.input_file
    if not os.path.exists(filename):
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

    # If provided, load the given stashmaster; a value which looks like a
    # version number (digits, a literal dot, digits) selects a standard
    # install path, anything else is treated as an explicit file path
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Abort for pp files (they don't have the required information)
    if mule.pp.file_is_pp_file(filename):
        msg = "File {0} is a pp file, which cutout does not support"
        raise ValueError(msg.format(filename))

    # Load the file using Mule - filter it according to the file types
    # which cutout can handle
    ff = mule.load_umfile(filename, stashmaster=stashm)
    if ff.fixed_length_header.dataset_type not in (1, 2, 3, 4):
        msg = (
            "Invalid dataset type ({0}) for file: {1}\nCutout is only "
            "compatible with FieldsFiles (3), Dumps (1|2) and Ancils (4)".
            format(ff.fixed_length_header.dataset_type, filename))
        raise ValueError(msg)

    # Perform the cutout; only the "indices" subcommand defines "zx", so
    # its presence distinguishes which subparser handled the arguments
    if hasattr(args, "zx"):
        ff_out = cutout(ff, args.zx, args.zy, args.nx, args.ny)
    else:
        ff_out = cutout_coords(ff, args.SW_lon, args.SW_lat, args.NE_lon,
                               args.NE_lat, args.native_grid)

    # Write the result out to the new file
    ff_out.to_file(args.output_file)
Exemplo n.º 4
0
def _main():
    """
    Main function; accepts command line arguments and calls routines

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [--date-fmt DATE_FMT]
                    input_file alpha date ens_member output_file

    This script will use 12 months of climatology data, plus a target date
    for the perturbed field and  produce the perturbation in a new file.
    """
    title = _banner("SSTPERT - Produce SST Perturbations (using the Mule API)",
                    banner_char="=")

    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file",
                        help="path to input file containing climatology")
    parser.add_argument("alpha", help="intensity factor for perturbations")
    parser.add_argument("date", help="desired date for perturbations")
    parser.add_argument("--date-fmt",
                        help="format string for date to be passed to "
                        "datetime.strptime - \nsyntax the same as Unix date "
                        "(default is %%Y%%m%%d%%H%%M)",
                        default="%Y%m%d%H%M")
    parser.add_argument("ens_member",
                        help="ensemble member number, used in generation "
                        "of random seed")
    parser.add_argument("output_file",
                        help="path to output file for perturbation")

    # If the user supplied too few arguments, print the help text and exit
    if len(sys.argv) < 5:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(SSTPERT) Module Information"))
    report_modules()
    print("")

    # Get the date information using the format if provided
    date = datetime.strptime(args.date, args.date_fmt)

    # Check for valid alpha
    try:
        alpha = float(args.alpha)
    except ValueError:
        msg = "Value for alpha factor not a valid float"
        raise ValueError(msg)

    # Check for a valid (non-negative integer) ensemble member number
    if args.ens_member.isdigit():
        ens_member = int(args.ens_member)
    else:
        msg = "Value for ensemble member is not a valid integer"
        raise ValueError(msg)

    # Guard clause: fail early if the input file does not exist
    filename = args.input_file
    if not os.path.exists(filename):
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

    # Load the file (should be an ancil)
    um_file = mule.AncilFile.from_file(filename)
    # Generate the new file
    ff = gen_pert_file(um_file, alpha, ens_member, date)
    # Write it out
    ff.to_file(args.output_file)
Exemplo n.º 5
0
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to summarise.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [options] input_file

    This script will output a summary table of the lookup headers in a UM
    file, with the columns selected by the user.
    """
    title = _banner(
        "SUMMARY - Print a summary of the fields in a UM File "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the lookup names as they appear in Mule
    # (header-3 names first, then any header-2 names not already seen)
    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [
        name for name, _ in mule._LOOKUP_HEADER_2 if name not in lookup_names
    ]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible lookup names for the column-names option:
    {0}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(
        textwrap.fill(lookup_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--column-names",
        metavar="--column-names name1[,name2][...]",
        help="set the names of the lookup header items to print, in the \n"
        "order the columns should appear as a comma separated list. A \n"
        "special entry of \"stash_name\" will put in the field's name \n"
        "according to the STASHmaster, \"index\" will give the field's \n"
        "index number in the file, and \"t1\" or \"t2\" will give the first \n"
        "and second time from the lookup (nicely formatted)\n ")

    parser.add_argument(
        "--heading-frequency",
        metavar="N",
        type=int,
        help="repeat the column heading block every N lines (to avoid \n"
        "having to scroll too far to identify columns in the output) A \n"
        "value of 0 means do not repeat the heading block\n ")

    parser.add_argument(
        "--field-index",
        metavar="i1[,i2][,i3:i5][...]",
        help="limit the output to specific fields by index (comma-separated \n"
        "list of single indices, or ranges of indices separated by a single \n"
        "colon-character)\n ")

    parser.add_argument(
        "--field-property",
        metavar="key1=value1[,key2=value2][...]",
        help="limit the output to specific fields using a property string \n"
        "(comma-separated list of key=value pairs where key is the name of \n"
        "a lookup property and value is what it must be set to)\n ")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number summary will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is : \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(SUMMARY) Module Information"))
    report_modules()
    print("")

    # Process column names
    if args.column_names is not None:
        properties = args.column_names.split(",")
        PRINT_SETTINGS["column_names"] = properties

    # Process field filtering by index argument; accepts single indices
    # ("3") and half-open ranges ("3:7", end exclusive as per range())
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument (name=integer pairs)
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster; a value which looks like a
    # version number (digits, a literal dot, digits) selects a standard
    # install path, anything else is treated as an explicit file path
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process heading lines
    if args.heading_frequency is not None:
        PRINT_SETTINGS["heading_frequency"] = args.heading_frequency

    # Guard clause: fail early if the input file does not exist
    filename = args.input_file
    if not os.path.exists(filename):
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

    # Check if this is a pp file
    if mule.pp.file_is_pp_file(filename):
        # Make an empty fieldsfile object and attach the pp file's
        # field objects to it
        um_file = mule.FieldsFile()
        um_file.fields = mule.pp.fields_from_pp_file(filename)
        um_file._source_path = filename
        if stashm is not None:
            um_file.attach_stashmaster_info(stashm)
        # Override the component filter as only the lookup is
        # available in a pp file
        PRINT_SETTINGS["component_filter"] = ["lookup"]
    else:
        um_file = mule.load_umfile(filename, stashmaster=stashm)

    # Now print the object to stdout, if a SIGPIPE is received handle
    # it appropriately (e.g. output piped through "head")
    try:
        field_summary(um_file)
    except IOError as error:
        if error.errno != errno.EPIPE:
            raise
Exemplo n.º 6
0
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to print.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [options] input_filename

    This script will output the contents of the headers from a UM file to
    stdout.  The default output may be customised with a variety of options
    (see below).
    """
    title = _banner(
        "PUMF-II - Pretty Printer for UM Files, version II "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the component names as they appear in Mule
    component_names = ", ".join(
        (["fixed_length_header"] +
         [name for name, _ in mule.UMFile.COMPONENTS] + ["lookup"]))

    # Lookup names: header-3 names first, then any header-2 names not
    # already seen
    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [
        name for name, _ in mule._LOOKUP_HEADER_2 if name not in lookup_names
    ]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible component names for the component option:
    {0}

    possible lookup names for the field-property option:
    {1}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(
        textwrap.fill(component_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "),
        textwrap.fill(lookup_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--include-missing",
        help="include header values which are set to MDI and entries for \n"
        "components which are not present in the file (by default this will \n"
        "be hidden)\n ",
        action="store_true")
    parser.add_argument(
        "--use-indices",
        help="list headers by their indices (instead of only listing named \n"
        "headers)\n ",
        action="store_true")
    parser.add_argument(
        "--headers-only",
        help="only list headers (do not read data and calculate any derived \n"
        "statistics)\n",
        action="store_true")
    parser.add_argument(
        "--components",
        help="limit the header output to specific components \n"
        "(comma-separated list of component names, with no spaces)\n ",
        metavar="component1[,component2][...]")
    parser.add_argument(
        "--field-index",
        help="limit the lookup output to specific fields by index \n"
        "(comma-separated list of single indices, or ranges of indices \n"
        "separated by a single colon-character)\n ",
        metavar="i1[,i2][,i3:i5][...]")
    parser.add_argument(
        "--field-property",
        help="limit the lookup output to specific field using a property \n"
        "string (comma-separated list of key=value pairs where the key is \n"
        "the name of a lookup property and the value is the value it must \n"
        "take)\n ",
        metavar="key1=value1[,key2=value2][...]")
    parser.add_argument("--print-columns",
                        help="how many columns should be printed\n ",
                        metavar="N")
    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number pumf will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is : \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information (no trailing comma after print() - that was
    # a Python 2 print-statement artefact which made a useless 1-tuple)
    print(_banner("(PUMF-II) Module Information"))
    report_modules()
    print("")

    # Process component filtering argument
    if args.components is not None:
        PRINT_SETTINGS["component_filter"] = (args.components.split(","))

    # Process field filtering by index argument; accepts single indices
    # ("3") and half-open ranges ("3:7", end exclusive as per range())
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field-index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument (name=integer pairs)
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field-property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster; a value which looks like a
    # version number (digits, a literal dot, digits) selects a standard
    # install path, anything else is treated as an explicit file path
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process remaining options
    if args.print_columns is not None:
        PRINT_SETTINGS["print_columns"] = int(args.print_columns)
    if args.include_missing:
        PRINT_SETTINGS["include_missing"] = True
    if args.use_indices:
        PRINT_SETTINGS["use_indices"] = True
    if args.headers_only:
        PRINT_SETTINGS["headers_only"] = True

    # Guard clause: fail early if the input file does not exist
    filename = args.input_file
    if not os.path.exists(filename):
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

    # Check if this is a pp file
    if mule.pp.file_is_pp_file(filename):
        # Make an empty fieldsfile object and attach the pp file's
        # field objects to it
        um_file = mule.FieldsFile()
        um_file.fields = mule.pp.fields_from_pp_file(filename)
        um_file._source_path = filename
        if stashm is not None:
            um_file.attach_stashmaster_info(stashm)
        # Override the component filter as only the lookup is
        # available in a pp file
        PRINT_SETTINGS["component_filter"] = ["lookup"]
    else:
        um_file = mule.load_umfile(filename, stashmaster=stashm)

    # Now print the object to stdout, if a SIGPIPE is received handle
    # it appropriately (e.g. output piped through "head")
    try:
        pprint(um_file)
    except IOError as error:
        if error.errno != errno.EPIPE:
            raise
Exemplo n.º 7
0
def _main():
    """
    Main function; accepts command line arguments and provides the fixed
    region specification, input and output files.

    Raises:
        ValueError: if the input file does not exist, is a pp file, has
            an unsupported dataset type, or a user-supplied STASHmaster
            cannot be loaded.

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] [options] input_file output_file region_x region_y

    This script will extract a fixed-grid sub-region from a variable
    resolution UM FieldsFile, producing a new file.
    """
    title = _banner(
        "TRIM - Fixed region extraction tool for UM Files "
        "(using the Mule API)",
        banner_char="=")

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
        )

    # No need to output help text for the files (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)
    parser.add_argument("output_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "region_x", type=int,
        help="the x index of the *region* to extract, starting from 1. \n"
        "In a typical variable resolution FieldsFile the central region \n"
        "will be given by '2'\n ")

    parser.add_argument(
        "region_y", type=int,
        help="the y index of the *region* to extract, starting from 1. \n"
        "In a typical variable resolution FieldsFile the central region \n"
        "will be given by '2'\n")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number trim will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is: \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    # (fixed: removed Python 2 style trailing comma after print call)
    print(_banner("(TRIM) Module Information"))
    report_modules()
    print("")

    # Guard clause: fail fast if the input file is missing
    filename = args.input_file
    if not os.path.exists(filename):
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

    # If provided, load the given stashmaster; an argument that looks
    # like a version number (e.g. "10.2") selects a standard install,
    # anything else is treated as an explicit file path
    stashm = None
    if args.stashmaster is not None:
        # Note: the dot is escaped - previously r"\d+.\d+" would also
        # match strings such as "1023" and treat them as versions
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Abort for pp files (they don't have the required information)
    if mule.pp.file_is_pp_file(filename):
        msg = "File {0} is a pp file, which trim does not support"
        raise ValueError(msg.format(filename))

    # Load the file using Mule - filter it according to the file types
    # which trim can handle
    ff = mule.load_umfile(filename, stashmaster=stashm)
    if ff.fixed_length_header.dataset_type not in (1, 2, 3, 4):
        msg = (
            "Invalid dataset type ({0}) for file: {1}\nTrim is only "
            "compatible with FieldsFiles (3), Dumps (1|2) and Ancils (4)"
            .format(ff.fixed_length_header.dataset_type, filename))
        raise ValueError(msg)

    # Perform the trim operation
    ff_out = trim_fixed_region(ff, args.region_x, args.region_y)

    # Write the result out to the new file
    ff_out.to_file(args.output_file)
Exemplo n.º 8
0
def _main():
    """
    Main function; accepts command line argument for paths to
    the input and output files.

    Raises:
        ValueError: if the input file does not exist or is a pp file
            (pp files lack the headers fixframe needs).

    """
    # Setup help text
    help_prolog = """    usage:
      %(prog)s [-h] input_filename output_filename

    This script will take a MakeBC generated frame file and produce
    a CreateBC compatible frame file.
    """
    title = _banner(
        "FIXFRAME - Converter for old-style UM frames files "
        "(Using the Mule API)",
        banner_char="=")

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    parser.add_argument(
        "input_filename",
        help="First argument is the path and name of the MakeBC frames file \n"
        "to be fixed\n ")
    parser.add_argument(
        "output_filename",
        help="Second argument is the path and name of the CreateBC frames \n"
        "file to be produced\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    # (fixed: removed Python 2 style trailing comma after print call)
    print(_banner("(fixframe) Module Information"))
    report_modules()
    print("")

    input_filename = args.input_filename
    if not os.path.exists(input_filename):
        msg = "File not found: {0}".format(input_filename)
        raise ValueError(msg)

    # Abort for pp files (they don't have the required information)
    if mule.pp.file_is_pp_file(input_filename):
        msg = "File {0} is a pp file, which fixframe does not support"
        raise ValueError(msg.format(input_filename))

    output_filename = args.output_filename

    # Read in file as a FieldsFile - MakeBC frames do not pass fieldsfile
    # validation so will generate some warnings
    origfile = mule.FieldsFile.from_file(input_filename)
    # Report the grid before and after fixing so the user can compare
    _printgrid(origfile, input_filename)
    # Fix the headers
    fixedfile = fixframe(origfile)
    _printgrid(fixedfile, output_filename)
    # Write file
    fixedfile.to_file(output_filename)