def __call__(self, args):
    mf_target = mule.load_umfile(args.target)
    mf_source = mule.load_umfile(args.source)
    out = regrid(mf_target, mf_source)
    # Disable validation so the regridded file can still be written if its
    # headers no longer pass Mule's consistency checks
    out.validate = lambda *args, **kwargs: None
    # utils.mule_write_with_replace(out, args.output)
    out.to_file(args.output)

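# A minimal usage sketch (hypothetical names): the __call__ entry point above
# expects an argparse-style namespace, so it can also be driven directly.
# 'Regrid' and the file paths below are assumptions, not part of the source.
import argparse

args = argparse.Namespace(
    target="target_grid.ff",    # file on the grid to regrid onto (assumed path)
    source="source_fields.ff",  # file holding the fields to regrid (assumed path)
    output="regridded.ff",      # where to write the result (assumed path)
)
Regrid()(args)  # assumes the enclosing class is named 'Regrid'
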
def perform_comparison(self):
    """Compare the files using mule-cumf."""
    # Turn the filenames into their absolute equivalents
    file1 = os.path.realpath(self.files[0])
    file2 = os.path.realpath(self.files[1])

    # Identify which of the two files is the KGO file
    if self.kgo is not None:
        kgo_file = [file1, file2][self.kgo]
        # If this file is missing, no comparison can be performed; it
        # could be that this task is brand new
        if not os.path.exists(kgo_file):
            self.parent.reporter(
                "KGO File (file {0}) appears to be missing".format(
                    self.kgo + 1),
                prefix="[FAIL] ")
            # Note that by exiting early this task counts as failed
            return

    # Load them using Mule - if either file doesn't appear to be a
    # recognised file type, this will abort... if it is recognised but
    # fails to validate, a warning will be raised and it may fail later.
    # However rose_ana will catch this and report it to the user if needed.
    self.umf1 = mule.load_umfile(file1)
    self.umf2 = mule.load_umfile(file2)

    if self.prognostic_only:
        self.select_prognostic_fields()

    # Create the comparison object using Mule cumf
    self.compare = UMFileComparison(self.umf1, self.umf2)

    # If the comparison was successful, nothing more needs to be done
    if self.compare.match:
        self.passed = True
        # Capture the output from cumf's summary output and put it into
        # the rose_ana output
        prefix = "[INFO] "
        self.write_output_info(prefix=prefix)
    else:
        # Capture the output from cumf's summary output and put it into
        # the rose_ana output
        prefix = "[FAIL] "
        self.write_output_info(prefix=prefix)

        # Get a reference to the log directory
        log_root = os.path.dirname(os.environ["ROSE_TASK_LOG_ROOT"])
        # Create a suitable filename for the cumf output using the
        # task name (so it'll be unique)
        basename = self.get_output_basename()
        # Write the full results of the cumf comparison
        self.write_full_output(log_root, basename)
        # Write a summary of the field differences
        self.write_summ_output(log_root, basename)

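# Hedged sketch: the same comparison can be run outside rose_ana by loading
# both files with Mule and building the UMFileComparison object used above;
# the import path and file names here are assumptions.
import mule
from um_utils.cumf import UMFileComparison

umf1 = mule.load_umfile("run_a.ff")
umf2 = mule.load_umfile("run_b.ff")
compare = UMFileComparison(umf1, umf2)
print("match" if compare.match else "differ")
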
def from_mask(cls, mask_path):
    umfile = mule.load_umfile(mask_path)
    mask_field = None
    for f in umfile.fields:
        if f.lbuser4 == 30:  # STASH 30: land-sea mask
            mask_field = f
            break
    mask = xarray.DataArray(mask_field.get_data(),
                            dims=["lat", "lon"],
                            name=os.path.basename(mask_path))
    # Build coordinates from the field's grid origin and spacing
    mask.coords["lon"] = (
        mask_field.bzx + (1 + numpy.arange(mask.shape[1])) * mask_field.bdx)
    mask.coords["lat"] = (
        mask_field.bzy + (1 + numpy.arange(mask.shape[0])) * mask_field.bdy)
    # Keep only ocean points (mask value 0); land points become NaN
    mask = mask.where(mask == 0)
    mask.lon.attrs["standard_name"] = "longitude"
    mask.lat.attrs["standard_name"] = "latitude"
    mask.lon.attrs["units"] = "degrees_east"
    mask.lat.attrs["units"] = "degrees_north"
    return mask

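# Hedged usage sketch: from_mask takes 'cls', so it is presumably a
# classmethod on its (unshown) owning class; 'Domain' and the mask path
# below are assumptions used purely for illustration.
mask = Domain.from_mask("qrparm.mask")
print(mask.lat.values[0], mask.lon.values[0])  # ocean points are 0, land is NaN
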
def era5grib_um(
    time,
    output=None,
    target=None,
    source="NCI",
    format="grib",
    era5land: bool = True,
    polar: bool = False,
):
    """
    Convert the NCI ERA5 archive data to GRIB format for use in UM limited
    area modelling.

    Will generate a grib file for a single time, to be processed by the UM
    reconfiguration. The output area can be limited to reduce file size by
    supplying a UM file on the target grid as 'target'.
    """
    logging.info(f"Target time: {time}")

    if source == "CLEX":
        ds = clex.read_um(time)
    else:
        ds = nci.read_um(time, era5land=era5land)

    if output is None:
        output = f"um.era5.{pandas.to_datetime(ds.time.values[0]).strftime('%Y%m%dT%H%M')}.grib"

    if target is not None:
        # Read the target grid definition from the UM file's headers
        mf = mule.load_umfile(target)
        ny = mf.integer_constants.num_rows
        nx = mf.integer_constants.num_cols
        y0 = mf.real_constants.start_lat
        x0 = mf.real_constants.start_lon
        dy = mf.real_constants.row_spacing
        dx = mf.real_constants.col_spacing

        lat = y0 + numpy.arange(ny) * dy
        if not polar:
            lon = x0 + numpy.arange(nx) * dx
        else:
            lon = numpy.array([0, 359.75])

        print(x0, dx, nx, lon[0], lon[-1])
        print(y0, dy, ny, lat[0], lat[-1])

        ds = select_domain(ds, lats=lat, lons=lon)
    else:
        logging.warning(
            "Outputting the full domain, use --target=qrparm.mask to limit")

    save_grib(ds, output, format=format)
    logging.info(f"Wrote {output}")

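# Hedged usage sketch: generate a single-time GRIB file for the UM
# reconfiguration, limited to the grid described by a UM mask file.
# The time string and paths below are assumptions.
era5grib_um(
    "20200101T0000",
    output="um.era5.20200101T0000.grib",
    target="qrparm.mask",  # UM file on the target grid, limits the domain
)
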
def test_um_loadsave_all(self):
    # Check each test datafile can load with 'load_umfile' and re-save.
    for path, name, filetype in zip(TESTFILE_PATHS,
                                    TESTFILE_NAMES,
                                    TESTFILE_TYPES):
        # Skip the pp files (they don't work the same at all)
        if filetype == "pp":
            continue
        # Load the file with 'load_umfile'.
        ffv = load_umfile(path)
        # Check it loaded with the expected type.
        self.assertEqual(type(ffv), _UM_FILE_TYPES[filetype])
        # Check against expected properties.
        self._file_specific_check(name, ffv)
        # Check you can then save it.
        with self.temp_filename() as temp_filepath:
            ffv.to_file(temp_filepath)

    if lat is not None:
        out.real_constants.north_pole_lat = lat
    if lon is not None:
        out.real_constants.north_pole_lon = lon

    op = RotatePoleOp(lat, lon)
    for f in mf.fields:
        out.fields.append(op(f))

    return out


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input", help="Input UM file",
                        type=argparse.FileType("r"))
    parser.add_argument("--output", help="Output UM file", required=True)
    parser.add_argument("--lat", help="New pole latitude", type=float)
    parser.add_argument("--lon", help="New pole longitude", type=float)
    args = parser.parse_args()

    mf = mule.load_umfile(args.input)
    out = rotate_pole(mf, args.lat, args.lon)
    out.to_file(args.output)

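# Hedged sketch: rotate_pole can also be used programmatically, bypassing
# the command line entirely; the paths and pole coordinates are assumptions.
import mule

mf = mule.load_umfile("input.ff")
out = rotate_pole(mf, lat=37.5, lon=177.5)
out.to_file("rotated.ff")
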
    'ARCLBIOG': 'INPUT/biogenic_351sm.N96L38',
    'BIOMASS': 'INPUT/Bio_1850_ESM1.anc',
    'CHEMOXID': 'INPUT/sulpc_oxidants_N96_L38',
    'DMSCONC': 'INPUT/DMS_conc.N96',
    'NDEPFIL': 'INPUT/Ndep_1850_ESM1.anc',
    'OCFFEMIS': 'INPUT/OCFF_1850_ESM1.anc',
    'OZONE': 'INPUT/ozone_1850_ESM1.anc',
    'SOOTEMIS': 'INPUT/BC_hi_1850_ESM1.anc',
    'SULPEMIS': 'INPUT/scycl_1850_ESM1_v4.anc',
    'VEGINIT': 'INPUT/cable_vegfunc_N96.anc',
}

import mule

# Current date according to the UM
mf = mule.load_umfile('work/atmosphere/restart_dump.astart')
year = mf.fixed_length_header.t1_year

print(f"Updating ozone for year {year}")

if year <= 850:
    ozone = 'ozone_esm_pmip_0850-0850.anc'
elif year <= 1050:
    ozone = 'ozone_esm_pmip_0851-1050.anc'
elif year <= 1250:
    ozone = 'ozone_esm_pmip_1051-1250.anc'
elif year <= 1450:
    ozone = 'ozone_esm_pmip_1251-1450.anc'
elif year <= 1650:
    ozone = 'ozone_esm_pmip_1451-1650.anc'
elif year <= 1850:

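# Hedged sketch: the elif chain above could equally be written as a lookup
# table; this helper only repeats the epochs visible above (the truncated
# final branch is deliberately not guessed at).
OZONE_EPOCHS = [
    (850, 'ozone_esm_pmip_0850-0850.anc'),
    (1050, 'ozone_esm_pmip_0851-1050.anc'),
    (1250, 'ozone_esm_pmip_1051-1250.anc'),
    (1450, 'ozone_esm_pmip_1251-1450.anc'),
    (1650, 'ozone_esm_pmip_1451-1650.anc'),
]

def ozone_for_year(year):
    for upper, anc in OZONE_EPOCHS:
        if year <= upper:
            return anc
    raise ValueError(f"No ozone ancillary listed for year {year}")
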
def _main():
    """
    Main function; accepts command line arguments and provides the cutout
    specification, and the input and output files.

    """
    # Setup help text
    help_prolog = """
    usage:
      %(prog)s [-h] [--stashmaster STASHMASTER] {indices,coords} ...

    This script will extract a sub-region from a UM FieldsFile, producing
    a new file.
    """
    title = _banner(
        "CUTOUT-II - Cutout tool for UM Files, version II "
        "(using the Mule API)",
        banner_char="=")

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number cutout will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is: \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # The cutout command has 2 forms; the user may describe the region using
    # a series of indices or using the co-ordinates of two opposing corners
    subparsers = parser.add_subparsers()

    # Options for indices
    sub_prolog = """
    usage:
      {0} indices [-h] input_file output_file zx zy nx ny

    The index based version of the script will extract a domain of
    whole points defined by the given start indices and lengths
    """.format(os.path.basename(sys.argv[0]))
    parser_index = subparsers.add_parser(
        "indices",
        formatter_class=argparse.RawTextHelpFormatter,
        description=title + textwrap.dedent(sub_prolog),
        usage=argparse.SUPPRESS,
        help="cutout by indices (run \"%(prog)s indices --help\" \n"
        "for specific help on this command)\n ")
    parser_index.add_argument("input_file", help="File containing source\n ")
    parser_index.add_argument("output_file", help="File for output\n ")
    parser_index.add_argument(
        "zx", type=int,
        help="the starting x (column) index of the region to cutout from \n"
        "the source file\n ")
    parser_index.add_argument(
        "zy", type=int,
        help="the starting y (row) index of the region to cutout from \n"
        "the source file\n ")
    parser_index.add_argument(
        "nx", type=int,
        help="the number of x (column) points to cutout from the source "
        "file\n ")
    parser_index.add_argument(
        "ny", type=int,
        help="the number of y (row) points to cutout from the source file\n")

    # Options for co-ordinates
    sub_prolog = """
    usage:
      {0} coords [-h] [--native-grid] input_file output_file
                 SW_lon SW_lat NE_lon NE_lat

    The co-ordinate based version of the script will extract a domain of
    whole points which fit within the given corner points
    """.format(os.path.basename(sys.argv[0]))
    parser_coords = subparsers.add_parser(
        "coords",
        formatter_class=argparse.RawTextHelpFormatter,
        description=title + textwrap.dedent(sub_prolog),
        usage=argparse.SUPPRESS,
        help="cutout by coordinates (run \"%(prog)s coords --help\" \n"
        "for specific help on this command)\n")
    parser_coords.add_argument("input_file", help="File containing source\n ")
    parser_coords.add_argument("output_file", help="File for output\n ")
    parser_coords.add_argument(
        "--native-grid",
        action="store_true",
        help="if set, cutout will take the provided co-ordinates to be on \n"
        "the file's native grid (otherwise it will assume they are regular \n"
        "co-ordinates and apply any needed rotations automatically). \n"
        "Therefore it does nothing for non-rotated grids\n ")
    parser_coords.add_argument(
        "SW_lon", type=float,
        help="the longitude of the South-West corner point of the region \n"
        "to cutout from the source file\n ")
    parser_coords.add_argument(
        "SW_lat", type=float,
        help="the latitude of the South-West corner point of the region \n"
        "to cutout from the source file\n ")
    parser_coords.add_argument(
        "NE_lon", type=float,
        help="the longitude of the North-East corner point of the region \n"
        "to cutout from the source file\n ")
    parser_coords.add_argument(
        "NE_lat", type=float,
        help="the latitude of the North-East corner point of the region \n"
        "to cutout from the source file\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    print(_banner("(CUTOUT-II) Module Information"))
    report_modules()
    print("")

    filename = args.input_file
    if os.path.exists(filename):
        # If provided, load the given stashmaster
        stashm = None
        if args.stashmaster is not None:
            if re.match(r"\d+\.\d+", args.stashmaster):
                stashm = STASHmaster.from_version(args.stashmaster)
            else:
                stashm = STASHmaster.from_file(args.stashmaster)
            if stashm is None:
                msg = "Cannot load user supplied STASHmaster"
                raise ValueError(msg)

        # Abort for pp files (they don't have the required information)
        if mule.pp.file_is_pp_file(filename):
            msg = "File {0} is a pp file, which cutout does not support"
            raise ValueError(msg.format(filename))

        # Load the file using Mule - filter it according to the file types
        # which cutout can handle
        ff = mule.load_umfile(filename, stashmaster=stashm)
        if ff.fixed_length_header.dataset_type not in (1, 2, 3, 4):
            msg = (
                "Invalid dataset type ({0}) for file: {1}\nCutout is only "
                "compatible with FieldsFiles (3), Dumps (1|2) and Ancils (4)"
                .format(ff.fixed_length_header.dataset_type, filename))
            raise ValueError(msg)

        # Perform the cutout
        if hasattr(args, "zx"):
            ff_out = cutout(ff, args.zx, args.zy, args.nx, args.ny)
        else:
            ff_out = cutout_coords(ff,
                                   args.SW_lon, args.SW_lat,
                                   args.NE_lon, args.NE_lat,
                                   args.native_grid)

        # Write the result out to the new file
        ff_out.to_file(args.output_file)
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

            entry[inc_exc][key] = new_vals
        if entry:
            case_dicts.append(entry)

    # Load the input files
    selected_fields = []
    pp_mode = False
    for ifile, input_file in enumerate(input_files):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(input_file):
            pp_mode = True
            umf = mule.FieldsFile()
            umf.fields = mule.pp.fields_from_pp_file(input_file)
            umf._source_path = input_file
        elif not pp_mode:
            umf = mule.load_umfile(input_file)
        else:
            msg = "Cannot mix and match UM files and pp files"
            raise ValueError(msg)

        # Iterate through the cases - each returns a list of matching fields
        for case in case_dicts:
            selected_fields += (
                select(umf,
                       include=case.get("include", None),
                       exclude=case.get("exclude", None)))

        # Copy first file to use for writing output
        if ifile == 0 and not pp_mode:
            umf_out = umf.copy()

    # Prune out duplicates while preserving their order

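# Hedged sketch of the select() call used above; judging from how the case
# dictionaries are built, include/exclude map lookup-property names to lists
# of acceptable values. The STASH code and path here are assumptions.
import mule

umf = mule.load_umfile("atmos.ff")
fields = select(umf, include={"lbuser4": [24]})  # e.g. surface temperature
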
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to summarise.

    """
    # Setup help text
    help_prolog = """
    usage:
      %(prog)s [-h] [options] input_file

    This script will output a summary table of the lookup headers in a UM
    file, with the columns selected by the user.
    """
    title = _banner(
        "SUMMARY - Print a summary of the fields in a UM File "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the lookup names as they appear in Mule
    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [name for name, _ in mule._LOOKUP_HEADER_2
                     if name not in lookup_names]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible lookup names for the column-names option:
    {0}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(textwrap.fill(lookup_names,
                             width=80,
                             initial_indent=4 * " ",
                             subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--column-names",
        metavar="--column-names name1[,name2][...]",
        help="set the names of the lookup header items to print, in the \n"
        "order the columns should appear as a comma separated list. A \n"
        "special entry of \"stash_name\" will put in the field's name \n"
        "according to the STASHmaster, \"index\" will give the field's \n"
        "index number in the file, and \"t1\" or \"t2\" will give the first \n"
        "and second time from the lookup (nicely formatted)\n ")

    parser.add_argument(
        "--heading-frequency",
        metavar="N",
        type=int,
        help="repeat the column heading block every N lines (to avoid \n"
        "having to scroll too far to identify columns in the output) A \n"
        "value of 0 means do not repeat the heading block\n ")

    parser.add_argument(
        "--field-index",
        metavar="i1[,i2][,i3:i5][...]",
        help="limit the output to specific fields by index (comma-separated \n"
        "list of single indices, or ranges of indices separated by a single \n"
        "colon-character)\n ")

    parser.add_argument(
        "--field-property",
        metavar="key1=value1[,key2=value2][...]",
        help="limit the output to specific fields using a property string \n"
        "(comma-separated list of key=value pairs where key is the name of \n"
        "a lookup property and value is what it must be set to)\n ")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number summary will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is: \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    print(_banner("(SUMMARY) Module Information"))
    report_modules()
    print("")

    # Process column names
    if args.column_names is not None:
        properties = args.column_names.split(",")
        PRINT_SETTINGS["column_names"] = properties

    # Process field filtering by index argument
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process heading lines
    if args.heading_frequency is not None:
        PRINT_SETTINGS["heading_frequency"] = args.heading_frequency

    # Get the filename and load it using Mule
    filename = args.input_file
    if os.path.exists(filename):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(filename):
            # Make an empty fieldsfile object and attach the pp file's
            # field objects to it
            um_file = mule.FieldsFile()
            um_file.fields = mule.pp.fields_from_pp_file(filename)
            um_file._source_path = filename
            if stashm is not None:
                um_file.attach_stashmaster_info(stashm)
            # Override the component filter as only the lookup is
            # available in a pp file
            PRINT_SETTINGS["component_filter"] = ["lookup"]
        else:
            um_file = mule.load_umfile(filename, stashmaster=stashm)

        # Now print the object to stdout; if a SIGPIPE is received, handle
        # it appropriately
        try:
            field_summary(um_file)
        except IOError as error:
            if error.errno != errno.EPIPE:
                raise
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

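# Hedged sketch: the same summary table can be produced programmatically by
# loading a file with Mule and calling field_summary directly, as the
# non-pp branch above does; the path is an assumption.
import mule

um_file = mule.load_umfile("atmos.ff")
field_summary(um_file)
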
def _main():
    """
    Main function; accepts command line arguments to override the print
    settings and provides a UM file to print.

    """
    # Setup help text
    help_prolog = """
    usage:
      %(prog)s [-h] [options] input_filename

    This script will output the contents of the headers from a UM file to
    stdout. The default output may be customised with a variety of options
    (see below).
    """
    title = _banner(
        "PUMF-II - Pretty Printer for UM Files, version II "
        "(using the Mule API)",
        banner_char="=")

    # Include a list of the component names as they appear in Mule
    component_names = ", ".join(
        ["fixed_length_header"]
        + [name for name, _ in mule.UMFile.COMPONENTS]
        + ["lookup"])

    lookup_names = [name for name, _ in mule._LOOKUP_HEADER_3]
    lookup_names += [name for name, _ in mule._LOOKUP_HEADER_2
                     if name not in lookup_names]
    lookup_names = ", ".join(lookup_names)

    help_epilog = """
    possible component names for the component option:
    {0}

    possible lookup names for the field-property option:
    {1}

    for details of how these relate to indices see UMDP F03:
      https://code.metoffice.gov.uk/doc/um/latest/papers/umdp_F03.pdf
    """.format(
        textwrap.fill(component_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "),
        textwrap.fill(lookup_names,
                      width=80,
                      initial_indent=4 * " ",
                      subsequent_indent=8 * " "))

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the input file (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "--include-missing",
        help="include header values which are set to MDI and entries for \n"
        "components which are not present in the file (by default this will \n"
        "be hidden)\n ",
        action="store_true")

    parser.add_argument(
        "--use-indices",
        help="list headers by their indices (instead of only listing named \n"
        "headers)\n ",
        action="store_true")

    parser.add_argument(
        "--headers-only",
        help="only list headers (do not read data and calculate any derived \n"
        "statistics)\n",
        action="store_true")

    parser.add_argument(
        "--components",
        help="limit the header output to specific components \n"
        "(comma-separated list of component names, with no spaces)\n ",
        metavar="component1[,component2][...]")

    parser.add_argument(
        "--field-index",
        help="limit the lookup output to specific fields by index \n"
        "(comma-separated list of single indices, or ranges of indices \n"
        "separated by a single colon-character)\n ",
        metavar="i1[,i2][,i3:i5][...]")

    parser.add_argument(
        "--field-property",
        help="limit the lookup output to specific fields using a property \n"
        "string (comma-separated list of key=value pairs where the key is \n"
        "the name of a lookup property and the value is the value it must \n"
        "take)\n ",
        metavar="key1=value1[,key2=value2][...]")

    parser.add_argument(
        "--print-columns",
        help="how many columns should be printed\n ",
        metavar="N")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number pumf will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is: \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    print(_banner("(PUMF-II) Module Information"))
    report_modules()
    print("")

    # Process component filtering argument
    if args.components is not None:
        PRINT_SETTINGS["component_filter"] = args.components.split(",")

    # Process field filtering by index argument
    field_index = []
    if args.field_index is not None:
        for arg in args.field_index.split(","):
            if re.match(r"^\d+$", arg):
                field_index.append(int(arg))
            elif re.match(r"^\d+:\d+$", arg):
                field_index += range(*[int(elt) for elt in arg.split(":")])
            else:
                msg = "Unrecognised field-index option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_index"] = field_index

    # Process field filtering by property argument
    field_property = {}
    if args.field_property is not None:
        for arg in args.field_property.split(","):
            if re.match(r"^\w+=\d+$", arg):
                name, value = arg.split("=")
                field_property[name] = int(value)
            else:
                msg = "Unrecognised field-property option: {0}"
                raise ValueError(msg.format(arg))
    PRINT_SETTINGS["field_property"] = field_property

    # If provided, load the given stashmaster
    stashm = None
    if args.stashmaster is not None:
        if re.match(r"\d+\.\d+", args.stashmaster):
            stashm = STASHmaster.from_version(args.stashmaster)
        else:
            stashm = STASHmaster.from_file(args.stashmaster)
        if stashm is None:
            msg = "Cannot load user supplied STASHmaster"
            raise ValueError(msg)

    # Process remaining options
    if args.print_columns is not None:
        PRINT_SETTINGS["print_columns"] = int(args.print_columns)
    if args.include_missing:
        PRINT_SETTINGS["include_missing"] = True
    if args.use_indices:
        PRINT_SETTINGS["use_indices"] = True
    if args.headers_only:
        PRINT_SETTINGS["headers_only"] = True

    # Get the filename and load it using Mule
    filename = args.input_file
    if os.path.exists(filename):
        # Check if this is a pp file
        if mule.pp.file_is_pp_file(filename):
            # Make an empty fieldsfile object and attach the pp file's
            # field objects to it
            um_file = mule.FieldsFile()
            um_file.fields = mule.pp.fields_from_pp_file(filename)
            um_file._source_path = filename
            if stashm is not None:
                um_file.attach_stashmaster_info(stashm)
            # Override the component filter as only the lookup is
            # available in a pp file
            PRINT_SETTINGS["component_filter"] = ["lookup"]
        else:
            um_file = mule.load_umfile(filename, stashmaster=stashm)

        # Now print the object to stdout; if a SIGPIPE is received, handle
        # it appropriately
        try:
            pprint(um_file)
        except IOError as error:
            if error.errno != errno.EPIPE:
                raise
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

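# Hedged sketch: as in the pp-file branch above, a pp file can be wrapped
# in an empty FieldsFile so pprint can work from its lookup headers alone
# (the pp path is an assumption).
import mule
import mule.pp

um_file = mule.FieldsFile()
um_file.fields = mule.pp.fields_from_pp_file("fields.pp")
um_file._source_path = "fields.pp"
pprint(um_file)
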
def _main():
    """
    Main function; accepts command line arguments and provides the fixed
    region specification, input and output files.

    """
    # Setup help text
    help_prolog = """
    usage:
      %(prog)s [-h] [options] input_file output_file region_x region_y

    This script will extract a fixed-grid sub-region from a variable
    resolution UM FieldsFile, producing a new file.
    """
    title = _banner(
        "TRIM - Fixed region extraction tool for UM Files "
        "(using the Mule API)",
        banner_char="=")

    # Setup the parser
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=title + textwrap.dedent(help_prolog),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # No need to output help text for the files (it's obvious)
    parser.add_argument("input_file", help=argparse.SUPPRESS)
    parser.add_argument("output_file", help=argparse.SUPPRESS)

    parser.add_argument(
        "region_x", type=int,
        help="the x index of the *region* to extract, starting from 1. \n"
        "In a typical variable resolution FieldsFile the central region \n"
        "will be given by '2'\n ")
    parser.add_argument(
        "region_y", type=int,
        help="the y index of the *region* to extract, starting from 1. \n"
        "In a typical variable resolution FieldsFile the central region \n"
        "will be given by '2'\n")

    parser.add_argument(
        "--stashmaster",
        help="either the full path to a valid stashmaster file, or a UM \n"
        "version number e.g. '10.2'; if given a number trim will look in \n"
        "the path defined by: \n"
        "  mule.stashmaster.STASHMASTER_PATH_PATTERN \n"
        "which by default is: \n"
        "  $UMDIR/vnX.X/ctldata/STASHmaster/STASHmaster_A\n")

    # If the user supplied no arguments, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit(1)

    args = parser.parse_args()

    # Print version information
    print(_banner("(TRIM) Module Information"))
    report_modules()
    print("")

    filename = args.input_file
    if os.path.exists(filename):
        # If provided, load the given stashmaster
        stashm = None
        if args.stashmaster is not None:
            if re.match(r"\d+\.\d+", args.stashmaster):
                stashm = STASHmaster.from_version(args.stashmaster)
            else:
                stashm = STASHmaster.from_file(args.stashmaster)
            if stashm is None:
                msg = "Cannot load user supplied STASHmaster"
                raise ValueError(msg)

        # Abort for pp files (they don't have the required information)
        if mule.pp.file_is_pp_file(filename):
            msg = "File {0} is a pp file, which trim does not support"
            raise ValueError(msg.format(filename))

        # Load the file using Mule - filter it according to the file types
        # which trim can handle
        ff = mule.load_umfile(filename, stashmaster=stashm)
        if ff.fixed_length_header.dataset_type not in (1, 2, 3, 4):
            msg = (
                "Invalid dataset type ({0}) for file: {1}\nTrim is only "
                "compatible with FieldsFiles (3), Dumps (1|2) and Ancils (4)"
                .format(ff.fixed_length_header.dataset_type, filename))
            raise ValueError(msg)

        # Perform the trim operation
        ff_out = trim_fixed_region(ff, args.region_x, args.region_y)

        # Write the result out to the new file
        ff_out.to_file(args.output_file)
    else:
        msg = "File not found: {0}".format(filename)
        raise ValueError(msg)

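# Hedged sketch: extracting the central fixed-resolution region of a typical
# variable-resolution FieldsFile (region 2,2 per the help text above); the
# paths are assumptions.
import mule

ff = mule.load_umfile("varres.ff")
ff_out = trim_fixed_region(ff, 2, 2)
ff_out.to_file("fixed_region.ff")
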
def test_fieldsfile_byfile(self):
    with open(COMMON_N48_TESTDATA_PATH) as open_file:
        ffv = load_umfile(open_file)
        self.assertEqual(type(ffv), FieldsFile)
        check_common_n48_testdata(self, ffv)

def test_fieldsfile_bypath(self):
    ffv = load_umfile(COMMON_N48_TESTDATA_PATH)
    self.assertEqual(type(ffv), FieldsFile)
    check_common_n48_testdata(self, ffv)