def test_iter(self):
    from satpy import Scene, Dataset
    import numpy as np
    scene = Scene()
    scene["1"] = Dataset(np.arange(5))
    scene["2"] = Dataset(np.arange(5))
    scene["3"] = Dataset(np.arange(5))
    for x in scene:
        self.assertIsInstance(x, Dataset)

def test_getitem(self):
    """Test __getitem__ with names only."""
    from satpy import Scene, Dataset
    import numpy as np
    scene = Scene()
    scene["1"] = ds1 = Dataset(np.arange(5))
    scene["2"] = ds2 = Dataset(np.arange(5))
    scene["3"] = ds3 = Dataset(np.arange(5))
    self.assertIs(scene['1'], ds1)
    self.assertIs(scene['2'], ds2)
    self.assertIs(scene['3'], ds3)
    self.assertRaises(KeyError, scene.__getitem__, '4')

def test_delitem(self):
    from satpy import Scene, Dataset
    import numpy as np
    scene = Scene()
    scene["1"] = ds1 = Dataset(np.arange(5), wavelength=(0.1, 0.2, 0.3))
    scene["2"] = ds2 = Dataset(np.arange(5), wavelength=(0.4, 0.5, 0.6))
    scene["3"] = ds3 = Dataset(np.arange(5), wavelength=(0.7, 0.8, 0.9))
    del scene['1']
    del scene['3']
    # 0.45 falls inside ds2's (min, central, max) wavelength range
    del scene[0.45]
    self.assertEqual(len(scene.wishlist), 0)
    self.assertEqual(len(scene.datasets.keys()), 0)
    # once the scene is empty, deleting by wavelength raises KeyError
    self.assertRaises(KeyError, scene.__delitem__, 0.2)

def test_basic_numbered_1_tile(self):
    """Test creating a single numbered tile."""
    from satpy.writers.scmi import SCMIWriter
    from satpy import Dataset
    from pyresample.geometry import AreaDefinition
    from pyresample.utils import proj4_str_to_dict
    w = SCMIWriter(base_dir=self.base_dir, compress=True)
    area_def = AreaDefinition(
        'test', 'test', 'test',
        proj_dict=proj4_str_to_dict(
            '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'),
        x_size=100,
        y_size=200,
        area_extent=(-1000., -1500., 1000., 1500.),
    )
    now = datetime.utcnow()
    ds = Dataset(
        np.linspace(0., 1., 20000, dtype=np.float32).reshape((200, 100)),
        name='test_ds',
        platform='PLAT',
        sensor='SENSOR',
        units='1',
        area=area_def,
        start_time=now,
        end_time=now + timedelta(minutes=20),
    )
    fn = w.save_dataset(ds, sector_id='TEST', source_name="TESTS")
    self.assertTrue(os.path.isfile(fn))

def test_setitem(self):
    from satpy import Scene, Dataset
    import numpy as np
    scene = Scene()
    scene["1"] = ds1 = Dataset(np.arange(5))
    self.assertSetEqual(set(scene.datasets.keys()), {ds1.id})
    self.assertSetEqual(set(scene.wishlist), {ds1.id})

def test_iter_by_area_swath(self):
    from satpy import Scene, Dataset
    from pyresample.geometry import SwathDefinition
    import numpy as np
    scene = Scene()
    sd = SwathDefinition(lons=np.arange(5), lats=np.arange(5))
    scene["1"] = Dataset(np.arange(5), area=sd)
    scene["2"] = Dataset(np.arange(5), area=sd)
    scene["3"] = Dataset(np.arange(5))
    for area_obj, ds_list in scene.iter_by_area():
        ds_list_names = set(ds.name for ds in ds_list)
        if area_obj is sd:
            self.assertSetEqual(ds_list_names, {'1', '2'})
        else:
            self.assertIsNone(area_obj)
            self.assertSetEqual(ds_list_names, {'3'})

def _se(datasets, optional_datasets=None, ds_id=ds_id, **kwargs):
    # ``ds_id`` is bound as a default argument; ``prereqs`` comes from the
    # enclosing test's scope
    if ds_id.name == 'comp14':
        # used as a test when composites update the dataset id with
        # information from prereqs
        ds_id = ds_id._replace(resolution=555)
    if len(datasets) != len(prereqs):
        raise ValueError("Not enough prerequisite datasets passed")
    return Dataset(data=np.arange(5), **ds_id.to_dict())

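# A minimal sketch (not part of the original suite; names here are
# hypothetical) of how a helper like ``_se`` is typically used: attached as
# the ``side_effect`` of a mocked compositor so that calling the mock runs
# the fake computation instead of real compositing code.
#
#     from unittest import mock
#     comp_mock = mock.MagicMock(side_effect=_se)
#     fake_ds = comp_mock([prereq_ds])  # returns the stand-in Dataset
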
def test_getitem_modifiers(self):
    """Test __getitem__ with names and modifiers."""
    from satpy import Scene, Dataset, DatasetID
    import numpy as np

    # Return least modified item
    scene = Scene()
    scene['1'] = ds1_m0 = Dataset(np.arange(5))
    scene[DatasetID(name='1', modifiers=('mod1',))] = ds1_m1 = Dataset(np.arange(5))
    self.assertIs(scene['1'], ds1_m0)
    self.assertEqual(len(list(scene.keys())), 2)

    scene = Scene()
    scene['1'] = ds1_m0 = Dataset(np.arange(5))
    scene[DatasetID(name='1', modifiers=('mod1',))] = ds1_m1 = Dataset(np.arange(5))
    scene[DatasetID(name='1', modifiers=('mod1', 'mod2'))] = ds1_m2 = Dataset(np.arange(5))
    self.assertIs(scene['1'], ds1_m0)
    self.assertEqual(len(list(scene.keys())), 3)

    # With no unmodified '1' in the scene, the least modified variant wins
    scene = Scene()
    scene[DatasetID(name='1', modifiers=('mod1', 'mod2'))] = ds1_m2 = Dataset(np.arange(5))
    scene[DatasetID(name='1', modifiers=('mod1',))] = ds1_m1 = Dataset(np.arange(5))
    self.assertIs(scene['1'], ds1_m1)
    self.assertIs(scene[DatasetID('1', modifiers=('mod1', 'mod2'))], ds1_m2)
    self.assertRaises(KeyError, scene.__getitem__,
                      DatasetID(name='1', modifiers=tuple()))
    self.assertEqual(len(list(scene.keys())), 2)

def test_contains(self):
    from satpy import Scene, Dataset
    import numpy as np
    scene = Scene()
    scene["1"] = ds1 = Dataset(np.arange(5), wavelength=(0.1, 0.2, 0.3))
    self.assertTrue('1' in scene)
    # 0.15 is within the dataset's (min, central, max) wavelength range
    self.assertTrue(0.15 in scene)
    self.assertFalse('2' in scene)
    self.assertFalse(0.31 in scene)

def test_enhance_with_sensor_no_entry(self):
    """Test enhancing an image that has no configuration sections."""
    from satpy.writers import Enhancer, get_enhanced_image
    from satpy import Dataset
    ds = Dataset(np.arange(1, 11.).reshape((2, 5)),
                 sensor='test_sensor2', mode='L')
    e = Enhancer()
    self.assertIsNotNone(e.enhancement_tree)
    get_enhanced_image(ds, enhancer=e)
    self.assertSetEqual(set(e.sensor_enhancement_configs),
                        {'test_sensor2.yaml',
                         'enhancements/test_sensor2.yaml'})

def __call__(self, datasets, optional_datasets, **info):
    # ``name`` comes from the enclosing scope
    if name == 'res_change' and datasets[0].id.resolution is not None:
        i = datasets[0].info.copy()
        i['resolution'] *= 5
    elif name == 'incomp_areas':
        raise IncompatibleAreas(
            "Test modifier 'incomp_areas' always raises IncompatibleAreas")
    else:
        i = datasets[0].info
    info = datasets[0].info.copy()
    self.apply_modifier_info(i, info)
    return Dataset(data=np.ma.MaskedArray(datasets[0]), **info)

def test_enhance_with_sensor_entry2(self):
    """Test enhancing an image with a more detailed configuration section."""
    from satpy.writers import Enhancer, get_enhanced_image
    from satpy import Dataset
    ds = Dataset(np.arange(1, 11.).reshape((2, 5)),
                 name='test1', units='kelvin',
                 sensor='test_sensor', mode='L')
    e = Enhancer()
    self.assertIsNotNone(e.enhancement_tree)
    img = get_enhanced_image(ds, enhancer=e)
    self.assertSetEqual(set(e.sensor_enhancement_configs),
                        {'test_sensor.yaml',
                         'enhancements/test_sensor.yaml'})
    np.testing.assert_almost_equal(img.channels[0].max(), 0.5)

def _reader_load(self, dataset_keys):
    from satpy import DatasetDict, Dataset
    import numpy as np
    dataset_ids = self.datasets
    loaded_datasets = DatasetDict()
    for k in dataset_keys:
        if k == 'ds9_fail_load':
            # simulate a dataset the reader knows about but fails to load
            continue
        for ds in dataset_ids:
            if ds == k:
                loaded_datasets[ds] = Dataset(data=np.arange(5),
                                              **ds.to_dict())
    return loaded_datasets

def test_lettered_tiles_bad_filename(self):
    """Test creating a lettered grid with a bad filename."""
    from satpy.writers.scmi import SCMIWriter
    from satpy import Dataset
    from pyresample.geometry import AreaDefinition
    from pyresample.utils import proj4_str_to_dict
    w = SCMIWriter(base_dir=self.base_dir, compress=True,
                   file_pattern="{Bad Key}.nc")
    area_def = AreaDefinition(
        'test', 'test', 'test',
        proj_dict=proj4_str_to_dict(
            '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'),
        x_size=1000,
        y_size=2000,
        area_extent=(-1000000., -1500000., 1000000., 1500000.),
    )
    now = datetime.utcnow()
    ds = Dataset(
        np.linspace(0., 1., 2000000, dtype=np.float32).reshape((2000, 1000)),
        name='test_ds',
        platform='PLAT',
        sensor='SENSOR',
        units='1',
        area=area_def,
        start_time=now,
        end_time=now + timedelta(minutes=20),
    )
    self.assertRaises(KeyError, w.save_dataset, ds,
                      sector_id='LCC', source_name="TESTS",
                      tile_count=(3, 3), lettered_grid=True)

def test_lettered_tiles_no_fit(self):
    """Test creating a lettered grid with no data."""
    from satpy.writers.scmi import SCMIWriter
    from satpy import Dataset
    from pyresample.geometry import AreaDefinition
    from pyresample.utils import proj4_str_to_dict
    w = SCMIWriter(base_dir=self.base_dir, compress=True)
    area_def = AreaDefinition(
        'test', 'test', 'test',
        proj_dict=proj4_str_to_dict(
            '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'),
        x_size=1000,
        y_size=2000,
        area_extent=(4000000., 5000000., 5000000., 6000000.),
    )
    now = datetime.utcnow()
    ds = Dataset(
        np.linspace(0., 1., 2000000, dtype=np.float32).reshape((2000, 1000)),
        name='test_ds',
        platform='PLAT',
        sensor='SENSOR',
        units='1',
        area=area_def,
        start_time=now,
        end_time=now + timedelta(minutes=20),
    )
    fn = w.save_dataset(ds, sector_id='LCC', source_name="TESTS",
                        tile_count=(3, 3), lettered_grid=True)
    # `fn` is the last file created; no files should have been created here
    self.assertIsNone(fn)

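# For context, a minimal sketch (not from the test suite; assumes a satpy
# Scene ``scn`` with loaded datasets and the 'scmi' writer installed) of
# producing the same kind of output outside of tests, with keyword
# arguments forwarded to the writer:
#
#     scn.save_datasets(writer='scmi', sector_id='LCC', source_name='TESTS',
#                       tile_count=(3, 3), lettered_grid=True)
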
def main(argv=sys.argv[1:]):
    from polar2grid.core.script_utils import (
        setup_logging, create_basic_parser, create_exc_handler,
        rename_log_file, ExtendAction)
    from polar2grid.compositors import CompositorManager
    frontends = available_frontends()
    backends = available_backends()
    parser = create_basic_parser(
        description="Extract swath data, remap it, and write it to a new file format")
    parser.add_argument("frontend", choices=sorted(frontends.keys()),
                        help="Specify the swath extractor to use to read data "
                             "(additional arguments are determined after this is specified)")
    parser.add_argument("backend", choices=sorted(backends.keys()),
                        help="Specify the backend to use to write data output "
                             "(additional arguments are determined after this is specified)")
    parser.add_argument("--compositor-configs", nargs="*", default=None,
                        help="Specify alternative configuration file(s) for compositors")
    # don't include the help flag
    argv_without_help = [x for x in argv if x not in ["-h", "--help"]]
    args, remaining_args = parser.parse_known_args(argv_without_help)
    glue_name = args.frontend + "2" + args.backend
    LOG = logging.getLogger(glue_name)

    # Load compositor information (we can't know the compositor choices until
    # we've loaded the configuration)
    compositor_manager = CompositorManager(config_files=args.compositor_configs)
    # Hack: argparse doesn't let you use choices and nargs='*' on a positional argument
    parser.add_argument("compositors", choices=compositor_manager.keys() + [[]],
                        nargs="*",
                        help="Specify the compositors to apply to the provided scene "
                             "(additional arguments are determined after this is specified)")

    # load the actual components we need
    farg_func = get_frontend_argument_func(frontends, args.frontend)
    fcls = get_frontend_class(frontends, args.frontend)
    barg_func = get_backend_argument_func(backends, args.backend)
    bcls = get_backend_class(backends, args.backend)

    # add_frontend_arguments(parser)
    subgroup_titles = []
    subgroup_titles += farg_func(parser)
    subgroup_titles += add_remap_argument_groups(parser)
    subgroup_titles += barg_func(parser)

    parser.add_argument('-f', dest='data_files', nargs="+", default=[],
                        action=ExtendAction,
                        help="List of files or directories to extract data from")
    parser.add_argument('-d', dest='data_files', nargs="+", default=[],
                        action=ExtendAction,
                        help="Data directories to look for input data files (equivalent to -f)")
    global_keywords = ("keep_intermediate", "overwrite_existing", "exit_on_error")
    args = parser.parse_args(argv, global_keywords=global_keywords,
                             subgroup_titles=subgroup_titles)

    if not args.data_files:
        # FUTURE: When the -d flag is removed this won't be needed because -f will be required
        parser.print_usage()
        parser.exit(1, "ERROR: No data files provided (-f flag)\n")

    # Logs are renamed once the start date of the provided data is known
    rename_log = False
    if args.log_fn is None:
        rename_log = True
        args.log_fn = glue_name + "_fail.log"
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    setup_logging(console_level=levels[min(3, args.verbosity)],
                  log_filename=args.log_fn)
    sys.excepthook = create_exc_handler(LOG.name)
    LOG.debug("Starting script with arguments: %s", " ".join(sys.argv))

    # Keep track of things going wrong to tell the user what went wrong
    # (we want to create as much as possible)
    status_to_return = STATUS_SUCCESS

    # Compositor validation
    # XXX: Hack to make `polar2grid.sh crefl gtiff` work like the legacy crefl2gtiff.sh script
    if args.subgroup_args['Frontend Swath Extraction'].get('no_compositors'):
        LOG.debug("Removing all compositors")
        args.compositors = []
    elif args.frontend == 'crefl':
        if args.backend in ['awips', 'scmi']:
            LOG.debug("Adding 'crefl_sharpen' compositor")
            args.compositors.append('crefl_sharpen' if args.backend == 'scmi'
                                    else 'crefl_sharpen_awips')
        else:
            LOG.debug("Adding 'true_color' compositor")
            args.compositors.append('true_color')
        if '--true-color' in sys.argv and 'true_color' not in args.compositors:
            LOG.debug("Adding 'true_color' compositor")
            args.compositors.append('true_color')
        if '--false-color' in sys.argv and 'false_color' not in args.compositors:
            LOG.debug("Adding 'false_color' compositor")
            args.compositors.append('false_color')
    for c in args.compositors:
        if c not in compositor_manager:
            LOG.error("Compositor '%s' is unknown" % (c,))
            raise RuntimeError("Compositor '%s' is unknown" % (c,))

    # Frontend
    try:
        LOG.info("Initializing reader...")
        list_products = args.subgroup_args["Frontend Initialization"].pop("list_products")
        f = fcls(search_paths=args.data_files,
                 **args.subgroup_args["Frontend Initialization"])
    except StandardError:
        LOG.debug("Frontend exception: ", exc_info=True)
        LOG.error("%s frontend failed to load and sort data files (see log for details)",
                  args.frontend)
        return STATUS_FRONTEND_FAIL

    # Rename the log file
    if rename_log:
        rename_log_file(glue_name + f.begin_time.strftime("_%Y%m%d_%H%M%S.log"))
    if list_products:
        print("\n".join(sorted(f.available_product_names)))
        return STATUS_SUCCESS

    try:
        LOG.info("Initializing remapping...")
        remapper = Remapper(**args.subgroup_args["Remapping Initialization"])
        remap_kwargs = args.subgroup_args["Remapping"]
    except StandardError:
        LOG.debug("Remapping initialization exception: ", exc_info=True)
        LOG.error("Remapping initialization failed (see log for details)")
        return STATUS_REMAP_FAIL

    try:
        LOG.info("Initializing backend...")
        backend = bcls(**args.subgroup_args["Backend Initialization"])
    except StandardError:
        LOG.debug("Writer initialization exception: ", exc_info=True)
        LOG.error("Writer initialization failed (see log for details)")
        return STATUS_BACKEND_FAIL

    try:
        LOG.info("Initializing compositor objects...")
        compositor_objects = {}
        for c in args.compositors:
            compositor_objects[c] = compositor_manager.get_compositor(
                c, **args.global_kwargs)
    except StandardError:
        LOG.debug("Compositor initialization exception: ", exc_info=True)
        LOG.error("Compositor initialization failed (see log for details)")
        return STATUS_COMP_FAIL

    try:
        LOG.info("Extracting swaths from data files available...")
        scene = f.create_scene(**args.subgroup_args["Frontend Swath Extraction"])

        # Determine if we have a satpy scene and if we should convert it to
        # a P2G Scene to continue processing
        resample_method = args.subgroup_args["Remapping"].get("remap_method")
        is_satpy_resample_method = resample_method in SATPY_RESAMPLERS
        if is_satpy_resample_method and not isinstance(scene, Scene):
            raise RuntimeError("Resampling method '{}' only supports 'satpy' readers".format(resample_method))
        elif not is_satpy_resample_method and isinstance(scene, Scene):
            # convert satpy scene to P2G Scene to be compatible with old P2G resamplers
            scene = convert_satpy_to_p2g_swath(f, scene)

        if isinstance(scene, Scene):
            if not scene.datasets:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                raise RuntimeError("satpy readers do not currently support saving intermediate files")
        else:
            if (isinstance(scene, Scene) and not scene.datasets) or not scene:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                filename = glue_name + "_swath_scene.json"
                LOG.info("Saving intermediate swath scene as '%s'", filename)
                scene.save(filename)
    except StandardError:
        LOG.debug("Frontend data extraction exception: ", exc_info=True)
        LOG.error("Frontend data extraction failed (see log for details)")
        return STATUS_FRONTEND_FAIL

    # What grids should we remap to (the user should tell us or the backend
    # should have a good set of defaults)
    known_grids = backend.known_grids
    LOG.debug("Writer known grids: %r", known_grids)
    grids = remap_kwargs.pop("forced_grids", None)
    LOG.debug("Forced Grids: %r", grids)
    if resample_method == "sensor" and grids != ["sensor"]:
        LOG.error("'sensor' resampling method only supports the 'sensor' grid")
        return STATUS_GDETER_FAIL
    if not grids and not known_grids:
        # the user didn't ask for any grids and the backend doesn't have specific defaults
        LOG.error("No grids specified and no known defaults")
        return STATUS_GDETER_FAIL
    elif not grids:
        # the user didn't tell us what to do, so let's try everything the backend knows how to do
        grids = known_grids
    elif known_grids is not None:
        # the user told us what to do, let's make sure the backend can do it
        grids = list(set(grids) & set(known_grids))
        if not grids:
            LOG.error("%s backend doesn't know how to handle any of the grids specified",
                      args.backend)
            return STATUS_GDETER_FAIL
    LOG.debug("Grids that will be mapped to: %r", grids)

    # Remap
    gridded_scenes = {}
    for grid_name in grids:
        LOG.info("Remapping to grid %s", grid_name)
        try:
            gridded_scene = remapper.remap_scene(scene, grid_name, **remap_kwargs)
            gridded_scenes[grid_name] = gridded_scene
            if args.keep_intermediate:
                filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                LOG.debug("saving intermediate gridded scene as '%s'", filename)
                gridded_scene.save(filename)
        except StandardError:
            LOG.debug("Remapping data exception: ", exc_info=True)
            LOG.error("Remapping data failed")
            status_to_return |= STATUS_REMAP_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        if not isinstance(scene, Scene):
            # Composition
            for c, comp in compositor_objects.items():
                try:
                    LOG.info("Running gridded scene through '%s' compositor", c)
                    gridded_scene = comp.modify_scene(
                        gridded_scene, **args.subgroup_args[c + " Modification"])
                    if args.keep_intermediate:
                        filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                        LOG.debug("Updating saved intermediate gridded scene (%s) after compositor",
                                  filename)
                        gridded_scene.save(filename)
                except StandardError:
                    LOG.debug("Compositor Error: ", exc_info=True)
                    LOG.error("Could not properly modify scene using compositor '%s'" % (c,))
                    if args.exit_on_error:
                        raise RuntimeError("Could not properly modify scene using compositor '%s'" % (c,))

        if isinstance(f, ReaderWrapper) and not isinstance(gridded_scene, Scene):
            this_grid_definition = None
            # HACK: Create SatPy composites that were either separated before
            # resampling or needed resampling to be created
            rgbs = {}
            for product_name in gridded_scene.keys():
                rgb_name = product_name[:-6]
                # Keep track of one of the grid definitions
                if this_grid_definition is None:
                    this_grid_definition = gridded_scene[product_name]["grid_definition"]
                if product_name.endswith("rgb_0") or product_name.endswith("rgb_1") or product_name.endswith("rgb_2"):
                    if rgb_name not in rgbs:
                        rgbs[rgb_name] = [None, None, None]
                    chn_idx = int(product_name[-1])
                    rgbs[rgb_name][chn_idx] = product_name
            LOG.debug("Putting RGBs back together again")
            for rgb_name, v in rgbs.items():
                r = gridded_scene.pop(v[0])
                g = gridded_scene.pop(v[1])
                b = gridded_scene.pop(v[2])
                new_info = r.copy()
                new_info["grid_data"] = new_info["grid_data"].replace(v[0], rgb_name)
                new_info["product_name"] = rgb_name
                data = np.memmap(new_info["grid_data"],
                                 dtype=new_info["data_type"],
                                 mode="w+",
                                 shape=(3,
                                        new_info["grid_definition"]["height"],
                                        new_info["grid_definition"]["width"]))
                data[0] = r.get_data_array()[:]
                data[1] = g.get_data_array()[:]
                data[2] = b.get_data_array()[:]
                gridded_scene[rgb_name] = new_info

            # Create composites that satpy couldn't complete until after remapping
            composite_names = [x for x in f.wishlist if not isinstance(x, DatasetID)]
            if composite_names:
                tmp_scene = Scene()
                for k, v in gridded_scene.items():
                    if not isinstance(v["sensor"], set):
                        # turn sensor back into a set to match satpy usage
                        v["sensor"] = set([v["sensor"]])
                    tmp_scene[v["id"]] = Dataset(v.get_data_array(), **v)
                    tmp_scene[v["id"]].info["area"] = this_grid_definition.to_satpy_area()
                    # tmp_scene[v["id"]].info = {}
                    if v["sensor"] not in tmp_scene.info["sensor"]:
                        tmp_scene.info["sensor"].extend(v["sensor"])
                # Overwrite the wishlist that will include the above assigned datasets
                tmp_scene.wishlist = f.wishlist
                for cname in composite_names:
                    tmp_scene.compositors[cname] = tmp_scene.cpl.load_compositor(
                        cname, tmp_scene.info["sensor"])
                tmp_scene.compute()
                tmp_scene.unload()
                # Add any new Datasets to our P2G Scene if SatPy created them
                for ds in tmp_scene:
                    if ds.info["id"].name not in gridded_scene:
                        LOG.debug("Adding Dataset from SatPy Compositing: %s", ds.info["id"])
                        gridded_scene[ds.info["id"].name] = dataset_to_gridded_product(ds)
                        gridded_scene[ds.info["id"].name]["grid_definition"] = this_grid_definition
                # Remove any Products from P2G Scene that SatPy decided it didn't need anymore
                for k, v in list(gridded_scene.items()):
                    if v["id"].name not in tmp_scene:
                        LOG.debug("Removing Dataset that is no longer used: %s", k)
                        del gridded_scene[k]

        if isinstance(gridded_scene, Scene):
            LOG.debug("Converting satpy Scene to P2G Gridded Scene")
            # Convert it to P2G Gridded Scene
            gridded_scene = convert_satpy_to_p2g_gridded(f, gridded_scene)

        # Writer
        try:
            LOG.info("Creating output from data mapped to grid %s", grid_name)
            backend.create_output_from_scene(
                gridded_scene, **args.subgroup_args["Backend Output Creation"])
        except StandardError:
            LOG.debug("Writer output creation exception: ", exc_info=True)
            LOG.error("Writer output creation failed (see log for details)")
            status_to_return |= STATUS_BACKEND_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        LOG.info("Processing data for grid %s complete", grid_name)

    return status_to_return

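# A conventional command-line entry point for a script like this (a minimal
# sketch; the guard is assumed and not part of the excerpt above):
if __name__ == "__main__":
    sys.exit(main())
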
def se(datasets, optional_datasets=None, **kwargs):
    # ``prereqs`` and ``ds_id`` come from the enclosing test's scope
    if len(datasets) != len(prereqs):
        raise ValueError("Not enough prerequisite datasets passed")
    return Dataset(data=np.arange(5), **ds_id.to_dict())

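# As with ``_se`` above, ``se`` is meant to be installed as a mock's
# ``side_effect``; a hypothetical sketch (``SomeCompositor`` is not a real
# class in this suite):
#
#     from unittest import mock
#     with mock.patch.object(SomeCompositor, '__call__', side_effect=se):
#         ...  # exercise code that invokes the compositor
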