Example #1
 def test_load_all_lcc(self, pg):
     """Test loading all test datasets with lcc projections"""
     lats = np.array([
         [12.19, 0, 0, 0, 14.34208538],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [54.56534318, 0, 0, 0, 57.32843565]])
     lons = np.array([
         [-133.459, 0, 0, 0, -65.12555139],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [-152.8786225, 0, 0, 0, -49.41598659]])
     pg.open.return_value = FakeGRIB(
         proj_params={
             'a': 6371229, 'b': 6371229, 'proj': 'lcc',
             'lon_0': 265.0, 'lat_0': 25.0,
             'lat_1': 25.0, 'lat_2': 25.0},
         latlons=(lats, lons))
     from satpy.readers import load_reader
     from satpy import DatasetID
     r = load_reader(self.reader_configs)
     loadables = r.select_files_from_pathnames([
         'gfs.t18z.sfluxgrbf106.grib2',
     ])
     r.create_filehandlers(loadables)
     datasets = r.load([
         DatasetID(name='t', level=100),
         DatasetID(name='t', level=200),
         DatasetID(name='t', level=300)])
     self.assertEqual(len(datasets), 3)
     for v in datasets.values():
         self.assertEqual(v.attrs['units'], 'K')
         self.assertIsInstance(v, xr.DataArray)
Example #2
    def test_excs(self, reader_configs, caplog):
        """Test that exceptions are raised where expected."""
        from satpy import DatasetID
        from satpy.readers import load_reader

        filenames = [
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114434_GTT_DEV_"
            "20170410113925_20170410113934_N__C_0070_0067.nc",
        ]

        reader = load_reader(reader_configs)
        loadables = reader.select_files_from_pathnames(filenames)
        fhs = reader.create_filehandlers(loadables)

        with pytest.raises(ValueError):
            fhs["fci_l1c_fdhsi"][0].get_dataset(DatasetID(name="invalid"), {})
        with pytest.raises(ValueError):
            fhs["fci_l1c_fdhsi"][0]._get_dataset_quality(
                DatasetID(name="invalid"), {})
        with caplog.at_level(logging.ERROR):
            fhs["fci_l1c_fdhsi"][0].get_dataset(
                DatasetID(name="ir_123", calibration="unknown"),
                {"units": "unknown"})
            assert "unknown calibration key" in caplog.text
Example #3
    def test_fy3d_1km_resolutions(self):
        """Test loading data when only 1km resolutions are available."""
        from satpy import DatasetID
        from satpy.readers import load_reader, get_key
        filenames = [
            'tf2019071182739.FY3D-X_MERSI_1000M_L1B.HDF',
            'tf2019071182739.FY3D-X_MERSI_GEO1K_L1B.HDF',
        ]
        reader = load_reader(self.reader_configs)
        files = reader.select_files_from_pathnames(filenames)
        self.assertEqual(4, len(files))
        reader.create_filehandlers(files)
        # Make sure we have some files
        self.assertTrue(reader.file_handlers)

        # Verify that we have multiple resolutions for:
        #     - Bands 1-4 (visible)
        #     - Bands 24-25 (IR)
        available_datasets = reader.available_dataset_ids
        for band_name in ('1', '2', '3', '4', '24', '25'):
            if band_name in ('24', '25'):
                # don't know how to get radiance for IR bands
                num_results = 2
            else:
                num_results = 3
            ds_id = DatasetID(name=band_name, resolution=250)
            res = get_key(ds_id, available_datasets,
                          num_results=num_results, best=False)
            self.assertEqual(0, len(res))
            ds_id = DatasetID(name=band_name, resolution=1000)
            res = get_key(ds_id, available_datasets,
                          num_results=num_results, best=False)
            self.assertEqual(num_results, len(res))

        res = reader.load(['1', '2', '3', '4', '5', '20', '24', '25'])
        self.assertEqual(8, len(res))
        self.assertEqual((2 * 10, 2048), res['1'].shape)
        self.assertEqual('reflectance', res['1'].attrs['calibration'])
        self.assertEqual('%', res['1'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['2'].shape)
        self.assertEqual('reflectance', res['2'].attrs['calibration'])
        self.assertEqual('%', res['2'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['3'].shape)
        self.assertEqual('reflectance', res['3'].attrs['calibration'])
        self.assertEqual('%', res['3'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['4'].shape)
        self.assertEqual('reflectance', res['4'].attrs['calibration'])
        self.assertEqual('%', res['4'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['5'].shape)
        self.assertEqual('reflectance', res['5'].attrs['calibration'])
        self.assertEqual('%', res['5'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['20'].shape)
        self.assertEqual('brightness_temperature', res['20'].attrs['calibration'])
        self.assertEqual('K', res['20'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['24'].shape)
        self.assertEqual('brightness_temperature', res['24'].attrs['calibration'])
        self.assertEqual('K', res['24'].attrs['units'])
        self.assertEqual((2 * 10, 2048), res['25'].shape)
        self.assertEqual('brightness_temperature', res['25'].attrs['calibration'])
        self.assertEqual('K', res['25'].attrs['units'])
Example #4
    def _get_test_dataset_three_bands_two_prereq(self, bands=3):
        """Helper function to create a single test dataset."""
        import xarray as xr
        import dask.array as da
        from datetime import datetime
        from pyresample.geometry import AreaDefinition
        from pyresample.utils import proj4_str_to_dict
        from satpy import DatasetID
        area_def = AreaDefinition(
            'test',
            'test',
            'test',
            proj4_str_to_dict('+proj=stere +datum=WGS84 +ellps=WGS84 '
                              '+lon_0=0. +lat_0=90 +lat_ts=60 +units=km'),
            100,
            200,
            (-1000., -1500., 1000., 1500.),
        )

        ds1 = xr.DataArray(
            da.zeros((bands, 100, 200), chunks=50),
            coords=[['R', 'G', 'B'], list(range(100)), list(range(200))],
            dims=('bands', 'y', 'x'),
            attrs={'name': 'test',
                   'start_time': datetime.utcnow(),
                   'platform_name': "TEST_PLATFORM_NAME",
                   'sensor': 'TEST_SENSOR_NAME',
                   'area': area_def,
                   'prerequisites': [DatasetID(name='1', calibration='reflectance'),
                                     DatasetID(name='2', calibration='reflectance')]}
        )
        return ds1
Example #5
    def test_load_longitude_latitude(self):
        """Test that longitude and latitude datasets are loaded correctly."""
        from satpy import DatasetID

        def test_func(dname, x, y):
            if dname == 'longitude':
                # assert less
                np.testing.assert_array_less(x, y)
            else:
                # assert greater
                # np.testing.assert_equal(x > y, True)
                np.testing.assert_array_less(y, x)

        scene = Scene(reader='modis_l2', filenames=[self.file_name])
        for dataset_name in ['longitude', 'latitude']:
            # Default resolution should be the interpolated 1km
            scene.load([dataset_name])
            longitude_1km_id = DatasetID(name=dataset_name, resolution=1000)
            longitude_1km = scene[longitude_1km_id]
            self.assertEqual(longitude_1km.shape, (5*SCAN_WIDTH, 5*SCAN_LEN+4))
            test_func(dataset_name, longitude_1km.values, 0)
            # Specify original 5km scale
            scene.load([dataset_name], resolution=5000)
            longitude_5km_id = DatasetID(name=dataset_name, resolution=5000)
            longitude_5km = scene[longitude_5km_id]
            self.assertEqual(longitude_5km.shape, TEST_DATA[dataset_name.capitalize()]['data'].shape)
            test_func(dataset_name, longitude_5km.values, 0)
Example #6
    def test_properties(self):
        """Test basic properties/attributes of the MultiScene."""
        from satpy import MultiScene, DatasetID

        area = _create_test_area()
        scenes = _create_test_scenes(area=area)
        ds1_id = DatasetID(name='ds1')
        ds2_id = DatasetID(name='ds2')
        ds3_id = DatasetID(name='ds3')
        ds4_id = DatasetID(name='ds4')

        # Add a dataset to only one of the Scenes
        scenes[1]['ds3'] = _create_test_dataset('ds3')
        mscn = MultiScene(scenes)

        self.assertSetEqual(mscn.loaded_dataset_ids, {ds1_id, ds2_id, ds3_id})
        self.assertSetEqual(mscn.shared_dataset_ids, {ds1_id, ds2_id})
        self.assertTrue(mscn.all_same_area)

        bigger_area = _create_test_area(shape=(20, 40))
        scenes[0]['ds4'] = _create_test_dataset('ds4',
                                                shape=(20, 40),
                                                area=bigger_area)

        self.assertSetEqual(mscn.loaded_dataset_ids,
                            {ds1_id, ds2_id, ds3_id, ds4_id})
        self.assertSetEqual(mscn.shared_dataset_ids, {ds1_id, ds2_id})
        self.assertFalse(mscn.all_same_area)
Example #7
 def test_load_all_i_radiances(self):
     """Load all I band radiances."""
     from satpy.readers import load_reader
     from satpy import DatasetID
     r = load_reader(self.reader_configs)
     loadables = r.select_files_from_pathnames([
         'SVI01_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
         'SVI02_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
         'SVI03_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
         'SVI04_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
         'SVI05_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
         'GITCO_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5',
     ])
     r.create_filehandlers(loadables)
     ds = r.load([
         DatasetID(name='I01', calibration='radiance', modifiers=None),
         DatasetID(name='I02', calibration='radiance', modifiers=None),
         DatasetID(name='I03', calibration='radiance', modifiers=None),
         DatasetID(name='I04', calibration='radiance', modifiers=None),
         DatasetID(name='I05', calibration='radiance', modifiers=None),
     ])
     self.assertEqual(len(ds), 5)
     for d in ds.values():
         self.assertEqual(d.attrs['calibration'], 'radiance')
         self.assertEqual(d.attrs['units'], 'W m-2 um-1 sr-1')
         self.assertEqual(d.attrs['rows_per_scan'], 32)
         self.assertIn('area', d.attrs)
         self.assertIsNotNone(d.attrs['area'])
Example #8
    def test_instantiate(self, mocked_dataset):
        """Test initialization of file handlers."""
        from satpy.readers.olci_nc import (NCOLCIBase, NCOLCICal, NCOLCIGeo,
                                           NCOLCIChannelBase, NCOLCI1B,
                                           NCOLCI2)
        from satpy import DatasetID
        import xarray as xr

        cal_data = xr.Dataset(
            {
                'solar_flux': (('bands'), [0, 1, 2]),
                'detector_index': (('bands'), [0, 1, 2]),
            },
            {
                'bands': [0, 1, 2],
            },
        )

        ds_id = DatasetID(name='Oa01', calibration='reflectance')
        ds_id2 = DatasetID(name='wsqf', calibration='reflectance')
        filename_info = {
            'mission_id': 'S3A',
            'dataset_name': 'Oa01',
            'start_time': 0,
            'end_time': 0
        }

        test = NCOLCIBase('somedir/somefile.nc', filename_info, 'c')
        test.get_dataset(ds_id, filename_info)
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()

        test = NCOLCICal('somedir/somefile.nc', filename_info, 'c')
        test.get_dataset(ds_id, filename_info)
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()

        test = NCOLCIGeo('somedir/somefile.nc', filename_info, 'c')
        test.get_dataset(ds_id, filename_info)
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()

        test = NCOLCIChannelBase('somedir/somefile.nc', filename_info, 'c')
        test.get_dataset(ds_id, filename_info)
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()

        cal = mock.Mock()
        cal.nc = cal_data
        test = NCOLCI1B('somedir/somefile.nc', filename_info, 'c', cal)
        test.get_dataset(ds_id, filename_info)
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()

        test = NCOLCI2('somedir/somefile.nc', filename_info, 'c')
        test.get_dataset(ds_id, {'nc_key': 'the_key'})
        test.get_dataset(ds_id2, {'nc_key': 'the_key'})
        mocked_dataset.assert_called()
        mocked_dataset.reset_mock()
Example #9
    def test_fy4a_all_resolutions(self):
        """Test loading data when all resolutions are available."""
        from satpy import DatasetID
        from satpy.readers import load_reader, get_key
        filenames = [
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_0500M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_1000M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_2000M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_4000M_V0001.HDF',
        ]
        reader = load_reader(self.reader_configs)
        files = reader.select_files_from_pathnames(filenames)
        self.assertEqual(4, len(files))
        reader.create_filehandlers(files)
        # Make sure we have some files
        self.assertTrue(reader.file_handlers)

        available_datasets = reader.available_dataset_ids

        # 500m
        band_names = ['C' + '%02d' % ch for ch in np.linspace(2, 2, 1)]
        for band_name in band_names:
            ds_id = DatasetID(name=band_name, resolution=500)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(2, len(res))

        # 1km
        band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 3, 3)]
        for band_name in band_names:
            ds_id = DatasetID(name=band_name, resolution=1000)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(2, len(res))

        # 2km
        band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 7, 7)]
        for band_name in band_names:
            ds_id = DatasetID(name=band_name, resolution=2000)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            if band_name < 'C07':
                self.assertEqual(2, len(res))
            else:
                self.assertEqual(3, len(res))

        band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 14, 14)]
        res = reader.load(band_names)
        self.assertEqual(14, len(res))

        for band_name in band_names:
            self.assertEqual((2, 5), res[band_name].shape)
            if band_name < 'C07':
                self.assertEqual('reflectance',
                                 res[band_name].attrs['calibration'])
            else:
                self.assertEqual('brightness_temperature',
                                 res[band_name].attrs['calibration'])
            if band_name < 'C07':
                self.assertEqual('%', res[band_name].attrs['units'])
            else:
                self.assertEqual('K', res[band_name].attrs['units'])
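A note on the band-name construction used in these AGRI tests: np.linspace yields floats, which '%02d' truncates to zero-padded integers. A standalone illustration (not part of the test suite):

    import numpy as np

    band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 14, 14)]
    # np.linspace(1, 14, 14) gives 1.0, 2.0, ..., 14.0 and '%02d' zero-pads the
    # integer part, so this is equivalent to ['C%02d' % ch for ch in range(1, 15)]
    print(band_names)  # ['C01', 'C02', ..., 'C14']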
Example #10
    def test_fy4a_1km_resolutions(self):
        """Test loading data when only 1km resolutions are available."""
        from satpy import DatasetID
        from satpy.readers import load_reader, get_key
        filenames = [
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_1000M_V0001.HDF',
        ]
        reader = load_reader(self.reader_configs)
        files = reader.select_files_from_pathnames(filenames)
        self.assertEqual(1, len(files))
        reader.create_filehandlers(files)
        # Make sure we have some files
        self.assertTrue(reader.file_handlers)

        # Verify that the resolution is only 1km
        available_datasets = reader.available_dataset_ids
        band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 3, 3)]

        for band_name in band_names:
            ds_id = DatasetID(name=band_name, resolution=500)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(0, len(res))
            ds_id = DatasetID(name=band_name, resolution=1000)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(2, len(res))
            ds_id = DatasetID(name=band_name, resolution=2000)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(0, len(res))
            ds_id = DatasetID(name=band_name, resolution=4000)
            res = get_key(ds_id, available_datasets, num_results=0, best=False)
            self.assertEqual(0, len(res))

        res = reader.load(band_names)
        self.assertEqual(3, len(res))
        expected = {
            1:
            np.array([[2.01, 2.02, 2.03, 2.04, 2.05],
                      [2.06, 2.07, 2.08, 2.09, 2.1]]),
            2:
            np.array([[4.03, 4.06, 4.09, 4.12, 4.15],
                      [4.18, 4.21, 4.24, 4.27, 4.3]]),
            3:
            np.array([[6.05, 6.1, 6.15, 6.2, 6.25],
                      [6.3, 6.35, 6.4, 6.45, 6.5]])
        }

        for index, band_name in enumerate(band_names):
            self.assertTrue(res[band_name].attrs['sensor'].islower())
            self.assertEqual((2, 5), res[band_name].shape)
            self.assertEqual('reflectance',
                             res[band_name].attrs['calibration'])
            self.assertEqual('%', res[band_name].attrs['units'])
            self.assertTrue(
                np.allclose(res[band_name].values,
                            expected[index + 1],
                            equal_nan=True))
Example #11
 def _create_dataset_ids(self, keys):
     from itertools import product
     ordered_keys = [k for k in keys.keys() if 'id_key' in keys[k]]
     for id_vals in product(*[keys[k]['values'] for k in ordered_keys]):
         id_keys = [keys[k]['id_key'] for k in ordered_keys]
         msg_info = dict(zip(ordered_keys, id_vals))
         ds_info = dict(zip(id_keys, id_vals))
         msg_id = DatasetID(**ds_info)
         ds_info = msg_id.to_dict()
         ds_info.update(msg_info)
         ds_info['file_type'] = self.filetype_info['file_type']
         self._msg_datasets[msg_id] = ds_info
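To make the product/zip expansion in _create_dataset_ids easier to follow, here is a minimal standalone sketch. The keys mapping below is hypothetical (it only mimics the shape the helper expects); the expansion logic itself mirrors the code above:

    from itertools import product
    from satpy import DatasetID

    # hypothetical configuration: each entry names the DatasetID field it fills
    # ('id_key') and the values that field can take
    keys = {
        'parameter': {'id_key': 'name', 'values': ['t', 'gh']},
        'level': {'id_key': 'level', 'values': [500, 850]},
    }
    ordered_keys = [k for k in keys if 'id_key' in keys[k]]
    id_keys = [keys[k]['id_key'] for k in ordered_keys]
    for id_vals in product(*[keys[k]['values'] for k in ordered_keys]):
        msg_id = DatasetID(**dict(zip(id_keys, id_vals)))
        print(msg_id)  # one DatasetID per combination, e.g. DatasetID(name='t', level=500)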
Example #12
 def test_read_geo(self):
     """Test read_geo() function"""
     import h5py
     from satpy.readers.iasi_l2 import read_geo
     from satpy import DatasetID
     with h5py.File(self.fname, 'r') as fid:
         key = DatasetID(name='sensing_time')
         data = read_geo(fid, key).compute()
         self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH))
         key = DatasetID(name='latitude')
         data = read_geo(fid, key).compute()
         self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH))
Example #13
 def test_get_dataset_coords(self):
     """Test whether coordinates returned by get_dataset() are correct"""
     lon = self.reader.get_dataset(key=DatasetID(name='longitude',
                                                 calibration=None),
                                   info={})
     lat = self.reader.get_dataset(key=DatasetID(name='latitude',
                                                 calibration=None),
                                   info={})
     # ... this only compares the valid (unmasked) elements
     self.assertTrue(np.all(lat.to_masked_array() == self.lat),
                     msg='get_dataset() returns invalid latitude')
     self.assertTrue(np.all(lon.to_masked_array() == self.lon),
                     msg='get_dataset() returns invalid longitude')
Example #14
 def test_get_dataset(self):
     """Test get_dataset() for different datasets"""
     from satpy import DatasetID
     info = {'eggs': 'spam'}
     key = DatasetID(name='pressure')
     data = self.reader.get_dataset(key, info).compute()
     self.check_pressure(data)
     self.assertTrue('eggs' in data.attrs)
     self.assertEqual(data.attrs['eggs'], 'spam')
     key = DatasetID(name='emissivity')
     data = self.reader.get_dataset(key, info).compute()
     self.check_emissivity(data)
     key = DatasetID(name='sensing_time')
     data = self.reader.get_dataset(key, info).compute()
     self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH))
Example #15
 def test_load_longitude_latitude(self):
     """Test that longitude and latitude datasets are loaded correctly."""
     from satpy import DatasetID
     scene = Scene(reader='modis_l2', filenames=[self.file_name])
     for dataset_name in ['longitude', 'latitude']:
         # Default resolution should be the interpolated 1km
         scene.load([dataset_name])
         longitude_1km_id = DatasetID(name=dataset_name, resolution=1000)
         longitude_1km = scene[longitude_1km_id]
         self.assertEqual(longitude_1km.shape, (5*SCAN_WIDTH, 5*SCAN_LEN+4))
         # Specify original 5km scale
         scene.load([dataset_name], resolution=5000)
         longitude_5km_id = DatasetID(name=dataset_name, resolution=5000)
         longitude_5km = scene[longitude_5km_id]
         self.assertEqual(longitude_5km.shape, TEST_DATA[dataset_name.capitalize()]['data'].shape)
Example #16
    def test_get_dataset_invalid(self):
        """Test handling of invalid calibrations"""
        # VIS -> BT
        args = dict(key=DatasetID(name='00_7',
                                  calibration='brightness_temperature'),
                    info={})
        self.assertRaises(ValueError, self.reader.get_dataset, **args)

        # IR -> Reflectance
        args = dict(key=DatasetID(name='10_7', calibration='reflectance'),
                    info={})
        self.assertRaises(ValueError, self.reader.get_dataset, **args)

        # Unsupported calibration
        args = dict(key=DatasetID(name='10_7', calibration='invalid'), info={})
        self.assertRaises(ValueError, self.reader.get_dataset, **args)
Example #17
    def _init_summary_page(self):
        # we are going to use the id table to get our summary
        # so make sure the values are correct
        if self.ui.selectByTabWidget.currentIndex() != BY_ID_TAB:
            self.ui.selectByTabWidget.setCurrentIndex(BY_ID_TAB)

        selected_text = []
        selected_ids = []
        id_format = "| {name:<20s} | {level:>8s} |"
        header_format = "| {name:<20s} | {level:>8s} |"
        header_line = "|-{0:-^20s}-|-{0:-^8s}-|".format('-')
        for item_idx in range(self.ui.selectIDTable.rowCount()):
            name_item = self.ui.selectIDTable.item(item_idx, 0)
            level_item = self.ui.selectIDTable.item(item_idx, 1)
            if name_item.checkState():
                name = name_item.data(QtCore.Qt.UserRole)
                level = level_item.data(QtCore.Qt.UserRole)
                selected_ids.append(DatasetID(name=name, level=level))
                selected_text.append(id_format.format(
                    name=name_item.text(),
                    level=level_item.text(),
                ))

        self.selected_ids = selected_ids

        summary_text = """Products to be loaded: {}

""".format(len(selected_ids))

        header = header_format.format(name="Name", level="Level")
        summary_text += "\n".join([header, header_line] + selected_text)
        self.ui.productSummaryText.setText(summary_text)
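The format strings above lay out a fixed-width text table for the product summary page; a quick standalone look at what they render (the name and level values here are made up):

    id_format = "| {name:<20s} | {level:>8s} |"
    header_line = "|-{0:-^20s}-|-{0:-^8s}-|".format('-')
    print(id_format.format(name="Name", level="Level"))  # header row
    print(header_line)                                    # divider of dashes
    print(id_format.format(name="t", level="100"))        # one data row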
Example #18
 def test_read_dataset(self):
     """Test read_dataset() function"""
     import h5py
     from satpy.readers.iasi_l2 import read_dataset
     from satpy import DatasetID
     with h5py.File(self.fname, 'r') as fid:
         key = DatasetID(name='pressure')
         data = read_dataset(fid, key).compute()
         self.check_pressure(data)
         key = DatasetID(name='emissivity')
         data = read_dataset(fid, key).compute()
         self.check_emissivity(data)
         # This dataset doesn't have any attributes
         key = DatasetID(name='ozone_total_column')
         data = read_dataset(fid, key).compute()
         self.assertEqual(len(data.attrs), 0)
Example #19
    def test_get_dataset(self):
        """Test the get_dataset method."""
        from satpy import DatasetID
        key = DatasetID(name='Rad', calibration='radiance')
        res = self.reader.get_dataset(key, {'info': 'info'})
        exp = {'calibration': 'radiance',
               'instrument_ID': None,
               'modifiers': (),
               'name': 'Rad',
               'observation_type': 'Rad',
               'orbital_parameters': {'projection_altitude': 1.0,
                                      'projection_latitude': 0.0,
                                      'projection_longitude': -90.0,
                                      'satellite_nominal_altitude': 35786020.,
                                      'satellite_nominal_latitude': 0.0,
                                      'satellite_nominal_longitude': -89.5,
                                      'yaw_flip': True},
               'orbital_slot': None,
               'platform_name': 'GOES-16',
               'platform_shortname': 'G16',
               'production_site': None,
               'scan_mode': 'M3',
               'scene_abbr': 'C',
               'scene_id': None,
               'sensor': 'abi',
               'timeline_ID': None,
               'units': 'W m-2 um-1 sr-1'}

        self.assertDictEqual(res.attrs, exp)
        # we remove any time dimension information
        self.assertNotIn('t', res.coords)
        self.assertNotIn('t', res.dims)
        self.assertNotIn('time', res.coords)
        self.assertNotIn('time', res.dims)
Example #20
 def test_save_array(self):
     from satpy import Scene
     import xarray as xr
     import tempfile
     scn = Scene()
     start_time = datetime(2018, 5, 30, 10, 0)
     end_time = datetime(2018, 5, 30, 10, 15)
     scn['test-array'] = xr.DataArray([1, 2, 3],
                                      attrs=dict(
                                          start_time=start_time,
                                          end_time=end_time,
                                          prerequisites=[DatasetID('hej')]))
     try:
         handle, filename = tempfile.mkstemp()
         os.close(handle)
         scn.save_datasets(filename=filename, writer='cf')
         import h5netcdf as nc4
         with nc4.File(filename) as f:
             self.assertTrue(all(f['test-array'][:] == [1, 2, 3]))
             expected_prereq = (
                 "DatasetID(name='hej', wavelength=None, "
                 "resolution=None, polarization=None, "
                 "calibration=None, level=None, modifiers=())")
             self.assertEqual(f['test-array'].attrs['prerequisites'][0],
                              np.string_(expected_prereq))
     finally:
         os.remove(filename)
Example #21
    def test_get_dataset_counts(self):
        """Test whether counts returned by get_dataset() are correct"""
        from satpy.readers.goes_imager_nc import ALTITUDE, UNKNOWN_SECTOR

        self.reader.meta.update({'lon0': -75.0,
                                 'lat0': 0.0,
                                 'sector': UNKNOWN_SECTOR,
                                 'nadir_row': 1,
                                 'nadir_col': 2,
                                 'area_def_uni': 'some_area'})
        attrs_exp = {'orbital_parameters': {'projection_longitude': -75.0,
                                            'projection_latitude': 0.0,
                                            'projection_altitude': ALTITUDE,
                                            'yaw_flip': True},
                     'satellite_longitude': -75.0,
                     'satellite_latitude': 0.0,
                     'satellite_altitude': ALTITUDE,
                     'platform_name': 'GOES-15',
                     'sensor': 'goes_imager',
                     'sector': UNKNOWN_SECTOR,
                     'nadir_row': 1,
                     'nadir_col': 2,
                     'area_def_uniform_sampling': 'some_area'}

        for ch in self.channels:
            counts = self.reader.get_dataset(
                key=DatasetID(name=ch, calibration='counts'), info={})
            # ... this only compares the valid (unmasked) elements
            self.assertTrue(np.all(self.counts/32. == counts.to_masked_array()),
                            msg='get_dataset() returns invalid counts for '
                                'channel {}'.format(ch))

            # Check attributes
            self.assertDictEqual(counts.attrs, attrs_exp)
Example #22
    def test_get_dataset(self):
        """Test basic L2 load."""
        from satpy import DatasetID
        key = DatasetID(name='HT')
        res = self.reader.get_dataset(key, {'file_key': 'HT'})

        exp_data = np.array([[2 * 0.3052037, np.nan],
                             [32768 * 0.3052037, 32767 * 0.3052037]])

        exp_attrs = {
            'instrument_ID': None,
            'modifiers': (),
            'name': 'HT',
            'orbital_slot': None,
            'platform_name': 'GOES-16',
            'platform_shortname': 'G16',
            'production_site': None,
            'satellite_altitude': 35786020.,
            'satellite_latitude': 0.0,
            'satellite_longitude': -89.5,
            'scan_mode': 'M3',
            'scene_id': None,
            'sensor': 'abi',
            'timeline_ID': None,
            'units': 'm'
        }

        self.assertTrue(np.allclose(res.data, exp_data, equal_nan=True))
        self.assertDictEqual(dict(res.attrs), exp_attrs)
Example #23
    def test_load_bt(self):
        """Test loading with bt
        """
        from satpy import DatasetID
        from satpy.readers import load_reader

        filenames = [
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114434_GTT_DEV_"
            "20170410113925_20170410113934_N__C_0070_0067.nc",
        ]

        reader = load_reader(self.reader_configs)
        loadables = reader.select_files_from_pathnames(filenames)
        reader.create_filehandlers(loadables)
        res = reader.load([
            DatasetID(name=name, calibration="brightness_temperature")
            for name in self._chans["terran"]
        ])
        self.assertEqual(8, len(res))
        for ch in self._chans["terran"]:
            self.assertEqual(res[ch].shape, (200, 11136))
            self.assertEqual(res[ch].dtype, np.float64)
            self.assertEqual(res[ch].attrs["calibration"],
                             "brightness_temperature")
            self.assertEqual(res[ch].attrs["units"], "K")
            numpy.testing.assert_array_almost_equal(res[ch], 181.917084)
Example #24
 def test_get_dataset(self, mocked_dataset):
     """Test reading datasets."""
     from satpy.readers.olci_nc import NCOLCI2
     from satpy import DatasetID
     import numpy as np
     import xarray as xr
     mocked_dataset.return_value = xr.Dataset(
         {
             'mask':
             (['rows', 'columns'], np.array([1 << x for x in range(30)
                                             ]).reshape(5, 6))
         },
         coords={
             'rows': np.arange(5),
             'columns': np.arange(6)
         })
     ds_id = DatasetID(name='mask')
     filename_info = {
         'mission_id': 'S3A',
         'dataset_name': 'mask',
         'start_time': 0,
         'end_time': 0
     }
     test = NCOLCI2('somedir/somefile.nc', filename_info, 'c')
     res = test.get_dataset(ds_id, {'nc_key': 'mask'})
     self.assertEqual(res.dtype, np.dtype('bool'))
Example #25
    def test_get_area_def(self):
        """Get the area definition."""
        from satpy.readers.nwcsaf_msg2013_hdf5 import Hdf5NWCSAF
        from satpy import DatasetID

        filename_info = {}
        filetype_info = {}
        dsid = DatasetID(name="ct")
        test = Hdf5NWCSAF(self.filename_ct, filename_info, filetype_info)

        area_def = test.get_area_def(dsid)

        aext_res = AREA_DEF_DICT['area_extent']
        for i in range(4):
            self.assertAlmostEqual(area_def.area_extent[i], aext_res[i], 4)

        proj_dict = AREA_DEF_DICT['proj_dict']
        self.assertEqual(proj_dict['proj'], area_def.proj_dict['proj'])
        # Not all elements passed on Appveyor, so skip testing every single element of the proj-dict:
        # for key in proj_dict:
        #    self.assertEqual(proj_dict[key], area_def.proj_dict[key])

        self.assertEqual(AREA_DEF_DICT['x_size'], area_def.width)
        self.assertEqual(AREA_DEF_DICT['y_size'], area_def.height)

        self.assertEqual(AREA_DEF_DICT['area_id'], area_def.area_id)
Example #26
    def test_navigation(self):
        """Test reading the lon and lats."""
        with tempfile.TemporaryFile() as tmpfile:
            self._header.tofile(tmpfile)
            tmpfile.seek(22016, 0)
            self._data.tofile(tmpfile)

            fh = AVHRRAAPPL1BFile(tmpfile, self.filename_info,
                                  self.filetype_info)
            info = {}
            key = DatasetID(name='longitude')
            res = fh.get_dataset(key, info)
            assert (np.all(res == 0))
            key = DatasetID(name='latitude')
            res = fh.get_dataset(key, info)
            assert (np.all(res == 0))
Example #27
    def test_load_bt(self, reader_configs, caplog):
        """Test loading with bt."""
        from satpy import DatasetID
        from satpy.readers import load_reader
        filenames = [
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114434_GTT_DEV_"
            "20170410113925_20170410113934_N__C_0070_0067.nc",
        ]

        reader = load_reader(reader_configs)
        loadables = reader.select_files_from_pathnames(filenames)
        reader.create_filehandlers(loadables)
        with caplog.at_level(logging.WARNING):
            res = reader.load([
                DatasetID(name=name, calibration="brightness_temperature")
                for name in self._chans["terran"]
            ])
            assert caplog.text == ""
        for ch in self._chans["terran"]:
            assert res[ch].shape == (200, 11136)
            assert res[ch].dtype == np.float64
            assert res[ch].attrs["calibration"] == "brightness_temperature"
            assert res[ch].attrs["units"] == "K"

            if ch == 'ir_38':
                numpy.testing.assert_array_almost_equal(
                    res[ch][~0], 209.68274099)
                numpy.testing.assert_array_almost_equal(
                    res[ch][0], 1888.851296)
            else:
                numpy.testing.assert_array_almost_equal(res[ch], 209.68274099)
Example #28
    def test_fy4a_counts_calib(self):
        """Test loading data at counts calibration."""
        from satpy import DatasetID
        from satpy.readers import load_reader
        filenames = [
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_0500M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_1000M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_2000M_V0001.HDF',
            'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_4000M_V0001.HDF',
        ]
        reader = load_reader(self.reader_configs)
        files = reader.select_files_from_pathnames(filenames)
        self.assertEqual(4, len(files))
        reader.create_filehandlers(files)
        # Make sure we have some files
        self.assertTrue(reader.file_handlers)

        ds_ids = []
        band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 14, 14)]
        for band_name in band_names:
            ds_ids.append(DatasetID(name=band_name, calibration='counts'))
        res = reader.load(ds_ids)
        self.assertEqual(14, len(res))

        for band_name in band_names:
            self.assertEqual((2, 5), res[band_name].shape)
            self.assertEqual('counts', res[band_name].attrs['calibration'])
            self.assertEqual(res[band_name].dtype, np.uint16)
            self.assertEqual('1', res[band_name].attrs['units'])
Example #29
    def test_load_reflectance(self, reader_configs):
        """Test loading with reflectance."""
        from satpy import DatasetID
        from satpy.readers import load_reader

        filenames = [
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114434_GTT_DEV_"
            "20170410113925_20170410113934_N__C_0070_0067.nc",
        ]

        reader = load_reader(reader_configs)
        loadables = reader.select_files_from_pathnames(filenames)
        reader.create_filehandlers(loadables)
        res = reader.load([
            DatasetID(name=name, calibration="reflectance")
            for name in self._chans["solar"]
        ])
        assert 8 == len(res)
        for ch in self._chans["solar"]:
            assert res[ch].shape == (200, 11136)
            assert res[ch].dtype == np.float64
            assert res[ch].attrs["calibration"] == "reflectance"
            assert res[ch].attrs["units"] == "%"
            numpy.testing.assert_array_equal(res[ch],
                                             100 * 15 * 1 * np.pi / 50)
Example #30
    def test_load_counts(self, reader_configs):
        """Test loading with counts."""
        from satpy import DatasetID
        from satpy.readers import load_reader

        # testing two filenames to test correctly combined
        filenames = [
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114434_GTT_DEV_"
            "20170410113925_20170410113934_N__C_0070_0067.nc",
            "W_XX-EUMETSAT-Darmstadt,IMG+SAT,MTI1+FCI-1C-RRAD-FDHSI-FD--"
            "CHK-BODY--L2P-NC4E_C_EUMT_20170410114442_GTT_DEV_"
            "20170410113934_20170410113942_N__C_0070_0068.nc",
        ]

        reader = load_reader(reader_configs)
        loadables = reader.select_files_from_pathnames(filenames)
        reader.create_filehandlers(loadables)
        res = reader.load([
            DatasetID(name=name, calibration="counts")
            for name in self._chans["solar"] + self._chans["terran"]
        ])
        assert 16 == len(res)
        for ch in self._chans["solar"] + self._chans["terran"]:
            assert res[ch].shape == (200 * 2, 11136)
            assert res[ch].dtype == np.uint16
            assert res[ch].attrs["calibration"] == "counts"
            assert res[ch].attrs["units"] == "1"
            if ch == 'ir_38':
                numpy.testing.assert_array_equal(res[ch][~0], 1)
                numpy.testing.assert_array_equal(res[ch][0], 5000)
            else:
                numpy.testing.assert_array_equal(res[ch], 1)
Example #31
 def filter_nighttime(self, scene):
     if self.fraction_day_scene is None:
         self._calc_percent_day(scene)
     # make a copy of the scene list so we can edit it later
     for ds in list(scene):
         if ds.attrs['name'] in ('ifog',) and \
                         self.fraction_night_scene <= self.night_fraction:
             ds_id = DatasetID.from_dict(ds.attrs)
             LOG.info("Will not create product '%s' because there is less than %f%% of night data",
                      ds.attrs['name'], self.night_fraction * 100.)
             del scene[ds_id]
Example #32
 def filter_daytime(self, scene):
     if self.fraction_day_scene is None:
         self._calc_percent_day(scene)
     # make a copy of the scene list so we can edit it later
     for ds in list(scene):
         if ds.attrs['standard_name'] in ('toa_bidirectional_reflectance', 'false_color', 'true_color') and \
                         self.fraction_day_scene <= self.day_fraction:
             ds_id = DatasetID.from_dict(ds.attrs)
             LOG.info("Will not create product '%s' because there is less than %f%% of day data",
                      ds.attrs['name'], self.day_fraction * 100.)
             del scene[ds_id]
Example #33
 def __call__(self, datasets, optional_datasets, **info):
     if self.attrs['optional_prerequisites']:
         for opt_dep in self.attrs['optional_prerequisites']:
             if 'NOPE' in opt_dep or 'fail' in opt_dep:
                 continue
             assert optional_datasets is not None and \
                 len(optional_datasets)
     resolution = DatasetID.from_dict(datasets[0].attrs).resolution
     if name == 'res_change' and resolution is not None:
         i = datasets[0].attrs.copy()
         i['resolution'] *= 5
     elif name == 'incomp_areas':
         raise IncompatibleAreas(
             "Test modifier 'incomp_areas' always raises IncompatibleAreas")
     else:
         i = datasets[0].attrs
     info = datasets[0].attrs.copy()
     self.apply_modifier_info(i, info)
     return DataArray(np.ma.MaskedArray(datasets[0]), attrs=info)
Example #34
    def _update_modifier_key(self, orig_key, dep_key):
        """Update a key based on the dataset it will modified (dep).

        Typical use case is requesting a modified dataset (orig_key). This
        modified dataset most likely depends on a less-modified
        dataset (dep_key). The less-modified dataset must come from a reader
        (at least for now) or will eventually depend on a reader dataset.
        The original request key may be limited like
        (wavelength=0.67, modifiers=('a', 'b')) while the reader-based key
        should have all of its properties specified. This method updates the
        original request key so it is fully specified and should reduce the
        chance of Node's not being unique.

        """
        orig_dict = orig_key._asdict()
        dep_dict = dep_key._asdict()
        # don't change the modifiers
        for k in DATASET_KEYS[:-1]:
            orig_dict[k] = dep_dict[k]
        return DatasetID.from_dict(orig_dict)
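A minimal sketch of the _asdict()/from_dict() copy pattern used in _update_modifier_key, assuming the old namedtuple-based DatasetID; the import path for DATASET_KEYS and the concrete keys below are illustrative assumptions:

    from satpy import DatasetID
    from satpy.dataset import DATASET_KEYS  # assumed location of the field-name tuple

    orig_key = DatasetID(wavelength=0.67, modifiers=('a', 'b'))   # partially specified request
    dep_key = DatasetID(name='C02', wavelength=0.64, resolution=500,
                        calibration='reflectance')                # fully specified reader key

    orig_dict = orig_key._asdict()
    dep_dict = dep_key._asdict()
    for k in DATASET_KEYS[:-1]:  # copy every field except 'modifiers'
        orig_dict[k] = dep_dict[k]
    print(DatasetID.from_dict(orig_dict))
    # fully specified key that still carries modifiers=('a', 'b')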
Example #35
def main(argv=sys.argv[1:]):
    from polar2grid.core.script_utils import setup_logging, create_basic_parser, create_exc_handler, rename_log_file, ExtendAction
    from polar2grid.compositors import CompositorManager
    frontends = available_frontends()
    backends = available_backends()
    parser = create_basic_parser(description="Extract swath data, remap it, and write it to a new file format")
    parser.add_argument("frontend", choices=sorted(frontends.keys()),
                        help="Specify the swath extractor to use to read data (additional arguments are determined after this is specified)")
    parser.add_argument("backend", choices=sorted(backends.keys()),
                        help="Specify the backend to use to write data output (additional arguments are determined after this is specified)")
    parser.add_argument("--compositor-configs", nargs="*", default=None,
                        help="Specify alternative configuration file(s) for compositors")
    # don't include the help flag
    argv_without_help = [x for x in argv if x not in ["-h", "--help"]]
    args, remaining_args = parser.parse_known_args(argv_without_help)
    glue_name = args.frontend + "2" + args.backend
    LOG = logging.getLogger(glue_name)

    # Load compositor information (we can't know the compositor choices until we've loaded the configuration)
    compositor_manager = CompositorManager(config_files=args.compositor_configs)
    # Hack: argparse doesn't let you use choices and nargs=* on a positional argument
    parser.add_argument("compositors", choices=list(compositor_manager.keys()) + [[]], nargs="*",
                        help="Specify the compositors to apply to the provided scene (additional arguments are determined after this is specified)")

    # load the actual components we need
    farg_func = get_frontend_argument_func(frontends, args.frontend)
    fcls = get_frontend_class(frontends, args.frontend)
    barg_func = get_backend_argument_func(backends, args.backend)
    bcls = get_backend_class(backends, args.backend)

    # add_frontend_arguments(parser)
    subgroup_titles = []
    subgroup_titles += farg_func(parser)
    subgroup_titles += add_remap_argument_groups(parser)
    subgroup_titles += barg_func(parser)

    parser.add_argument('-f', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="List of files or directories to extract data from")
    parser.add_argument('-d', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="Data directories to look for input data files (equivalent to -f)")
    global_keywords = ("keep_intermediate", "overwrite_existing", "exit_on_error")
    args = parser.parse_args(argv, global_keywords=global_keywords, subgroup_titles=subgroup_titles)

    if not args.data_files:
        # FUTURE: When the -d flag is removed this won't be needed because -f will be required
        parser.print_usage()
        parser.exit(1, "ERROR: No data files provided (-f flag)\n")

    # Logs are renamed once the start date of the provided data is known
    rename_log = False
    if args.log_fn is None:
        rename_log = True
        args.log_fn = glue_name + "_fail.log"
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    setup_logging(console_level=levels[min(3, args.verbosity)], log_filename=args.log_fn)
    sys.excepthook = create_exc_handler(LOG.name)
    LOG.debug("Starting script with arguments: %s", " ".join(sys.argv))

    # Keep track of things going wrong to tell the user what went wrong (we want to create as much as possible)
    status_to_return = STATUS_SUCCESS

    # Compositor validation
    # XXX: Hack to make `polar2grid.sh crefl gtiff` work like legacy crefl2gtiff.sh script
    if args.subgroup_args['Frontend Swath Extraction'].get('no_compositors'):
        LOG.debug("Removing all compositors")
        args.compositors = []
    elif args.frontend == 'crefl':
        if args.backend in ['awips', 'scmi']:
            LOG.debug("Adding 'crefl_sharpen' compositor")
            args.compositors.append('crefl_sharpen' if args.backend == 'scmi' else 'crefl_sharpen_awips')
        else:
            LOG.debug("Adding 'true_color' compositor")
            args.compositors.append('true_color')
            if '--true-color' in sys.argv and 'true_color' not in args.compositors:
                LOG.debug("Adding 'true_color' compositor")
                args.compositors.append('true_color')
            if '--false-color' in sys.argv and 'false_color' not in args.compositors:
                LOG.debug("Adding 'false_color' compositor")
                args.compositors.append('false_color')

    # if "--true-color" in
    for c in args.compositors:
        if c not in compositor_manager:
            LOG.error("Compositor '%s' is unknown" % (c,))
            raise RuntimeError("Compositor '%s' is unknown" % (c,))

    # Frontend
    try:
        LOG.info("Initializing reader...")
        list_products = args.subgroup_args["Frontend Initialization"].pop("list_products")
        f = fcls(search_paths=args.data_files, **args.subgroup_args["Frontend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Frontend exception: ", exc_info=True)
        LOG.error("%s frontend failed to load and sort data files (see log for details)", args.frontend)
        return STATUS_FRONTEND_FAIL

    # Rename the log file
    if rename_log:
        rename_log_file(glue_name + f.begin_time.strftime("_%Y%m%d_%H%M%S.log"))

    if list_products:
        print("\n".join(sorted(f.available_product_names)))
        return STATUS_SUCCESS

    try:
        LOG.info("Initializing remapping...")
        remapper = Remapper(**args.subgroup_args["Remapping Initialization"])
        remap_kwargs = args.subgroup_args["Remapping"]
    except (ValueError, KeyError):
        LOG.debug("Remapping initialization exception: ", exc_info=True)
        LOG.error("Remapping initialization failed (see log for details)")
        return STATUS_REMAP_FAIL

    try:
        LOG.info("Initializing backend...")
        backend = bcls(**args.subgroup_args["Backend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Writer initialization exception: ", exc_info=True)
        LOG.error("Writer initialization failed (see log for details)")
        return STATUS_BACKEND_FAIL

    try:
        LOG.info("Initializing compositor objects...")
        compositor_objects = {}
        for c in args.compositors:
            compositor_objects[c] = compositor_manager.get_compositor(c, **args.global_kwargs)
    except (ValueError, KeyError):
        LOG.debug("Compositor initialization exception: ", exc_info=True)
        LOG.error("Compositor initialization failed (see log for details)")
        return STATUS_COMP_FAIL

    try:
        LOG.info("Extracting swaths from data files available...")
        scene = f.create_scene(**args.subgroup_args["Frontend Swath Extraction"])

        # Determine whether we have a satpy Scene and whether we should
        # convert it to a P2G Scene to continue processing
        resample_method = args.subgroup_args["Remapping"].get("remap_method")
        is_satpy_resample_method = resample_method in SATPY_RESAMPLERS
        if is_satpy_resample_method and not isinstance(scene, Scene):
            raise RuntimeError("Resampling method '{}' only supports 'satpy' readers".format(resample_method))
        elif not is_satpy_resample_method and isinstance(scene, Scene):
            # convert satpy scene to P2G Scene to be compatible with old P2G resamplers
            scene = convert_satpy_to_p2g_swath(f, scene)

        if isinstance(scene, Scene):
            if not scene.datasets:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                raise RuntimeError("satpy readers do not currently support saving intermediate files")
        else:
            if (isinstance(scene, Scene) and not scene.datasets) or not scene:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                filename = glue_name + "_swath_scene.json"
                LOG.info("Saving intermediate swath scene as '%s'", filename)
                scene.save(filename)
    except (ValueError, KeyError):
        LOG.debug("Frontend data extraction exception: ", exc_info=True)
        LOG.error("Frontend data extraction failed (see log for details)")
        return STATUS_FRONTEND_FAIL

    # What grids should we remap to (the user should tell us or the backend should have a good set of defaults)
    known_grids = backend.known_grids
    LOG.debug("Writer known grids: %r", known_grids)
    grids = remap_kwargs.pop("forced_grids", None)
    LOG.debug("Forced Grids: %r", grids)
    if resample_method == "sensor" and grids != ["sensor"]:
        LOG.error("'sensor' resampling method only supports the 'sensor' grid")
        return STATUS_GDETER_FAIL
    if not grids and not known_grids:
        # the user didn't ask for any grids and the backend doesn't have specific defaults
        LOG.error("No grids specified and no known defaults")
        return STATUS_GDETER_FAIL
    elif not grids:
        # the user didn't tell us what to do, so let's try everything the backend knows how to do
        grids = known_grids
    elif known_grids is not None:
        # the user told us what to do, let's make sure the backend can do it
        grids = list(set(grids) & set(known_grids))
        if not grids:
            LOG.error("%s backend doesn't know how to handle any of the grids specified", args.backend)
            return STATUS_GDETER_FAIL
    LOG.debug("Grids that will be mapped to: %r", grids)

    # Remap
    for grid_name in grids:
        LOG.info("Remapping to grid %s", grid_name)
        try:
            gridded_scene = remapper.remap_scene(scene, grid_name, **remap_kwargs)
            if args.keep_intermediate:
                filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                LOG.debug("saving intermediate gridded scene as '%s'", filename)
                gridded_scene.save(filename)
        except (ValueError, KeyError):
            LOG.debug("Remapping data exception: ", exc_info=True)
            LOG.error("Remapping data failed")
            status_to_return |= STATUS_REMAP_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        if not isinstance(scene, Scene):
            # Composition
            for c, comp in compositor_objects.items():
                try:
                    LOG.info("Running gridded scene through '%s' compositor", c)
                    gridded_scene = comp.modify_scene(gridded_scene, **args.subgroup_args[c + " Modification"])
                    if args.keep_intermediate:
                        filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                        LOG.debug("Updating saved intermediate gridded scene (%s) after compositor", filename)
                        gridded_scene.save(filename)
                except (KeyError, ValueError):
                    LOG.debug("Compositor Error: ", exc_info=True)
                    LOG.error("Could not properly modify scene using compositor '%s'" % (c,))
                    if args.exit_on_error:
                        raise RuntimeError("Could not properly modify scene using compositor '%s'" % (c,))

        if isinstance(f, ReaderWrapper) and not isinstance(gridded_scene, Scene):
            this_grid_definition = None
            # HACK: Create SatPy composites that were either separated before
            # resampling or needed resampling to be created
            rgbs = {}
            for product_name in gridded_scene.keys():
                rgb_name = product_name[:-6]
                # Keep track of one of the grid definitions
                if this_grid_definition is None:
                    this_grid_definition = gridded_scene[product_name]["grid_definition"]

                if product_name.endswith("rgb_0") or product_name.endswith("rgb_1") or product_name.endswith("rgb_2"):
                    if rgb_name not in rgbs:
                        rgbs[rgb_name] = [None, None, None]
                    chn_idx = int(product_name[-1])
                    rgbs[rgb_name][chn_idx] = product_name
            LOG.debug("Putting RGBs back together again")
            for rgb_name, v in rgbs.items():
                r = gridded_scene.pop(v[0])
                g = gridded_scene.pop(v[1])
                b = gridded_scene.pop(v[2])
                new_info = r.copy()
                new_info["grid_data"] = new_info["grid_data"].replace(v[0], rgb_name)
                new_info["product_name"] = rgb_name
                data = np.memmap(new_info["grid_data"], dtype=new_info["data_type"],
                                 mode="w+", shape=(3, new_info["grid_definition"]["height"], new_info["grid_definition"]["width"]))
                data[0] = r.get_data_array()[:]
                data[1] = g.get_data_array()[:]
                data[2] = b.get_data_array()[:]
                gridded_scene[rgb_name] = new_info
                del data, new_info

            # Create composites that satpy couldn't complete until after remapping
            composite_names = [x for x in f.wishlist if not isinstance(x, DatasetID)]
            if composite_names:
                tmp_scene = Scene()
                for k, v in gridded_scene.items():
                    if not isinstance(v["sensor"], set):
                        v["sensor"] = set([v["sensor"]])  # turn sensor back in to a set to match satpy usage
                    tmp_scene[v["id"]] = DataArray(v.get_data_array(), attrs=v)
                    tmp_scene[v["id"]].attrs["area"] = this_grid_definition.to_satpy_area()
                    # tmp_scene[v["id"]].info = {}
                    if v["sensor"] not in tmp_scene.attrs["sensor"]:
                        tmp_scene.attrs["sensor"].extend(v["sensor"])
                # Overwrite the wishlist that will include the above assigned datasets
                tmp_scene.wishlist = f.wishlist
                for cname in composite_names:
                    tmp_scene.compositors[cname] = tmp_scene.cpl.load_compositor(cname, tmp_scene.attrs["sensor"])
                tmp_scene.compute()
                tmp_scene.unload()
                # Add any new Datasets to our P2G Scene if SatPy created them
                for ds in tmp_scene:
                    ds_id = DatasetID.from_dict(ds.attrs)
                    if ds_id.name not in gridded_scene:
                        LOG.debug("Adding Dataset from SatPy Commpositing: %s", ds_id)
                        gridded_scene[ds_id.name] = dataarray_to_gridded_product(ds)
                        gridded_scene[ds_id.name]["grid_definition"] = this_grid_definition
                # Remove any Products from P2G Scene that SatPy decided it didn't need anymore
                for k, v in list(gridded_scene.items()):
                    if v["id"].name not in tmp_scene:
                        LOG.debug("Removing Dataset that is no longer used: %s", k)
                        del gridded_scene[k]
                del tmp_scene, v

        if isinstance(gridded_scene, Scene):
            LOG.debug("Converting satpy Scene to P2G Gridded Scene")
            # Convert it to P2G Gridded Scene
            gridded_scene = convert_satpy_to_p2g_gridded(f, gridded_scene)

        # Writer
        try:
            LOG.info("Creating output from data mapped to grid %s", grid_name)
            backend.create_output_from_scene(gridded_scene, **args.subgroup_args["Backend Output Creation"])
        except (ValueError, KeyError):
            LOG.debug("Writer output creation exception: ", exc_info=True)
            LOG.error("Writer output creation failed (see log for details)")
            status_to_return |= STATUS_BACKEND_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        LOG.info("Processing data for grid %s complete", grid_name)
        # Force deletion and eventual garbage collection of the scene objects
        del gridded_scene
    del scene
    return status_to_return