示例#1
0
    def test_scene_available_datasets(self, input_files, expected_names, expected_data_res, expected_geo_res):
        """Test that datasets are available."""
        scene = Scene(reader='modis_l1b', filenames=input_files)
        names = scene.available_dataset_names()
        assert len(names) > 0
        # Geolocation datasets plus every expected channel must be present.
        for required_name in ('longitude', 'latitude', *expected_names):
            assert required_name in names

        data_ids_by_res = {res: [] for res in expected_data_res}
        geo_ids_by_res = {res: [] for res in expected_geo_res}
        # Make sure that every resolution from the reader is what we expect
        for ds_id in scene.available_dataset_ids():
            resolution = ds_id['resolution']
            if ds_id['name'] in ('longitude', 'latitude'):
                assert resolution in expected_geo_res
                geo_ids_by_res[resolution].append(ds_id)
            else:
                assert resolution in expected_data_res
                data_ids_by_res[resolution].append(ds_id)

        # Make sure that every resolution we expect has at least one dataset
        for exp_res, matching_ids in data_ids_by_res.items():
            assert matching_ids, f"Missing datasets for data resolution {exp_res}"
        for exp_res, matching_ids in geo_ids_by_res.items():
            assert matching_ids, f"Missing geo datasets for geo resolution {exp_res}"
示例#2
0
def step_impl_user_checks_availability(context):
    """Collect the available dataset IDs and stash them on the behave context."""
    from datetime import datetime

    from satpy import Scene, find_files_and_readers

    os.chdir("/tmp/")
    matched_files = find_files_and_readers(
        sensor="viirs",
        start_time=datetime(2015, 3, 11, 11, 20),
        end_time=datetime(2015, 3, 11, 11, 26),
    )
    scene = Scene(filenames=matched_files)
    context.available_dataset_ids = scene.available_dataset_ids()
示例#3
0
def step_impl(context):
    """Find matching VIIRS files and record their available dataset IDs."""
    from datetime import datetime

    from satpy import Scene, find_files_and_readers

    os.chdir("/tmp/")
    window_start = datetime(2015, 3, 11, 11, 20)
    window_end = datetime(2015, 3, 11, 11, 26)
    found = find_files_and_readers(sensor="viirs",
                                   start_time=window_start,
                                   end_time=window_end)
    scene = Scene(filenames=found)
    context.available_dataset_ids = scene.available_dataset_ids()
示例#4
0
    def read(self, filename, fields=None, **kwargs):
        """Read *filename* with the configured satpy reader.

        Parameters:
            filename: Object with a ``.path`` attribute pointing to the file.
            fields: Optional iterable of dataset names (str) or DataID-like
                objects to load.  If ``None``, all available datasets are
                loaded.
            **kwargs: Passed through to ``Scene.load``.

        Returns:
            An ``xarray.Dataset`` with one variable per loaded field.
        """
        scene = Scene(reader=self.satpy_reader, filenames=[filename.path])

        # If the user has not passed any fields to us, we load all per default.
        if fields is None:
            fields = scene.available_dataset_ids()

        # Normalize to a list so the type probe below works even when the
        # caller passed a set or other non-indexable iterable.
        fields = list(fields)
        if not fields:
            # Nothing to load -- return an empty dataset instead of letting
            # the ``fields[0]`` probe raise IndexError.
            return xr.Dataset()

        # Load all selected fields
        scene.load(fields, **kwargs)

        # Key the arrays by plain name, whether the caller passed strings or
        # DataID-like objects (which carry a ``.name`` attribute).
        if isinstance(fields[0], str):
            data_arrays = {field: scene.get(field) for field in fields}
        else:
            data_arrays = {field.name: scene.get(field) for field in fields}

        for name, array in data_arrays.items():
            array.name = name

        dataset = xr.merge(data_arrays.values())

        return dataset
示例#5
0
    def read(self, filename, **kwargs):
        """Read *filename* with the configured satpy reader into a dataset.

        Loads the configured standard and user fields (or everything the
        reader offers if none are configured), converts the scene to an
        ``xarray.Dataset``, normalizes the time coordinate, applies the name
        mappings, and strips per-variable attributes.

        Parameters:
            filename: Object with a ``.path`` attribute pointing to the file.
            **kwargs: Passed through to ``Scene.load``.  A ``mapping`` entry
                is consumed here (variable renames) and not forwarded.

        Returns:
            An ``xarray.Dataset``.
        """
        scene = Scene(reader=self.satpy_reader, filenames=[filename.path])

        # Consume the user mapping before loading so it is not forwarded to
        # ``Scene.load`` via **kwargs.  We do not want to deal with
        # user-defined names in the further processing; we use our own
        # mapping first and apply the user's at the end.
        user_mapping = kwargs.pop("mapping", None)

        # We need to import at least the standard fields
        fields = self.standard_fields | set(self.user_fields)

        # If the user has not configured any fields, we load all per default.
        # NOTE: checking ``fields is None`` here would be dead code -- the
        # set union above can never yield None.  An *empty* union is the
        # "nothing configured" case.
        if not fields:
            fields = scene.available_dataset_ids()

        # Load all selected fields
        scene.load(fields, **kwargs)

        # convert into dataset
        dataset = scene.to_xarray_dataset()

        # convert string array to datetime array
        dataset['time_utc'] = dataset['time_utc'].astype("datetime64[ns]")

        # delete useless coords
        dataset = dataset.drop_vars(['time', 'crs'])
        # rename standard variables
        dataset = dataset.rename(self.mapping)

        if user_mapping is not None:
            dataset = dataset.rename(user_mapping)

        # clean attributes (use an empty dict -- attrs is a mapping)
        for var in dataset.data_vars:
            dataset[var].attrs = {}

        return dataset
示例#6
0
    def _init_product_select_page(self):
        """Populate the product-selection table and filter lists.

        Builds one satpy ``Scene`` per selected file, collects the union of
        their available dataset IDs, and fills the ID table plus the
        per-name and per-level filter list widgets.  Returns early if the
        selected files have not changed since the last call.
        """
        if self._selected_files == self._filenames:
            return

        # Disconnect the signals until we are done setting up the widgets
        self._disconnect_product_select_complete()

        self._selected_files = self._filenames.copy()
        all_available_products = set()
        for fn in self._selected_files:
            # Tuple key so it is hashable for the self.scenes cache.
            these_files = tuple(sorted([fn]))
            # TODO: We need to be able to figure out how many paths go to each
            #       Scene (add to satpy as utility function)
            if these_files in self.scenes:
                continue
            reader = self.ui.readerComboBox.currentText()
            scn = Scene(reader=reader, filenames=these_files)
            self.scenes[these_files] = scn
            all_available_products.update(scn.available_dataset_ids())

        # update the widgets
        all_available_products = sorted(all_available_products)
        self.ui.selectIDTable.setRowCount(len(all_available_products))
        # name and level
        self.ui.selectIDTable.setColumnCount(2)
        self.ui.selectIDTable.setHorizontalHeaderLabels(['name', 'level'])
        # (raw value, pretty label) pairs gathered for the filter lists below
        properties = OrderedDict((
            ('name', set()),
            ('level', set()),
        ))
        for idx, ds_id in enumerate(all_available_products):
            # NOTE(review): the name is upper-cased only when a level is
            # present -- presumably for pressure-level products; confirm.
            pretty_name = ds_id.name.upper() if ds_id.level is not None else ds_id.name
            properties['name'].add((ds_id.name, pretty_name))
            pretty_level = "{:d} hPa".format(ds_id.level) if ds_id.level is not None else 'NA'
            properties['level'].add((ds_id.level, pretty_level))

            # Column 0: checkable, non-editable name item; raw name stored
            # in the UserRole for later retrieval.
            item = QtWidgets.QTableWidgetItem(pretty_name)
            item.setData(QtCore.Qt.UserRole, ds_id.name)
            item.setFlags((item.flags() ^ QtCore.Qt.ItemIsEditable) | QtCore.Qt.ItemIsUserCheckable)
            item.setCheckState(QtCore.Qt.Unchecked)
            self.ui.selectIDTable.setItem(idx, 0, item)
            # Column 1: level item; raw level stored in the UserRole.
            item = QtWidgets.QTableWidgetItem(pretty_level)
            item.setData(QtCore.Qt.UserRole, ds_id.level)
            item.setFlags((item.flags() ^ QtCore.Qt.ItemIsEditable) | QtCore.Qt.ItemIsUserCheckable)
            self.ui.selectIDTable.setItem(idx, 1, item)

        # Update the per-property lists
        names = sorted(properties['name'])
        for name, pretty_name in names:
            item = QtWidgets.QListWidgetItem(pretty_name)
            item.setData(QtCore.Qt.UserRole, name)
            item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
            item.setCheckState(QtCore.Qt.Unchecked)
            self.ui.selectByNameList.addItem(item)
        levels = sorted(properties['level'])
        for level, pretty_level in levels:
            item = QtWidgets.QListWidgetItem(pretty_level)
            item.setData(QtCore.Qt.UserRole, level)
            item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
            item.setCheckState(QtCore.Qt.Unchecked)
            self.ui.selectByLevelList.addItem(item)

        # Reconnect now that the widgets are in their final state.
        self._connect_product_select_complete()
示例#7
0

def hex_to_rgb(value):
    """Convert a hex color string like ``'#1a2b3c'`` to a list ``[r, g, b]``."""
    digits = value.lstrip('#')
    width = len(digits) // 3
    return [int(digits[start:start + width], 16)
            for start in range(0, len(digits), width)]


if __name__ == '__main__':
    # Expect exactly one argument: the path to a MAIA file.
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " MAIA_file ")
        sys.exit()

    fnmaia = sys.argv[1]
    maia_scene = Scene(reader='maia', filenames=[fnmaia])
    print(maia_scene.available_dataset_ids())
    maia_scene.load(["CloudType", "ct", "cma", "cma_conf",
                     'opaq_cloud', "CloudTopPres",
                     "CloudTopTemp", "Alt_surface"])

    # CloudType is a bit field containing the actual "ct" with values
    # from 0 to 20 which can be interpreted according to the cpool colormap

    # "ct" can be displayed in black and white:
    maia_scene.show("ct")

    # but it is better to palettize the image:
    # step 1: creation of the palette from the cpool hex colors
    mycolors = []
    for i in range(21):
        mycolors.append(hex_to_rgb(cpool[i]))
示例#8
0

def hex_to_rgb(value):
    """Parse an ``'#rrggbb'``-style hex color into a list of channel ints."""
    stripped = value.lstrip('#')
    chunk = len(stripped) // 3
    channels = []
    for offset in range(0, len(stripped), chunk):
        channels.append(int(stripped[offset:offset + chunk], 16))
    return channels


if __name__ == '__main__':
    # Expect exactly one argument: the path to a MAIA file.
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " MAIA_file ")
        sys.exit()

    fnmaia = sys.argv[1]
    maia_scene = Scene(reader='maia', filenames=[fnmaia])
    print(maia_scene.available_dataset_ids())
    maia_scene.load([
        "CloudType", "ct", "cma", "cma_conf", 'opaq_cloud', "CloudTopPres",
        "CloudTopTemp", "Alt_surface"
    ])

    # CloudType is a bit field containing the actual "ct" with values
    # from 0 to 20 which can be interpreted according to the cpool colormap

    # "ct" can be displayed in black and white:
    maia_scene.show("ct")

    # but it is better to palettize the image:
    # step 1: creation of the palette from the cpool hex colors
    mycolors = []
    for i in range(21):