Code Example #1
File: test_cf.py  Project: davidh-ssec/satpy
 def test_encoding_kwarg(self):
     from satpy import Scene
     import xarray as xr
     import tempfile
     scn = Scene()
     start_time = datetime(2018, 5, 30, 10, 0)
     end_time = datetime(2018, 5, 30, 10, 15)
     scn['test-array'] = xr.DataArray([1, 2, 3],
                                      attrs=dict(start_time=start_time,
                                                 end_time=end_time))
     try:
         handle, filename = tempfile.mkstemp()
         os.close(handle)
         encoding = {'test-array': {'dtype': 'int8',
                                    'scale_factor': 0.1,
                                    'add_offset': 0.0,
                                    '_FillValue': 3}}
         scn.save_datasets(filename=filename, encoding=encoding, writer='cf')
         import h5netcdf as nc4
         with nc4.File(filename) as f:
             self.assertTrue(all(f['test-array'][:] == [10, 20, 30]))
             self.assertTrue(f['test-array'].attrs['scale_factor'] == 0.1)
             self.assertTrue(f['test-array'].attrs['_FillValue'] == 3)
             # check that dtype behaves as int8
             self.assertTrue(np.iinfo(f['test-array'][:].dtype).max == 127)
     finally:
         os.remove(filename)
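For context (not part of the original test): because the CF writer stored the data as int8 with scale_factor=0.1 and add_offset=0.0, the raw on-disk values are [10, 20, 30]. A minimal sketch of reading the file back with xarray, which applies the CF decoding automatically, assuming it runs before the cleanup in the finally block:

import xarray as xr
decoded = xr.open_dataset(filename)   # mask_and_scale decoding is on by default
print(decoded['test-array'].values)   # expected: [1. 2. 3.]
decoded.close()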
Code Example #2
File: geocat2scmi.py  Project: khunger/polar2grid
def main():
    from satpy import Scene
    from satpy.writers.scmi import add_backend_argument_groups as add_writer_argument_groups
    import argparse
    parser = argparse.ArgumentParser(description="Convert GEOCAT Level 1 and 2 to AWIPS SCMI files")
    parser.add_argument('-v', '--verbose', dest='verbosity', action="count", default=0,
                        help='each occurrence increases verbosity 1 level through ERROR-WARNING-INFO-DEBUG (default INFO)')
    parser.add_argument('-l', '--log', dest="log_fn", default=None,
                        help="specify the log filename")
    subgroups = add_scene_argument_groups(parser)
    subgroups += add_writer_argument_groups(parser)
    args = parser.parse_args()

    scene_args = {ga.dest: getattr(args, ga.dest) for ga in subgroups[0]._group_actions}
    load_args = {ga.dest: getattr(args, ga.dest) for ga in subgroups[1]._group_actions}
    writer_init_args = {ga.dest: getattr(args, ga.dest) for ga in subgroups[2]._group_actions}
    writer_call_args = {ga.dest: getattr(args, ga.dest) for ga in subgroups[3]._group_actions}

    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=levels[min(3, args.verbosity)], filename=args.log_fn)

    scn = Scene(**scene_args)
    scn.load(load_args['datasets'])
    writer_args = {}
    writer_args.update(writer_init_args)
    writer_args.update(writer_call_args)
    scn.save_datasets(writer='scmi', **writer_args)
Code Example #3
File: test_cf.py  Project: davidh-ssec/satpy
 def test_header_attrs(self):
     from satpy import Scene
     import xarray as xr
     import tempfile
     scn = Scene()
     start_time = datetime(2018, 5, 30, 10, 0)
     end_time = datetime(2018, 5, 30, 10, 15)
     scn['test-array'] = xr.DataArray([1, 2, 3],
                                      attrs=dict(start_time=start_time,
                                                 end_time=end_time))
     try:
         handle, filename = tempfile.mkstemp()
         os.close(handle)
         header_attrs = {'sensor': 'SEVIRI',
                         'orbit': None}
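         # the test below expects None-valued attributes such as 'orbit' to be dropped by the writer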
         scn.save_datasets(filename=filename,
                           header_attrs=header_attrs,
                           writer='cf')
         import h5netcdf as nc4
         with nc4.File(filename) as f:
             self.assertTrue(f.attrs['sensor'] == 'SEVIRI')
             self.assertTrue('sensor' in f.attrs.keys())
             self.assertTrue('orbit' not in f.attrs.keys())
     finally:
         os.remove(filename)
Code Example #4
def step_impl(context, composite):
    from satpy import Scene
    scn = Scene(reader=context.dformat,
                filenames=get_all_files(os.path.join(context.data_path, 'data'),
                                        '*'))
    scn.load([composite])
    context.scn = scn
    context.composite = composite
Code Example #5
File: steps-load.py  Project: davidh-ssec/satpy
def step_impl(context):
    from satpy import Scene, find_files_and_readers
    from datetime import datetime
    os.chdir("/tmp/")
    reader_files = find_files_and_readers(sensor="viirs",
                                          start_time=datetime(2015, 3, 11, 11, 20),
                                          end_time=datetime(2015, 3, 11, 11, 26))
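    # find_files_and_readers returns a dict mapping each reader name to its matching
    # files; Scene accepts that mapping directly via the filenames keyword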
    scn = Scene(filenames=reader_files)
    context.available_dataset_ids = scn.available_dataset_ids()
Code Example #6
File: steps-load.py  Project: davidh-ssec/satpy
def step_impl(context):
    from satpy import Scene, find_files_and_readers
    from datetime import datetime
    os.chdir("/tmp/")
    readers_files = find_files_and_readers(sensor='viirs',
                                           start_time=datetime(2015, 3, 11, 11, 20),
                                           end_time=datetime(2015, 3, 11, 11, 26))
    scn = Scene(filenames=readers_files)
    scn.load(["M02"])
    context.scene = scn
Code Example #7
File: test_cf.py  Project: davidh-ssec/satpy
 def test_save_array(self):
     from satpy import Scene
     import xarray as xr
     import tempfile
     scn = Scene()
     start_time = datetime(2018, 5, 30, 10, 0)
     end_time = datetime(2018, 5, 30, 10, 15)
     scn['test-array'] = xr.DataArray([1, 2, 3],
                                      attrs=dict(start_time=start_time,
                                                 end_time=end_time))
     try:
         handle, filename = tempfile.mkstemp()
         os.close(handle)
         scn.save_datasets(filename=filename, writer='cf')
         import h5netcdf as nc4
         with nc4.File(filename) as f:
             self.assertTrue(all(f['test-array'][:] == [1, 2, 3]))
     finally:
         os.remove(filename)
Code Example #8
File: test_cf.py  Project: davidh-ssec/satpy
 def test_bounds(self):
     from satpy import Scene
     import xarray as xr
     import tempfile
     scn = Scene()
     start_time = datetime(2018, 5, 30, 10, 0)
     end_time = datetime(2018, 5, 30, 10, 15)
     test_array = np.array([[1, 2], [3, 4]]).reshape(2, 2, 1)
     scn['test-array'] = xr.DataArray(test_array,
                                      dims=['x', 'y', 'time'],
                                      coords={'time': [np.datetime64('2018-05-30T10:05:00')]},
                                      attrs=dict(start_time=start_time,
                                                 end_time=end_time))
     try:
         handle, filename = tempfile.mkstemp()
         os.close(handle)
         scn.save_datasets(filename=filename, writer='cf')
         import h5netcdf as nc4
         with nc4.File(filename) as f:
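             # raw time_bnds values are seconds relative to the time coordinate
             # (10:05:00): start_time 10:00 -> -300, end_time 10:15 -> +600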
             self.assertTrue(all(f['time_bnds'][:] == np.array([-300.,  600.])))
     finally:
         os.remove(filename)
Code Example #9
File: glue.py  Project: davidh-ssec/polar2grid
def main(argv=sys.argv[1:]):
    global LOG
    from satpy import Scene
    from satpy.resample import get_area_def
    from satpy.writers import compute_writer_results
    from dask.diagnostics import ProgressBar
    from polar2grid.core.script_utils import (
        setup_logging, rename_log_file, create_exc_handler)
    import argparse
    prog = os.getenv('PROG_NAME', sys.argv[0])
    # "usage: " will be printed at the top of this:
    usage = """
    %(prog)s -h
see available products:
    %(prog)s -r <reader> -w <writer> --list-products -f file1 [file2 ...]
basic processing:
    %(prog)s -r <reader> -w <writer> [options] -f file1 [file2 ...]
basic processing with limited products:
    %(prog)s -r <reader> -w <writer> [options] -p prod1 prod2 -f file1 [file2 ...]
"""
    parser = argparse.ArgumentParser(prog=prog, usage=usage,
                                     description="Load, composite, resample, and save datasets.")
    parser.add_argument('-v', '--verbose', dest='verbosity', action="count", default=0,
                        help='each occurrence increases verbosity 1 level through ERROR-WARNING-INFO-DEBUG (default INFO)')
    parser.add_argument('-l', '--log', dest="log_fn", default=None,
                        help="specify the log filename")
    parser.add_argument('--progress', action='store_true',
                        help="show processing progress bar (not recommended for logged output)")
    parser.add_argument('--num-workers', type=int, default=4,
                        help="specify number of worker threads to use (default: 4)")
    parser.add_argument('--match-resolution', dest='preserve_resolution', action='store_false',
                        help="When using the 'native' resampler for composites, don't save data "
                             "at its native resolution, use the resolution used to create the "
                             "composite.")
    parser.add_argument('-w', '--writers', nargs='+',
                        help='writers to save datasets with')
    parser.add_argument("--list-products", dest="list_products", action="store_true",
                        help="List available reader products and exit")
    subgroups = add_scene_argument_groups(parser)
    subgroups += add_resample_argument_groups(parser)

    argv_without_help = [x for x in argv if x not in ["-h", "--help"]]
    args, remaining_args = parser.parse_known_args(argv_without_help)

    # get the logger if we know the readers and writers that will be used
    if args.reader is not None and args.writers is not None:
        glue_name = args.reader + "_" + "-".join(args.writers or [])
        LOG = logging.getLogger(glue_name)
    # add writer arguments
    if args.writers is not None:
        for writer in (args.writers or []):
            parser_func = WRITER_PARSER_FUNCTIONS.get(writer)
            if parser_func is None:
                continue
            subgroups += parser_func(parser)
    args = parser.parse_args(argv)

    if args.reader is None:
        parser.print_usage()
        parser.exit(1, "\nERROR: Reader must be provided (-r flag).\n"
                       "Supported readers:\n\t{}\n".format('\n\t'.join(['abi_l1b', 'ahi_hsd', 'hrit_ahi'])))
    if args.writers is None:
        parser.print_usage()
        parser.exit(1, "\nERROR: Writer must be provided (-w flag) with one or more writer.\n"
                       "Supported writers:\n\t{}\n".format('\n\t'.join(['geotiff'])))

    def _args_to_dict(group_actions):
        return {ga.dest: getattr(args, ga.dest) for ga in group_actions if hasattr(args, ga.dest)}
    scene_args = _args_to_dict(subgroups[0]._group_actions)
    load_args = _args_to_dict(subgroups[1]._group_actions)
    resample_args = _args_to_dict(subgroups[2]._group_actions)
    writer_args = {}
    for idx, writer in enumerate(args.writers):
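        # each writer parser function is expected to have added two argument groups
        # (writer init kwargs, then save/call kwargs); slice them out per writer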
        sgrp1, sgrp2 = subgroups[3 + idx * 2: 5 + idx * 2]
        wargs = _args_to_dict(sgrp1._group_actions)
        if sgrp2 is not None:
            wargs.update(_args_to_dict(sgrp2._group_actions))
        writer_args[writer] = wargs
        # get default output filename
        if 'filename' in wargs and wargs['filename'] is None:
            wargs['filename'] = get_default_output_filename(args.reader, writer)

    if not args.filenames:
        parser.print_usage()
        parser.exit(1, "\nERROR: No data files provided (-f flag)\n")

    # Prepare logging
    rename_log = False
    if args.log_fn is None:
        rename_log = True
        args.log_fn = glue_name + "_fail.log"
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    setup_logging(console_level=levels[min(3, args.verbosity)], log_filename=args.log_fn)
    logging.getLogger('rasterio').setLevel(levels[min(2, args.verbosity)])
    sys.excepthook = create_exc_handler(LOG.name)
    if levels[min(3, args.verbosity)] > logging.DEBUG:
        import warnings
        warnings.filterwarnings("ignore")
    LOG.debug("Starting script with arguments: %s", " ".join(sys.argv))

    # Set up dask and the number of workers
    if args.num_workers:
        from multiprocessing.pool import ThreadPool
        dask.config.set(pool=ThreadPool(args.num_workers))

    # Parse provided files and search for files if provided directories
    scene_args['filenames'] = get_input_files(scene_args['filenames'])
    # Create a Scene, analyze the provided files
    LOG.info("Sorting and reading input files...")
    try:
        scn = Scene(**scene_args)
    except ValueError as e:
        LOG.error("{} | Enable debug message (-vvv) or see log file for details.".format(str(e)))
        LOG.debug("Further error information: ", exc_info=True)
        return -1
    except OSError:
        LOG.error("Could not open files. Enable debug message (-vvv) or see log file for details.")
        LOG.debug("Further error information: ", exc_info=True)
        return -1

    if args.list_products:
        print("\n".join(sorted(scn.available_dataset_names(composites=True))))
        return 0

    # Rename the log file
    if rename_log:
        rename_log_file(glue_name + scn.attrs['start_time'].strftime("_%Y%m%d_%H%M%S.log"))

    # Load the actual data arrays and metadata (lazy loaded as dask arrays)
    if load_args['products'] is None:
        try:
            reader_mod = importlib.import_module('polar2grid.readers.' + scene_args['reader'])
            load_args['products'] = reader_mod.DEFAULT_PRODUCTS
            LOG.info("Using default product list: {}".format(load_args['products']))
        except (ImportError, AttributeError):
            LOG.error("No default products list set, please specify with `--products`.")
            return -1

    LOG.info("Loading product metadata from files...")
    scn.load(load_args['products'])

    resample_kwargs = resample_args.copy()
    areas_to_resample = resample_kwargs.pop('grids')
    grid_configs = resample_kwargs.pop('grid_configs')
    resampler = resample_kwargs.pop('resampler')

    if areas_to_resample is None and resampler in [None, 'native']:
        # no areas specified
        areas_to_resample = ['MAX']
    elif areas_to_resample is None:
        raise ValueError("Resampling method specified (--method) without any destination grid/area (-g flag).")
    elif not areas_to_resample:
        # they don't want any resampling (they used '-g' with no args)
        areas_to_resample = [None]

    has_custom_grid = any(g not in ['MIN', 'MAX', None] for g in areas_to_resample)
    if has_custom_grid and resampler == 'native':
        LOG.error("Resampling method 'native' can only be used with 'MIN' or 'MAX' grids "
                  "(use 'nearest' method instead).")
        return -1

    p2g_grid_configs = [x for x in grid_configs if x.endswith('.conf')]
    pyresample_area_configs = [x for x in grid_configs if not x.endswith('.conf')]
    if not grid_configs or p2g_grid_configs:
        # if we were given p2g grid configs or we weren't given any to choose from
        from polar2grid.grids import GridManager
        grid_manager = GridManager(*p2g_grid_configs)
    else:
        grid_manager = {}

    if pyresample_area_configs:
        from pyresample.utils import parse_area_file
        custom_areas = parse_area_file(pyresample_area_configs)
        custom_areas = {x.area_id: x for x in custom_areas}
    else:
        custom_areas = {}

    ll_bbox = resample_kwargs.pop('ll_bbox')
    if ll_bbox:
        scn = scn.crop(ll_bbox=ll_bbox)

    wishlist = scn.wishlist.copy()
    preserve_resolution = get_preserve_resolution(args, resampler, areas_to_resample)
    if preserve_resolution:
        preserved_products = set(wishlist) & set(scn.datasets.keys())
        resampled_products = set(wishlist) - preserved_products

        # original native scene
        to_save = write_scene(scn, args.writers, writer_args, preserved_products)
    else:
        preserved_products = set()
        resampled_products = set(wishlist)
        to_save = []

    LOG.debug("Products to preserve resolution for: {}".format(preserved_products))
    LOG.debug("Products to use new resolution for: {}".format(resampled_products))
    for area_name in areas_to_resample:
        if area_name is None:
            # no resampling
            area_def = None
        elif area_name == 'MAX':
            area_def = scn.max_area()
        elif area_name == 'MIN':
            area_def = scn.min_area()
        elif area_name in custom_areas:
            area_def = custom_areas[area_name]
        elif area_name in grid_manager:
            from pyresample.geometry import DynamicAreaDefinition
            p2g_def = grid_manager[area_name]
            area_def = p2g_def.to_satpy_area()
            if isinstance(area_def, DynamicAreaDefinition) and p2g_def['cell_width'] is not None:
                area_def = area_def.freeze(scn.max_area(),
                                           resolution=(abs(p2g_def['cell_width']), abs(p2g_def['cell_height'])))
        else:
            area_def = get_area_def(area_name)

        if resampler is None and area_def is not None:
            rs = 'native' if area_name in ['MIN', 'MAX'] else 'nearest'
            LOG.debug("Setting default resampling to '{}' for grid '{}'".format(rs, area_name))
        else:
            rs = resampler

        if area_def is not None:
            LOG.info("Resampling data to '%s'", area_name)
            new_scn = scn.resample(area_def, resampler=rs, **resample_kwargs)
        elif not preserve_resolution:
            # the user didn't want to resample to any areas
            # the user also requested that we don't preserve resolution
            # which means we have to save this Scene's datasets
            # because they won't be saved
            new_scn = scn

        to_save = write_scene(new_scn, args.writers, writer_args, resampled_products, to_save=to_save)

    if args.progress:
        pbar = ProgressBar()
        pbar.register()

    LOG.info("Computing products and saving data to writers...")
    compute_writer_results(to_save)
    LOG.info("SUCCESS")
    return 0
Code Example #10
def get_scene_obj(file_list,
                  latlon_extent,
                  sensor,
                  width=750,
                  height=750,
                  tmp_cache=False,
                  resample_method='native_bilinear'):
    """Get a Scene object and resample it to a small box
    centered on the lat/lon point.

    inputs:
    file_list: list of netCDF L1b ABI files, must contain bands 1,2,3
    latlon_extent: extent of displayed region in latlon:
        [min_lon, min_lat, max_lon, max_lat]
    width: number of resampled image pixels in width (x-dimension)
    height: number of resampled image pixels in height (y-dimension)

    tmp_cache: optional keyword, set to True to copy the located files
    to a temporary dir (from tempfile) before loading.
    Note the temporary files are NOT cleaned up.
    Use this option if the data_home access is slow or limited in some
    way, which can be mitigated by copying to a temp file on the local
    filesystem

    resample_method: string keyword to specify the resampling method.
    valid options are:
    nearest: perform nearest neighbor resampling in one step
    bilinear: perform bilinear interpolation in one step
    native_nearest: perform a native interpolation first, to upsample
       lower resolution bands to the highest native resolution; 
       then perform nearest neighbor interpolation to the output grid.
    native_bilinear: perform a native interpolation first, to upsample
       lower resolution bands to the highest native resolution; 
       then perform bilinear interpolation to the output grid.

    outputs: the satpy Scene object.

    """

    valid_resample_methods = [
        'nearest', 'bilinear', 'native_nearest', 'native_bilinear'
    ]

    if resample_method not in valid_resample_methods:
        raise ValueError('resample_method ' + resample_method +
                         ' is not valid')

    if tmp_cache:
        tdir = tempfile.mkdtemp()
        cached_file_list = []
        for f in file_list:
            src_f = f
            dst_f = os.path.join(tdir, os.path.split(f)[-1])
            shutil.copyfile(src_f, dst_f)
            cached_file_list.append(dst_f)
        scn = Scene(reader=sensor, filenames=cached_file_list)
    else:
        scn = Scene(reader=sensor, filenames=file_list)

    scn.load(['true_color'])

    my_area = pyresample.create_area_def('testC',
                                         "epsg:3857",
                                         width=width,
                                         height=height,
                                         area_extent=latlon_extent,
                                         units='degrees')

    if resample_method.startswith('native'):
        tmp_scn = scn.resample(resampler='native')
    else:
        tmp_scn = scn

    # take the part after the underscore (or the whole string if there is
    # no underscore); this is the resampling method to apply after the
    # optional native resampling step.
    method = resample_method.split('_')[-1]
    new_scn = tmp_scn.resample(my_area, resampler=method)

    return new_scn
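A hypothetical usage sketch (the glob pattern, the 'abi_l1b' reader name, the extent and the output filename below are illustrative placeholders, not part of the original module):

import glob

# ABI L1b files containing bands 1-3, resampled to a box around 30N, 95W
file_list = glob.glob('/data/abi_l1b/OR_ABI-L1b-RadC-*.nc')  # placeholder path
scn = get_scene_obj(file_list, [-100, 25, -90, 35], 'abi_l1b',
                    resample_method='native_bilinear')
scn.save_dataset('true_color', filename='true_color.png')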
Code Example #11
    def setUp(self):
        """Create a test scene."""
        tstart = datetime(2019, 4, 1, 12, 0)
        tend = datetime(2019, 4, 1, 12, 15)
        data_visir = [[1, 2], [3, 4]]
        y_visir = [1, 2]
        x_visir = [1, 2]
        z_visir = [1, 2, 3, 4, 5, 6, 7]
        qual_data = [[1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7]]
        time_vis006 = [1, 2]
        lat = 33.0 * np.array([[1, 2], [3, 4]])
        lon = -13.0 * np.array([[1, 2], [3, 4]])
        common_attrs = {
            'start_time': tstart,
            'end_time': tend,
            'platform_name': 'tirosn',
            'orbit_number': 99999
        }
        vis006 = xr.DataArray(data_visir,
                              dims=('y', 'x'),
                              coords={
                                  'y': y_visir,
                                  'x': x_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'image0',
                                  'id_tag': 'ch_r06',
                                  'coordinates': 'lat lon'
                              })

        ir_108 = xr.DataArray(data_visir,
                              dims=('y', 'x'),
                              coords={
                                  'y': y_visir,
                                  'x': x_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'image1',
                                  'id_tag': 'ch_tb11',
                                  'coordinates': 'lat lon'
                              })
        qual_f = xr.DataArray(qual_data,
                              dims=('y', 'z'),
                              coords={
                                  'y': y_visir,
                                  'z': z_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'qual_flags',
                                  'id_tag': 'qual_flags'
                              })
        lat = xr.DataArray(lat,
                           dims=('y', 'x'),
                           coords={
                               'y': y_visir,
                               'x': x_visir
                           },
                           attrs={
                               'name': 'lat',
                               'standard_name': 'latitude',
                               'modifiers': np.array([])
                           })
        lon = xr.DataArray(lon,
                           dims=('y', 'x'),
                           coords={
                               'y': y_visir,
                               'x': x_visir
                           },
                           attrs={
                               'name': 'lon',
                               'standard_name': 'longitude',
                               'modifiers': np.array([])
                           })
        self.scene = Scene()
        self.scene.attrs['sensor'] = ['avhrr-1', 'avhrr-2', 'avhrr-3']
        scene_dict = {
            'image0': vis006,
            'image1': ir_108,
            'lat': lat,
            'lon': lon,
            'qual_flags': qual_f
        }
        for key in scene_dict:
            self.scene[key] = scene_dict[key]
            self.scene[key].attrs.update(common_attrs)
Code Example #12
File: welcome-ch.py  Project: loreclem/radarraster
min = int(sys.argv[5])

# year = 2018
# month = 8
# day = 17
# hour = 13
# min = 15
yearS = str(year)
yearS = yearS[2:]
monthS = "%02d" % month
dayS = "%02d" % day
hourS = "%02d" % hour
minS = "%02d" % min

filenames = glob("/var/tmp/cll/data/H-*MSG4*"+yearS+monthS+dayS+hourS+minS+"*__")
global_scene = Scene(reader="hrit_msg", filenames=filenames)

# first try, kept here only for reference
# global_scene.load(["HRV", "IR_108"])
# local_scene = global_scene.resample("ccs4")
# lonlats = local_scene["HRV"].area.get_lonlats()
# sza = sun_zenith_angle(local_scene.start_time, lonlats[0], lonlats[1])
# ds = DataArray(sza, dims=['y','x'])
# local_scene['sza'] = ds
# end of the first try, stuff below here is working again

global_scene.load(["ir108", "hrv", "IR_108", "hrv_with_ir"])
local_scene = global_scene.resample("ccs4")
local_scene.load(["hrv_with_ir", "IR_108"])

swiss = load_area("/opt/users/cll/cllwork/etc_work/areas.def", "ccs4")
Code Example #13
File: glue_legacy.py  Project: davidh-ssec/polar2grid
def main(argv=sys.argv[1:]):
    from polar2grid.core.script_utils import setup_logging, create_basic_parser, create_exc_handler, rename_log_file, ExtendAction
    from polar2grid.compositors import CompositorManager
    frontends = available_frontends()
    backends = available_backends()
    parser = create_basic_parser(description="Extract swath data, remap it, and write it to a new file format")
    parser.add_argument("frontend", choices=sorted(frontends.keys()),
                        help="Specify the swath extractor to use to read data (additional arguments are determined after this is specified)")
    parser.add_argument("backend", choices=sorted(backends.keys()),
                        help="Specify the backend to use to write data output (additional arguments are determined after this is specified)")
    parser.add_argument("--compositor-configs", nargs="*", default=None,
                        help="Specify alternative configuration file(s) for compositors")
    # don't include the help flag
    argv_without_help = [x for x in argv if x not in ["-h", "--help"]]
    args, remaining_args = parser.parse_known_args(argv_without_help)
    glue_name = args.frontend + "2" + args.backend
    LOG = logging.getLogger(glue_name)

    # Load compositor information (we can't know the compositor choices until we've loaded the configuration)
    compositor_manager = CompositorManager(config_files=args.compositor_configs)
    # Hack: argparse doesn't let you use choices and nargs=* on a positional argument
    parser.add_argument("compositors", choices=list(compositor_manager.keys()) + [[]], nargs="*",
                        help="Specify the compositors to apply to the provided scene (additional arguments are determined after this is specified)")

    # load the actual components we need
    farg_func = get_frontend_argument_func(frontends, args.frontend)
    fcls = get_frontend_class(frontends, args.frontend)
    barg_func = get_backend_argument_func(backends, args.backend)
    bcls = get_backend_class(backends, args.backend)

    # add_frontend_arguments(parser)
    subgroup_titles = []
    subgroup_titles += farg_func(parser)
    subgroup_titles += add_remap_argument_groups(parser)
    subgroup_titles += barg_func(parser)

    parser.add_argument('-f', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="List of files or directories to extract data from")
    parser.add_argument('-d', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="Data directories to look for input data files (equivalent to -f)")
    global_keywords = ("keep_intermediate", "overwrite_existing", "exit_on_error")
    args = parser.parse_args(argv, global_keywords=global_keywords, subgroup_titles=subgroup_titles)

    if not args.data_files:
        # FUTURE: When the -d flag is removed this won't be needed because -f will be required
        parser.print_usage()
        parser.exit(1, "ERROR: No data files provided (-f flag)\n")

    # Logs are renamed once the start date of the provided data is known
    rename_log = False
    if args.log_fn is None:
        rename_log = True
        args.log_fn = glue_name + "_fail.log"
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    setup_logging(console_level=levels[min(3, args.verbosity)], log_filename=args.log_fn)
    sys.excepthook = create_exc_handler(LOG.name)
    LOG.debug("Starting script with arguments: %s", " ".join(sys.argv))

    # Keep track of things going wrong to tell the user what went wrong (we want to create as much as possible)
    status_to_return = STATUS_SUCCESS

    # Compositor validation
    # XXX: Hack to make `polar2grid.sh crefl gtiff` work like legacy crefl2gtiff.sh script
    if args.subgroup_args['Frontend Swath Extraction'].get('no_compositors'):
        LOG.debug("Removing all compositors")
        args.compositors = []
    elif args.frontend == 'crefl':
        if args.backend in ['awips', 'scmi']:
            LOG.debug("Adding 'crefl_sharpen' compositor")
            args.compositors.append('crefl_sharpen' if args.backend == 'scmi' else 'crefl_sharpen_awips')
        else:
            LOG.debug("Adding 'true_color' compositor")
            args.compositors.append('true_color')
            if '--true-color' in sys.argv and 'true_color' not in args.compositors:
                LOG.debug("Adding 'true_color' compositor")
                args.compositors.append('true_color')
            if '--false-color' in sys.argv and 'false_color' not in args.compositors:
                LOG.debug("Adding 'false_color' compositor")
                args.compositors.append('false_color')

    # if "--true-color" in
    for c in args.compositors:
        if c not in compositor_manager:
            LOG.error("Compositor '%s' is unknown" % (c,))
            raise RuntimeError("Compositor '%s' is unknown" % (c,))

    # Frontend
    try:
        LOG.info("Initializing reader...")
        list_products = args.subgroup_args["Frontend Initialization"].pop("list_products")
        f = fcls(search_paths=args.data_files, **args.subgroup_args["Frontend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Frontend exception: ", exc_info=True)
        LOG.error("%s frontend failed to load and sort data files (see log for details)", args.frontend)
        return STATUS_FRONTEND_FAIL

    # Rename the log file
    if rename_log:
        rename_log_file(glue_name + f.begin_time.strftime("_%Y%m%d_%H%M%S.log"))

    if list_products:
        print("\n".join(sorted(f.available_product_names)))
        return STATUS_SUCCESS

    try:
        LOG.info("Initializing remapping...")
        remapper = Remapper(**args.subgroup_args["Remapping Initialization"])
        remap_kwargs = args.subgroup_args["Remapping"]
    except (ValueError, KeyError):
        LOG.debug("Remapping initialization exception: ", exc_info=True)
        LOG.error("Remapping initialization failed (see log for details)")
        return STATUS_REMAP_FAIL

    try:
        LOG.info("Initializing backend...")
        backend = bcls(**args.subgroup_args["Backend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Writer initialization exception: ", exc_info=True)
        LOG.error("Writer initialization failed (see log for details)")
        return STATUS_BACKEND_FAIL

    try:
        LOG.info("Initializing compositor objects...")
        compositor_objects = {}
        for c in args.compositors:
            compositor_objects[c] = compositor_manager.get_compositor(c, **args.global_kwargs)
    except (ValueError, KeyError):
        LOG.debug("Compositor initialization exception: ", exc_info=True)
        LOG.error("Compositor initialization failed (see log for details)")
        return STATUS_COMP_FAIL

    try:
        LOG.info("Extracting swaths from data files available...")
        scene = f.create_scene(**args.subgroup_args["Frontend Swath Extraction"])

        # Determine whether we have a satpy Scene and whether we should convert it
        # to a P2G Scene to continue processing
        resample_method = args.subgroup_args["Remapping"].get("remap_method")
        is_satpy_resample_method = resample_method in SATPY_RESAMPLERS
        if is_satpy_resample_method and not isinstance(scene, Scene):
            raise RuntimeError("Resampling method '{}' only supports 'satpy' readers".format(resample_method))
        elif not is_satpy_resample_method and isinstance(scene, Scene):
            # convert satpy scene to P2G Scene to be compatible with old P2G resamplers
            scene = convert_satpy_to_p2g_swath(f, scene)

        if isinstance(scene, Scene):
            if not scene.datasets:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                raise RuntimeError("satpy readers do not currently support saving intermediate files")
        else:
            if (isinstance(scene, Scene) and not scene.datasets) or not scene:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                filename = glue_name + "_swath_scene.json"
                LOG.info("Saving intermediate swath scene as '%s'", filename)
                scene.save(filename)
    except (ValueError, KeyError):
        LOG.debug("Frontend data extraction exception: ", exc_info=True)
        LOG.error("Frontend data extraction failed (see log for details)")
        return STATUS_FRONTEND_FAIL

    # What grids should we remap to (the user should tell us or the backend should have a good set of defaults)
    known_grids = backend.known_grids
    LOG.debug("Writer known grids: %r", known_grids)
    grids = remap_kwargs.pop("forced_grids", None)
    LOG.debug("Forced Grids: %r", grids)
    if resample_method == "sensor" and grids != ["sensor"]:
        LOG.error("'sensor' resampling method only supports the 'sensor' grid")
        return STATUS_GDETER_FAIL
    if not grids and not known_grids:
        # the user didn't ask for any grids and the backend doesn't have specific defaults
        LOG.error("No grids specified and no known defaults")
        return STATUS_GDETER_FAIL
    elif not grids:
        # the user didn't tell us what to do, so let's try everything the backend knows how to do
        grids = known_grids
    elif known_grids is not None:
        # the user told us what to do, let's make sure the backend can do it
        grids = list(set(grids) & set(known_grids))
        if not grids:
            LOG.error("%s backend doesn't know how to handle any of the grids specified", args.backend)
            return STATUS_GDETER_FAIL
    LOG.debug("Grids that will be mapped to: %r", grids)

    # Remap
    for grid_name in grids:
        LOG.info("Remapping to grid %s", grid_name)
        try:
            gridded_scene = remapper.remap_scene(scene, grid_name, **remap_kwargs)
            if args.keep_intermediate:
                filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                LOG.debug("saving intermediate gridded scene as '%s'", filename)
                gridded_scene.save(filename)
        except (ValueError, KeyError):
            LOG.debug("Remapping data exception: ", exc_info=True)
            LOG.error("Remapping data failed")
            status_to_return |= STATUS_REMAP_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        if not isinstance(scene, Scene):
            # Composition
            for c, comp in compositor_objects.items():
                try:
                    LOG.info("Running gridded scene through '%s' compositor", c)
                    gridded_scene = comp.modify_scene(gridded_scene, **args.subgroup_args[c + " Modification"])
                    if args.keep_intermediate:
                        filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                        LOG.debug("Updating saved intermediate gridded scene (%s) after compositor", filename)
                        gridded_scene.save(filename)
                except (KeyError, ValueError):
                    LOG.debug("Compositor Error: ", exc_info=True)
                    LOG.error("Could not properly modify scene using compositor '%s'" % (c,))
                    if args.exit_on_error:
                        raise RuntimeError("Could not properly modify scene using compositor '%s'" % (c,))

        if isinstance(f, ReaderWrapper) and not isinstance(gridded_scene, Scene):
            this_grid_definition = None
            # HACK: Create SatPy composites that were either separated before
            # resampling or needed resampling to be created
            rgbs = {}
            for product_name in gridded_scene.keys():
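                # products named '<name>_rgb_0/1/2' are separate channels of one RGB;
                # strip the 6-character suffix to recover the RGB composite name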
                rgb_name = product_name[:-6]
                # Keep track of one of the grid definitions
                if this_grid_definition is None:
                    this_grid_definition = gridded_scene[product_name]["grid_definition"]

                if product_name.endswith("rgb_0") or product_name.endswith("rgb_1") or product_name.endswith("rgb_2"):
                    if rgb_name not in rgbs:
                        rgbs[rgb_name] = [None, None, None]
                    chn_idx = int(product_name[-1])
                    rgbs[rgb_name][chn_idx] = product_name
            LOG.debug("Putting RGBs back together again")
            for rgb_name, v in rgbs.items():
                r = gridded_scene.pop(v[0])
                g = gridded_scene.pop(v[1])
                b = gridded_scene.pop(v[2])
                new_info = r.copy()
                new_info["grid_data"] = new_info["grid_data"].replace(v[0], rgb_name)
                new_info["product_name"] = rgb_name
                data = np.memmap(new_info["grid_data"], dtype=new_info["data_type"],
                                 mode="w+", shape=(3, new_info["grid_definition"]["height"], new_info["grid_definition"]["width"]))
                data[0] = r.get_data_array()[:]
                data[1] = g.get_data_array()[:]
                data[2] = b.get_data_array()[:]
                gridded_scene[rgb_name] = new_info
                del data, new_info

            # Create composites that satpy couldn't complete until after remapping
            composite_names = [x for x in f.wishlist if not isinstance(x, DatasetID)]
            if composite_names:
                tmp_scene = Scene()
                for k, v in gridded_scene.items():
                    if not isinstance(v["sensor"], set):
                        v["sensor"] = set([v["sensor"]])  # turn sensor back in to a set to match satpy usage
                    tmp_scene[v["id"]] = DataArray(v.get_data_array(), attrs=v)
                    tmp_scene[v["id"]].attrs["area"] = this_grid_definition.to_satpy_area()
                    # tmp_scene[v["id"]].info = {}
                    if v["sensor"] not in tmp_scene.attrs["sensor"]:
                        tmp_scene.attrs["sensor"].extend(v["sensor"])
                # Overwrite the wishlist that will include the above assigned datasets
                tmp_scene.wishlist = f.wishlist
                for cname in composite_names:
                    tmp_scene.compositors[cname] = tmp_scene.cpl.load_compositor(cname, tmp_scene.attrs["sensor"])
                tmp_scene.compute()
                tmp_scene.unload()
                # Add any new Datasets to our P2G Scene if SatPy created them
                for ds in tmp_scene:
                    ds_id = DatasetID.from_dict(ds.attrs)
                    if ds_id.name not in gridded_scene:
                        LOG.debug("Adding Dataset from SatPy Compositing: %s", ds_id)
                        gridded_scene[ds_id.name] = dataarray_to_gridded_product(ds)
                        gridded_scene[ds_id.name]["grid_definition"] = this_grid_definition
                # Remove any Products from P2G Scene that SatPy decided it didn't need anymore
                for k, v in list(gridded_scene.items()):
                    if v["id"].name not in tmp_scene:
                        LOG.debug("Removing Dataset that is no longer used: %s", k)
                        del gridded_scene[k]
                del tmp_scene, v

        if isinstance(gridded_scene, Scene):
            LOG.debug("Converting satpy Scene to P2G Gridded Scene")
            # Convert it to P2G Gridded Scene
            gridded_scene = convert_satpy_to_p2g_gridded(f, gridded_scene)

        # Writer
        try:
            LOG.info("Creating output from data mapped to grid %s", grid_name)
            backend.create_output_from_scene(gridded_scene, **args.subgroup_args["Backend Output Creation"])
        except (ValueError, KeyError):
            LOG.debug("Writer output creation exception: ", exc_info=True)
            LOG.error("Writer output creation failed (see log for details)")
            status_to_return |= STATUS_BACKEND_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        LOG.info("Processing data for grid %s complete", grid_name)
        # Force deletion and eventual garbage collection of the scene objects
        del gridded_scene
    del scene
    return status_to_return
Code Example #14
## choose reader
reader = "native_msg"
#reader="hrit_msg"

## different ways to define the satellite data object
from satpy import Scene

#   def __init__(self, filenames=None, reader=None, filter_parameters=None, reader_kwargs=None,
#                  ppp_config_dir=get_environ_config_dir(),
#                  base_dir=None,
#                  sensor=None,
#                  start_time=None,
#                  end_time=None,
#                  area=None):

global_scene = Scene(sensor="seviri", reader=reader, filenames=filenames)
#global_scene = Scene(platform_name="Meteosat-9", sensor="seviri", reader=reader, filenames=filenames)
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=datetime(2015, 4, 20, 10, 0), base_dir="/home/a001673/data/satellite/Meteosat-10/seviri/lvl1.5/2015/04/20/HRIT")
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=lastdate, base_dir="/data/cinesat/in/eumetcast1")
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=lastdate, reader="hrit_msg", basedir="/data/cinesat/in/eumetcast1/")
#global_scene = Scene(platform_name="Meteosat-9", sensor="seviri", reader="hrit_msg", start_time=lastdate)

### there is no satellite in satpy !!!
#from satpy.satellites import GeostationaryFactory
#global_scene = GeostationaryFactory.create_scene("meteosat", "09", "seviri", time_slot)

## get some info about the satellite data object
print("========================")
print("... dir(global_scene)")
print(dir(global_scene))
print("========================")
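A hedged continuation sketch (not in the original script; channel names depend on the reader configuration):

## list what the reader can provide and load one channel
print(global_scene.available_dataset_names())
global_scene.load(["IR_108"])   # SEVIRI 10.8 micron channel
print(global_scene["IR_108"])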
Code Example #15
import os
from satpy import Scene
from datetime import datetime
from satpy.utils import debug_on
import pyninjotiff
from glob import glob
from pyresample.utils import load_area
debug_on()


chn = "airmass"
ninjoRegion = load_area("areas.def", "nrEURO3km")

filenames = glob("data/*__")
global_scene = Scene(reader="hrit_msg", filenames=filenames)
global_scene.load([chn])
local_scene = global_scene.resample(ninjoRegion)
local_scene.save_dataset(chn, filename="airmass.tif", writer='ninjotiff',
                      sat_id=6300014,
                      chan_id=6500015,
                      data_cat='GPRN',
                      data_source='EUMCAST',
                      nbits=8)
Code Example #16
# Exercise 6
from pathlib import Path
from satpy import Scene, find_files_and_readers
import satpy as satpy

input_dir = Path("data")
output_dir = Path("output")

# 1. Read the Scene that you downloaded from the data directory using SatPy. [2P]

files = find_files_and_readers(base_dir=input_dir, reader="seviri_l1b_nc")
scn = Scene(filenames=files)

# 2. Load the composites "natural_color" and "convection" [2P]

scn.load(["natural_color"])
scn.load(["convection"])

# 3. Resample the fulldisk to the Dem. Rep. Kongo and its neighbours [4P]
#    by defining your own area in Lambert Azimuthal Equal Area.
#    Use the following settings:
#      - lat and lon of origin: -3/23
#      - width and height of the resulting domain: 500px
#      - projection x/y coordinates of lower left: -15E5
#      - projection x/y coordinates of upper right: 15E5

from utils import resample_area
local_scn = resample_area(
    area_id="Democratic_Republic_Kongo",
    description=
    "Democratic Republic Kongo and its neighbors in Lambert Azimuthal Equal Area projection",
Code Example #17
class TestCFReader(unittest.TestCase):
    """Test case for CF reader."""
    def setUp(self):
        """Create a test scene."""
        tstart = datetime(2019, 4, 1, 12, 0)
        tend = datetime(2019, 4, 1, 12, 15)
        data_visir = [[1, 2], [3, 4]]
        y_visir = [1, 2]
        x_visir = [1, 2]
        z_visir = [1, 2, 3, 4, 5, 6, 7]
        qual_data = [[1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7]]
        time_vis006 = [1, 2]
        lat = 33.0 * np.array([[1, 2], [3, 4]])
        lon = -13.0 * np.array([[1, 2], [3, 4]])
        common_attrs = {
            'start_time': tstart,
            'end_time': tend,
            'platform_name': 'tirosn',
            'orbit_number': 99999
        }
        vis006 = xr.DataArray(data_visir,
                              dims=('y', 'x'),
                              coords={
                                  'y': y_visir,
                                  'x': x_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'image0',
                                  'id_tag': 'ch_r06',
                                  'coordinates': 'lat lon'
                              })

        ir_108 = xr.DataArray(data_visir,
                              dims=('y', 'x'),
                              coords={
                                  'y': y_visir,
                                  'x': x_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'image1',
                                  'id_tag': 'ch_tb11',
                                  'coordinates': 'lat lon'
                              })
        qual_f = xr.DataArray(qual_data,
                              dims=('y', 'z'),
                              coords={
                                  'y': y_visir,
                                  'z': z_visir,
                                  'acq_time': ('y', time_vis006)
                              },
                              attrs={
                                  'name': 'qual_flags',
                                  'id_tag': 'qual_flags'
                              })
        lat = xr.DataArray(lat,
                           dims=('y', 'x'),
                           coords={
                               'y': y_visir,
                               'x': x_visir
                           },
                           attrs={
                               'name': 'lat',
                               'standard_name': 'latitude',
                               'modifiers': np.array([])
                           })
        lon = xr.DataArray(lon,
                           dims=('y', 'x'),
                           coords={
                               'y': y_visir,
                               'x': x_visir
                           },
                           attrs={
                               'name': 'lon',
                               'standard_name': 'longitude',
                               'modifiers': np.array([])
                           })
        self.scene = Scene()
        self.scene.attrs['sensor'] = ['avhrr-1', 'avhrr-2', 'avhrr-3']
        scene_dict = {
            'image0': vis006,
            'image1': ir_108,
            'lat': lat,
            'lon': lon,
            'qual_flags': qual_f
        }
        for key in scene_dict:
            self.scene[key] = scene_dict[key]
            self.scene[key].attrs.update(common_attrs)

    def test_write_and_read(self):
        """Save a file with cf_writer and read the data again."""
        # '{testin}-{sensor}-{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.nc'
        filename = 'testingcfwriter{:s}-viirs-mband-20201007075915-20201007080744.nc'.format(
            datetime.utcnow().strftime('%Y%j%H%M%S'))
        self.scene.save_datasets(writer='cf',
                                 filename=filename,
                                 header_attrs={'instrument': 'avhrr'},
                                 engine='h5netcdf',
                                 flatten_attrs=True,
                                 pretty=True)
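        # read the file back with the generic satpy_cf_nc reader and compare to the original scene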
        scn_ = Scene(reader='satpy_cf_nc', filenames=[filename])
        scn_.load(['image0', 'image1', 'lat'])
        self.assertTrue(
            np.all(scn_['image0'].data == self.scene['image0'].data))
        self.assertTrue(
            np.all(scn_['lat'].data ==
                   self.scene['lat'].data))  # lat loaded as dataset
        self.assertTrue(
            np.all(scn_['image0'].coords['lon'] ==
                   self.scene['lon'].data))  # lon loaded as coord
        try:
            os.remove(filename)
        except PermissionError:
            pass

    def test_fix_modifier_attr(self):
        """Check that fix modifier can handle empty list as modifier attribute."""
        self.reader = SatpyCFFileHandler('filename', {}, {'filetype': 'info'})
        ds_info = {'modifiers': []}
        self.reader.fix_modifier_attr(ds_info)
        self.assertEqual(ds_info['modifiers'], ())
Code Example #18
File: glue_legacy.py  Project: jiaozhh/polar2grid
def main(argv=sys.argv[1:]):
    from polar2grid.core.script_utils import setup_logging, create_basic_parser, create_exc_handler, rename_log_file, ExtendAction
    from polar2grid.compositors import CompositorManager
    frontends = available_frontends()
    backends = available_backends()
    parser = create_basic_parser(description="Extract swath data, remap it, and write it to a new file format")
    parser.add_argument("frontend", choices=sorted(frontends.keys()),
                        help="Specify the swath extractor to use to read data (additional arguments are determined after this is specified)")
    parser.add_argument("backend", choices=sorted(backends.keys()),
                        help="Specify the backend to use to write data output (additional arguments are determined after this is specified)")
    parser.add_argument("--compositor-configs", nargs="*", default=None,
                        help="Specify alternative configuration file(s) for compositors")
    # don't include the help flag
    argv_without_help = [x for x in argv if x not in ["-h", "--help"]]
    args, remaining_args = parser.parse_known_args(argv_without_help)
    glue_name = args.frontend + "2" + args.backend
    LOG = logging.getLogger(glue_name)

    # Load compositor information (we can't know the compositor choices until we've loaded the configuration)
    compositor_manager = CompositorManager(config_files=args.compositor_configs)
    # Hack: argparse doesn't let you use choices and nargs=* on a positional argument
    parser.add_argument("compositors", choices=list(compositor_manager.keys()) + [[]], nargs="*",
                        help="Specify the compositors to apply to the provided scene (additional arguments are determined after this is specified)")

    # load the actual components we need
    farg_func = get_frontend_argument_func(frontends, args.frontend)
    fcls = get_frontend_class(frontends, args.frontend)
    barg_func = get_backend_argument_func(backends, args.backend)
    bcls = get_backend_class(backends, args.backend)

    # add_frontend_arguments(parser)
    subgroup_titles = []
    subgroup_titles += farg_func(parser)
    subgroup_titles += add_remap_argument_groups(parser)
    subgroup_titles += barg_func(parser)

    parser.add_argument('-f', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="List of files or directories to extract data from")
    parser.add_argument('-d', dest='data_files', nargs="+", default=[], action=ExtendAction,
                        help="Data directories to look for input data files (equivalent to -f)")
    global_keywords = ("keep_intermediate", "overwrite_existing", "exit_on_error")
    args = parser.parse_args(argv, global_keywords=global_keywords, subgroup_titles=subgroup_titles)

    if not args.data_files:
        # FUTURE: When the -d flag is removed this won't be needed because -f will be required
        parser.print_usage()
        parser.exit(1, "ERROR: No data files provided (-f flag)\n")

    # Logs are renamed once the start date of the provided data is known
    rename_log = False
    if args.log_fn is None:
        rename_log = True
        args.log_fn = glue_name + "_fail.log"
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG, TRACE_LEVEL]
    setup_logging(console_level=levels[min(4, args.verbosity)], log_filename=args.log_fn)
    sys.excepthook = create_exc_handler(LOG.name)
    LOG.debug("Starting script with arguments: %s", " ".join(sys.argv))

    # Keep track of things going wrong to tell the user what went wrong (we want to create as much as possible)
    status_to_return = STATUS_SUCCESS

    # Compositor validation
    # XXX: Hack to make `polar2grid.sh crefl gtiff` work like legacy crefl2gtiff.sh script
    if args.subgroup_args['Frontend Swath Extraction'].get('no_compositors'):
        LOG.debug("Removing all compositors")
        args.compositors = []
    elif args.frontend == 'crefl':
        if args.backend in ['awips', 'scmi']:
            LOG.debug("Adding 'crefl_sharpen' compositor")
            args.compositors.append('crefl_sharpen' if args.backend == 'scmi' else 'crefl_sharpen_awips')
        else:
            LOG.debug("Adding 'true_color' compositor")
            args.compositors.append('true_color')
            if '--true-color' in sys.argv and 'true_color' not in args.compositors:
                LOG.debug("Adding 'true_color' compositor")
                args.compositors.append('true_color')
            if '--false-color' in sys.argv and 'false_color' not in args.compositors:
                LOG.debug("Adding 'false_color' compositor")
                args.compositors.append('false_color')

    # if "--true-color" in
    for c in args.compositors:
        if c not in compositor_manager:
            LOG.error("Compositor '%s' is unknown" % (c,))
            raise RuntimeError("Compositor '%s' is unknown" % (c,))

    # Frontend
    try:
        LOG.info("Initializing reader...")
        list_products = args.subgroup_args["Frontend Initialization"].pop("list_products")
        f = fcls(search_paths=args.data_files, **args.subgroup_args["Frontend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Frontend exception: ", exc_info=True)
        LOG.error("%s frontend failed to load and sort data files (see log for details)", args.frontend)
        return STATUS_FRONTEND_FAIL

    # Rename the log file
    if rename_log:
        rename_log_file(glue_name + f.begin_time.strftime("_%Y%m%d_%H%M%S.log"))

    if list_products:
        print("\n".join(sorted(f.available_product_names)))
        return STATUS_SUCCESS

    try:
        LOG.info("Initializing remapping...")
        remapper = Remapper(**args.subgroup_args["Remapping Initialization"])
        remap_kwargs = args.subgroup_args["Remapping"]
    except (ValueError, KeyError):
        LOG.debug("Remapping initialization exception: ", exc_info=True)
        LOG.error("Remapping initialization failed (see log for details)")
        return STATUS_REMAP_FAIL

    try:
        LOG.info("Initializing backend...")
        backend = bcls(**args.subgroup_args["Backend Initialization"])
    except (ValueError, KeyError):
        LOG.debug("Writer initialization exception: ", exc_info=True)
        LOG.error("Writer initialization failed (see log for details)")
        return STATUS_BACKEND_FAIL

    try:
        LOG.info("Initializing compositor objects...")
        compositor_objects = {}
        for c in args.compositors:
            compositor_objects[c] = compositor_manager.get_compositor(c, **args.global_kwargs)
    except (ValueError, KeyError):
        LOG.debug("Compositor initialization exception: ", exc_info=True)
        LOG.error("Compositor initialization failed (see log for details)")
        return STATUS_COMP_FAIL

    try:
        LOG.info("Extracting swaths from data files available...")
        scene = f.create_scene(**args.subgroup_args["Frontend Swath Extraction"])

        # Determine whether we have a satpy Scene and, if so, whether it needs
        # to be converted to a P2G Scene to continue processing
        resample_method = args.subgroup_args["Remapping"].get("remap_method")
        is_satpy_resample_method = resample_method in SATPY_RESAMPLERS
        if is_satpy_resample_method and not isinstance(scene, Scene):
            raise RuntimeError("Resampling method '{}' only supports 'satpy' readers".format(resample_method))
        elif not is_satpy_resample_method and isinstance(scene, Scene):
            # convert satpy scene to P2G Scene to be compatible with old P2G resamplers
            scene = convert_satpy_to_p2g_swath(f, scene)

        if isinstance(scene, Scene):
            if not scene.datasets:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                raise RuntimeError("satpy readers do not currently support saving intermediate files")
        else:
            if not scene:
                LOG.error("No products were returned by the frontend")
                raise RuntimeError("No products were returned by the frontend")
            if args.keep_intermediate:
                filename = glue_name + "_swath_scene.json"
                LOG.info("Saving intermediate swath scene as '%s'", filename)
                scene.save(filename)
    except (ValueError, KeyError, RuntimeError):
        LOG.debug("Frontend data extraction exception: ", exc_info=True)
        LOG.error("Frontend data extraction failed (see log for details)")
        return STATUS_FRONTEND_FAIL

    # What grids should we remap to (the user should tell us or the backend should have a good set of defaults)
    known_grids = backend.known_grids
    LOG.debug("Writer known grids: %r", known_grids)
    grids = remap_kwargs.pop("forced_grids", None)
    LOG.debug("Forced Grids: %r", grids)
    if resample_method == "sensor" and grids != ["sensor"]:
        LOG.error("'sensor' resampling method only supports the 'sensor' grid")
        return STATUS_GDETER_FAIL
    if not grids and not known_grids:
        # the user didn't ask for any grids and the backend doesn't have specific defaults
        LOG.error("No grids specified and no known defaults")
        return STATUS_GDETER_FAIL
    elif not grids:
        # the user didn't tell us what to do, so let's try everything the backend knows how to do
        grids = known_grids
    elif known_grids is not None:
        # the user told us what to do, let's make sure the backend can do it
        grids = list(set(grids) & set(known_grids))
        if not grids:
            LOG.error("%s backend doesn't know how to handle any of the grids specified", args.backend)
            return STATUS_GDETER_FAIL
    LOG.debug("Grids that will be mapped to: %r", grids)

    # Remap
    for grid_name in grids:
        LOG.info("Remapping to grid %s", grid_name)
        try:
            gridded_scene = remapper.remap_scene(scene, grid_name, **remap_kwargs)
            if args.keep_intermediate:
                filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                LOG.debug("saving intermediate gridded scene as '%s'", filename)
                gridded_scene.save(filename)
        except (ValueError, KeyError, RuntimeError):
            LOG.debug("Remapping data exception: ", exc_info=True)
            LOG.error("Remapping data failed")
            status_to_return |= STATUS_REMAP_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        if not isinstance(scene, Scene):
            # Composition
            for c, comp in compositor_objects.items():
                try:
                    LOG.info("Running gridded scene through '%s' compositor", c)
                    gridded_scene = comp.modify_scene(gridded_scene, **args.subgroup_args[c + " Modification"])
                    if args.keep_intermediate:
                        filename = glue_name + "_gridded_scene_" + grid_name + ".json"
                        LOG.debug("Updating saved intermediate gridded scene (%s) after compositor", filename)
                        gridded_scene.save(filename)
                except (KeyError, ValueError, RuntimeError):
                    LOG.debug("Compositor Error: ", exc_info=True)
                    LOG.error("Could not properly modify scene using compositor '%s'" % (c,))
                    if args.exit_on_error:
                        raise RuntimeError("Could not properly modify scene using compositor '%s'" % (c,))

        if isinstance(f, ReaderWrapper) and not isinstance(gridded_scene, Scene):
            this_grid_definition = None
            # HACK: Create SatPy composites that were either separated before
            # resampling or needed resampling to be created
            rgbs = {}
            for product_name in gridded_scene.keys():
                rgb_name = product_name[:-6]
                # Keep track of one of the grid definitions
                if this_grid_definition is None:
                    this_grid_definition = gridded_scene[product_name]["grid_definition"]

                if product_name.endswith(("rgb_0", "rgb_1", "rgb_2")):
                    if rgb_name not in rgbs:
                        rgbs[rgb_name] = [None, None, None]
                    chn_idx = int(product_name[-1])
                    rgbs[rgb_name][chn_idx] = product_name
            LOG.debug("Putting RGBs back together again")
            for rgb_name, v in rgbs.items():
                r = gridded_scene.pop(v[0])
                g = gridded_scene.pop(v[1])
                b = gridded_scene.pop(v[2])
                new_info = r.copy()
                new_info["grid_data"] = new_info["grid_data"].replace(v[0], rgb_name)
                new_info["product_name"] = rgb_name
                data = np.memmap(new_info["grid_data"], dtype=new_info["data_type"],
                                 mode="w+", shape=(3, new_info["grid_definition"]["height"], new_info["grid_definition"]["width"]))
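                # stack the three single-band arrays into one band-first
                # (3, rows, cols) memory-mapped file for the RGB product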
                data[0] = r.get_data_array()[:]
                data[1] = g.get_data_array()[:]
                data[2] = b.get_data_array()[:]
                gridded_scene[rgb_name] = new_info
                del data, new_info

            # Create composites that satpy couldn't complete until after remapping
            composite_names = f.missing_datasets
            if composite_names:
                tmp_scene = Scene()
                for k, v in gridded_scene.items():
                    ds_id = DatasetID.from_dict(v)
                    dask_arr = da.from_array(v.get_data_array(), chunks=CHUNK_SIZE)
                    tmp_scene[ds_id] = DataArray(dask_arr, attrs=v)
                    tmp_scene[ds_id].attrs["area"] = this_grid_definition.to_satpy_area()
                    # v["sensor"] may be a single sensor name or a set of names
                    if isinstance(v["sensor"], set):
                        tmp_scene.attrs["sensor"].update(v["sensor"])
                    else:
                        tmp_scene.attrs["sensor"].add(v["sensor"])
                # Overwrite the wishlist that will include the above assigned datasets
                tmp_scene.wishlist = f.wishlist.copy()
                comps, mods = tmp_scene.cpl.load_compositors(tmp_scene.attrs["sensor"])
                tmp_scene.dep_tree.compositors = comps
                tmp_scene.dep_tree.modifiers = mods
                tmp_scene.dep_tree.find_dependencies(tmp_scene.wishlist.copy())
                tmp_scene.generate_composites()
                tmp_scene.unload()
                # Add any new Datasets to our P2G Scene if SatPy created them
                for ds in tmp_scene:
                    ds_id = DatasetID.from_dict(ds.attrs)
                    if ds_id.name not in gridded_scene:
                        LOG.debug("Adding Dataset from SatPy Compositing: %s", ds_id)
                        gridded_scene[ds_id.name] = dataarray_to_gridded_product(ds, this_grid_definition)
                # Remove any Products from P2G Scene that SatPy decided it didn't need anymore
                for k, v in list(gridded_scene.items()):
                    if v['name'] not in tmp_scene:
                        LOG.debug("Removing Dataset that is no longer used: %s", k)
                        del gridded_scene[k]
                del tmp_scene, v

        if isinstance(gridded_scene, Scene):
            LOG.debug("Converting satpy Scene to P2G Gridded Scene")
            # Convert it to P2G Gridded Scene
            gridded_scene = convert_satpy_to_p2g_gridded(f, gridded_scene)

        # Writer
        try:
            LOG.info("Creating output from data mapped to grid %s", grid_name)
            backend.create_output_from_scene(gridded_scene, **args.subgroup_args["Backend Output Creation"])
        except (ValueError, KeyError, RuntimeError):
            LOG.debug("Writer output creation exception: ", exc_info=True)
            LOG.error("Writer output creation failed (see log for details)")
            status_to_return |= STATUS_BACKEND_FAIL
            if args.exit_on_error:
                return status_to_return
            continue

        LOG.info("Processing data for grid %s complete", grid_name)
        # Force deletion and eventual garbage collection of the scene objects
        del gridded_scene
    del scene
    return status_to_return
コード例 #19
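# Note (added): this excerpt is taken from a longer script; it assumes that
# pandas (pd), numpy (np), os, datetime and satpy.Scene are imported above, and
# that the strings Year, Month, Day, Hour, Minute, Seconds and the time-format
# string dynfmt are defined earlier in the original file.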
T_d_RS = pd.DataFrame(T_d_RS_list) 


##############################  NUCAPS ##############################
### read file ###
file_index = 0
date_NUCAPS = Year + Month + Day + '.txt'
filtered_files = pd.read_csv(os.path.join('/data/COALITION2/PicturesSatellite/results_NAL/NUCAPS', date_NUCAPS))

index_CP = filtered_files.iloc[file_index,3]

min_dist = filtered_files.iloc[file_index,2]

filenames = [filtered_files.iloc[file_index,1]]
global_scene = Scene(reader="nucaps", filenames=filenames)    

NUCAPS_time = filtered_files.iloc[file_index, 1]
NUCAPS_time = NUCAPS_time[65:80]
NUCAPS_time = datetime.strptime(NUCAPS_time,"%Y%m%d%H%M%S%f")

time_dif = np.abs(datetime.strptime(Year+Month+Day+Hour+Minute+Seconds,dynfmt) - NUCAPS_time)

var_pres="H2O_MR"
global_scene.load([var_pres], pressure_levels=True)

var_temp = "Temperature"
global_scene.load([var_temp], pressure_levels=True)

### define variables ###
# PRESSURE 
コード例 #20
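# Note (added): the fragment below assumes an argparse.ArgumentParser created
# earlier in the original script (including at least the input_dir, cfg and
# chan_id options referenced further down) and imports of os, yaml, satpy.Scene
# and a get_area_def helper (e.g. satpy.resample.get_area_def).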
parser.add_argument('--sat_id', dest='sat_id', action="store", help="Satellite ID", default="8888")
parser.add_argument('--data_cat', dest='data_cat', action="store", help="Category of data (one of GORN, GPRN, P**N)", default="GORN")
parser.add_argument('--area', dest='areadef', action="store", help="Area name, the definition must exist in your areas configuration file", default="nrEURO1km_NPOL_COALeqc")
parser.add_argument('--ph_unit', dest='ph_unit', action="store", help="Physical unit", default="CELSIUS")
parser.add_argument('--data_src', dest='data_src', action="store", help="Data source", default="EUMETCAST")
args = parser.parse_args()

if args.input_dir is not None:
    os.chdir(args.input_dir)

cfg = vars(args)
if args.cfg is not None:
    with open(args.cfg, 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)

narea = get_area_def(args.areadef)
global_data = Scene(sensor="images", reader="generic_image", area=narea)
global_data.load(['image'])

global_data['image'].info['area'] = narea
fname = global_data['image'].info['filename']
ofname = fname[:-3] + "tif"

#global_data.save_dataset('image', filename="out.png", writer="simple_image")
global_data.save_dataset('image', filename=ofname, writer="ninjotiff",
                      sat_id=cfg['sat_id'],
                      chan_id=cfg['chan_id'],
                      data_cat=cfg['data_cat'],
                      data_source=cfg['data_src'],
                      physic_unit=cfg['ph_unit'])
コード例 #21
 def test_scene_load_available_datasets(self):
     """Test that all datasets are available."""
     from satpy import Scene
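     # (Added note): FILENAME and self.base_dir are assumed to be provided by
     # the surrounding test module and its test-case setup in the original
     # source.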
     fname = os.path.join(self.base_dir, FILENAME)
     scn = Scene(reader='iasi_l2_so2_bufr', filenames=[fname])
     scn.load(scn.available_dataset_names())
コード例 #22
def get_eye(start_point, end_point):
    lat_0, lat_1 = start_point["USA_LAT"], end_point["USA_LAT"]
    lon_0, lon_1 = start_point["USA_LON"], end_point["USA_LON"]
    try:
        files, urls = get_data(DATA_DIRECTORY,
                               start_point["ISO_TIME"].to_pydatetime(),
                               end_point["ISO_TIME"].to_pydatetime(),
                               north=max(lat_0, lat_1) + DEFAULT_MARGIN,
                               south=min(lat_0, lat_1) - DEFAULT_MARGIN,
                               east=wrap(max(lon_0, lon_1) + DEFAULT_MARGIN),
                               west=wrap(min(lon_0, lon_1) - DEFAULT_MARGIN),
                               dayOrNight="D")
    except FileNotFoundError:
        return None
    raw_scene = Scene(filenames=files, reader="viirs_l1b")
    raw_scene.load(
        ["I04", "I05", "i_lat", "i_lon", "i_satellite_azimuth_angle"])
    t = raw_scene.start_time - start_point["ISO_TIME"].to_pydatetime()

    metadata = interpolate(start_point, end_point, t)
    eye_radius = metadata["USA_RMW"] / 60
    # core_area = create_area_def("core_eye",{
    #     "proj":"lcc","ellps":"WGS84","lat_0":metadata["USA_LAT"],"lon_1":metadata["USA_LON"]},units="degrees",
    #
    # })
    first_pass = create_area_def("first_pass", {
        "proj": "lcc",
        "ellps": "WGS84",
        "lat_0": metadata["USA_LAT"],
        "lon_0": metadata["USA_LON"],
        "lat_1": metadata["USA_LAT"]
    },
                                 units="degrees",
                                 resolution=RESOLUTION_DEF,
                                 area_extent=[
                                     metadata["USA_LON"] - 2 * eye_radius,
                                     metadata["USA_LAT"] - 2 * eye_radius,
                                     metadata["USA_LON"] + 2 * eye_radius,
                                     metadata["USA_LAT"] + 2 * eye_radius
                                 ])
    cropped_scene = raw_scene.resample(first_pass)
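    # The warmest I05 (11.45 um) brightness-temperature pixel in the first-pass
    # crop is used as an estimate of the eye centre; the area definition is
    # then rebuilt around that point.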
    centered_lon, centered_lat = first_pass.get_lonlat(*np.unravel_index(
        cropped_scene["I05"].values.argmax(), cropped_scene["I05"].shape))
    recentered_area = create_area_def("better_eye_area", {
        "proj": "lcc",
        "ellps": "WGS84",
        "lat_0": centered_lat,
        "lon_0": centered_lon,
        "lat_1": centered_lat
    },
                                      units="degrees",
                                      resolution=RESOLUTION_DEF,
                                      area_extent=[
                                          centered_lon - 2 * eye_radius,
                                          centered_lat - 2 * eye_radius,
                                          centered_lon + 2 * eye_radius,
                                          centered_lat + 2 * eye_radius
                                      ])
    new_scene = raw_scene.resample(recentered_area)

    return CycloneSnapshot(new_scene["I04"].values,
                           new_scene["I05"].values,
                           recentered_area.pixel_size_x,
                           recentered_area.pixel_size_y,
                           new_scene["i_satellite_azimuth_angle"].values,
                           metadata,
                           b_lon=centered_lon - 2 * eye_radius,
                           b_lat=centered_lat - 2 * eye_radius)
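
# Added usage sketch (assumed, not part of the original module): get_eye()
# expects two consecutive best-track fixes, e.g. rows of a pandas DataFrame
# with USA_LAT, USA_LON, USA_RMW and ISO_TIME columns; `best_track` is a
# hypothetical name.
#
# snapshot = get_eye(best_track.iloc[0], best_track.iloc[1])
# if snapshot is not None:
#     ...  # work with the returned CycloneSnapshot (I04/I05 arrays, metadata)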
コード例 #23
import os
from satpy import Scene
from datetime import datetime
from satpy.utils import debug_on
import pyninjotiff
from glob import glob
from pyresample.utils import load_area
import copy
debug_on()

chn = "C13"
ninjoRegion = load_area("areas.def", "NinJoGOESWregion")
filenames = glob("/var/tmp/cll/goes17/*")
global_scene = Scene(reader="abi_l1b", filenames=filenames)
global_scene.load([chn])
local_scene = global_scene.resample(ninjoRegion, cache_dir="/var/tmp/cll")
# Clip the Kelvin brightness temperatures in place to the NinJo display range
# of -87.5 to 40 degrees Celsius before writing.
local_scene[chn].values = local_scene[chn].values.clip(-87.5 + 273.15,
                                                        40 + 273.15)
local_scene.save_dataset(chn,
                         filename="goes17.tif",
                         writer='ninjotiff',
                         sat_id=6300014,
                         chan_id=900015,
                         data_cat='GORN',
                         data_source='EUMCAST',
                         physic_unit='C',
                         ch_max_measurement_unit=40,
                         ch_min_measurement_unit=-87.5,
                         zero_seconds=True,
                         invert_colorscale=True)
コード例 #24
ファイル: polar_maia.py プロジェクト: davidh-ssec/satpy
         ]


def hex_to_rgb(value):
    value = value.lstrip('#')
    lv = len(value)
    return [int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)]


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " MAIA_file ")
        sys.exit()

    fnmaia = sys.argv[1]
    maia_scene = Scene(reader='maia', filenames=[fnmaia])
    print(maia_scene.available_dataset_ids())
    maia_scene.load(["CloudType", "ct", "cma", "cma_conf",
                     'opaq_cloud', "CloudTopPres",
                     "CloudTopTemp", "Alt_surface"])

    # CloudType is a bit field containing the actual "ct" with values
    # from 0 to 20 which can be interpreted according to the cpool colormap

    # "ct" can be displayed in black and white:
    maia_scene.show("ct")

    # but it is better to palettize the image:
    # step 1: creation of the palette
    mycolors = []
    for i in range(21):
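        # (Added, assumed continuation of the truncated example): convert each
        # hex colour in the `cpool` list into an RGB triple for the palette.
        mycolors.append(hex_to_rgb(cpool[i]))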
コード例 #25
    raise ValueError("Unknown computer " + hostname +
                     ": no example file is provided")

print("========================")
print("... read file:")
print(filenames)

## choose reader
reader = "native_msg"
#reader="hrit_msg"

## different ways to define the satellite data object
from satpy import Scene

global_scene = Scene(platform_name="Meteosat-9",
                     sensor="seviri",
                     reader=reader,
                     filenames=filenames)
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=datetime(2015, 4, 20, 10, 0), base_dir="/home/a001673/data/satellite/Meteosat-10/seviri/lvl1.5/2015/04/20/HRIT")
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=lastdate, base_dir="/data/cinesat/in/eumetcast1")
#global_scene = Scene(platform_name="Meteosat-10", sensor="seviri", start_time=lastdate, reader="hrit_msg", basedir="/data/cinesat/in/eumetcast1/")
#global_scene = Scene(platform_name="Meteosat-9", sensor="seviri", reader="hrit_msg", start_time=lastdate)

## get some info about the satellite data object
print("========================")
print("... dir(global_scene)")
print(dir(global_scene))
print("========================")
print("... global_scene.info")
print(global_scene.info)
print("========================")
#print(global_scene.available_composite_names())  ### currently not all of them work, I have to check with the PyTroll satpy guys
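
## Added sketch (assumed, not part of the original script): load one SEVIRI
## channel with the same old-style satpy API as above and display it.
#global_scene.load(["IR_108"])
#global_scene.show("IR_108")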