Example #1
    def test_find_by_name(self):
        ds_io = find_dataset_io('netcdf4')
        self.assertIsInstance(ds_io, Netcdf4DatasetIO)

        ds_io = find_dataset_io('zarr', modes=['a'])
        self.assertIsInstance(ds_io, ZarrDatasetIO)
        ds_io = find_dataset_io('zarr', modes=['w'])
        self.assertIsInstance(ds_io, ZarrDatasetIO)
        ds_io = find_dataset_io('zarr', modes=['r'])
        self.assertIsInstance(ds_io, ZarrDatasetIO)

        ds_io = find_dataset_io('mem')
        self.assertIsInstance(ds_io, MemDatasetIO)

        ds_io = find_dataset_io('bibo', default=MemDatasetIO())
        self.assertIsInstance(ds_io, MemDatasetIO)
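The test above exercises find_dataset_io, which looks up a registered DatasetIO by name (optionally constrained by access modes) and returns the given default when the name is unknown. A minimal sketch of the imports such a test assumes; the module path follows the imports in Example #4, and the test class name is hypothetical:

import unittest

# find_dataset_io and the DatasetIO implementations live in xcube.core.dsio
# (see the imports in Example #4). The test class name is hypothetical.
from xcube.core.dsio import (find_dataset_io, MemDatasetIO,
                             Netcdf4DatasetIO, ZarrDatasetIO)


class DatasetIOTest(unittest.TestCase):
    # test methods such as test_find_by_name() go here
    pass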
Example #2
def gen_cube(input_paths: Sequence[str] = None,
             input_processor_name: str = None,
             input_processor_params: Dict = None,
             input_reader_name: str = None,
             input_reader_params: Dict[str, Any] = None,
             output_region: Tuple[float, float, float, float] = None,
             output_size: Tuple[int, int] = DEFAULT_OUTPUT_SIZE,
             output_resampling: str = DEFAULT_OUTPUT_RESAMPLING,
             output_path: str = DEFAULT_OUTPUT_PATH,
             output_writer_name: str = None,
             output_writer_params: Dict[str, Any] = None,
             output_metadata: NameAnyDict = None,
             output_variables: NameDictPairList = None,
             processed_variables: NameDictPairList = None,
             profile_mode: bool = False,
             no_sort_mode: bool = False,
             append_mode: bool = None,
             dry_run: bool = False,
             monitor: Callable[..., None] = None) -> bool:
    """
    Generate an xcube dataset from one or more input files.

    :param input_paths: The input paths.
    :param input_processor_name: Name of a registered input processor
        (xcube.core.gen.inputprocessor.InputProcessor) to be used to transform the inputs.
    :param input_processor_params: Parameters to be passed to the input processor.
    :param input_reader_name: Name of a registered input reader (xcube.core.dsio.DatasetIO).
    :param input_reader_params: Parameters passed to the input reader.
    :param output_region: Output region as tuple of floats: (lon_min, lat_min, lon_max, lat_max).
    :param output_size: The spatial dimensions of the output as tuple of ints: (width, height).
    :param output_resampling: The resampling method for the output.
    :param output_path: The output directory.
    :param output_writer_name: Name of an output writer
        (xcube.core.dsio.DatasetIO) used to write the cube.
    :param output_writer_params: Parameters passed to the output writer.
    :param output_metadata: Extra metadata passed to output cube.
    :param output_variables: Output variables.
    :param processed_variables: Processed variables computed on-the-fly.
    :param profile_mode: Whether profiling should be enabled.
    :param no_sort_mode: If True, the input paths are processed in the given order;
        otherwise they are sorted with the help of the input processor.
    :param append_mode: Deprecated. The function will always either insert, replace, or append new time slices.
    :param dry_run: If True, no data is written; useful for testing.
    :param monitor: A progress monitor.
    :return: True for success.
    """

    if append_mode is not None:
        warnings.warn(
            'append_mode in gen_cube() is deprecated, '
            'time slices will now always be inserted, replaced, or appended.')

    if input_processor_name is None:
        input_processor_name = 'default'
    elif input_processor_name == '':
        raise ValueError('input_processor_name must not be empty')

    input_processor_class = find_input_processor_class(input_processor_name)
    if not input_processor_class:
        raise ValueError(
            f'Unknown input_processor_name {input_processor_name!r}')

    if not issubclass(input_processor_class, InputProcessor):
        raise ValueError(
            f'Invalid input_processor_name {input_processor_name!r}: '
            f'must name a sub-class of {InputProcessor.__qualname__}')

    try:
        input_processor = input_processor_class(
            **(input_processor_params or {}))
    except (ValueError, TypeError) as e:
        raise ValueError(
            f'Invalid input_processor_name or input_processor_params: {e}'
        ) from e

    input_reader = find_dataset_io(input_reader_name
                                   or input_processor.input_reader)
    if not input_reader:
        raise ValueError(f'Unknown input_reader_name {input_reader_name!r}')

    if not output_path:
        raise ValueError('Missing output_path')

    output_writer_name = output_writer_name or guess_dataset_format(
        output_path)
    if not output_writer_name:
        raise ValueError(
            f'Failed to guess output_writer_name from path {output_path}')
    output_writer = find_dataset_io(output_writer_name, modes={'w', 'a'})
    if not output_writer:
        raise ValueError(f'Unknown output_writer_name {output_writer_name!r}')

    if monitor is None:
        # Fall back to a no-op progress monitor.
        # noinspection PyUnusedLocal
        def monitor(*args):
            pass

    # Expand each input path pattern into concrete file paths
    # ('**' is supported through recursive globbing).
    input_paths = [
        input_file for f in input_paths
        for input_file in glob.glob(f, recursive=True)
    ]

    if not no_sort_mode and len(input_paths) > 1:
        # Sort the expanded input paths with the help of the input processor.
        input_paths = _get_sorted_input_paths(input_processor, input_paths)

    if not dry_run:
        output_dir = os.path.abspath(os.path.dirname(output_path))
        os.makedirs(output_dir, exist_ok=True)

    effective_input_reader_params = dict(input_processor.input_reader_params
                                         or {})
    effective_input_reader_params.update(input_reader_params or {})

    effective_output_writer_params = output_writer_params or {}

    status = False

    ds_count = len(input_paths)
    ds_count_ok = 0
    ds_index = 0
    for input_file in input_paths:
        monitor(
            f'processing dataset {ds_index + 1} of {ds_count}: {input_file!r}...'
        )
        # noinspection PyTypeChecker
        status = _process_input(input_processor, input_reader,
                                effective_input_reader_params, output_writer,
                                effective_output_writer_params, input_file,
                                output_size, output_region, output_resampling,
                                output_path, output_metadata, output_variables,
                                processed_variables, profile_mode, dry_run,
                                monitor)
        ds_index += 1
        if status:
            ds_count_ok += 1

    monitor(f'{ds_count_ok} of {ds_count} datasets processed successfully, '
            f'{ds_count - ds_count_ok} were dropped due to errors')

    return status
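For context, a hedged usage sketch of gen_cube follows; all paths, the region, and the values are hypothetical, while the keyword names follow the signature above:

# Hypothetical invocation of gen_cube(); values are illustrative only.
ok = gen_cube(input_paths=['inputs/**/*.nc'],        # expanded via recursive glob
              output_region=(0.0, 50.0, 5.0, 52.5),  # lon_min, lat_min, lon_max, lat_max
              output_size=(512, 256),                # width, height
              output_path='out/cube.zarr',           # output writer guessed from this path
              dry_run=True,                          # process inputs but write nothing
              monitor=print)                         # print progress messages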
Example #3
    def test_find_by_ext(self):
        ds_io = find_dataset_io('nc')
        self.assertIsInstance(ds_io, Netcdf4DatasetIO)
Example #4
def compute(script: str,
            cube: List[str],
            input_var_names: str,
            input_params: str,
            output_path: str,
            output_format: str,
            output_var_name: str,
            output_var_dtype: str):
    """
    Compute a cube from one or more other cubes.

    The command computes a cube variable from other cube variables in CUBEs
    using a user-provided Python function in SCRIPT.

    The SCRIPT must define a function named "compute":

    \b
        def compute(*input_vars: numpy.ndarray,
                    input_params: Mapping[str, Any] = None,
                    dim_coords: Mapping[str, numpy.ndarray] = None,
                    dim_ranges: Mapping[str, Tuple[int, int]] = None) \\
                    -> numpy.ndarray:
            # Compute new numpy array from inputs
            # output_array = ...
            return output_array

    where input_vars are numpy arrays (chunks) in the order given by VARIABLES, or in the order of the
    variable names returned by an optional "initialize" function that may also be defined in SCRIPT
    (see below). input_params is a mapping of parameter names to values according to PARAMS, or the
    parameters returned by the aforesaid "initialize" function.
    dim_coords is a mapping from dimension name to coordinate labels for the current chunk to be computed.
    dim_ranges is a mapping from dimension name to index ranges into coordinate arrays of the cube.

    The SCRIPT may define a function named "initialize":

    \b
        def initialize(input_cubes: Sequence[xr.Dataset],
                       input_var_names: Sequence[str],
                       input_params: Mapping[str, Any]) \\
                       -> Tuple[Sequence[str], Mapping[str, Any]]:
            # Compute new variable names and/or new parameters
            # new_input_var_names = ...
            # new_input_params = ...
            return new_input_var_names, new_input_params

    where input_cubes are the respective CUBEs, input_var_names are the respective VARIABLES, and
    input_params are the respective PARAMS. The "initialize" function can be used to validate the
    data cubes, extract the desired variables in the desired order, and provide extra processing
    parameters to be passed to the "compute" function.

    Note that if no input variable names are specified, no variables are passed to the "compute" function.

    The SCRIPT may also define a function named "finalize":

    \b
        def finalize(output_cube: xr.Dataset,
                     input_params: Mapping[str, Any]) \\
                     -> Optional[xr.Dataset]:
            # Optionally modify output_cube and return it or return None
            return output_cube

    If defined, the "finalize" function will be called before the command writes the
    new cube and then exists. The functions may perform a cleaning up or perform side effects such
    as write the cube to some sink. If the functions returns None, the CLI will *not* write
    any cube data.

    """
    from xcube.cli.common import parse_cli_kwargs
    from xcube.core.compute import compute_cube
    from xcube.core.dsio import open_cube
    from xcube.core.dsio import guess_dataset_format, find_dataset_io

    input_paths = cube

    compute_function_name = "compute"
    initialize_function_name = "initialize"
    finalize_function_name = "finalize"

    # Read the user-provided script and execute it so that its top-level
    # definitions ("compute", "initialize", "finalize") end up in locals_dict.
    with open(script, "r") as fp:
        code = fp.read()

    locals_dict = dict()
    exec(code, globals(), locals_dict)

    input_var_names = ([name.strip() for name in input_var_names.split(",")]
                       if input_var_names else None)

    compute_function = _get_function(locals_dict, compute_function_name, script, force=True)
    initialize_function = _get_function(locals_dict, initialize_function_name, script, force=False)
    finalize_function = _get_function(locals_dict, finalize_function_name, script, force=False)

    input_params = parse_cli_kwargs(input_params, "PARAMS")

    input_cubes = []
    for input_path in input_paths:
        input_cubes.append(open_cube(input_path=input_path))

    if initialize_function:
        input_var_names, input_params = initialize_function(input_cubes, input_var_names, input_params)

    output_cube = compute_cube(compute_function,
                               *input_cubes,
                               input_var_names=input_var_names,
                               input_params=input_params,
                               output_var_name=output_var_name,
                               output_var_dtype=output_var_dtype)

    if finalize_function:
        output_cube = finalize_function(output_cube, input_params)

    if output_cube is not None:
        output_format = output_format or guess_dataset_format(output_path)
        dataset_io = find_dataset_io(output_format, {"w"})
        dataset_io.write(output_cube, output_path)
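To make the SCRIPT protocol above concrete, here is a minimal sketch of a user script; only the function names and signatures come from the help text, while the variable names and the arithmetic are hypothetical:

# Hypothetical SCRIPT for the compute command; it implements the protocol
# documented in the help text above. The computation is illustrative only.
from typing import Any, Mapping, Optional, Sequence, Tuple

import numpy as np
import xarray as xr


def initialize(input_cubes: Sequence[xr.Dataset],
               input_var_names: Sequence[str],
               input_params: Mapping[str, Any]) \
        -> Tuple[Sequence[str], Mapping[str, Any]]:
    # Default to two hypothetical variable names and ensure a 'factor' param.
    return (input_var_names or ['a', 'b'],
            {'factor': 2.0, **(input_params or {})})


def compute(a: np.ndarray, b: np.ndarray,
            input_params: Mapping[str, Any] = None,
            dim_coords: Mapping[str, np.ndarray] = None,
            dim_ranges: Mapping[str, Tuple[int, int]] = None) -> np.ndarray:
    # Combine the two input chunks into a single output chunk.
    return input_params['factor'] * (a + b)


def finalize(output_cube: xr.Dataset,
             input_params: Mapping[str, Any]) -> Optional[xr.Dataset]:
    # Attach provenance metadata; returning None would suppress writing.
    output_cube.attrs['history'] = 'computed by a hypothetical user script'
    return output_cube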