Example #1
def write_cube(self, cube: xr.Dataset,
               gm: GridMapping) -> Tuple[str, xr.Dataset]:
    output_config = self._output_config
    # Attach the grid mapping to the cube before writing.
    dataset = encode_cube(cube, grid_mapping=gm)
    with observe_dask_progress('writing cube', 100):
        write_params = output_config.write_params or {}
        store_params = output_config.store_params or {}
        if output_config.store_id:
            # Write through a configured data store; the store picks
            # the actual writer from the given writer_id.
            store_instance = get_data_store_instance(
                output_config.store_id,
                store_params=store_params,
                store_pool=self._store_pool)
            writer = store_instance.store
            write_params.update(writer_id=output_config.writer_id)
        else:
            # No store configured: instantiate the writer directly and
            # merge in the store parameters, letting explicit write
            # parameters take precedence.
            writer = new_data_writer(output_config.writer_id)
            write_params = {**store_params, **write_params}
        if not dataset.attrs.get('title'):
            # Set fallback title, so we can distinguish
            # datasets from stores in xcube-viewer
            dataset = dataset.assign_attrs(title=output_config.data_id)
        data_id = writer.write_data(dataset,
                                    data_id=output_config.data_id,
                                    replace=output_config.replace or False,
                                    **write_params)
    return data_id, dataset
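This example writes a cube either through a configured data store (when output_config.store_id is set, the writer is the store instance itself and the writer_id travels along as a write parameter) or through a standalone writer created with new_data_writer. Either way, the merged write parameters are forwarded to write_data, which returns the data id actually written.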
Example #2
def get_write_data_params_schema(self,
                                 writer_id: str = None
                                 ) -> JsonObjectSchema:
    if not writer_id:
        # No writer given: fall back to the first writer registered
        # for datasets in the default format on this storage.
        extensions = find_data_writer_extensions(
            predicate=get_data_accessor_predicate(
                type_specifier='dataset',
                format_id=_DEFAULT_FORMAT_ID,
                storage_id=_STORAGE_ID))
        assert extensions
        writer_id = extensions[0].name
    return new_data_writer(writer_id).get_write_data_params_schema()
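The returned JsonObjectSchema describes the keyword arguments the selected writer accepts in write_data. A minimal sketch of how a caller might inspect it, assuming 'dataset:zarr:posix' is a registered writer id and that the schema object offers a to_dict() method:

    from xcube.core.store import new_data_writer

    schema = new_data_writer('dataset:zarr:posix') \
        .get_write_data_params_schema()
    # Dump the schema as a plain dict to see the accepted
    # write parameters and their types.
    print(schema.to_dict())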
Example #3
def writer_info(writer_id: str):
    """
    Show data writer information.
    You can obtain valid WRITER names using command "xcube io writer list".
    """
    extension = get_extension_registry().get_extension(
        EXTENSION_POINT_DATA_WRITERS, writer_id)
    description = extension.metadata.get('description')
    if description:
        print(description)
    from xcube.core.store import new_data_writer
    writer_ = new_data_writer(writer_id)
    params_schema = writer_.get_write_data_params_schema()
    print(_format_params_schema(params_schema))
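This function is a CLI handler: judging by the docstring's reference to "xcube io writer list", it presumably backs a command along the lines of

    $ xcube io writer info dataset:zarr:posix

where the writer name is only illustrative; any id reported by "xcube io writer list" would do.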
Example #4
def write_data(self,
               data: Any,
               data_id: str = None,
               writer_id: str = None,
               replace: bool = False,
               **write_params) -> str:
    assert_instance(data,
                    (xr.Dataset, MultiLevelDataset, gpd.GeoDataFrame))
    if not writer_id:
        # No writer given: derive one from the data type, pairing
        # each supported type with its default format on this storage.
        if isinstance(data, MultiLevelDataset):
            predicate = get_data_accessor_predicate(
                type_specifier=TYPE_SPECIFIER_MULTILEVEL_DATASET,
                format_id='levels',
                storage_id=_STORAGE_ID)
        elif isinstance(data, xr.Dataset):
            predicate = get_data_accessor_predicate(
                type_specifier=TYPE_SPECIFIER_DATASET,
                format_id='zarr',
                storage_id=_STORAGE_ID)
        elif isinstance(data, gpd.GeoDataFrame):
            predicate = get_data_accessor_predicate(
                type_specifier=TYPE_SPECIFIER_GEODATAFRAME,
                format_id='geojson',
                storage_id=_STORAGE_ID)
        else:
            raise DataStoreError(f'Unsupported data type "{type(data)}"')
        extensions = find_data_writer_extensions(predicate=predicate)
        assert extensions
        writer_id = extensions[0].name
    data_id = self._ensure_valid_data_id(data_id, data)
    path = self._resolve_data_id_to_path(data_id)
    new_data_writer(writer_id).write_data(data,
                                          path,
                                          replace=replace,
                                          **write_params)
    return data_id
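When no writer_id is given, the writer is chosen from the data type: MultiLevelDataset maps to the 'levels' format, xr.Dataset to 'zarr', and gpd.GeoDataFrame to 'geojson', each combined with the store's storage id; any other type raises a DataStoreError. A minimal sketch of the same selection logic used standalone, assuming these helpers are importable from xcube.core.store as in the other examples and using 'posix' as an illustrative storage id:

    from xcube.core.store import (find_data_writer_extensions,
                                  get_data_accessor_predicate,
                                  new_data_writer)

    # Find writers that can write datasets in Zarr format to the
    # local file system, then instantiate the first match.
    predicate = get_data_accessor_predicate(type_specifier='dataset',
                                            format_id='zarr',
                                            storage_id='posix')
    extensions = find_data_writer_extensions(predicate=predicate)
    writer = new_data_writer(extensions[0].name)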
Example #5
def write_cube(cube: xr.Dataset,
               output_config: OutputConfig,
               store_pool: DataStorePool = None) -> str:
    with observe_progress('Writing output', 1) as progress:
        write_params = dict()
        if output_config.store_id:
            store_instance = get_data_store_instance(output_config.store_id,
                                                     store_params=output_config.store_params,
                                                     store_pool=store_pool)
            writer = store_instance.store
            write_params.update(writer_id=output_config.writer_id,
                                **output_config.write_params)
        else:
            writer = new_data_writer(output_config.writer_id)
            write_params.update(**output_config.store_params,
                                **output_config.write_params)

        # TODO: develop an adapter from Dask callback to ProgressObserver and use it here.
        data_id = writer.write_data(cube,
                                    data_id=output_config.data_id,
                                    replace=output_config.replace or False,
                                    **write_params)
        progress.worked(1)
        return data_id
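This appears to be an earlier, module-level variant of the write_cube method from Example #1: it reports a single observe_progress step instead of observing Dask progress, and it does not yet assign a fallback title to the dataset.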
Example #6
File: s3.py Project: micder/xcube
def _new_s3_writer(self, writer_id):
    self._assert_not_closed()
    # Hand the store's open S3 file system to the new writer.
    return new_data_writer(writer_id, s3=self._s3)
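The extra keyword argument s3=self._s3 is forwarded by new_data_writer to the writer instance, so the returned writer reuses the store's already-open S3 file system instead of creating its own.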
Example #7
def delete_data(self, data_id: str):
    # The accessor id parts joined with ':' form the writer id,
    # e.g. "<type>:<format>:<storage>".
    accessor_id_parts = self._get_accessor_id_parts(data_id)
    writer_id = ':'.join(accessor_id_parts)
    path = self._resolve_data_id_to_path(data_id)
    new_data_writer(writer_id).delete_data(path)
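Deletion goes through the same writer extensions as writing: the accessor id parts resolve to a writer id of the form "<type>:<format>:<storage>" (for example, "dataset:zarr:posix"), and that writer's delete_data removes the resource at the resolved path.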