Exemplo n.º 1
0
Arquivo: gibs.py Projeto: up42/modis
    def requests_wmts_tile(
        self,
        tile: mercantile.Tile,
        layer: str,
        date: str,
        img_format: str = "jpg",
        timeout: int = 120,
    ) -> requests.Response:
        """Fetch a single WMTS tile for the given layer and date.

        Args:
            tile: Mercator tile (x, y, z) to request.
            layer: WMTS layer identifier substituted into the endpoint.
            date: Date string substituted into the endpoint template.
            img_format: Image format extension (default ``"jpg"``).
            timeout: Seconds to wait for the server before aborting.

        Returns:
            The successful HTTP response containing the tile image.

        Raises:
            UP42Error: API_CONNECTION_ERROR on network failure, timeout,
                or non-2xx HTTP status.
        """
        tile_url = self.wmts_url + self.wmts_endpoint.format(
            layer=layer,
            date=date,
            x=tile.x,
            y=tile.y,
            zoom=tile.z,
            img_format=img_format,
        )

        logger.debug(tile_url)

        try:
            # An explicit timeout prevents hanging forever on a stalled
            # connection to the WMTS server.
            wmts_response = requests.get(tile_url, timeout=timeout)
            logger.info(f"response returned: {wmts_response.status_code}")
            wmts_response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.Timeout,
        ) as conn_err:
            logger.error("Network related error occurred")
            raise UP42Error(
                SupportedErrors.API_CONNECTION_ERROR, str(conn_err)
            ) from conn_err
        except requests.exceptions.HTTPError as err:
            logger.error("HTTP error occurred")
            raise UP42Error(SupportedErrors.API_CONNECTION_ERROR, str(err)) from err

        return wmts_response
Exemplo n.º 2
0
 def assert_input_params(self):
     """Validate that AOI parameters are consistent with ``clip_to_aoi``.

     When ``clip_to_aoi`` is False, none of ``bbox``/``contains``/
     ``intersects`` may be set; when True, at least one of them must be.

     Raises:
         UP42Error: INPUT_PARAMETERS_ERROR when the parameters contradict
             the ``clip_to_aoi`` flag.
     """
     # Plain attribute access instead of a __dict__ lookup — equivalent
     # for the params object and consistent with the sibling blocks.
     if not self.params.clip_to_aoi:
         if self.params.bbox or self.params.contains or self.params.intersects:
             raise UP42Error(
                 SupportedErrors.INPUT_PARAMETERS_ERROR,
                 "When clip_to_aoi is set to False, bbox, contains and intersects must be set to null.",
             )
     else:
         if (self.params.bbox is None and self.params.contains is None
                 and self.params.intersects is None):
             raise UP42Error(
                 SupportedErrors.INPUT_PARAMETERS_ERROR,
                 "When clip_to_aoi set to True, you MUST define one of bbox, contains or intersect.",
             )
    def process(self, input_fc: FeatureCollection) -> FeatureCollection:
        """Raise every input raster to ``self.exponent``, window by window.

        :param input_fc: GeoJSON FeatureCollection describing input rasters
        :return: FeatureCollection describing the processed output rasters
        :raises UP42Error: NO_INPUT_ERROR when the collection is empty
        """
        if not input_fc.features:
            raise UP42Error(SupportedErrors.NO_INPUT_ERROR)

        result_fc = FeatureCollection([])

        for in_feature in input_fc["features"]:
            logger.info(f"Processing {in_feature}...")
            src_path = Path("/tmp/input/") / Path(get_data_path(in_feature))
            with rio.open(src_path) as src:
                windows = WindowsUtil(src)
                out_name, out_path = get_output_filename_and_path(
                    src_path.name, postfix="processed"
                )
                meta = src.meta.copy()
                with rio.open(out_path, "w", **meta) as dst:
                    # Process in regular windows so large rasters never have
                    # to be loaded into memory in one piece.
                    for window in windows.windows_regular():
                        block = src.read(window=window) ** self.exponent
                        dst.write(block, window=window)

                result = Feature(
                    bbox=in_feature.bbox, geometry=in_feature.geometry
                )
                result["properties"] = self.get_metadata(in_feature)
                result = set_data_path(result, out_name)
                logger.info(f"Processed {result}...")
                result_fc.features.append(result)

        return result_fc
Exemplo n.º 4
0
    def process(self, input_fc: FeatureCollection) -> FeatureCollection:
        """
        This method takes the raster data at 10, 20, and 60 m resolutions and by applying
        data_final method creates the input data for the the convolutional neural network.
        It returns 10 m resolution for all the bands in 20 and 60 m resolutions.

        Args:
            input_fc: geojson FeatureCollection of all input images

        Returns:
            The output JSON document produced by ``get_final_json``.

        Raises:
            UP42Error: Mapped from the inference subprocess' return code.
        """
        self.assert_input_params()
        output_jsonfile = self.get_final_json()

        LOGGER.info("Started process...")
        for feature in input_fc.features:
            LOGGER.info(f"Processing feature {feature}")
            path_to_input_img = feature["properties"]["up42.data_path"]
            path_to_output_img = Path(
                path_to_input_img).stem + "_superresolution.tif"
            try:
                # Pass an argument list with shell=False so file names taken
                # from feature metadata cannot be interpreted by the shell
                # (command injection / word splitting hazard).
                subprocess.run(
                    [
                        "python3",
                        "src/inference.py",
                        path_to_input_img,
                        path_to_output_img,
                    ],
                    check=True,
                )
            except subprocess.CalledProcessError as e:
                raise UP42Error(SupportedErrors(e.returncode)) from e

        self.save_output_json(output_jsonfile, self.output_dir)
        return output_jsonfile
Exemplo n.º 5
0
 def assert_input_params(self):
     """Check that AOI clipping parameters are mutually consistent.

     Raises:
         UP42Error: WRONG_INPUT_ERROR when the AOI parameters contradict
             the ``clip_to_aoi`` flag.
     """
     aoi_args = (self.params.bbox, self.params.contains,
                 self.params.intersects)
     if self.params.clip_to_aoi:
         # Clipping requested: at least one AOI definition is mandatory.
         if all(arg is None for arg in aoi_args):
             raise UP42Error(
                 SupportedErrors.WRONG_INPUT_ERROR,
                 "When clip_to_aoi set to True, you MUST define the same "
                 "coordinates in bbox, contains or intersect for both "
                 "the S1 and SNAP blocks.",
             )
     elif any(aoi_args):
         # Clipping disabled: no AOI definition may be supplied.
         raise UP42Error(
             SupportedErrors.WRONG_INPUT_ERROR,
             "When clip_to_aoi is set to False, bbox, contains "
             "and intersects must be set to null.",
         )
Exemplo n.º 6
0
    def process(self, input_fc: FeatureCollection) -> FeatureCollection:
        """
        Given the necessary parameters and a feature collection describing the input datasets,
        run K-means clustering for each input data set and create output feature collection

        :param input_fc: A GeoJSON FeatureCollection describing all input datasets
        :return: A GeoJSON FeatureCollection describing all output datasets
        :raises UP42Error: NO_INPUT_ERROR on empty/invalid input,
            NO_OUTPUT_ERROR when every dataset was skipped
        """
        if not input_fc.features:
            raise UP42Error(SupportedErrors.NO_INPUT_ERROR)

        results = []  # type: List[Feature]
        for feature in input_fc.features:
            try:
                path_to_input_img = feature["properties"]["up42.data_path"]
                path_to_output_img = Path(
                    path_to_input_img).stem + "_kmeans.tif"
            except KeyError as err:
                raise UP42Error(
                    SupportedErrors.NO_INPUT_ERROR,
                    "up42.data_path was not specified in the metadata.",
                ) from err

            out_feature = feature.copy()
            # Replace the properties dict rather than writing through it:
            # feature.copy() is shallow, so mutating the shared "properties"
            # dict would also rewrite the *input* feature's data path.
            out_feature["properties"] = {
                **feature["properties"],
                "up42.data_path": path_to_output_img,
            }

            try:
                self.run_kmeans_clustering(
                    "/tmp/input/" + path_to_input_img,
                    "/tmp/output/" + path_to_output_img,
                )
                results.append(out_feature)
            except UP42Error as e:
                # Best effort: skip oversized inputs instead of failing the run.
                logger.warning(e)
                logger.warning(
                    f"{path_to_input_img} is too large to process, skipping..."
                )

        if not results:
            raise UP42Error(SupportedErrors.NO_OUTPUT_ERROR)
        return FeatureCollection(results)
Exemplo n.º 7
0
def raise_if_too_large(input_ds: rio.DatasetReader,
                       max_size_bytes: int = 31621877760):
    """Raise UP42Error if the estimated in-memory size of the input dataset
    exceeds max_size_bytes.

    Xlarge machine has 31621877760 bytes memory

    Parameters
    ----------
    input_ds : rio.DatasetReader
        A raster dataset.
    max_size_bytes : int
        Maximum allowed size of dataset in bytes (usually memory in machine)

    Raises
    -------
    UP42Error
        When estimated input dataset allocation in memory exceeds max_size_bytes

    """

    # Bits per sample by dtype; unknown dtypes fall back to 16 bits.
    bits_per_sample = {"uint8": 8, "uint16": 16, "float32": 32}
    multiplier = bits_per_sample.get(input_ds.meta["dtype"], 16)

    # Calculate expected_size in bytes
    expected_size = (input_ds.shape[0] * input_ds.shape[1] * input_ds.count *
                     multiplier) / 8
    # KMeansClustering algorithm uses at least x4 size of image in bytes in memory
    # Add x4 buffer for safety
    expected_size *= 4 * 4
    logger.info(f"expected_size is {expected_size}")

    if expected_size > max_size_bytes:
        logger.info(
            f"expected_size {expected_size} is more than max_size_bytes {max_size_bytes}"
        )
        raise UP42Error(
            SupportedErrors.WRONG_INPUT_ERROR,
            "Dataset is too large! Please select a smaller AOI.",
        )
Exemplo n.º 8
0
Arquivo: modis.py Projeto: up42/modis
    def fetch(self,
              query: STACQuery,
              dry_run: bool = False) -> FeatureCollection:
        """Fetch WMTS imagery tiles covering the query AOI and merge them.

        Resolves the tile list for the query geometry, validates the
        requested imagery layers, writes quicklooks, and (unless dry_run)
        downloads, merges, post-processes and COG-converts the imagery for
        each query date.

        Args:
            query: Query object carrying bounds/geometry, zoom level,
                imagery layers and date parameters.
            dry_run: When True, skips downloading and merging the
                full-size imagery; only quicklooks are written.

        Returns:
            A FeatureCollection of the produced features.

        Raises:
            UP42Error: INPUT_PARAMETERS_ERROR for invalid tile/geometry
                input or invalid imagery layer names.
        """

        # Fill in defaults for parameters the caller did not supply.
        query.set_param_if_not_exists("zoom_level", self.default_zoom_level)
        query.set_param_if_not_exists("imagery_layers",
                                      [self.default_imagery_layer])

        # Get the list of tiles that cover the query AOI. Sorted by (y, x) in ascending order
        try:
            tile_list = list(
                filter_tiles_intersect_with_geometry(
                    tiles=mercantile.tiles(*query.bounds(),
                                           zooms=query.zoom_level,
                                           truncate=True),
                    geometry=query.geometry(),
                ))
        except MercantileError as mercerr:
            raise UP42Error(
                SupportedErrors.INPUT_PARAMETERS_ERROR) from mercerr

        output_features: List[Feature] = []

        date_list = extract_query_dates(query)

        logger.debug(f"Checking layer {query.imagery_layers}")
        are_valid, invalid, valid_imagery_layers = self.api.validate_imagery_layers(
            query.imagery_layers, query.bounds())
        if are_valid:
            logger.debug(f"Layers {query.imagery_layers} OK!")
        else:
            raise UP42Error(
                SupportedErrors.INPUT_PARAMETERS_ERROR,
                f"Invalid Layers. {invalid} have invalid names."
                f"{invalid} are layer bounds, search should be within this.",
            )

        for query_date in date_list:
            self.api.get_layer_bands_count(tile_list, valid_imagery_layers,
                                           query_date)
            # NOTE(review): `feature_id` and `feature` are rebound on every
            # layer iteration but used *after* this loop, so only the last
            # layer's feature is merged/appended per date — confirm this is
            # intended. If `valid_imagery_layers` were empty, the code below
            # would reference unbound names.
            for layer in valid_imagery_layers:
                feature_id: str = str(uuid.uuid4())
                return_poly = tiles_to_geom(tile_list)

                feature = Feature(id=feature_id,
                                  bbox=return_poly.bounds,
                                  geometry=return_poly)

                try:
                    self.api.write_quicklook(layer, return_poly.bounds,
                                             query_date, feature_id)
                except requests.exceptions.HTTPError:
                    # Best effort: a failed quicklook does not abort the date.
                    continue

            if not dry_run:
                # Fetch tiles and patch them together
                img_filename = self.get_final_merged_image(
                    tile_list, valid_imagery_layers, query_date, feature_id)
                self.api.post_process(img_filename, valid_imagery_layers)
                to_cog(img_filename, forward_band_tags=True)
                set_data_path(feature, f"{feature_id}.tif")

            logger.debug(feature)
            output_features.append(feature)

        logger.debug(f"Saving {len(output_features)} result features")

        return FeatureCollection(list(output_features))
Exemplo n.º 9
0
    def process(self, input_fc: FeatureCollection):
        """
        Main wrapper method to facilitate snap processing per feature.

        For each input feature this runs the SNAP processing graphs per
        polarisation, moves the produced GeoTIFFs into a per-feature output
        directory, then applies optional masking and renames the final band
        stack for every successfully processed feature.

        :param input_fc: GeoJSON FeatureCollection of input products
        :return: FeatureCollection of successfully processed features
        :raises UP42Error: NO_OUTPUT_ERROR when no feature produced output
        """
        # Default to VV polarisation when none was supplied in the params.
        polarisations: List = self.params.polarisations or ["VV"]

        self.assert_input_params()

        results: List[Feature] = []
        out_dict: dict = {}
        for in_feature in input_fc.get("features"):
            coordinate = in_feature["bbox"]
            self.assert_dem(coordinate)
            try:
                processed_graphs = self.process_snap(in_feature, polarisations)
                LOGGER.info("SNAP processing is finished!")
                if not processed_graphs:
                    LOGGER.debug("No processed images returned, will continue")
                    continue
                # Deep copy so edits below never leak into the input feature.
                out_feature = copy.deepcopy(in_feature)
                processed_tif_uuid = out_feature.properties["up42.data_path"]
                out_path = f"/tmp/output/{processed_tif_uuid}/"
                if not os.path.exists(out_path):
                    os.mkdir(out_path)
                for out_polarisation in processed_graphs:
                    # Besides the path we only need to change the capabilities
                    shutil.move(
                        ("%s.tif" % out_polarisation),
                        ("%s%s.tif" %
                         (out_path, out_polarisation.split("_")[-1])),
                    )
                # Re-point the output feature at the new stacked file name.
                del out_feature["properties"]["up42.data_path"]
                set_data_path(out_feature, processed_tif_uuid + ".tif")
                results.append(out_feature)
                # Bookkeeping for the post-processing pass below:
                # "z" holds the polarisation suffix of each processed graph.
                out_dict[processed_tif_uuid] = {
                    "id": processed_tif_uuid,
                    "z": [i.split("_")[-1] for i in processed_graphs],
                    "out_path": out_path,
                }
                # Remove the temporary copy of the SNAP graph template.
                Path(__file__).parent.joinpath(
                    "template/"
                    "snap_polarimetry_graph_%s.xml" % "copy").unlink()
            except WrongPolarizationError:
                # Skip products lacking the requested polarisations instead
                # of failing the whole run.
                LOGGER.error(
                    f"WrongPolarizationError: some or all of the polarisations "
                    f"({polarisations}) don't exist in this product "
                    f"({self.safe_file_name(in_feature),}), skipping.", )
                continue

        if not results:
            raise UP42Error(
                SupportedErrors.NO_OUTPUT_ERROR,
                "The used input parameters don't result in any output "
                "when applied to the provided input images.",
            )

        # Second pass: optional masking, then rename the final band stack
        # for each successfully produced output directory.
        for out_id in out_dict:  # pylint: disable=consider-using-dict-items
            my_out_path = out_dict[out_id]["out_path"]
            out_id_z = out_dict[out_id]["z"]
            if self.params.mask is not None:
                self.post_process(my_out_path, out_id_z)
            self.rename_final_stack(my_out_path, out_id_z)

        result_fc = FeatureCollection(results)

        # Recompute feature extents when the output was clipped to the AOI.
        if self.params.clip_to_aoi:
            result_fc = update_extents(result_fc)

        return result_fc