Example #1
    def read(self, obj):
        ds = open_gdal(obj)
        if ds is not None:
            driver = ds.GetDriver()
            size = (ds.RasterXSize, ds.RasterYSize)
            gt = ds.GetGeoTransform()
            extent = None

            if gt != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
                x_extent = (gt[0], gt[0] + size[0] * gt[1])
                y_extent = (gt[3] + size[1] * gt[5], gt[3])

                extent = (
                    min(x_extent),
                    min(y_extent),
                    max(x_extent),
                    max(y_extent)
                )
            
            projection = ds.GetProjection()

            values = {
                "size": (ds.RasterXSize, ds.RasterYSize),
            }

            if projection:
                values["projection"] = (projection, "WKT")
            if extent:
                values["extent"] = extent

            reader = self._find_additional_reader(ds)
            if reader:
                additional_values = reader.read_ds(ds)
                for key, value in additional_values.items():
                    values.setdefault(key, value)

            if ds.GetGCPCount() > 0:
                rt_prm = rt.suggest_transformer(ds)
                fp_wkt = rt.get_footprint_wkt(ds, **rt_prm)
                values["footprint"] = GEOSGeometry(
                    fp_wkt
                )

            driver_metadata = driver.GetMetadata()
            frmt = driver_metadata.get("DMD_MIMETYPE")
            if frmt:
                values["format"] = frmt

            return values
            
        raise Exception("Could not parse from obj '%s'." % repr(obj))
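The extent computation above follows the usual GDAL geotransform convention: gt[0] and gt[3] hold the origin, gt[1] and gt[5] the pixel sizes, with gt[5] normally negative for north-up rasters. A minimal standalone sketch of the same arithmetic, assuming a plain north-up raster without rotation terms:

# Minimal sketch: bounding box from a north-up GDAL geotransform.
# Assumes gt = (x_origin, x_size, 0, y_origin, 0, y_size) with y_size < 0.
def extent_from_geotransform(gt, xsize, ysize):
    x_coords = (gt[0], gt[0] + xsize * gt[1])
    y_coords = (gt[3] + ysize * gt[5], gt[3])
    return (min(x_coords), min(y_coords), max(x_coords), max(y_coords))

# 100 x 200 pixel raster, 0.1 degree pixels, upper-left corner at (10, 50)
print(extent_from_geotransform((10.0, 0.1, 0.0, 50.0, 0.0, -0.1), 100, 200))
# -> (10.0, 30.0, 20.0, 50.0)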
Example #2
def getExtentFromReferenceableDS(ds):
    """ Calculates the extent tuple of the given gdal.Dataset. The dataset
    must be encoded using tie-points.
    """

    filelist = ds.GetFileList()

    if len(filelist) != 1:
        raise RuntimeError("Cannot get a single dataset filename!")

    rt_prm = rt.suggest_transformer(filelist[0])
    fp_wkt = rt.get_footprint_wkt(filelist[0], **rt_prm)

    return GEOSGeometry(fp_wkt).extent
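A possible call site for this helper, assuming a single-file, tie-point encoded product; the import and the file name are illustrative only (the snippets in this listing use EOxServer's contrib wrappers around GDAL):

from osgeo import gdal

ds = gdal.Open("MER_FRS_1P_reduced.N1")  # hypothetical tie-point encoded product
if ds is not None:
    minx, miny, maxx, maxy = getExtentFromReferenceableDS(ds)
    print("Extent: %f %f %f %f" % (minx, miny, maxx, maxy))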
Example #3
    try:
        path = argv[1]
    except IndexError:
        print("Requires filename")
        exit(1)

    reader = GDALDatasetEnvisatMetadataFormatReader(env)
    ds = gdal.Open(path)

    if not ds:
        print("Cannot open '%s' as GDAL Dataset." % path)
        exit(1)
    elif not reader.test_ds(ds):
        print("Dataset '%s' does not contain required ENVISAT metadata." %
              path)
        exit(1)

    md = reader.read_ds(ds)
    del ds

    footprint = GEOSGeometry(get_footprint_wkt(path))
    footprint.srid = 4326

    encoder = EOP20Encoder()

    xml = encoder.serialize(
        encoder.encode_earth_observation(EOMetadata(footprint=footprint,
                                                    **md)))

    # write the EO metadata XML next to the input product (file name assumed)
    eo_id = os.path.splitext(os.path.basename(path))[0]
    with open(os.path.join(os.path.dirname(path), "%s.xml" % eo_id), "w") as f:
        f.write(xml)
Example #4
    second = int(m.group(6))
    
    return datetime(year, month, day, hour, minute, second)

if __name__ == "__main__":
    path = argv[1]

    ds = gdal.Open(path)
    
    eo_id = os.path.splitext(os.path.basename(path))[0]
    begin_time = parse_timestamp(ds.GetMetadataItem("MPH_SENSING_START"))
    end_time = parse_timestamp(ds.GetMetadataItem("MPH_SENSING_STOP"))
    
    del ds

    footprint = GEOSGeometry(get_footprint_wkt(path))
    footprint.srid = 4326

    encoder = EOPEncoder()  
    
    xml = DOMElementToXML(
        encoder.encodeEarthObservation(
            eo_id,
            begin_time,
            end_time,
            footprint
        )
    )
  
    with open(os.path.join(os.path.dirname(path), "%s.xml" % eo_id), "w") as xml_file:
        xml_file.write(xml)
Example #5
    def read(self, obj):
        ds = open_gdal(obj)
        if ds is None:
            raise Exception("Could not parse from obj '%s'." % repr(obj))

        driver = ds.GetDriver()
        size = (ds.RasterXSize, ds.RasterYSize)
        values = {"size": size}

        # --= rectified datasets =--
        # NOTE: If the projection is a non-empty string then
        #       the geocoding is given by the geo-transformation
        #       matrix, no matter what its values are.
        if ds.GetProjection():
            values["coverage_type"] = "RectifiedDataset"
            values["projection"] = (ds.GetProjection(), "WKT")

            # get coordinates of all four image corners
            gt = ds.GetGeoTransform()
            def gtrans(x, y):
                return gt[0] + x*gt[1] + y*gt[2], gt[3] + x*gt[4] + y*gt[5]
            vpix = [(0, 0), (0, size[1]), (size[0], 0), (size[0], size[1])]
            vx, vy = zip(*(gtrans(x, y) for x, y in vpix))

            # find the extent
            values["extent"] = (min(vx), min(vy), max(vx), max(vy))

        # --= tie-point encoded referenceable datasets =--
        # NOTE: If the GCP projection is a non-empty string and
        #       there are GCPs, we are dealing with a tie-point geocoded
        #       referenceable dataset. The extent is given by the image
        #       footprint. The footprint must not be wrapped around
        #       the date-line!
        elif ds.GetGCPProjection() and ds.GetGCPCount() > 0:
            values["coverage_type"] = "ReferenceableDataset"
            projection = ds.GetGCPProjection()
            values["projection"] = (projection, "WKT")

            # parse the spatial reference to get the EPSG code
            sr = osr.SpatialReference(projection, "WKT")

            # NOTE: GeosGeometry can't handle non-EPSG geometry projections.
            if sr.GetAuthorityName(None) == "EPSG":
                srid = int(sr.GetAuthorityCode(None))

                # get the footprint
                rt_prm = rt.suggest_transformer(ds)
                fp_wkt = rt.get_footprint_wkt(ds, **rt_prm)
                footprint = GEOSGeometry(fp_wkt, srid)

                if isinstance(footprint, Polygon):
                    footprint = MultiPolygon(footprint)
                elif not isinstance(footprint, MultiPolygon):
                    raise TypeError(
                        "Got invalid geometry %s" % type(footprint).__name__
                    )

                values["footprint"] = footprint
                values["extent"] = footprint.extent

        # --= dataset with no geocoding =--
        # TODO: Handling of other types of GDAL geocoding (e.g., RPC).
        else:
            pass

        reader = self._find_additional_reader(ds)
        if reader:
            additional_values = reader.read_ds(ds)
            for key, value in additional_values.items():
                values.setdefault(key, value)

        driver_metadata = driver.GetMetadata()
        frmt = driver_metadata.get("DMD_MIMETYPE")
        if frmt is None:
            _driver_name = "GDAL/" + driver.ShortName
            _frmt = getFormatRegistry().getFormatsByDriver(_driver_name)
            if _frmt:
                frmt = _frmt[0].mimeType
        if frmt:
            values["format"] = frmt

        return values
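The EPSG check in the referenceable branch relies on the standard GDAL/OGR spatial-reference API. A small self-contained sketch of that lookup, written against the plain osgeo bindings rather than the EOxServer osr wrapper used above:

from osgeo import osr

def epsg_from_wkt(wkt):
    """Return the EPSG code of a WKT definition, or None if it is not EPSG-based."""
    sr = osr.SpatialReference()
    sr.ImportFromWkt(wkt)
    if sr.GetAuthorityName(None) == "EPSG":
        return int(sr.GetAuthorityCode(None))
    return None

wgs84 = osr.SpatialReference()
wgs84.ImportFromEPSG(4326)
print(epsg_from_wkt(wgs84.ExportToWkt()))  # -> 4326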
Example #6
    def apply(self, src_ds):
        # setup
        dst_sr = osr.SpatialReference()
        gcp_sr = osr.SpatialReference()

        dst_sr.ImportFromEPSG(self.srid if self.srid is not None else self.gcp_srid)
        gcp_sr.ImportFromEPSG(self.gcp_srid)

        logger.debug("Using GCP Projection '%s'" % gcp_sr.ExportToWkt())
        logger.debug(
            "Applying GCPs: MULTIPOINT(%s) -> MULTIPOINT(%s)"
            % (
                ", ".join([("(%f %f)") % (gcp.GCPX, gcp.GCPY) for gcp in self.gcps]),
                ", ".join([("(%f %f)") % (gcp.GCPPixel, gcp.GCPLine) for gcp in self.gcps]),
            )
        )
        # set the GCPs
        src_ds.SetGCPs(self.gcps, gcp_sr.ExportToWkt())

        # Try to find and use the best transform method/order.
        # Orders are: -1 (TPS), 3, 2, and 1 (all GCP)
        # Loop over the min and max GCP number to order map.
        for min_gcpnum, max_gcpnum, order in [(3, None, -1), (10, None, 3), (6, None, 2), (3, None, 1)]:
            # if the number of GCP matches
            if len(self.gcps) >= min_gcpnum and (max_gcpnum is None or len(self.gcps) <= max_gcpnum):
                try:

                    if order < 0:
                        # let the reftools suggest the right interpolator
                        rt_prm = rt.suggest_transformer(src_ds)
                    else:
                        # use the polynomial GCP interpolation as requested
                        rt_prm = {"method": rt.METHOD_GCP, "order": order}

                    logger.debug(
                        "Trying order '%i' {method:%s,order:%s}"
                        % (order, rt.METHOD2STR[rt_prm["method"]], rt_prm["order"])
                    )
                    # get the suggested pixel size/geotransform
                    size_x, size_y, geotransform = rt.suggested_warp_output(
                        src_ds, None, dst_sr.ExportToWkt(), **rt_prm
                    )
                    if size_x > 100000 or size_y > 100000:
                        raise RuntimeError("Calculated size exceeds limit.")
                    logger.debug("New size is '%i x %i'" % (size_x, size_y))

                    # create the output dataset
                    dst_ds = create_mem(size_x, size_y, src_ds.RasterCount, src_ds.GetRasterBand(1).DataType)

                    # reproject the image
                    dst_ds.SetProjection(dst_sr.ExportToWkt())
                    dst_ds.SetGeoTransform(geotransform)

                    rt.reproject_image(src_ds, "", dst_ds, "", **rt_prm)

                    copy_metadata(src_ds, dst_ds)

                    # retrieve the footprint from the given GCPs
                    footprint_wkt = rt.get_footprint_wkt(src_ds, **rt_prm)

                except RuntimeError as e:
                    logger.debug("Failed using order '%i'. Error was '%s'." % (order, str(e)))
                    # the given method was not applicable, use the next one
                    continue

                else:
                    logger.debug("Successfully used order '%i'" % order)
                    # the transform method was successful, exit the loop
                    break
Example #7
    def render(self, params):
        # get the requested coverage, data items and range type.
        coverage = params.coverage
        data_items = coverage.data_items.filter(semantic__startswith="bands")
        range_type = coverage.range_type

        subsets = params.subsets

        # GDAL source dataset. Either a single file dataset or a composed VRT 
        # dataset.
        src_ds = self.get_source_dataset(
            coverage, data_items, range_type
        )

        # retrieve area of interest of the source image according to given 
        # subsets
        src_rect, dst_rect = self.get_source_and_dest_rect(src_ds, subsets)

        # deduce the "native" format of the source image
        native_format = data_items[0].format if len(data_items) == 1 else None

        # get the requested image format, which defaults to the native format
        # if available
        frmt = params.format or native_format

        if not frmt:
            raise RenderException("No format specified.", "format")

        if params.scalefactor is not None or params.scales:
            raise RenderException(
                "ReferenceableDataset cannot be scaled.",
                "scalefactor" if params.scalefactor is not None else "scale"
            )

        maxsize = WCSConfigReader(get_eoxserver_config()).maxsize
        if maxsize is not None:
            if maxsize < dst_rect.size_x or maxsize < dst_rect.size_y:
                raise RenderException(
                    "Requested image size %dpx x %dpx exceeds the allowed "
                    "limit maxsize=%dpx." % (
                        dst_rect.size_x, dst_rect.size_y, maxsize
                    ), "size"
                )

        # perform subsetting either with or without rangesubsetting
        subsetted_ds = self.perform_subset(
            src_ds, range_type, src_rect, dst_rect, params.rangesubset
        )

        # encode the processed dataset and save it to the filesystem
        out_ds, out_driver = self.encode(
            subsetted_ds, frmt, getattr(params, "encoding_params", {})
        )

        driver_metadata = out_driver.GetMetadata_Dict()
        mime_type = driver_metadata.get("DMD_MIMETYPE")
        extension = driver_metadata.get("DMD_EXTENSION")

        time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        filename_base = "%s_%s" % (coverage.identifier, time_stamp)

        result_set = [
            ResultFile(
                path, mime_type, "%s.%s" % (filename_base, extension),
                ("cid:coverage/%s" % coverage.identifier) if i == 0 else None
            ) for i, path in enumerate(out_ds.GetFileList())
        ]

        if params.mediatype and params.mediatype.startswith("multipart"):
            reference = "cid:coverage/%s" % result_set[0].filename
            
            if subsets.has_x and subsets.has_y:
                footprint = GEOSGeometry(reftools.get_footprint_wkt(out_ds))
                if not subsets.srid:
                    extent = footprint.extent
                else:
                    extent = subsets.xy_bbox
                encoder_subset = (
                    subsets.srid, src_rect.size, extent, footprint
                )
            else:
                encoder_subset = None

            encoder = WCS20EOXMLEncoder()
            content = encoder.serialize(
                encoder.encode_referenceable_dataset(
                    coverage, range_type, reference, mime_type, encoder_subset
                )
            )
            result_set.insert(0, ResultBuffer(content, encoder.content_type))

        return result_set
Example #8
    def apply(self, src_ds):
        # setup
        dst_sr = osr.SpatialReference()
        gcp_sr = osr.SpatialReference()

        dst_sr.ImportFromEPSG(self.srid if self.srid is not None
                              else self.gcp_srid)
        gcp_sr.ImportFromEPSG(self.gcp_srid)


        logger.debug("Using GCP Projection '%s'" % gcp_sr.ExportToWkt())
        logger.debug("Applying GCPs: MULTIPOINT(%s) -> MULTIPOINT(%s)"
                      % (", ".join([("(%f %f)") % (gcp.GCPX, gcp.GCPY) for gcp in self.gcps]) ,
                      ", ".join([("(%f %f)") % (gcp.GCPPixel, gcp.GCPLine) for gcp in self.gcps])))
        # set the GCPs
        src_ds.SetGCPs(self.gcps, gcp_sr.ExportToWkt())

        # Try to find and use the best transform method/order.
        # Orders are: -1 (TPS), 3, 2, and 1 (all GCP)
        # Loop over the min and max GCP number to order map.
        for min_gcpnum, max_gcpnum, order in [(3, None, -1), (10, None, 3), (6, None, 2), (3, None, 1)]:
            # if the number of GCP matches
            if len(self.gcps) >= min_gcpnum and (max_gcpnum is None or len(self.gcps) <= max_gcpnum):
                try:

                    if order < 0:
                        # let the reftools suggest the right interpolator
                        rt_prm = rt.suggest_transformer(src_ds)
                    else:
                        # use the polynomial GCP interpolation as requested
                        rt_prm = {"method": rt.METHOD_GCP, "order": order}

                    logger.debug(
                        "Trying order '%i' {method:%s,order:%s}"
                        % (order, rt.METHOD2STR[rt_prm["method"]], rt_prm["order"])
                    )
                    # get the suggested pixel size/geotransform
                    size_x, size_y, geotransform = rt.suggested_warp_output(
                        src_ds,
                        None,
                        dst_sr.ExportToWkt(),
                        **rt_prm
                    )
                    if size_x > 100000 or size_y > 100000:
                        raise RuntimeError("Calculated size exceeds limit.")
                    logger.debug("New size is '%i x %i'" % (size_x, size_y))

                    # create the output dataset
                    dst_ds = create_mem(size_x, size_y,
                                        src_ds.RasterCount,
                                        src_ds.GetRasterBand(1).DataType)

                    # reproject the image
                    dst_ds.SetProjection(dst_sr.ExportToWkt())
                    dst_ds.SetGeoTransform(geotransform)

                    rt.reproject_image(src_ds, "", dst_ds, "", **rt_prm)

                    copy_metadata(src_ds, dst_ds)

                    # retrieve the footprint from the given GCPs
                    footprint_wkt = rt.get_footprint_wkt(src_ds, **rt_prm)

                except RuntimeError as e:
                    logger.debug("Failed using order '%i'. Error was '%s'."
                                 % (order, str(e)))
                    # the given method was not applicable, use the next one
                    continue

                else:
                    logger.debug("Successfully used order '%i'" % order)
                    # the transform method was successful, exit the loop
                    break
        else:
            # no method worked, so raise an error
            raise GCPTransformException("Could not find a valid transform method.")

        # reproject the footprint to a lon/lat projection if necessary
        if not gcp_sr.IsGeographic():
            out_sr = osr.SpatialReference()
            out_sr.ImportFromEPSG(4326)
            geom = ogr.CreateGeometryFromWkt(footprint_wkt, gcp_sr)
            geom.TransformTo(out_sr)
            footprint_wkt = geom.ExportToWkt()

        logger.debug("Calculated footprint: '%s'." % footprint_wkt)

        return dst_ds, footprint_wkt
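The order selection above is built around Python's for/else clause: each candidate order is tried best-first, break leaves the loop on the first success, and the else branch only runs when no candidate was applicable. A stripped-down sketch of that pattern, with a hypothetical try_transform callable standing in for the reftools calls:

def pick_transform(num_gcps, try_transform):
    # candidate (min_gcp_count, order) pairs: TPS (-1), then polynomial 3, 2, 1
    for min_gcpnum, order in [(3, -1), (10, 3), (6, 2), (3, 1)]:
        if num_gcps >= min_gcpnum:
            try:
                result = try_transform(order)  # may raise RuntimeError
            except RuntimeError:
                continue   # this order is not applicable, try the next one
            else:
                break      # success, keep the result and leave the loop
    else:
        # the loop finished without a break: no order worked
        raise RuntimeError("Could not find a valid transform method.")
    return order, result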
Example #9
    def apply(self, src_ds):
        # setup
        dst_sr = osr.SpatialReference()
        dst_sr.ImportFromEPSG(self.srid)

        logger.debug("Using internal GCP Projection.")
        num_gcps = src_ds.GetGCPCount()

        # Try to find and use the best transform method/order.
        # Orders are: -1 (TPS), 3, 2, and 1 (all GCP)
        # Loop over the min and max GCP number to order map.
        for min_gcpnum, max_gcpnum, order in [(3, None, -1), (10, None, 3),
                                              (6, None, 2), (3, None, 1)]:
            # if the number of GCP matches
            if num_gcps >= min_gcpnum and (max_gcpnum is None
                                           or num_gcps <= max_gcpnum):
                try:

                    if (order < 0):
                        # let the reftools suggest the right interpolator
                        rt_prm = reftools.suggest_transformer(src_ds)
                    else:
                        # use the polynomial GCP interpolation as requested
                        rt_prm = {
                            "method": reftools.METHOD_GCP,
                            "order": order
                        }

                    logger.debug("Trying order '%i' {method:%s,order:%s}" %
                                 (order, reftools.METHOD2STR[rt_prm["method"]],
                                  rt_prm["order"]))

                    # get the suggested pixel size/geotransform
                    size_x, size_y, gt = reftools.suggested_warp_output(
                        src_ds, None, dst_sr.ExportToWkt(), **rt_prm)
                    if size_x > 100000 or size_y > 100000:
                        raise RuntimeError("Calculated size exceeds limit.")
                    logger.debug("New size is '%i x %i'" % (size_x, size_y))

                    # create the output dataset
                    dst_ds = create_mem(size_x, size_y, src_ds.RasterCount,
                                        src_ds.GetRasterBand(1).DataType)

                    # reproject the image
                    dst_ds.SetProjection(dst_sr.ExportToWkt())
                    dst_ds.SetGeoTransform(gt)

                    reftools.reproject_image(src_ds, "", dst_ds, "", **rt_prm)

                    copy_metadata(src_ds, dst_ds)

                    # retrieve the footprint from the given GCPs
                    footprint_wkt = reftools.get_footprint_wkt(
                        src_ds, **rt_prm)

                except RuntimeError as e:
                    logger.debug("Failed using order '%i'. Error was '%s'." %
                                 (order, str(e)))
                    # the given method was not applicable, use the next one
                    continue

                else:
                    logger.debug("Successfully used order '%i'" % order)
                    # the transform method was successful, exit the loop
                    break
Example #10
    def render(self, params):
        # get the requested coverage, data items and range type.
        coverage = params.coverage
        data_items = coverage.data_items.filter(semantic__startswith="bands")
        range_type = coverage.range_type

        subsets = params.subsets

        # GDAL source dataset. Either a single file dataset or a composed VRT
        # dataset.
        src_ds = self.get_source_dataset(
            coverage, data_items, range_type
        )

        # retrieve area of interest of the source image according to given
        # subsets
        src_rect, dst_rect = self.get_src_and_dst_rect(src_ds, subsets)

        # deduce the "native" format of the source image
        def _src2nat(src_format):
            if src_format is None:
                return None
            frmreg = getFormatRegistry()
            f_src = frmreg.getFormatByMIME(src_format)
            f_dst = frmreg.mapSourceToNativeWCS20(f_src)
            f_nat = frmreg._mapSourceToNativeWCS20(f_src)
            if src_format == 'application/x-esa-envisat' and f_src == f_nat:
                return src_format
            elif f_dst is not None:
                return f_dst.mimeType

        source_format = data_items[0].format if len(data_items) == 1 else None
        native_format = _src2nat(source_format)

        # get the requested image format, which defaults to the native format
        # if available
        output_format = params.format or native_format

        if not output_format:
            raise RenderException("Failed to deduce the native format of "
                "the coverage. Output format must be provided!", "format")

        if params.scalefactor is not None or params.scales:
            raise RenderException(
                "ReferenceableDataset cannot be scaled.",
                "scalefactor" if params.scalefactor is not None else "scale"
            )

        # check if the requested image fits the max. allowed coverage size
        maxsize = WCSConfigReader(get_eoxserver_config()).maxsize
        if maxsize < dst_rect.size_x or maxsize < dst_rect.size_y:
            raise RenderException(
                "Requested image size %dpx x %dpx exceeds the allowed "
                "limit maxsize=%dpx!" % (dst_rect.size_x,
                dst_rect.size_y, maxsize), "size"
            )

        # get the output backend and driver for the requested format
        def _get_driver(mime_src, mime_out):
            """Select backend for the given source and output formats."""
            # TODO: make this configurable
            if mime_src == 'application/x-esa-envisat' and \
               mime_out == 'application/x-netcdf':
                return "BEAM", "NetCDF4-BEAM"
            elif mime_src == 'application/x-esa-envisat' and \
               mime_out == 'application/x-esa-envisat':
                return "EOXS", "envisat"

            frmreg = getFormatRegistry()
            fobj = frmreg.getFormatByMIME(mime_out)
            if fobj is None:
                raise RenderException("Invallid output format '%s'!"%mime_out, "format")
            backend, _, driver = fobj.driver.partition("/")
            return backend, driver

        driver_backend, driver_name = _get_driver(source_format, output_format)

        if driver_backend not in ("GDAL", "BEAM", "EOXS"):
            raise RenderException("Invallid output format backend name %s!"
                                  "" % driver_backend, "format")

        # prepare output
        # ---------------------------------------------------------------------
        if driver_backend == "BEAM":

            path_out, extension = self.encode_beam(
                driver_name,
                src_ds.GetFileList()[0],
                src_rect,
                getattr(params, "encoding_params", {})
            )

            mime_type = output_format
            path_list = [path_out]

            time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
            filename_base = "%s_%s" % (coverage.identifier, time_stamp)

            result_set = [
                ResultFile(
                    path_item, mime_type, "%s.%s" % (filename_base, extension),
                    ("cid:coverage/%s" % coverage.identifier) if i == 0 else None
                ) for i, path_item in enumerate(path_list)
            ]

        # ---------------------------------------------------------------------
        elif driver_backend == "EOXS": #EOxServer native backend

            result_set = [ResultAlt(
                file(src_ds.GetFileList()[0]),
                content_type=output_format,
                filename=basename(src_ds.GetFileList()[0]),
                identifier="cid:coverage/%s" % coverage.identifier,
                close=True,
            )]

            mime_type = output_format
            subsets = Subsets(()) # reset all subsets

        # ---------------------------------------------------------------------
        elif driver_backend == "GDAL":

            # get the output driver
            driver = gdal.GetDriverByName(driver_name)
            if driver is None:
                raise RenderException("Unsupported GDAL driver %s!" % driver_name)

            # perform subsetting either with or without rangesubsetting
            subsetted_ds = self.get_subset(
                src_ds, range_type, src_rect, dst_rect, params.rangesubset
            )

            # encode the processed dataset and save it to the filesystem
            out_ds = self.encode(
                driver, subsetted_ds, output_format,
                getattr(params, "encoding_params", {})
            )

            driver_metadata = driver.GetMetadata_Dict()
            mime_type = driver_metadata.get("DMD_MIMETYPE")
            extension = driver_metadata.get("DMD_EXTENSION")
            path_list = out_ds.GetFileList()

            time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
            filename_base = "%s_%s" % (coverage.identifier, time_stamp)

            result_set = [
                ResultFile(
                    path_item, mime_type, "%s.%s" % (filename_base, extension),
                    ("cid:coverage/%s" % coverage.identifier) if i == 0 else None
                ) for i, path_item in enumerate(path_list)
            ]

        # ---------------------------------------------------------------------

        if params.mediatype and params.mediatype.startswith("multipart"):
            reference = "cid:coverage/%s" % result_set[0].filename

            if subsets.has_x and subsets.has_y:
                footprint = GEOSGeometry(reftools.get_footprint_wkt(out_ds))
                if not subsets.srid:
                    extent = footprint.extent
                else:
                    extent = subsets.xy_bbox
                encoder_subset = (
                    subsets.srid, src_rect.size, extent, footprint
                )
            else:
                encoder_subset = None

            encoder = WCS20EOXMLEncoder()
            content = encoder.serialize(
                encoder.encode_referenceable_dataset(
                    coverage, range_type, reference, mime_type, encoder_subset
                )
            )
            result_set.insert(0, ResultBuffer(content, encoder.content_type))

        return result_set
Example #11
    try:
        path = argv[1]
    except IndexError:
        print "Requires filename"
        exit(1)

    reader = GDALDatasetEnvisatMetadataFormatReader(env)
    ds = gdal.Open(path)

    if not ds:
        print "Cannot open '%s' as GDAL Dataset." % path
        exit(1)
    elif not reader.test_ds(ds):
        print "Dataset '%s' does not contain required ENVISAT metadata." % path
        exit(1)
    
    md = reader.read_ds(ds)
    del ds

    footprint = GEOSGeometry(get_footprint_wkt(path))
    footprint.srid = 4326

    encoder = EOP20Encoder()
    
    xml = encoder.serialize(
        encoder.encode_earth_observation(EOMetadata(footprint=footprint, **md))
    )
  
    # write the EO metadata XML next to the input product (file name assumed)
    eo_id = os.path.splitext(os.path.basename(path))[0]
    with open(os.path.join(os.path.dirname(path), "%s.xml" % eo_id), "w") as f:
        f.write(xml)
Example #12
    def render(self, params):
        # get the requested coverage, data items and range type.
        coverage = params.coverage
        data_items = coverage.data_items.filter(semantic__startswith="bands")
        range_type = coverage.range_type

        subsets = Subsets(params.subsets)

        # GDAL source dataset. Either a single file dataset or a composed VRT 
        # dataset.
        src_ds = self.get_source_dataset(
            coverage, data_items, range_type
        )

        # retrieve area of interest of the source image according to given 
        # subsets
        src_rect = self.get_source_image_rect(src_ds, subsets)

        # deduce the "native" format of the source image
        native_format = data_items[0].format if len(data_items) == 1 else None

        # get the requested image format, which defaults to the native format
        # if available
        format = params.format or native_format

        if not format:
            raise Exception("No format specified.")


        # perform subsetting either with or without rangesubsetting
        if params.rangesubset:
            # band subset taken from the range-subset parameters (assumed)
            subsetted_ds = self.perform_range_subset(
                src_ds, range_type, params.rangesubset, src_rect
            )

        else:
            subsetted_ds = self.perform_subset(
                src_ds, src_rect
            )

        # encode the processed dataset and save it to the filesystem
        out_ds, out_driver = self.encode(subsetted_ds, format)

        driver_metadata = out_driver.GetMetadata_Dict()
        mime_type = driver_metadata.get("DMD_MIMETYPE")

        time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        filename_base = "%s_%s" % (coverage.identifier, time_stamp)

        result_set = [
            ResultFile(
                path, mime_type, filename_base + splitext(path)[1],
                ("cid:coverage/%s" % coverage.identifier) if i == 0 else None
            ) for i, path in enumerate(out_ds.GetFileList())
        ]

        if params.mediatype.startswith("multipart"):
            reference = result_set[0].identifier
            
            if subsets.has_x and subsets.has_y:
                footprint = GEOSGeometry(reftools.get_footprint_wkt(out_ds))
                encoder_subset = (
                    subsets.xy_srid, src_rect.size, coverage.extent, footprint
                )
            else:
                encoder_subset = None

            encoder = WCS20EOXMLEncoder()
            content = encoder.serialize(
                encoder.encode_referenceable_dataset(
                    coverage, range_type, reference, mime_type, encoder_subset
                )
            )
            result_set.insert(0, ResultBuffer(content, encoder.content_type))

        return result_set
Example #13
    def read(self, obj):
        ds = open_gdal(obj)
        if ds is None:
            raise Exception("Could not parse from obj '%s'." % repr(obj))

        driver = ds.GetDriver()
        size = (ds.RasterXSize, ds.RasterYSize)
        values = {"size": size}

        # --= rectified datasets =--
        # NOTE: If the projection is a non-empty string then
        #       the geocoding is given by the geo-transformation
        #       matrix, no matter what its values are.
        if ds.GetProjection():
            values["coverage_type"] = "RectifiedDataset"
            values["projection"] = (ds.GetProjection(), "WKT")

            # get coordinates of all four image corners
            gt = ds.GetGeoTransform()

            def gtrans(x, y):
                return (gt[0] + x * gt[1] + y * gt[2],
                        gt[3] + x * gt[4] + y * gt[5])

            vpix = [(0, 0), (0, size[1]), (size[0], 0), (size[0], size[1])]
            vx, vy = zip(*(gtrans(x, y) for x, y in vpix))

            # find the extent
            values["extent"] = (min(vx), min(vy), max(vx), max(vy))

        # --= tie-point encoded referenceable datasets =--
        # NOTE: If the GCP projection is a non-empty string and
        #       there are GCPs, we are dealing with a tie-point geocoded
        #       referenceable dataset. The extent is given by the image
        #       footprint. The footprint must not be wrapped around
        #       the date-line!
        elif ds.GetGCPProjection() and ds.GetGCPCount() > 0:
            values["coverage_type"] = "ReferenceableDataset"
            projection = ds.GetGCPProjection()
            values["projection"] = (projection, "WKT")

            # parse the spatial reference to get the EPSG code
            sr = osr.SpatialReference(projection, "WKT")

            # NOTE: GeosGeometry can't handle non-EPSG geometry projections.
            if sr.GetAuthorityName(None) == "EPSG":
                srid = int(sr.GetAuthorityCode(None))

                # get the footprint
                rt_prm = rt.suggest_transformer(ds)
                fp_wkt = rt.get_footprint_wkt(ds, **rt_prm)
                footprint = GEOSGeometry(fp_wkt, srid)

                if isinstance(footprint, Polygon):
                    footprint = MultiPolygon(footprint)
                elif not isinstance(footprint, MultiPolygon):
                    raise TypeError("Got invalid geometry %s" %
                                    type(footprint).__name__)

                values["footprint"] = footprint
                values["extent"] = footprint.extent

        # --= dataset with no geocoding =--
        # TODO: Handling of other types of GDAL geocoding (e.g., RPC).
        else:
            pass

        reader = self._find_additional_reader(ds)
        if reader:
            additional_values = reader.read_ds(ds)
            for key, value in additional_values.items():
                values.setdefault(key, value)

        driver_metadata = driver.GetMetadata()
        frmt = driver_metadata.get("DMD_MIMETYPE")
        if frmt:
            values["format"] = frmt

        return values
Example #14
    """ Print simple usage help. """
    exename = basename(sys.argv[0])
    print >>sys.stderr, (
        "USAGE: %s <input image> [%s]" % (exename, "|".join(OUTPUT_FORMATS))
    )


if __name__ == "__main__":
    DEBUG = False
    FORMAT = "WKB"
    try:
        INPUT = sys.argv[1]
        for arg in sys.argv[2:]:
            if arg in OUTPUT_FORMATS:
                FORMAT = arg # output format
            elif arg == "DEBUG":
                DEBUG = True # dump debugging output
    except IndexError:
        error("Not enough input arguments!\n")
        usage()
        sys.exit(1)

    # get the referenceable dataset outline
    # NOTE: It is assumed that the outline is not wrapped around the date-line.
    ds = gdal.Open(INPUT)
    prm = rt.suggest_transformer(ds)
    geom = setSR(parseGeom(rt.get_footprint_wkt(ds, **prm)), OSR_WGS84)

    # print geometry
    sys.stdout.write(dumpGeom(geom, FORMAT))
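dumpGeom, parseGeom, setSR and OSR_WGS84 come from helper modules that are not part of this snippet. As a rough idea of what a dumpGeom-style serializer can look like with the plain ogr bindings (the format names and behaviour here are an assumption, not the EOxServer implementation):

from osgeo import ogr

def dump_geom_sketch(geom, frmt="WKB"):
    """Serialize an ogr.Geometry into one of a few common encodings."""
    if frmt == "WKT":
        return geom.ExportToWkt()
    elif frmt == "WKB":
        return bytes(geom.ExportToWkb())
    elif frmt == "JSON":
        return geom.ExportToJson()
    elif frmt == "KML":
        return geom.ExportToKML()
    raise ValueError("Unsupported output format %r" % frmt)

# usage: dump_geom_sketch(ogr.CreateGeometryFromWkt("POINT (1 2)"), "JSON")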
Example #15
    def render(self, params):
        # get the requested coverage, data items and range type.
        coverage = params.coverage
        data_items = coverage.data_items.filter(semantic__startswith="bands")
        range_type = coverage.range_type

        subsets = params.subsets

        # GDAL source dataset. Either a single file dataset or a composed VRT
        # dataset.
        src_ds = self.get_source_dataset(coverage, data_items, range_type)

        # retrieve area of interest of the source image according to given
        # subsets
        src_rect, dst_rect = self.get_source_and_dest_rect(src_ds, subsets)

        # deduce the "native" format of the source image
        native_format = data_items[0].format if len(data_items) == 1 else None

        # get the requested image format, which defaults to the native format
        # if available
        frmt = params.format or native_format

        if not frmt:
            raise RenderException("No format specified.", "format")

        if params.scalefactor is not None or params.scales:
            raise RenderException(
                "ReferenceableDataset cannot be scaled.",
                "scalefactor" if params.scalefactor is not None else "scale")

        maxsize = WCSConfigReader(get_eoxserver_config()).maxsize
        if maxsize is not None:
            if maxsize < dst_rect.size_x or maxsize < dst_rect.size_y:
                raise RenderException(
                    "Requested image size %dpx x %dpx exceeds the allowed "
                    "limit maxsize=%dpx." %
                    (dst_rect.size_x, dst_rect.size_y, maxsize), "size")

        # perform subsetting either with or without rangesubsetting
        subsetted_ds = self.perform_subset(src_ds, range_type, src_rect,
                                           dst_rect, params.rangesubset)

        # encode the processed dataset and save it to the filesystem
        out_ds, out_driver = self.encode(
            subsetted_ds, frmt, getattr(params, "encoding_params", {}))

        driver_metadata = out_driver.GetMetadata_Dict()
        mime_type = driver_metadata.get("DMD_MIMETYPE")
        extension = driver_metadata.get("DMD_EXTENSION")

        time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        filename_base = "%s_%s" % (coverage.identifier, time_stamp)

        result_set = [
            ResultFile(path, mime_type, "%s.%s" % (filename_base, extension),
                       ("cid:coverage/%s" %
                        coverage.identifier) if i == 0 else None)
            for i, path in enumerate(out_ds.GetFileList())
        ]

        if params.mediatype and params.mediatype.startswith("multipart"):
            reference = "cid:coverage/%s" % result_set[0].filename

            if subsets.has_x and subsets.has_y:
                footprint = GEOSGeometry(reftools.get_footprint_wkt(out_ds))
                if not subsets.srid:
                    extent = footprint.extent
                else:
                    extent = subsets.xy_bbox
                encoder_subset = (subsets.srid, src_rect.size, extent,
                                  footprint)
            else:
                encoder_subset = None

            encoder = WCS20EOXMLEncoder()
            content = encoder.serialize(
                encoder.encode_referenceable_dataset(coverage, range_type,
                                                     reference, mime_type,
                                                     encoder_subset))
            result_set.insert(0, ResultBuffer(content, encoder.content_type))

        return result_set
Example #16
    def read(self, obj):
        ds = open_gdal(obj)
        if ds is None:
            raise Exception("Could not parse from obj '%s'." % repr(obj))

        driver = ds.GetDriver()
        size = (ds.RasterXSize, ds.RasterYSize)
        values = {"size": size}

        projection = ds.GetProjection()

        # --= rectified datasets =--
        # NOTE: If the projection is a non-empty string then
        #       the geocoding is given by the geo-transformation
        #       matrix, no matter what its values are.
        if projection and not (ds.GetGCPProjection() and ds.GetGCPCount() > 0):
            sr = osr.SpatialReference(projection)
            if sr.srid is not None:
                projection = 'EPSG:%d' % sr.srid

            gt = ds.GetGeoTransform()

            values['origin'] = [gt[0], gt[3]]

            values['grid'] = {
                'coordinate_reference_system': projection,
                'axis_offsets': [gt[1], gt[5]],
                'axis_types': ['spatial', 'spatial'],
                'axis_names':
                ['x', 'y'] if sr.IsProjected() else ['long', 'lat'],
            }

            if sr.GetLinearUnitsName() in ('metre', 'meter', 'm') \
                    and abs(gt[1]) == abs(gt[5]):
                values['grid']['resolution'] = abs(gt[1])

        # --= tie-point encoded referenceable datasets =--
        # NOTE: If the GCP projection is a non-empty string and
        #       there are GCPs, we are dealing with a tie-point geocoded
        #       referenceable dataset. The extent is given by the image
        #       footprint. The footprint must not be wrapped around
        #       the date-line!
        elif ds.GetGCPProjection() and ds.GetGCPCount() > 0:
            projection = ds.GetGCPProjection()
            sr = osr.SpatialReference(projection)
            if sr.srid is not None:
                projection = 'EPSG:%d' % sr.srid

            values['grid'] = {
                'coordinate_reference_system': projection,
                'axis_offsets': [None, None],
                'axis_types': ['spatial', 'spatial'],
                'axis_names':
                ['x', 'y'] if sr.IsProjected() else ['long', 'lat']
            }
            values['origin'] = [None, None]

            # parse the spatial reference to get the EPSG code
            sr = osr.SpatialReference(ds.GetGCPProjection(), "WKT")

            # NOTE: GeosGeometry can't handle non-EPSG geometry projections.
            if sr.GetAuthorityName(None) == "EPSG":
                srid = int(sr.GetAuthorityCode(None))

                # get the footprint
                rt_prm = rt.suggest_transformer(ds)
                fp_wkt = rt.get_footprint_wkt(ds, **rt_prm)
                footprint = GEOSGeometry(fp_wkt, srid)

                if isinstance(footprint, Polygon):
                    footprint = MultiPolygon(footprint)
                elif not isinstance(footprint, MultiPolygon):
                    raise TypeError("Got invalid geometry %s" %
                                    type(footprint).__name__)

                values['footprint'] = footprint

        # --= dataset with no geocoding =--
        # TODO: Handling of other types of GDAL geocoding (e.g., RPC).
        else:
            pass

        reader = self._find_additional_reader(ds)
        if reader:
            additional_values = reader.read_ds(ds)
            for key, value in additional_values.items():
                values.setdefault(key, value)

        driver_metadata = driver.GetMetadata()
        frmt = driver_metadata.get("DMD_MIMETYPE")
        if frmt:
            values["format"] = frmt

        return values
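For a rectified dataset read through the branch above, the returned dictionary ends up with roughly this shape (the values are illustrative, not taken from a real product):

# Illustrative result for a rectified GeoTIFF in EPSG:4326
values = {
    "size": (1024, 512),
    "origin": [10.0, 50.0],
    "grid": {
        "coordinate_reference_system": "EPSG:4326",
        "axis_offsets": [0.01, -0.01],
        "axis_types": ["spatial", "spatial"],
        "axis_names": ["long", "lat"],
    },
    "format": "image/tiff",
}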