コード例 #1
0
    def setUp(self):
        """Write the response payload to a temporary file and open both
        the actual and the expected raster with GDAL.

        Stores:
            self.tmppath -- path of the temporary response file
            self.res_ds  -- GDAL dataset of the actual response
            self.exp_ds  -- GDAL dataset of the expected raster (the test
                            is skipped when it is missing)

        Fix: the OS-level file descriptor returned by ``tempfile.mkstemp``
        was discarded and therefore leaked; it is now closed immediately
        (the path is re-opened below with the proper mode).
        """
        super(GDALDatasetTestCase, self).setUp()
        fd, self.tmppath = tempfile.mkstemp("." +
                                            self.getFileExtension("raster"))
        os.close(fd)

        data = self.getResponseData()
        # binary payloads must be written in binary mode
        if isinstance(data, binary_type):
            mode = 'wb'
        else:
            mode = 'w'
        with open(self.tmppath, mode) as f:
            f.write(data)

        gdal.AllRegister()

        exp_path = os.path.join(self.getExpectedFileDir(),
                                self.getExpectedFileName("raster"))

        try:
            self.res_ds = gdal.Open(self.tmppath, gdal.GA_ReadOnly)
        except RuntimeError as e:
            self.fail("Response could not be opened with GDAL. Error was %s" %
                      e)

        try:
            self.exp_ds = gdal.Open(exp_path, gdal.GA_ReadOnly)
        except RuntimeError:
            self.skipTest("Expected response in '%s' is not present" %
                          exp_path)
コード例 #2
0
ファイル: gdal_dataset.py プロジェクト: darshangan/eoxserver
def open_gdal(obj):
    """Return *obj* unchanged when it already is a ``gdal.Dataset``;
    otherwise attempt to open it with GDAL, returning ``None`` when the
    open fails."""
    if isinstance(obj, gdal.Dataset):
        return obj
    try:
        dataset = gdal.Open(obj)
    except RuntimeError:
        dataset = None
    return dataset
コード例 #3
0
    def connect(self, coverage, data_items, layer, options):
        # Connect a MapServer layer to the raster data of *coverage*.
        # NOTE(review): only the first data item is used; assumes the
        # caller passes the raster location first -- confirm with callers.
        data = data_items[0].path

        if coverage.grid.is_referenceable:
            # Referenceable grids need an on-the-fly rectified VRT; it is
            # created in /vsimem so no real file is written.
            vrt_path = join("/vsimem", uuid4().hex)
            reftools.create_rectified_vrt(data, vrt_path)
            data = vrt_path
            # remember the VRT path so disconnect() can clean it up later
            layer.setMetaData("eoxs_ref_data", data)

        if not layer.metadata.get("eoxs_wrap_dateline") == "true":
            layer.data = data
        else:
            # Dateline wrapping: re-anchor the data via a simple VRT whose
            # geotransform places it inside the wrapped extent.
            sr = coverage.grid.spatial_reference
            extent = coverage.extent
            e = wrap_extent_around_dateline(extent, sr.srid)

            vrt_path = join("/vsimem", uuid4().hex)
            ds = gdal.Open(data)
            vrt_ds = create_simple_vrt(ds, vrt_path)
            size_x = ds.RasterXSize
            size_y = ds.RasterYSize

            # pixel sizes of the wrapped extent
            dx = abs(e[0] - e[2]) / size_x
            dy = abs(e[1] - e[3]) / size_y

            # north-up geotransform anchored at the wrapped upper-left corner
            vrt_ds.SetGeoTransform([e[0], dx, 0, e[3], 0, -dy])
            # dropping the reference closes the VRT and flushes it to /vsimem
            vrt_ds = None

            layer.data = vrt_path
コード例 #4
0
    def connect(self, coverage, data_items, layer, options):
        """Connect a MapServer layer to the band data of *coverage*.

        Fix: ``filter()`` returns a lazy iterator on Python 3 and is not
        subscriptable, so ``filtered[0]`` raised a TypeError there; the
        matching items are now materialized in a list before indexing
        (behavior on Python 2 is unchanged).
        """
        filtered = [d for d in data_items if d.semantic.startswith("bands")]
        data = connect(filtered[0])

        if isinstance(coverage, models.ReferenceableDataset):
            # referenceable datasets need an on-the-fly rectified VRT
            vrt_path = join("/vsimem", uuid4().hex)
            reftools.create_rectified_vrt(data, vrt_path)
            data = vrt_path
            # remember the VRT so it can be cleaned up on disconnect
            layer.setMetaData("eoxs_ref_data", data)

        if not layer.metadata.get("eoxs_wrap_dateline") == "true":
            layer.data = data
        else:
            # dateline wrapping: wrap the extent and re-anchor the data
            # via a simple VRT with an adjusted geotransform
            e = wrap_extent_around_dateline(coverage.extent, coverage.srid)

            vrt_path = join("/vsimem", uuid4().hex)
            ds = gdal.Open(data)
            vrt_ds = create_simple_vrt(ds, vrt_path)
            size_x = ds.RasterXSize
            size_y = ds.RasterYSize

            dx = abs(e[0] - e[2]) / size_x
            dy = abs(e[1] - e[3]) / size_y

            vrt_ds.SetGeoTransform([e[0], dx, 0, e[3], 0, -dy])
            # dropping the reference closes the VRT and flushes it to /vsimem
            vrt_ds = None

            layer.data = vrt_path
コード例 #5
0
    def _testBinaryComparison(self, file_type, data=None):
        """
        Helper function for the `testBinaryComparisonRaster` function.

        Compares the actual response for *file_type* ("raster", "html" or
        "xml") -- or *data* when given -- byte-wise against the stored
        expected file.  On mismatch the actual response is first written
        next to the expected file for inspection, then the test either
        fails or is skipped (HDF/NetCDF files, missing expected files,
        unreadable expected rasters).
        """
        expected_path = os.path.join(self.getExpectedFileDir(),
                                     self.getExpectedFileName(file_type))
        response_path = os.path.join(self.getResponseFileDir(),
                                     self.getResponseFileName(file_type))

        actual_response = None
        if data is None:
            if file_type in ("raster", "html"):
                actual_response = self.getResponseData()
            elif file_type == "xml":
                actual_response = self.getXMLData()
            else:
                self.fail("Unknown file_type '%s'." % file_type)
        else:
            actual_response = data

        # read the expected response, either binary or as string
        try:
            if isinstance(actual_response, binary_type):
                open_type = 'rb'
            else:
                open_type = 'r'
            with open(expected_path, open_type) as f:
                expected = f.read()

        except IOError:
            # missing expected file: handled below after the response is saved
            expected = None

        if expected != actual_response:
            # byte-wise comparison of HDF/NetCDF is unreliable (embedded
            # timestamps etc.), so skip instead of failing
            if self.getFileExtension("raster") in ("hdf", "nc"):
                self.skipTest(
                    "Skipping binary comparison for HDF or NetCDF file '%s'." %
                    expected_path)

            # save the contents of the file so it can be inspected (or
            # promoted to the new expected response)
            if isinstance(actual_response, binary_type):
                open_type = 'wb'
            else:
                open_type = 'w'
            with open(response_path, open_type) as f:
                f.write(actual_response)

            if file_type == "raster":
                try:
                    gdal.Open(expected_path)
                except RuntimeError:
                    self.skipTest("Expected response in '%s' is not present" %
                                  expected_path)

            if expected is None:
                self.skipTest("Expected response in '%s' is not present" %
                              expected_path)
            else:
                self.fail("Response returned in '%s' is not equal to expected "
                          "response in '%s'." % (response_path, expected_path))
コード例 #6
0
    def testBinaryComparisonRaster(self):
        """Decode the base64 raster embedded in the WPS response XML and
        compare its size and band count against the expected raster.

        Fixes: the OS-level descriptor returned by ``tempfile.mkstemp``
        was leaked (now closed); ``exp_path`` duplicated the already
        computed ``expected_path`` and was used inconsistently in the
        skip message -- the duplicate is removed.
        """
        response_path = os.path.join(self.getResponseFileDir(),
                                     self.getResponseFileName("raster"))
        expected_path = os.path.join(self.getExpectedFileDir(),
                                     self.getExpectedFileName("raster"))
        # the raster is transported base64-encoded inside the
        # wps:ComplexData element of the response XML
        doc = etree.fromstring(self.prepareXMLData(self.getXMLData()))

        try:
            encodedText = doc.xpath(
                '//wps:ComplexData',
                namespaces={'wps': 'http://www.opengis.net/wps/1.0.0'})[0].text
        except IndexError:
            self.fail('No complex data found in the XML tree')

        fd, self.tmppath = tempfile.mkstemp("." +
                                            self.getFileExtension("raster"))
        os.close(fd)
        with open(self.tmppath, 'wb') as f:
            f.write(b64decode(encodedText))
        gdal.AllRegister()

        try:
            self.res_ds = gdal.Open(self.tmppath, gdal.GA_ReadOnly)
        except RuntimeError as e:
            self.fail("Response could not be opened with GDAL. Error was %s" %
                      e)

        try:
            self.exp_ds = gdal.Open(expected_path, gdal.GA_ReadOnly)
        except RuntimeError:
            self.skipTest("Expected response in '%s' is not present" %
                          expected_path)

        # compare the size
        self.assertEqual((self.res_ds.RasterXSize, self.res_ds.RasterYSize),
                         (self.exp_ds.RasterXSize, self.exp_ds.RasterYSize))
        # compare the band count
        self.assertEqual(self.res_ds.RasterCount, self.exp_ds.RasterCount)
コード例 #7
0
    def from_file(cls, filename, env=None):
        """Alternate constructor: derive size, extent, projection and
        mode from the raster at *filename* using GDAL."""
        environment = env or {}
        dataset = gdal.Open(filename)
        raster_size = (dataset.RasterXSize, dataset.RasterYSize)
        raster_extent = gdal.get_extent(dataset)
        raster_mode = _get_ds_mode(dataset)
        projection = dataset.GetProjection()

        return cls(
            filename, environment, filename, raster_size, raster_extent,
            projection, raster_mode, None
        )
コード例 #8
0
    def _openDatasets(self):
        """Write the response to a temporary file and open it with GDAL.

        Fixes: Python-2-only ``except RuntimeError, e`` syntax replaced
        with the version-agnostic ``as e`` form; the ``mkstemp`` file
        descriptor is closed instead of leaked; the response file is
        written via a context manager so it is closed even on error.
        """
        fd, self.tmppath = tempfile.mkstemp("." +
                                            self.getFileExtension("raster"))
        os.close(fd)
        with open(self.tmppath, "w") as f:
            f.write(self.getResponseData())
        gdal.AllRegister()

        exp_path = os.path.join(self.getExpectedFileDir(),
                                self.getExpectedFileName("raster"))

        try:
            self.res_ds = gdal.Open(self.tmppath, gdal.GA_ReadOnly)
        except RuntimeError as e:
            self.fail("Response could not be opened with GDAL. Error was %s" %
                      e)
コード例 #9
0
    def __init__(self, fname):
        """Collect the geo-referencing parameters of the raster *fname*."""
        # make sure all GDAL drivers are available before opening
        gdal.AllRegister()
        dataset = gdal.Open(fname)

        self.fileName = fname
        self.driverName = dataset.GetDriver().LongName
        # (rows, columns, bands)
        self.size = (dataset.RasterYSize, dataset.RasterXSize,
                     dataset.RasterCount)
        self.GCPCount = dataset.GetGCPCount()
        self.GCPProjection = dataset.GetGCPProjection()
        self.Projection = dataset.GetProjection()
        self.ProjectionRef = dataset.GetProjectionRef()
        self.GeoTransform = dataset.GetGeoTransform()
        self.GCP = dataset.GetGCPs()
        # rectified: carries a projection; referenceable: carries GCPs
        # together with a GCP projection
        self.isRectified = bool(self.Projection)
        self.isReferenceable = bool(self.GCPProjection) and self.GCPCount > 0
コード例 #10
0
ファイル: gdal.py プロジェクト: darshangan/eoxserver
    def _read_metadata_from_data(self, data_item, retrieved_metadata, cache):
        """Read metadata from the dataset behind *data_item* and merge any
        values not already present into *retrieved_metadata*; a detected
        format is persisted on the data item."""
        # NOTE(review): `env` is resolved from an enclosing scope -- not
        # visible here; confirm it is defined at module level.
        metadata_component = MetadataComponent(env)

        dataset = gdal.Open(connect(data_item, cache))
        reader = metadata_component.get_reader_by_test(dataset)
        if reader:
            values = reader.read(dataset)

            fmt = values.pop("format", None)
            if fmt:
                data_item.format = fmt
                data_item.full_clean()
                data_item.save()

            for key, value in values.items():
                retrieved_metadata.setdefault(key, value)
        # drop the reference so GDAL closes the dataset
        dataset = None
コード例 #11
0
def warp_fields(coverages, field_name, bbox, crs, width, height):
    """Warp the band for *field_name* of all *coverages* into a single
    in-memory raster of ``width`` x ``height`` pixels covering *bbox* in
    *crs* and return its contents as a numpy array.

    Fix: the source dataset (``orig_ds``) was never released -- only the
    single-band VRT was -- so the underlying file stayed open; both
    references are now dropped after each warp.
    """
    driver = gdal.GetDriverByName('MEM')
    out_ds = driver.Create(
        '', width, height, 1,
        coverages[0].range_type.get_field(field_name).data_type)

    # north-up geotransform spanning the requested bbox
    out_ds.SetGeoTransform([
        bbox[0],
        (bbox[2] - bbox[0]) / width,
        0,
        bbox[3],
        0,
        -(bbox[3] - bbox[1]) / height,
    ])
    epsg = crss.parseEPSGCode(crs, [crss.fromShortCode])
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(epsg)

    out_ds.SetProjection(sr.ExportToWkt())

    for coverage in coverages:
        location = coverage.get_location_for_field(field_name)
        band_index = coverage.get_band_index_for_field(field_name)

        orig_ds = gdal.open_with_env(location.path, location.env)

        # for multi-band files, select the requested band via a VRT
        vrt_filename = None
        if orig_ds.RasterCount > 1:
            vrt_filename = '/vsimem/' + uuid4().hex
            gdal.BuildVRT(vrt_filename, orig_ds, bandList=[band_index])
            ds = gdal.Open(vrt_filename)
        else:
            ds = orig_ds

        gdal.Warp(out_ds, ds)
        # release both references so the datasets are actually closed
        # (when RasterCount == 1 they point at the same dataset)
        ds = None
        orig_ds = None

        if vrt_filename:
            gdal.Unlink(vrt_filename)

    band = out_ds.GetRasterBand(1)
    return band.ReadAsArray()
コード例 #12
0
    def __call__(self, src_ds):
        """Reduce the RGB bands of *src_ds* to one color-indexed
        (paletted) byte band and return the new dataset.

        The color table is either computed as a median cut of the source
        bands or copied from ``self.palette_file``.

        Fix: ``dst_ds`` was referenced in the cleanup handler even when
        ``create_temp`` itself raised, turning the original error into a
        NameError; it is now pre-initialized and the cleanup is guarded.
        """
        logger.info("Applying ColorIndexOptimization")
        dst_ds = None
        try:
            dst_ds = create_temp(src_ds.RasterXSize,
                                 src_ds.RasterYSize,
                                 1,
                                 gdal.GDT_Byte,
                                 temp_root=self.temporary_directory)

            if not self.palette_file:
                # create a color table as a median of the given dataset
                ct = gdal.ColorTable()
                gdal.ComputeMedianCutPCT(src_ds.GetRasterBand(1),
                                         src_ds.GetRasterBand(2),
                                         src_ds.GetRasterBand(3), 256, ct)

            else:
                # copy the color table from the given palette file
                pct_ds = gdal.Open(self.palette_file)
                pct_ct = pct_ds.GetRasterBand(1).GetRasterColorTable()
                if not pct_ct:
                    raise ValueError("The palette file '%s' does not have a "
                                     "Color Table." % self.palette_file)
                ct = pct_ct.Clone()
                pct_ds = None

            dst_ds.GetRasterBand(1).SetRasterColorTable(ct)
            gdal.DitherRGB2PCT(src_ds.GetRasterBand(1),
                               src_ds.GetRasterBand(2),
                               src_ds.GetRasterBand(3),
                               dst_ds.GetRasterBand(1), ct)

            copy_projection(src_ds, dst_ds)
            copy_metadata(src_ds, dst_ds)

            return dst_ds
        except:
            # clean up the partially created dataset, then re-raise
            if dst_ds is not None:
                cleanup_temp(dst_ds)
            raise
コード例 #13
0
class GDALDatasetTestCase(RasterTestCase):
    """
    Extended RasterTestCases that open the result with GDAL and
    perform several tests.
    """
    def tearDown(self):
        """Release the GDAL datasets and remove the temporary file."""
        super(GDALDatasetTestCase, self).tearDown()
        try:
            del self.res_ds
            del self.exp_ds
            os.remove(self.tmppath)
        except AttributeError:
            # _openDatasets() never ran (e.g. the test was skipped)
            pass

    def _openDatasets(self):
        """Write the response to a temporary file and open the actual
        and expected rasters with GDAL.

        Fixes: Python-2-only ``except RuntimeError, e`` syntax replaced
        with the version-agnostic ``as e`` form; the ``mkstemp`` file
        descriptor is closed instead of leaked; the response file is
        written via a context manager.
        """
        fd, self.tmppath = tempfile.mkstemp("." +
                                            self.getFileExtension("raster"))
        os.close(fd)
        with open(self.tmppath, "w") as f:
            f.write(self.getResponseData())
        gdal.AllRegister()

        exp_path = os.path.join(self.getExpectedFileDir(),
                                self.getExpectedFileName("raster"))

        try:
            self.res_ds = gdal.Open(self.tmppath, gdal.GA_ReadOnly)
        except RuntimeError as e:
            self.fail("Response could not be opened with GDAL. Error was %s" %
                      e)

        try:
            self.exp_ds = gdal.Open(exp_path, gdal.GA_ReadOnly)
        except RuntimeError:
            self.skipTest("Expected response in '%s' is not present" %
                          exp_path)
コード例 #14
0
    def render(self, params):
        """Render ``params.coverage`` through MapServer and return the
        result set; for WCS 2.0 multipart responses the first part is
        replaced by an EO-XML coverage description.

        Raises ``NoSuchCoverageException`` for referenceable datasets in
        WCS < 2.0, ``RenderException`` when no output format can be
        determined, and ``OperationNotSupportedException`` when no layer
        connector applies.
        """
        # get coverage related stuff
        coverage = params.coverage

        # ReferenceableDataset are not supported in WCS < 2.0
        if params.coverage.grid.is_referenceable and params.version:
            raise NoSuchCoverageException((coverage.identifier, ))

        data_locations = self.arraydata_locations_for_coverage(coverage)

        range_type = coverage.range_type
        bands = list(range_type)

        subsets = params.subsets

        if subsets:
            subsets.srid  # this automatically checks the validity

        # create and configure map object
        map_ = self.create_map()

        # merge the GDAL environment settings of all data locations
        env = {}
        for data_location in data_locations:
            env.update(data_location.env)
        gdal.set_env(env, False)

        # configure outputformat
        native_format = self.get_native_format(coverage, data_locations)
        if native_format and get_format_by_mime(native_format) is None:
            # fall back to GeoTIFF when the native format is not registered
            native_format = "image/tiff"

        frmt = params.format or native_format

        if frmt is None:
            raise RenderException("Format could not be determined", "format")

        mime_type, frmt = split_format(frmt)

        imagemode = ms.gdalconst_to_imagemode(bands[0].data_type)
        # a timestamp makes the output basename unique per request
        time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        basename = "%s_%s" % (coverage.identifier, time_stamp)
        of = create_outputformat(mime_type, frmt, imagemode, basename,
                                 getattr(params, "encoding_params", {}))

        map_.appendOutputFormat(of)
        map_.setOutputFormat(of)

        # TODO: use layer factory here
        layer = self.layer_for_coverage(coverage, native_format,
                                        params.version)

        map_.insertLayer(layer)
        connector = get_connector_by_test(coverage, data_locations)

        if not connector:
            raise OperationNotSupportedException(
                "Could not find applicable layer connector.", "coverage")

        try:
            connector.connect(coverage, data_locations, layer, {})
            # create request object and dispatch it against the map
            request = ms.create_request(
                self.translate_params(params, range_type))
            request.setParameter("format", mime_type)
            raw_result = ms.dispatch(map_, request)

        finally:
            # perform any required layer related cleanup
            connector.disconnect(coverage, data_locations, layer, {})

        result_set = result_set_from_raw_data(raw_result)

        if params.version == Version(2, 0):
            mediatype = getattr(params, "mediatype", None)
            if mediatype in ("multipart/mixed", "multipart/related"):
                # re-open the rendered image to extract the actual grid,
                # origin and size for the XML coverage description
                with vsi.TemporaryVSIFile.from_buffer(result_set[1].data) as f:
                    ds = gdal.Open(f.name)
                    grid = objects.Grid.from_gdal_dataset(ds)

                    # get the output CRS definition
                    crs = params.outputcrs or subsets.crs or 'imageCRS'
                    if crs == 'imageCRS':
                        crs = coverage.grid.coordinate_reference_system
                    grid._coordinate_reference_system = crs

                    origin = objects.Origin.from_gdal_dataset(ds)
                    size = [ds.RasterXSize, ds.RasterYSize]

                range_type = coverage.range_type
                if params.rangesubset:
                    range_type = range_type.subset(params.rangesubset)

                # patch the coverage object so the encoder describes the
                # rendered (possibly subsetted) image, not the original
                coverage._grid = grid
                coverage._origin = origin
                coverage._size = size
                coverage._range_type = range_type
                if isinstance(result_set[1].filename, binary_type):
                    file_name = result_set[1].filename.decode()
                else:
                    file_name = result_set[1].filename

                reference = 'cid:coverage/%s' % file_name

                encoder = WCS20EOXMLEncoder()

                if not isinstance(coverage, objects.Mosaic):
                    tree = encoder.encode_rectified_dataset(
                        coverage, getattr(params, "http_request", None),
                        reference, mime_type,
                        subsets.bounding_polygon(coverage)
                        if subsets else None)
                else:
                    tree = encoder.encode_rectified_stitched_mosaic(
                        coverage, getattr(params, "http_request", None),
                        reference, mime_type,
                        subsets.bounding_polygon(coverage)
                        if subsets else None)

                # the first part becomes the XML coverage description
                result_set[0] = ResultBuffer(encoder.serialize(tree),
                                             encoder.content_type)

        # "default" response
        return result_set
コード例 #15
0
ファイル: merge.py プロジェクト: baloola/ngeo-b
 def __init__(self, dataset, use_nodata=True):
     """Wrap *dataset*; a path string is opened with GDAL first, an
     already-open ``gdal.Dataset`` is used as-is."""
     self.dataset = (gdal.Open(dataset)
                     if isinstance(dataset, basestring) else dataset)
     self.use_nodata = use_nodata
コード例 #16
0
    def process(self, input_filename, output_filename,
                geo_reference=None, generate_metadata=True):
        """Pre-process *input_filename* into *output_filename*.

        Applies the optional *geo_reference*, runs the configured
        optimizations, optionally derives footprint metadata and writes
        the result using the configured format and creation options.

        Returns a ``PreProcessResult(output_filename, footprint,
        num_bands)``; ``footprint`` is ``None`` unless
        *generate_metadata* is set.

        Raises ``ValueError`` when the dataset carries no geotransform
        and no *geo_reference* is given.
        """
        # open the dataset and create an In-Memory Dataset as copy
        # to perform optimizations
        ds = create_mem_copy(gdal.Open(input_filename))

        gt = ds.GetGeoTransform()
        footprint_wkt = None

        if not geo_reference:
            # GDAL reports the identity transform for unreferenced files
            if gt == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
                # TODO: maybe use a better check
                raise ValueError("No geospatial reference for unreferenced "
                                 "dataset given.")
        else:
            logger.debug("Applying geo reference '%s'."
                         % type(geo_reference).__name__)
            ds, footprint_wkt = geo_reference.apply(ds)

        # apply optimizations
        for optimization in self.get_optimizations(ds):
            logger.debug("Applying optimization '%s'."
                         % type(optimization).__name__)

            try:
                new_ds = optimization(ds)

                if new_ds is not ds:
                    # cleanup afterwards
                    cleanup_temp(ds)
                    ds = new_ds
            except:
                # optimization failed: drop the intermediate dataset and
                # propagate the original error
                cleanup_temp(ds)
                raise

        # generate the footprint from the dataset
        if not footprint_wkt:
            logger.debug("Generating footprint.")
            footprint_wkt = self._generate_footprint_wkt(ds)
        # check that footprint is inside of extent of generated image
        # regenerate otherwise
        else:
            tmp_extent = getExtentFromRectifiedDS(ds)
            tmp_bbox = Polygon.from_bbox((tmp_extent[0], tmp_extent[1],
                                          tmp_extent[2], tmp_extent[3]))
            tmp_footprint = GEOSGeometry(footprint_wkt)
            if not tmp_bbox.contains(tmp_footprint):
                # clip the provided footprint to the image extent
                footprint_wkt = tmp_footprint.intersection(tmp_bbox).wkt

        if self.footprint_alpha:
            logger.debug("Applying optimization 'AlphaBandOptimization'.")
            opt = AlphaBandOptimization()
            opt(ds, footprint_wkt)

        output_filename = self.generate_filename(output_filename)

        logger.debug("Writing file to disc using options: %s."
                     % ", ".join(self.format_selection.creation_options))

        logger.debug("Metadata tags to be written: %s"
                     % ", ".join(ds.GetMetadata_List("") or []))

        # save the file to the disc
        driver = gdal.GetDriverByName(self.format_selection.driver_name)
        ds = driver.CreateCopy(output_filename, ds,
                               options=self.format_selection.creation_options)

        for optimization in self.get_post_optimizations(ds):
            logger.debug("Applying post-optimization '%s'."
                         % type(optimization).__name__)
            optimization(ds)

        # generate metadata if requested
        footprint = None
        if generate_metadata:
            # split the footprint at the dateline: geometry east of 180
            # degrees is shifted back into the normalized [-180, 180] space
            normalized_space = Polygon.from_bbox((-180, -90, 180, 90))
            non_normalized_space = Polygon.from_bbox((180, -90, 360, 90))

            footprint = GEOSGeometry(footprint_wkt)
            #.intersection(normalized_space)
            outer = non_normalized_space.intersection(footprint)

            if len(outer):
                # shift every ring of the out-of-range part by -360 degrees
                # and merge it with the in-range part
                footprint = MultiPolygon(
                    *map(lambda p:
                        Polygon(*map(lambda ls:
                            LinearRing(*map(lambda point:
                                (point[0] - 360, point[1]), ls.coords
                            )), tuple(p)
                        )), (outer,)
                    )
                ).union(normalized_space.intersection(footprint))
            else:
                if isinstance(footprint, Polygon):
                    footprint = MultiPolygon(footprint)

            logger.info("Calculated Footprint: '%s'" % footprint.wkt)

            # use the provided footprint
            #geom = OGRGeometry(footprint_wkt)
            #exterior = []
            #for x, y in geom.exterior_ring.tuple:
            #    exterior.append(y); exterior.append(x)

            #polygon = [exterior]
        num_bands = ds.RasterCount

        # finally close the dataset and write it to the disc
        ds = None

        return PreProcessResult(output_filename, footprint, num_bands)
コード例 #17
0
ファイル: preprocessor.py プロジェクト: baloola/ngeo-b
class NGEOPreProcessor(WMSPreProcessor):
    """WMS pre-processor with ngEO specific merging, footprint handling
    and optional internal-GCP geo-referencing.

    Fixes applied:
      * ``srid != '4326'`` compared an int to a str and was therefore
        always true, so the (pointless) reprojection of the image bbox
        ran even for EPSG:4326 data -- it now compares ints.
      * Python-2-only ``except (...), e`` syntax replaced with the
        version-agnostic ``as e`` form.
    """

    def __init__(self,
                 format_selection,
                 overviews=True,
                 crs=None,
                 bands=None,
                 bandmode=RGB,
                 footprint_alpha=False,
                 color_index=False,
                 palette_file=None,
                 no_data_value=None,
                 overview_resampling=None,
                 overview_levels=None,
                 overview_minsize=None,
                 radiometric_interval_min=None,
                 radiometric_interval_max=None,
                 sieve_max_threshold=None,
                 simplification_factor=None,
                 temporary_directory=None):
        """Store the pre-processing configuration; see the individual
        attribute names for their meaning."""
        self.format_selection = format_selection
        self.overviews = overviews
        self.overview_resampling = overview_resampling
        self.overview_levels = overview_levels
        self.overview_minsize = overview_minsize

        self.crs = crs

        self.bands = bands
        self.bandmode = bandmode
        self.footprint_alpha = footprint_alpha
        self.color_index = color_index
        self.palette_file = palette_file
        self.no_data_value = no_data_value
        self.radiometric_interval_min = radiometric_interval_min
        self.radiometric_interval_max = radiometric_interval_max

        if sieve_max_threshold is not None:
            self.sieve_max_threshold = sieve_max_threshold
        else:
            self.sieve_max_threshold = 0

        if simplification_factor is not None:
            self.simplification_factor = simplification_factor
        else:
            # default 2 * resolution == 2 pixels
            self.simplification_factor = 2

        self.temporary_directory = temporary_directory

    def process(self,
                input_filename,
                output_filename,
                geo_reference=None,
                generate_metadata=True,
                merge_with=None,
                original_footprint=None):
        """Pre-process *input_filename* into *output_filename*, optionally
        merging the result into the existing image *merge_with* (which
        requires *original_footprint*).

        Returns a ``PreProcessResult(output_filename, footprint,
        num_bands)``; ``footprint`` is ``None`` unless
        *generate_metadata* is set.

        Raises ``ValueError`` for unreferenced datasets without GCPs or
        when *merge_with* is given without *original_footprint*.
        """
        # open the dataset and create an In-Memory Dataset as copy
        # to perform optimizations
        ds = create_mem_copy(gdal.Open(input_filename))

        gt = ds.GetGeoTransform()
        footprint_wkt = None

        if not geo_reference:
            # identity transform means the file carries no geotransform
            if gt == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
                if ds.GetGCPCount() > 0:
                    geo_reference = InternalGCPs()
                else:
                    raise ValueError("No geospatial reference for "
                                     "unreferenced dataset given.")

        if geo_reference:
            logger.debug("Applying geo reference '%s'." %
                         type(geo_reference).__name__)
            # footprint is always in EPSG:4326
            ds, footprint_wkt = geo_reference.apply(ds)

        # apply optimizations
        for optimization in self.get_optimizations(ds):
            logger.debug("Applying optimization '%s'." %
                         type(optimization).__name__)

            try:
                new_ds = optimization(ds)

                if new_ds is not ds:
                    # cleanup afterwards
                    cleanup_temp(ds)
                    ds = new_ds
            except:
                # drop the intermediate dataset, propagate original error
                cleanup_temp(ds)
                raise

        # generate the footprint from the dataset
        if not footprint_wkt:
            logger.debug("Generating footprint.")
            footprint_wkt = self._generate_footprint_wkt(ds)
        # check that footprint is inside of extent of generated image
        # regenerate otherwise
        else:
            tmp_extent = getExtentFromRectifiedDS(ds)
            tmp_bbox = Polygon.from_bbox(
                (tmp_extent[0], tmp_extent[1], tmp_extent[2], tmp_extent[3]))
            # transform image bbox to EPSG:4326 if necessary
            proj = ds.GetProjection()
            srs = osr.SpatialReference()
            try:
                srs.ImportFromWkt(proj)
                srs.AutoIdentifyEPSG()
                ptype = "PROJCS" if srs.IsProjected() else "GEOGCS"
                srid = int(srs.GetAuthorityCode(ptype))
                # compare ints: previously `srid != '4326'` was always
                # true, forcing an unnecessary transform for 4326 data
                if srid != 4326:
                    out_srs = osr.SpatialReference()
                    out_srs.ImportFromEPSG(4326)
                    transform = osr.CoordinateTransformation(srs, out_srs)
                    tmp_bbox2 = ogr.CreateGeometryFromWkt(tmp_bbox.wkt)
                    tmp_bbox2.Transform(transform)
                    tmp_bbox = GEOSGeometry(tmp_bbox2.ExportToWkt())
            except (RuntimeError, TypeError) as e:
                logger.warn("Projection: %s" % proj)
                logger.warn("Failed to identify projection's EPSG code."
                            "%s: %s" % (type(e).__name__, str(e)))

            tmp_footprint = GEOSGeometry(footprint_wkt)
            if not tmp_bbox.contains(tmp_footprint):
                logger.debug("Re-generating footprint because not inside of "
                             "generated image.")
                footprint_wkt = tmp_footprint.intersection(tmp_bbox).wkt

        if self.footprint_alpha:
            logger.debug("Applying optimization 'AlphaBandOptimization'.")
            opt = AlphaBandOptimization()
            opt(ds, footprint_wkt)

        output_filename = self.generate_filename(output_filename)

        if merge_with is not None:
            if original_footprint is None:
                raise ValueError(
                    "Original footprint with to be merged image required.")

            original_ds = gdal.Open(merge_with, gdal.GA_Update)
            merger = GDALDatasetMerger([
                GDALGeometryMaskMergeSource(
                    original_ds,
                    original_footprint,
                    temporary_directory=self.temporary_directory),
                GDALGeometryMaskMergeSource(
                    ds,
                    footprint_wkt,
                    temporary_directory=self.temporary_directory)
            ])

            final_ds = merger.merge(output_filename,
                                    self.format_selection.driver_name,
                                    self.format_selection.creation_options)

            # cleanup previous file
            driver = original_ds.GetDriver()
            original_ds = None
            driver.Delete(merge_with)

            cleanup_temp(ds)

        else:
            logger.debug("Writing single file '%s' using options: %s." %
                         (output_filename, ", ".join(
                             self.format_selection.creation_options)))
            logger.debug("Metadata tags to be written: %s" %
                         ", ".join(ds.GetMetadata_List("") or []))

            # save the file to the disc
            driver = gdal.GetDriverByName(self.format_selection.driver_name)
            final_ds = driver.CreateCopy(
                output_filename,
                ds,
                options=self.format_selection.creation_options)

            # cleanup
            cleanup_temp(ds)

        for optimization in self.get_post_optimizations(final_ds):
            logger.debug("Applying post-optimization '%s'." %
                         type(optimization).__name__)
            optimization(final_ds)

        # generate metadata if requested
        footprint = None
        if generate_metadata:
            # split the footprint at the dateline: geometry east of 180
            # degrees is shifted back into the normalized [-180, 180] space
            normalized_space = Polygon.from_bbox((-180, -90, 180, 90))
            non_normalized_space = Polygon.from_bbox((180, -90, 360, 90))

            footprint = GEOSGeometry(footprint_wkt)

            outer = non_normalized_space.intersection(footprint)

            if len(outer):
                footprint = MultiPolygon(*map(
                    lambda p: Polygon(*map(
                        lambda ls: LinearRing(*map(
                            lambda point: (point[0] - 360, point[1]), ls.coords
                        )), tuple(p))), (outer, ))).union(
                            normalized_space.intersection(footprint))
            else:
                if isinstance(footprint, Polygon):
                    footprint = MultiPolygon(footprint)

            if original_footprint:
                logger.debug("Merging footprint.")
                footprint = footprint.union(GEOSGeometry(original_footprint))

            logger.debug("Calculated Footprint: '%s'" % footprint.wkt)

        num_bands = final_ds.RasterCount

        # finally close the dataset and write it to the disc
        final_ds = None

        return PreProcessResult(output_filename, footprint, num_bands)
コード例 #18
0
ファイル: preprocessor.py プロジェクト: baloola/ngeo-b
    def process(self,
                input_filename,
                output_filename,
                geo_reference=None,
                generate_metadata=True,
                merge_with=None,
                original_footprint=None):
        """Pre-process ``input_filename``: apply a geo-reference if needed,
        run all configured optimizations and determine the footprint
        (always expressed in EPSG:4326) of the resulting dataset.

        :param input_filename: path of the raster file to pre-process
        :param output_filename: path of the file to be produced
        :param geo_reference: optional geo-referencing helper object with
            an ``apply(ds)`` method; auto-detected from GCPs when the
            dataset carries no geotransform
        :param generate_metadata: whether metadata shall be generated
            (used by the continuation of this method)
        :param merge_with: optional dataset to merge with
        :param original_footprint: optional WKT footprint to merge in
        :raises ValueError: if the dataset is unreferenced and no
            ``geo_reference`` was given
        """
        # open the dataset and create an In-Memory Dataset as copy
        # to perform optimizations
        ds = create_mem_copy(gdal.Open(input_filename))

        gt = ds.GetGeoTransform()
        footprint_wkt = None

        if not geo_reference:
            # the identity geotransform signals an unreferenced dataset;
            # fall back to embedded GCPs if available
            if gt == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
                if ds.GetGCPCount() > 0:
                    geo_reference = InternalGCPs()
                else:
                    raise ValueError("No geospatial reference for "
                                     "unreferenced dataset given.")

        if geo_reference:
            logger.debug("Applying geo reference '%s'." %
                         type(geo_reference).__name__)
            # footprint is always in EPSG:4326
            ds, footprint_wkt = geo_reference.apply(ds)

        # apply optimizations; each optimization may return a new dataset,
        # in which case the previous one is cleaned up
        for optimization in self.get_optimizations(ds):
            logger.debug("Applying optimization '%s'." %
                         type(optimization).__name__)

            try:
                new_ds = optimization(ds)

                if new_ds is not ds:
                    # cleanup afterwards
                    cleanup_temp(ds)
                    ds = new_ds
            except:
                # ensure temporary files are removed, then re-raise
                cleanup_temp(ds)
                raise

        # generate the footprint from the dataset
        if not footprint_wkt:
            logger.debug("Generating footprint.")
            footprint_wkt = self._generate_footprint_wkt(ds)
        # check that footprint is inside of extent of generated image
        # regenerate otherwise
        else:
            tmp_extent = getExtentFromRectifiedDS(ds)
            tmp_bbox = Polygon.from_bbox(
                (tmp_extent[0], tmp_extent[1], tmp_extent[2], tmp_extent[3]))
            # transform image bbox to EPSG:4326 if necessary
            proj = ds.GetProjection()
            srs = osr.SpatialReference()
            try:
                srs.ImportFromWkt(proj)
                srs.AutoIdentifyEPSG()
                ptype = "PROJCS" if srs.IsProjected() else "GEOGCS"
                srid = int(srs.GetAuthorityCode(ptype))
                # BUGFIX: srid is an int; the previous comparison against
                # the *string* '4326' was always true, needlessly running
                # the coordinate transformation for EPSG:4326 data.
                if srid != 4326:
                    out_srs = osr.SpatialReference()
                    out_srs.ImportFromEPSG(4326)
                    transform = osr.CoordinateTransformation(srs, out_srs)
                    tmp_bbox2 = ogr.CreateGeometryFromWkt(tmp_bbox.wkt)
                    tmp_bbox2.Transform(transform)
                    tmp_bbox = GEOSGeometry(tmp_bbox2.ExportToWkt())
            except (RuntimeError, TypeError) as e:
                # the projection could not be identified; keep the bbox in
                # its native CRS and only log a warning
                logger.warn("Projection: %s" % proj)
                logger.warn("Failed to identify projection's EPSG code."
                            "%s: %s" % (type(e).__name__, str(e)))

            tmp_footprint = GEOSGeometry(footprint_wkt)
            if not tmp_bbox.contains(tmp_footprint):
                logger.debug("Re-generating footprint because not inside of "
                             "generated image.")
                footprint_wkt = tmp_footprint.intersection(tmp_bbox).wkt
コード例 #19
0
    def handle_with_cache(self, cache, *args, **kwargs):
        """Register a dataset (coverage) from the data/metadata files given
        in ``kwargs``, resolving locations through ``cache``.

        Expected ``kwargs``: ``data`` (list of data file locations),
        ``metadata`` (list of metadata file locations),
        ``range_type_name``, and optionally ``semantics`` plus various
        flags (``replace``, ``visible``, ``collection_ids``,
        ``ignore_missing_collection``).

        :raises CommandError: if no range type name or no data files are
            given, or if mandatory metadata keys remain missing
        """
        metadata_component = MetadataComponent(env)
        datas = kwargs["data"]
        semantics = kwargs.get("semantics")
        metadatas = kwargs["metadata"]
        range_type_name = kwargs["range_type_name"]

        if range_type_name is None:
            raise CommandError("No range type name specified.")
        range_type = models.RangeType.objects.get(name=range_type_name)

        # metadata keys that MUST be collected before the coverage model
        # can be created
        metadata_keys = set((
            "identifier", "extent", "size", "projection",
            "footprint", "begin_time", "end_time", "coverage_type",
        ))

        all_data_items = []
        retrieved_metadata = {}

        # explicit command-line overrides win over any parsed metadata
        # (values collected later only fill in via setdefault)
        retrieved_metadata.update(
            self._get_overrides(**kwargs)
        )

        for metadata in metadatas:
            storage, package, format, location = self._get_location_chain(
                metadata
            )
            data_item = backends.DataItem(
                location=location, format=format or "", semantic="metadata",
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            # sniff the file content to select a suitable metadata reader
            with open(connect(data_item, cache)) as f:
                content = f.read()
                reader = metadata_component.get_reader_by_test(content)
                if reader:
                    values = reader.read(content)

                    format = values.pop("format", None)
                    if format:
                        data_item.format = format
                        data_item.full_clean()
                        data_item.save()

                    for key, value in values.items():
                        if key in metadata_keys:
                            retrieved_metadata.setdefault(key, value)

        if len(datas) < 1:
            raise CommandError("No data files specified.")

        if semantics is None:
            # TODO: check corner cases.
            # e.g: only one data item given but multiple bands in range type
            # --> bands[1:<bandnum>]
            if len(datas) == 1:
                if len(range_type) == 1:
                    semantics = ["bands[1]"]
                else:
                    semantics = ["bands[1:%d]" % len(range_type)]

            else:
                semantics = ["bands[%d]" % i for i in range(len(datas))]

        for data, semantic in zip(datas, semantics):
            storage, package, format, location = self._get_location_chain(data)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic=semantic,
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            # try GDAL first; on failure fall back to the raw file content
            # so non-raster readers can still inspect it
            try:
                ds = gdal.Open(connect(data_item, cache))
            except:
                with open(connect(data_item, cache)) as f:
                    ds = f.read()

            reader = metadata_component.get_reader_by_test(ds)
            if reader:
                values = reader.read(ds)

                format = values.pop("format", None)
                if format:
                    data_item.format = format
                    data_item.full_clean()
                    data_item.save()

                for key, value in values.items():
                    retrieved_metadata.setdefault(key, value)
            ds = None  # release the GDAL dataset handle

        if len(metadata_keys - set(retrieved_metadata.keys())):
            raise CommandError(
                "Missing metadata keys %s."
                % ", ".join(metadata_keys - set(retrieved_metadata.keys()))
            )

        # replace any already registered dataset
        if kwargs["replace"]:
            try:
                # get a list of all collections the coverage was in.
                coverage = models.Coverage.objects.get(
                    identifier=retrieved_metadata["identifier"]
                )
                additional_ids = [
                    c.identifier
                    for c in models.Collection.objects.filter(
                        eo_objects__in=[coverage.pk]
                    )
                ]
                coverage.delete()

                self.print_msg(
                    "Replacing previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

                # make sure the replacement is linked back into all
                # collections the old coverage belonged to
                collection_ids = kwargs["collection_ids"] or []
                for identifier in additional_ids:
                    if identifier not in collection_ids:
                        collection_ids.append(identifier)
                kwargs["collection_ids"] = collection_ids
            except models.Coverage.DoesNotExist:
                self.print_msg(
                    "Could not replace previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

        try:
            coverage_type = retrieved_metadata["coverage_type"]
            # TODO: allow types of different apps

            # a dotted path means the model class lives in another module
            if len(coverage_type.split(".")) > 1:
                module_name, _, coverage_type = coverage_type.rpartition(".")
                module = import_module(module_name)
                CoverageType = getattr(module, coverage_type)
            else:
                CoverageType = getattr(models, coverage_type)
        except AttributeError:
            raise CommandError(
                "Type '%s' is not supported."
                % retrieved_metadata["coverage_type"]
            )

        # NOTE(review): the `except` clause matching this `try` is not
        # visible in this chunk of the file — the function continues below.
        try:
            coverage = CoverageType()
            coverage.range_type = range_type

            proj = retrieved_metadata.pop("projection")
            if isinstance(proj, int):
                retrieved_metadata["srid"] = proj
            else:
                definition, format = proj

                # Try to identify the SRID from the given input
                try:
                    sr = osr.SpatialReference(definition, format)
                    retrieved_metadata["srid"] = sr.srid
                # NOTE(review): legacy Python 2 `except X, e` syntax; `e`
                # is never used. Falls back to a stored Projection model.
                except Exception, e:
                    prj = models.Projection.objects.get(
                        format=format, definition=definition
                    )
                    retrieved_metadata["projection"] = prj

            # TODO: bug in models for some coverages
            for key, value in retrieved_metadata.items():
                setattr(coverage, key, value)

            coverage.visible = kwargs["visible"]

            coverage.full_clean()
            coverage.save()

            for data_item in all_data_items:
                data_item.dataset = coverage
                data_item.full_clean()
                data_item.save()

            # link with collection(s)
            if kwargs["collection_ids"]:
                ignore_missing_collection = kwargs["ignore_missing_collection"]
                call_command("eoxs_collection_link",
                    collection_ids=kwargs["collection_ids"],
                    add_ids=[coverage.identifier],
                    ignore_missing_collection=ignore_missing_collection
                )
コード例 #20
0
    def register(self,
                 metadata_locations,
                 mask_locations,
                 package_path,
                 overrides,
                 identifier_template=None,
                 type_name=None,
                 extended_metadata=True,
                 discover_masks=True,
                 discover_browses=True,
                 discover_metadata=True,
                 replace=False,
                 simplify_footprint_tolerance=None):
        """Register a Product from the given metadata/mask locations and an
        optional data package, optionally discovering browses, masks and
        metadata contained in the package.

        :param metadata_locations: sequence of location chains; the last
            element is the location, the preceding ones resolve the storage
        :param mask_locations: sequence of ``(mask_type, ...)`` handles;
            the second element may be a geometry or part of a storage chain
        :param package_path: URL/path of the package to register, if any
        :param overrides: dict of metadata values taking precedence over
            anything read from the metadata files
        :param identifier_template: optional format template applied to the
            collected metadata to derive the product identifier
        :param type_name: name of the ``ProductType`` to associate
        :param replace: delete a pre-existing product of the same
            identifier before registration
        :param simplify_footprint_tolerance: if given, simplify the
            footprint geometry with this tolerance
        :returns: tuple ``(product, replaced)``
        :raises RegistrationError: if the package storage is unsupported
        """
        product_type = None
        if type_name:
            product_type = models.ProductType.objects.get(name=type_name)

        component = ProductMetadataComponent()

        browse_handles = []
        # copy defensively: this list is extended below and we must not
        # mutate the caller's argument
        mask_locations = list(mask_locations) if mask_locations else []
        metadata = {}

        package = None
        if package_path:
            handler = get_handler_by_test(package_path)
            if not handler:
                raise RegistrationError('Storage %r is not supported' %
                                        package_path)

            package, _ = backends.Storage.objects.get_or_create(
                url=package_path, storage_type=handler.name)

            # discover browses/masks/metadata contained in the package
            if discover_masks or discover_browses or discover_metadata:
                collected_metadata = component.collect_package_metadata(
                    package, handler)
                if discover_metadata:
                    metadata.update(collected_metadata)
                if discover_browses:
                    browse_handles.extend([
                        (browse_type, package_path, browse_path)
                        for browse_type, browse_path in metadata.pop(
                            'browses', [])
                    ])
                if discover_masks:
                    # masks referenced as files within the package ...
                    mask_locations.extend([(mask_type, package_path, mask_path)
                                           for mask_type, mask_path in
                                           metadata.pop('mask_files', [])])

                    # ... and masks given directly as geometries
                    mask_locations.extend([
                        (mask_type, geometry)
                        for mask_type, geometry in metadata.pop('masks', [])
                    ])

        metadata_items = [
            models.MetaDataItem(location=location[-1],
                                storage=resolve_storage(location[:-1]))
            for location in metadata_locations
        ]

        # read in reverse so that earlier locations take precedence
        new_metadata = {}
        for metadata_item in reversed(metadata_items):
            new_metadata.update(
                self._read_product_metadata(component, metadata_item))

        mask_locations.extend(new_metadata.pop('masks', []))

        metadata.update(new_metadata)
        metadata.update(
            dict((key, value) for key, value in overrides.items()
                 if value is not None))

        # apply overrides
        identifier = metadata.get('identifier')
        footprint = metadata.get('footprint')
        begin_time = metadata.get('begin_time')
        end_time = metadata.get('end_time')

        if identifier_template:
            # NOTE(review): the metadata dict is passed positionally, so the
            # template must reference it as e.g. "{0[identifier]}" — confirm
            # against callers
            identifier = identifier_template.format(metadata)
            metadata['identifier'] = identifier

        if simplify_footprint_tolerance is not None and footprint:
            footprint = footprint.simplify(simplify_footprint_tolerance,
                                           preserve_topology=True)

        replaced = False
        if replace:
            try:
                models.Product.objects.get(identifier=identifier).delete()
                replaced = True
            except models.Product.DoesNotExist:
                pass

        product = models.Product.objects.create(
            identifier=identifier,
            footprint=footprint,
            begin_time=begin_time,
            end_time=end_time,
            product_type=product_type,
            package=package,
        )

        if extended_metadata and metadata:
            # NOTE(review): metadata still holds the unsimplified footprint
            # at this point — confirm whether create_metadata uses it
            create_metadata(product, metadata)

        # register all masks
        for mask_handle in mask_locations:
            geometry = None
            storage = None
            location = ''
            if isinstance(mask_handle[1], GEOSGeometry):
                # mask given directly as a geometry
                geometry = GEOSGeometry(mask_handle[1])
            else:
                # mask given as a storage chain + location
                storage = resolve_storage(mask_handle[1:-1])
                location = mask_handle[-1]

            # a missing MaskType is a configuration error; let DoesNotExist
            # propagate to the caller (the previous try/except only re-raised)
            mask_type = models.MaskType.objects.get(
                name=mask_handle[0], product_type=product_type)

            models.Mask.objects.create(product=product,
                                       mask_type=mask_type,
                                       storage=storage,
                                       location=location,
                                       geometry=geometry)

        # register all browses
        for browse_handle in browse_handles:
            browse_type = None
            if browse_handle[0]:
                # TODO: only browse types for that product type
                browse_type = models.BrowseType.objects.get(
                    name=browse_handle[0], product_type=product_type)

            browse = models.Browse(product=product,
                                   location=browse_handle[-1],
                                   storage=resolve_storage(
                                       browse_handle[1:-1]))

            # Get a VSI handle for the browse to get the size, extent and CRS
            # via GDAL
            vsi_path = get_vsi_path(browse)
            ds = gdal.Open(vsi_path)
            browse.width = ds.RasterXSize
            browse.height = ds.RasterYSize
            browse.coordinate_reference_system = ds.GetProjection()
            extent = gdal.get_extent(ds)
            browse.min_x, browse.min_y, browse.max_x, browse.max_y = extent
            # release the dataset handle; previously it was left open
            ds = None

            browse.full_clean()
            browse.save()

        for metadata_item in metadata_items:
            metadata_item.eo_object = product
            metadata_item.full_clean()
            metadata_item.save()

        return product, replaced