Example #1
    def get_source_dataset(self, coverage, data_items, range_type):
        if len(data_items) == 1:
            return gdal.OpenShared(abspath(connect(data_items[0])))
        else:
            vrt = VRTBuilder(coverage.size_x,
                             coverage.size_y,
                             vrt_filename=temp_vsimem_filename())

            # sort in ascending order according to semantic
            data_items = sorted(data_items, key=(lambda d: d.semantic))

            compound_index = 0
            for data_item in data_items:
                path = abspath(connect(data_item))

                # iterate over all bands of the data item
                for set_index, item_index in self._data_item_band_indices(
                        data_item):
                    if set_index != compound_index + 1:
                        raise ValueError("Band indices are not contiguous.")
                    compound_index = set_index

                    band = range_type[set_index]
                    vrt.add_band(band.data_type)
                    vrt.add_simple_source(set_index, path, item_index)

            return vrt.dataset
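A minimal usage sketch, assuming a coverage whose data items match its range type; the `reader` instance and attribute access are illustrative, not part of the original:

    # Hypothetical caller: compose all data items into one GDAL dataset and
    # read it as an array (bands x rows x cols for multi-band data).
    dataset = reader.get_source_dataset(
        coverage, coverage.data_items.all(), coverage.range_type
    )
    array = dataset.ReadAsArray()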
Example #3
    def connect(self, coverage, data_items, layer, options):
        mask_item = data_items[0]

        try:
            is_reversed = (
                layer.metadata.get("eoxs_geometry_reversed") == "true"
            )
        except ms.MapServerError:
            is_reversed = False

        # check if the geometry is "reversed"
        if is_reversed:
            # TODO: better use the coverages Extent?
            geom_types = (ogr.wkbPolygon, ogr.wkbMultiPolygon)
            output_polygon = ogr.Geometry(wkt=str(coverage.footprint.wkt))

            for mask_item in data_items:
                ds = ogr.Open(connect(mask_item))
                for i in range(ds.GetLayerCount()):
                    ogr_layer = ds.GetLayer(i)
                    if not ogr_layer:
                        continue

                    feature = ogr_layer.GetNextFeature()
                    while feature:
                        # TODO: reproject if necessary
                        geometry = feature.GetGeometryRef()
                        # check the geometry before dereferencing it and
                        # advance the cursor on every pass; a bare `continue`
                        # without fetching the next feature loops forever
                        if geometry and \
                                geometry.GetGeometryType() in geom_types:
                            output_polygon = output_polygon.Difference(geometry)
                        feature = ogr_layer.GetNextFeature()

            # since we have the geometry already in memory, add it to the layer
            # as WKT
            shape = ms.shapeObj.fromWKT(output_polygon.ExportToWkt())
            shape.initValues(1)
            shape.setValue(0, coverage.identifier)
            layer.addFeature(shape)

        else:
            layer.connectiontype = ms.MS_OGR
            layer.connection = connect(data_items[0])
            # TODO: more than one mask_item?

        layer.setProjection("EPSG:4326")
        layer.setMetaData("ows_srs", "EPSG:4326")
        layer.setMetaData("wms_srs", "EPSG:4326")
Example #5
    def connect(self, coverage, data_items, layer, options):
        filtered = filter(lambda d: d.semantic.startswith("bands"), data_items)
        data = connect(filtered[0])

        if isinstance(coverage, models.ReferenceableDataset):
            vrt_path = join("/vsimem", uuid4().hex)
            reftools.create_rectified_vrt(data, vrt_path)
            data = vrt_path
            layer.setMetaData("eoxs_ref_data", data)

        if not layer.metadata.get("eoxs_wrap_dateline") == "true":
            layer.data = data
        else:
            e = wrap_extent_around_dateline(coverage.extent, coverage.srid)

            vrt_path = join("/vsimem", uuid4().hex)
            ds = gdal.Open(data)
            vrt_ds = create_simple_vrt(ds, vrt_path)
            size_x = ds.RasterXSize
            size_y = ds.RasterYSize

            dx = abs(e[0] - e[2]) / size_x
            dy = abs(e[1] - e[3]) / size_y

            vrt_ds.SetGeoTransform([e[0], dx, 0, e[3], 0, -dy])
            vrt_ds = None

            layer.data = vrt_path
Example #6
    def connect(self, coverage, data_items, layer):
        filtered = filter(lambda d: d.semantic.startswith("bands"), data_items)
        data = connect(filtered[0])

        if isinstance(coverage, models.ReferenceableDataset):
            vrt_path = join("/vsimem", uuid4().hex)
            reftools.create_rectified_vrt(data, vrt_path)
            data = vrt_path
            layer.setMetaData("eoxs_ref_data", data)

        if not layer.metadata.get("eoxs_wrap_dateline") == "true":
            layer.data = data
        else:
            e = wrap_extent_around_dateline(coverage.extent, coverage.srid)

            vrt_path = join("/vsimem", uuid4().hex)
            ds = gdal.Open(data)
            vrt_ds = create_simple_vrt(ds, vrt_path)
            size_x = ds.RasterXSize
            size_y = ds.RasterYSize

            dx = abs(e[0] - e[2]) / size_x
            dy = abs(e[1] - e[3]) / size_y

            vrt_ds.SetGeoTransform([e[0], dx, 0, e[3], 0, -dy])
            vrt_ds = None

            layer.data = vrt_path
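Both variants rebuild the six-element GDAL geotransform from the wrapped extent. A self-contained sketch of the rule for a north-up image, with `e = (minx, miny, maxx, maxy)`:

    def north_up_geotransform(e, size_x, size_y):
        # pixel (col, row) -> georeferenced (x, y), no rotation terms:
        #   x = gt[0] + col * gt[1]
        #   y = gt[3] + row * gt[5]
        dx = (e[2] - e[0]) / size_x
        dy = (e[3] - e[1]) / size_y
        # origin is the top-left corner; gt[5] is negative so that row
        # indices grow southwards
        return [e[0], dx, 0, e[3], 0, -dy]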
Example #7
def coverage(request, identifier):
    cov = Coverage.objects.get(identifier=identifier)

    filename = connect(cov.data_items.filter(semantic__startswith="band")[0])

    size = getsize(filename)

    if size > (100 * 1024 * 1024):
        tmpfile = NamedTemporaryFile(suffix='.tif')

        cmd_args = [
            'gdal_translate', '-outsize', '7%', '7%', filename, tmpfile.name
        ]
        check_output(cmd_args)  # run for its side effect: writing tmpfile

        #returnfile = FileWrapper(tmpfile)
        returnfile = tmpfile
        size = getsize(tmpfile.name)

    else:
        returnfile = open(filename, 'rb')

    response = HttpResponse(returnfile)
    response['Content-Type'] = 'image/tiff'
    response['Content-Length'] = size
    response['Content-Disposition'] = 'attachment; filename=%s' % basename(
        filename)
    response['Cache-Control'] = 'max-age=%d' % MAX_AGE

    return response
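The commented-out FileWrapper line hints at a streaming alternative. A hedged sketch for the old Django in use here (the import path is an assumption; later Django versions use wsgiref.util.FileWrapper instead):

    from django.core.servers.basehttp import FileWrapper  # Django <= 1.8

    # wrap the temporary file so it is read in chunks rather than loaded
    # into memory at once; Content-Length must still be set explicitly
    returnfile = FileWrapper(tmpfile)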
Example #9
        def apply(self, coverage, data_items, layer):
            sld_items = filter(lambda d: (
                d.semantic.startswith("style") and d.format.upper() == "SLD"
            ), data_items)

            for sld_item in sld_items:
                sld_filename = connect(sld_item)
                with open(sld_filename) as f:
                    layer.map.applySLD(f.read())
Example #11
    def _read_metadata_from_data(self, data_item, retrieved_metadata, cache):
        metadata_component = MetadataComponent(env)

        ds = gdal.Open(connect(data_item, cache))
        reader = metadata_component.get_reader_by_test(ds)
        if reader:
            values = reader.read(ds)

            format = values.pop("format", None)
            if format:
                data_item.format = format
                data_item.full_clean()
                data_item.save()

            for key, value in values.items():
                retrieved_metadata.setdefault(key, value)
        ds = None
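The setdefault call implements the precedence rule used throughout these examples: metadata read from the data never overwrites values that were already retrieved. The pattern in isolation (values are illustrative):

    meta = {"size": (512, 512)}           # already retrieved, higher priority
    meta.setdefault("size", (256, 256))   # ignored: the key exists
    meta.setdefault("format", "GTiff")    # added: the key was missing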
Example #12
    def _read_data(self, coverage, subset, rangesubset):
        range_type = coverage.range_type

        # Open file
        filename = connect(coverage.data_items.all()[0])

        root = etree.parse(filename).getroot()
        output_data = OrderedDict()

        # Read data

        band = range_type[0]
        if not rangesubset or band in rangesubset:
            data = map(float, root.xpath("data/value/text()"))
            data = data[int(subset.low):int(subset.high)]
            output_data[band.identifier] = data

        return output_data
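The XPath above implies a document shaped roughly like the following; a minimal sketch of the read, where the element names come from the expression and the values are illustrative:

    from lxml import etree

    root = etree.fromstring(
        "<coverage><data>"
        "<value>1.0</value><value>2.5</value><value>4.0</value>"
        "</data></coverage>"
    )
    values = map(float, root.xpath("data/value/text()"))  # [1.0, 2.5, 4.0]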
Example #14
    def handle_with_cache(self, cache, *args, **kwargs):
        metadata_component = MetadataComponent(env)
        datas = kwargs["data"]
        semantics = kwargs.get("semantics")
        metadatas = kwargs["metadata"]
        range_type_name = kwargs["range_type_name"]

        if range_type_name is None:
            raise CommandError("No range type name specified.")
        range_type = models.RangeType.objects.get(name=range_type_name)

        metadata_keys = set((
            "identifier", "extent", "size", "projection",
            "footprint", "begin_time", "end_time", "coverage_type",
        ))

        all_data_items = []
        retrieved_metadata = {}

        retrieved_metadata.update(
            self._get_overrides(**kwargs)
        )

        for metadata in metadatas:
            storage, package, format, location = self._get_location_chain(
                metadata
            )
            data_item = backends.DataItem(
                location=location, format=format or "", semantic="metadata",
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            with open(connect(data_item, cache)) as f:
                content = f.read()
                reader = metadata_component.get_reader_by_test(content)
                if reader:
                    values = reader.read(content)

                    format = values.pop("format", None)
                    if format:
                        data_item.format = format
                        data_item.full_clean()
                        data_item.save()

                    for key, value in values.items():
                        if key in metadata_keys:
                            retrieved_metadata.setdefault(key, value)

        if len(datas) < 1:
            raise CommandError("No data files specified.")

        if semantics is None:
            # TODO: check corner cases.
            # e.g: only one data item given but multiple bands in range type
            # --> bands[1:<bandnum>]
            if len(datas) == 1:
                if len(range_type) == 1:
                    semantics = ["bands[1]"]
                else:
                    semantics = ["bands[1:%d]" % len(range_type)]

            else:
                semantics = ["bands[%d]" % i for i in range(len(datas))]

        for data, semantic in zip(datas, semantics):
            storage, package, format, location = self._get_location_chain(data)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic=semantic,
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            # TODO: other opening methods than GDAL
            ds = gdal.Open(connect(data_item, cache))
            reader = metadata_component.get_reader_by_test(ds)
            if reader:
                values = reader.read(ds)

                format = values.pop("format", None)
                if format:
                    data_item.format = format
                    data_item.full_clean()
                    data_item.save()

                for key, value in values.items():
                    if key in metadata_keys:
                        retrieved_metadata.setdefault(key, value)
            ds = None

        if len(metadata_keys - set(retrieved_metadata.keys())):
            raise CommandError(
                "Missing metadata keys %s."
                % ", ".join(metadata_keys - set(retrieved_metadata.keys()))
            )

        # replace any already registered dataset
        if kwargs["replace"]:
            try:
                # get a list of all collections the coverage was in.
                coverage = models.Coverage.objects.get(
                    identifier=retrieved_metadata["identifier"]
                )
                additional_ids = [
                    c.identifier
                    for c in models.Collection.objects.filter(
                        eo_objects__in=[coverage.pk]
                    )
                ]
                coverage.delete()

                self.print_msg(
                    "Replacing previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

                collection_ids = kwargs["collection_ids"] or []
                for identifier in additional_ids:
                    if identifier not in collection_ids:
                        collection_ids.append(identifier)
                kwargs["collection_ids"] = collection_ids
            except models.Coverage.DoesNotExist:
                self.print_msg(
                    "Could not replace previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

        try:
            # TODO: allow types of different apps
            CoverageType = getattr(models, retrieved_metadata["coverage_type"])
        except AttributeError:
            raise CommandError(
                "Type '%s' is not supported."
                % retrieved_metadata["coverage_type"]
            )

        try:
            coverage = CoverageType()
            coverage.range_type = range_type

            proj = retrieved_metadata.pop("projection")
            if isinstance(proj, int):
                retrieved_metadata["srid"] = proj
            else:
                definition, format = proj

                # Try to identify the SRID from the given input
                try:
                    sr = osr.SpatialReference(definition, format)
                    retrieved_metadata["srid"] = sr.srid
                except Exception, e:
                    prj = models.Projection.objects.get(
                        format=format, definition=definition
                    )
                    retrieved_metadata["projection"] = prj

            # TODO: bug in models for some coverages
            for key, value in retrieved_metadata.items():
                setattr(coverage, key, value)

            coverage.visible = kwargs["visible"]

            coverage.full_clean()
            coverage.save()

            for data_item in all_data_items:
                data_item.dataset = coverage
                data_item.full_clean()
                data_item.save()

            # link with collection(s)
            if kwargs["collection_ids"]:
                ignore_missing_collection = kwargs["ignore_missing_collection"]
                call_command("eoxs_collection_link",
                    collection_ids=kwargs["collection_ids"],
                    add_ids=[coverage.identifier],
                    ignore_missing_collection=ignore_missing_collection
                )
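The default-semantics rule above, restated in isolation as a sketch that mirrors the branches of the handler (including the zero-based indices it generates for multiple files):

    def default_semantics(num_files, num_bands):
        # one file: it carries either the single band or the whole band range
        if num_files == 1:
            if num_bands == 1:
                return ["bands[1]"]
            return ["bands[1:%d]" % num_bands]
        # several files: one band each, indexed as in the handler
        return ["bands[%d]" % i for i in range(num_files)]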
Example #15
    def execute(self, collections, begin_time, end_time, coord_list, srid):
        """ The main execution function for the process.
        """
        eo_ids = collections.split(',')

        containment = "overlaps"

        subsets = Subsets((Trim("t", begin_time, end_time),))

        if len(eo_ids) == 0:
            raise Exception("No collection identifiers specified.")

        # fetch a list of all requested EOObjects
        available_ids = models.EOObject.objects.filter(
            identifier__in=eo_ids
        ).values_list("identifier", flat=True)

        # match the requested EOIDs against the available ones. If any are
        # requested, that are not available, raise and exit.
        failed = [ eo_id for eo_id in eo_ids if eo_id not in available_ids ]
        if failed:
            raise NoSuchDatasetSeriesOrCoverageException(failed)

        collections_qs = subsets.filter(models.Collection.objects.filter(
            identifier__in=eo_ids
        ), containment="overlaps")

        # create a set of all indirectly referenced containers by iterating
        # recursively. The containment is set to "overlaps", to also include 
        # collections that might have been excluded with "contains" but would 
        # have matching coverages inserted.

        def recursive_lookup(super_collection, collection_set):
            sub_collections = models.Collection.objects.filter(
                collections__in=[super_collection.pk]
            ).exclude(
                pk__in=map(lambda c: c.pk, collection_set)
            )
            sub_collections = subsets.filter(sub_collections, "overlaps")

            # Add all to the set
            collection_set |= set(sub_collections)

            for sub_collection in sub_collections:
                recursive_lookup(sub_collection, collection_set)

        collection_set = set(collections_qs)
        for collection in set(collection_set):
            recursive_lookup(collection, collection_set)

        collection_pks = map(lambda c: c.pk, collection_set)

        # Get all either directly referenced coverages or coverages that are
        # within referenced containers. Full subsetting is applied here.

        coverages_qs = subsets.filter(models.Coverage.objects.filter(
            Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
        ), containment=containment)


        coordinates = coord_list.split(';')

        points = []
        for coordinate in coordinates:
            x,y = coordinate.split(',')
            # parameter parsing
            point = Point(float(x), float(y))
            point.srid = srid
            points.append(point)

        points = MultiPoint(points)
        points.srid = srid


        eo_objects = coverages_qs.filter(
            footprint__intersects=points
        ).order_by('begin_time')

        output = StringIO()
        writer = csv.writer(output, quoting=csv.QUOTE_NONE)
        header = ["id", "time", "val"]
        writer.writerow(header)

        for eo_object in eo_objects:

            coverage = eo_object.cast()

            #layer = models.DatasetSeries.objects.get(identifier__in=coverage.identifier)
            layer = eo_object.collections.all()[0]

            time = isoformat(coverage.begin_time)

            data_item = coverage.data_items.get(semantic__startswith="bands")
            filename = connect(data_item)
            ds = gdal.Open(filename)

            if ds.GetProjection():
                gt = ds.GetGeoTransform()
                sr = SpatialReference(ds.GetProjection())
                points_t = points.transform(sr, clone=True)
            else:
                bbox = coverage.footprint.extent
                gt = [
                    bbox[0], (bbox[2] - bbox[0])/ds.RasterXSize, 0,
                    bbox[3], 0, (bbox[1] - bbox[3])/ds.RasterYSize
                ]

            for index, point in enumerate(points, start=1):
                if not coverage.footprint.contains(point):
                    continue

                #point.transform(sr)

                # Works only if gt[2] and gt[4] equal zero!
                px = int((point[0] - gt[0]) / gt[1])  # x pixel
                py = int((point[1] - gt[3]) / gt[5])  # y pixel

                pixelVal = ds.GetRasterBand(1).ReadAsArray(px, py, 1, 1)[0, 0]
                if pixelVal != -9999:
                    writer.writerow([str(layer.identifier), time, pixelVal])

        return {
            "processed": output.getvalue()
        }
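The pixel-index computation above, as a standalone helper; as the inline comment notes, it is only valid for north-up geotransforms where gt[2] == gt[4] == 0:

    def point_to_pixel(x, y, gt):
        """Map georeferenced coordinates to integer pixel indices."""
        px = int((x - gt[0]) / gt[1])
        py = int((y - gt[3]) / gt[5])
        return px, py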
Example #16
    def execute(self, collection, begin_time, end_time, coord_list, srid):
        """ The main execution function for the process.
        """
        col_name = collection
        collection = models.Collection.objects.get(identifier=collection)

        eo_objects = collection.eo_objects.filter(
            begin_time__lte=end_time, end_time__gte=begin_time
        )

        coordinates = coord_list.split(';')

        points = []
        for coordinate in coordinates:
            x,y = coordinate.split(',')
            # parameter parsing
            point = Point(float(x), float(y))
            point.srid = srid
            points.append(point)

        points = MultiPoint(points)
        points.srid = srid


        eo_objects = eo_objects.filter(
            footprint__intersects=points
        )

        output = StringIO()
        writer = csv.writer(output, quoting=csv.QUOTE_ALL)
        #header = ["id", "begin", "end"] + ["point%d" % i for i in range(len(points))]
        header = ["id", "Green", "Red", "NIR", "MIR" ]
        writer.writerow(header)

        for eo_object in eo_objects:
            coverage = eo_object.cast()

            #values = [coverage.identifier, isoformat(coverage.begin_time), isoformat(coverage.end_time)] + [None] * len(points)
            values = [collection] + [None] * 4

            data_item = coverage.data_items.get(semantic__startswith="bands")
            filename = connect(data_item)
            ds = gdal.Open(filename)
            sr = SpatialReference(ds.GetProjection())
            #points_t = points.transform(sr, clone=True)

            for index, point in enumerate(points, start=1):

                if not coverage.footprint.contains(point):
                    continue

                gt = ds.GetGeoTransform()

                point.transform(sr)

                # Works only if gt[2] and gt[4] equal zero!
                px = int((point[0] - gt[0]) / gt[1])  # x pixel
                py = int((point[1] - gt[3]) / gt[5])  # y pixel

                #array = ds.ReadRaster(px, py, 1, 1)
                #structval = ds.ReadRaster(px,py,1,1,buf_type=gdal.GDT_Int16) #TODO: Check Range Type to adapt buf_type!
                pixelVal = ds.ReadAsArray(px,py,1,1)[:,0,0]

                #pixel_value = array[0][0]
                #print structval
                #pixel_value = struct.unpack('IIII' , structval) #use the 'short' format code (2 bytes) not int (4 bytes)
                
                #values[index] = pixel_value[0]
                #writer.writerow([ col_name+"_p"+str(index), pixelVal[0], pixelVal[1], pixelVal[2], pixelVal[3] ])
                writer.writerow([ "P_"+str(index), pixelVal[0], pixelVal[1], pixelVal[2], pixelVal[3] ])

        return {
            "processed": output.getvalue()
        }
Example #17
def create_diff_label(self, master_id, slave_id, bbox, num_bands, crs, unit):
    """ The main execution function for the process.
    """

    #srid = crss.parseEPSGCode(str(crs), (crss.fromShortCode, crss.fromURN, crss.fromURL))

    master = models.RectifiedDataset.objects.get(identifier=master_id)
    slave = models.RectifiedDataset.objects.get(identifier=slave_id)

    filename_master = connect(master.data_items.get(semantic__startswith="bands"))
    filename_slave = connect(slave.data_items.get(semantic__startswith="bands"))

    ds_master = gdal.Open(filename_master, gdalconst.GA_ReadOnly)
    ds_slave = gdal.Open(filename_slave, gdalconst.GA_ReadOnly)

    master_bbox = master.footprint.extent
    slave_bbox = slave.footprint.extent

    res_x_master = (master_bbox[2] - master_bbox[0]) / ds_master.RasterXSize
    res_y_master = (master_bbox[3] - master_bbox[1]) / ds_master.RasterYSize

    res_x_slave = (slave_bbox[2] - slave_bbox[0]) / ds_slave.RasterXSize
    res_y_slave = (slave_bbox[3] - slave_bbox[1]) / ds_slave.RasterYSize

    size_x = int((bbox[2]-bbox[0])/res_x_master)
    size_y = int((bbox[3]-bbox[1])/res_y_master)

    builder = VRTBuilder(size_x, size_y, (num_bands*2), master.range_type.bands.all()[0].data_type)

    dst_rect_master = (
        int( math.floor((master_bbox[0] - bbox[0]) / res_x_master) ), # x offset
        int( math.floor((bbox[3] - master_bbox[3]) / res_y_master) ), # y offset
        ds_master.RasterXSize, # x size
        ds_master.RasterYSize  # y size
    )

    dst_rect_slave = (
        int( math.floor((slave_bbox[0] - bbox[0]) / res_x_slave) ), # x offset
        int( math.floor((bbox[3] - slave_bbox[3]) / res_y_slave) ), # y offset
        ds_slave.RasterXSize, # x size
        ds_slave.RasterYSize  # y size
    )

    for i in range(1, num_bands+1):
        builder.add_simple_source(
            i, str(filename_master), i,
            src_rect=(0, 0, ds_master.RasterXSize, ds_master.RasterYSize),
            dst_rect=dst_rect_master
        )
        builder.add_simple_source(
            num_bands + i, str(filename_slave), i,
            src_rect=(0, 0, ds_slave.RasterXSize, ds_slave.RasterYSize),
            dst_rect=dst_rect_slave
        )

    ext = Rect(0, 0, size_x, size_y)

    pix_master = builder.dataset.GetRasterBand(1).ReadAsArray()
    pix_slave = builder.dataset.GetRasterBand(num_bands +1).ReadAsArray()

    if num_bands == 1:
        pix_master = np.dstack((pix_master, builder.dataset.GetRasterBand(1).ReadAsArray()))
        pix_slave = np.dstack((pix_slave, builder.dataset.GetRasterBand(2).ReadAsArray()))
    else:
        for i in range(2, num_bands+1):
            pix_master = np.dstack((pix_master, builder.dataset.GetRasterBand(i).ReadAsArray()))
            pix_slave = np.dstack((pix_slave, builder.dataset.GetRasterBand(num_bands+i).ReadAsArray()))


    def _diff(a,b):
        d = np.array(a[:,:,0],'float32') - np.array(b[:,:,0],'float32')
        return d

    pix_res = _diff(pix_master, pix_slave)

    res_max = np.max(pix_res)
    res_min = np.min(pix_res)

    # Make a figure and axes with dimensions as desired.
    fig = pyplot.figure(figsize=(8,1))
    fig.patch.set_alpha(0.8)
    ax1 = fig.add_axes([0.05, 0.75, 0.9, 0.15])

    def savefig_pix(fig,fname,width,height,dpi=100):
        rdpi = 1.0/float(dpi)  
        fig.set_size_inches(width*rdpi,height*rdpi)
        fig.savefig(fname,dpi=dpi)

    # Set the colormap and norm to correspond to the data for which
    # the colorbar will be used.
    cmap = mpl.cm.RdBu
    #norm = mpl.colors.Normalize(vmin=res_min, vmax=res_max)
    res_ = max(abs(res_max), abs(res_min))
    norm = mpl.colors.Normalize(vmin=-res_, vmax=res_)

    cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
                                    orientation='horizontal')

    mis = master_id.split("_")
    master_id_label = " ".join( (mis[0], mis[1], mis[2], isoformat(master.begin_time)) )

    sis = slave_id.split("_")
    slave_id_label = " ".join( (sis[0], sis[1], sis[2], isoformat(slave.begin_time)) )


    if unit:
        label = "Difference from %s \n to %s; Unit: %s"%(slave_id_label, master_id_label, unit)
    else:
        label = "Difference from %s \n to %s"%(slave_id_label, master_id_label)

    cb1.set_label(label)


    # the output image
    basename = "%s_%s"%( self.identifier,uuid4().hex )
    filename_png = "/tmp/%s.png" %( basename )

    try:
        fig.savefig(filename_png, dpi=80)

        with open(filename_png, 'rb') as f:
            output = f.read()

    except Exception:

        if os.path.isfile(filename_png):
            os.remove(filename_png)

        raise

    else:
        os.remove(filename_png)

    return output
Example #18
def diff_process(self, master_id, slave_id, bbox, num_bands, crs):
    """ The main execution function for the process.
    """

    #srid = crss.parseEPSGCode(str(crs), (crss.fromShortCode, crss.fromURN, crss.fromURL))

    master = models.RectifiedDataset.objects.get(identifier=master_id)
    slave = models.RectifiedDataset.objects.get(identifier=slave_id)

    filename_master = connect(master.data_items.get(semantic__startswith="bands"))
    filename_slave = connect(slave.data_items.get(semantic__startswith="bands"))

    ds_master = gdal.Open(filename_master, gdalconst.GA_ReadOnly)
    ds_slave = gdal.Open(filename_slave, gdalconst.GA_ReadOnly)

    master_bbox = master.footprint.extent
    slave_bbox = slave.footprint.extent

    res_x_master = (master_bbox[2] - master_bbox[0]) / ds_master.RasterXSize
    res_y_master = (master_bbox[3] - master_bbox[1]) / ds_master.RasterYSize

    res_x_slave = (slave_bbox[2] - slave_bbox[0]) / ds_slave.RasterXSize
    res_y_slave = (slave_bbox[3] - slave_bbox[1]) / ds_slave.RasterYSize

    size_x = int((bbox[2]-bbox[0])/res_x_master)
    size_y = int((bbox[3]-bbox[1])/res_y_master)

    builder = VRTBuilder(size_x, size_y, (num_bands*2), master.range_type.bands.all()[0].data_type)

    dst_rect_master = (
        int( math.floor((master_bbox[0] - bbox[0]) / res_x_master) ), # x offset
        int( math.floor((bbox[3] - master_bbox[3]) / res_y_master) ), # y offset
        ds_master.RasterXSize, # x size
        ds_master.RasterYSize  # y size
    )

    dst_rect_slave = (
        int( math.floor((slave_bbox[0] - bbox[0]) / res_x_slave) ), # x offset
        int( math.floor((bbox[3] - slave_bbox[3]) / res_y_slave) ), # y offset
        ds_slave.RasterXSize, # x size
        ds_slave.RasterYSize  # y size
    )

    for i in range(1, num_bands+1):
        builder.add_simple_source(
            i, str(filename_master), i,
            src_rect=(0, 0, ds_master.RasterXSize, ds_master.RasterYSize),
            dst_rect=dst_rect_master
        )
        builder.add_simple_source(
            num_bands + i, str(filename_slave), i,
            src_rect=(0, 0, ds_slave.RasterXSize, ds_slave.RasterYSize),
            dst_rect=dst_rect_slave
        )

    ext = Rect(0, 0, size_x, size_y)

    pix_master = builder.dataset.GetRasterBand(1).ReadAsArray()
    pix_slave = builder.dataset.GetRasterBand(num_bands +1).ReadAsArray()

    if num_bands == 1:
        pix_master = np.dstack((pix_master, builder.dataset.GetRasterBand(1).ReadAsArray()))
        pix_slave = np.dstack((pix_slave, builder.dataset.GetRasterBand(2).ReadAsArray()))
    else:
        for i in range(2, num_bands+1):
            pix_master = np.dstack((pix_master, builder.dataset.GetRasterBand(i).ReadAsArray()))
            pix_slave = np.dstack((pix_slave, builder.dataset.GetRasterBand(num_bands+i).ReadAsArray()))


    def _diff(a, b):
        d = np.array(a[:, :, 0], 'float32') - np.array(b[:, :, 0], 'float32')
        return d

    pix_res = _diff(pix_master, pix_slave)

    res_max = np.max(pix_res)
    res_min = np.min(pix_res)

    res_ = max(abs(res_max), abs(res_min))

    # the output image
    basename = "%s_%s"%( self.identifier,uuid4().hex )
    filename_png = "/tmp/%s.png" %( basename )

    try:
        #fig = pyplot.imshow(pix_res,interpolation='nearest')
        fig = pyplot.imshow(pix_res,vmin=-res_, vmax=res_, interpolation='nearest')
        fig.set_cmap('RdBu')
        fig.write_png(filename_png, True)

        with open(filename_png, 'rb') as f:
            output = f.read()

    except Exception:

        if os.path.isfile(filename_png):
            os.remove(filename_png)

        raise

    else:
        os.remove(filename_png)

    #return base64.b64encode(output)
    return output
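AxesImage.write_png was removed from newer matplotlib releases; a hedged equivalent of the rendering step using pyplot.imsave, keeping the same symmetric limits and colormap:

    from matplotlib import pyplot

    # render the difference array straight to a PNG file
    pyplot.imsave(
        filename_png, pix_res,
        vmin=-res_, vmax=res_, cmap='RdBu'
    )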
Example #19
    def connect(self, coverage, data_items, layer, options):
        layer.tileindex = os.path.abspath(connect(data_items[0]))
        layer.tileitem = "location"
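For context, a MapServer tile index is a vector datasource whose features each carry the path of one tile in an attribute. A hedged mapscript sketch of the same connection, with illustrative paths:

    import mapscript as ms

    layer = ms.layerObj()
    layer.tileindex = "/data/tileindex.shp"  # one feature per tile
    layer.tileitem = "location"              # attribute holding each tile path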
Example #20
    def handle_with_cache(self, cache, *args, **kwargs):

        #----------------------------------------------------------------------
        # check the inputs 

        metadata_component = MetadataComponent(env)
        datas = kwargs["data"]
        semantics = kwargs.get("semantics")
        metadatas = kwargs["metadata"]
        range_type_name = kwargs["range_type_name"]

        if range_type_name is None:
            raise CommandError("No range type name specified.")
        range_type = models.RangeType.objects.get(name=range_type_name)

        # TODO: not required, as the keys are already
        metadata_keys = set((
            "identifier", "extent", "size", "projection", 
            "footprint", "begin_time", "end_time"
        ))

        all_data_items = []
        retrieved_metadata = {}

        retrieved_metadata.update(
            self._get_overrides(**kwargs)
        )

        #----------------------------------------------------------------------
        # parent dataset series 

        # extract the parents 
        ignore_missing_parent = bool(kwargs.get('ignore_missing_parent', False))
        parents = []
        for parent_id in kwargs.get('parents', []):
            try:
                ds = models.DatasetSeries.objects.get(identifier=parent_id)
                parents.append(ds)
            except models.DatasetSeries.DoesNotExist:
                msg = ("There is no Dataset Series matching the given"
                       " identifier: '%s'" % parent_id)
                if ignore_missing_parent:
                    self.print_wrn(msg)
                else:
                    raise CommandError(msg)

        #----------------------------------------------------------------------
        # meta-data

        for metadata in metadatas:
            storage, package, format, location = self._get_location_chain(metadata)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic="metadata", 
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            with open(connect(data_item, cache)) as f:
                content = f.read()
                reader = metadata_component.get_reader_by_test(content)
                if reader:
                    values = reader.read(content)

                    format = values.pop("format", None)
                    if format:
                        data_item.format = format
                        data_item.full_clean()
                        data_item.save()

                    for key, value in values.items():
                        if key in metadata_keys:
                            retrieved_metadata.setdefault(key, value)


        if len(datas) < 1:
            raise CommandError("No data files specified.")

        if semantics is None:
            # TODO: check corner cases.
            # e.g: only one data item given but multiple bands in range type
            # --> bands[1:<bandnum>]
            if len(datas) == 1:
                if len(range_type) == 1:
                    semantics = ["bands[1]"]
                else:
                    semantics = ["bands[1:%d]" % len(range_type)]
            
            else:
                semantics = ["bands[%d]" % i for i in range(len(datas))]


        for data, semantic in zip(datas, semantics):
            storage, package, format, location = self._get_location_chain(data)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic=semantic, 
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            # TODO: other opening methods than GDAL
            ds = gdal.Open(connect(data_item, cache))
            reader = metadata_component.get_reader_by_test(ds)
            if reader:
                values = reader.read(ds)

                format = values.pop("format", None)
                if format:
                    data_item.format = format
                    data_item.full_clean()
                    data_item.save()

                for key, value in values.items():
                    if key in metadata_keys:
                        retrieved_metadata.setdefault(key, value)
            ds = None

        if len(metadata_keys - set(retrieved_metadata.keys())):
            raise CommandError(
                "Missing metadata keys %s." 
                % ", ".join(metadata_keys - set(retrieved_metadata.keys()))
            )

        try:
            CoverageType = getattr(models, kwargs["coverage_type"])
        except AttributeError:
            # TODO: split into module path/coverage and get correct class
            raise CommandError(
                "Type '%s' is not supported." % kwargs["coverage_type"]
            )

        try:
            coverage = CoverageType()
            coverage.range_type = range_type
            
            proj = retrieved_metadata.pop("projection")
            if isinstance(proj, int):
                retrieved_metadata["srid"] = proj
            else:
                definition, format = proj

                # Try to identify the SRID from the given input
                try:
                    sr = osr.SpatialReference(definition, format)
                    retrieved_metadata["srid"] = sr.srid
                except Exception, e:
                    retrieved_metadata["projection"] = \
                        models.Projection.objects.get(
                            format=format, definition=definition
                        )

            #coverage.identifier = identifier # TODO: bug in models for some coverages
            for key, value in retrieved_metadata.items():
                setattr(coverage, key, value)

            coverage.full_clean()
            coverage.save()

            for data_item in all_data_items:
                data_item.dataset = coverage
                data_item.full_clean()
                data_item.save()


            #------------------------------------------------------------------
            # link to the parent dataset
            for parent in parents:
                self.print_msg("Linking: '%s' ---> '%s'" % (
                    coverage.identifier, parent.identifier))
                parent.insert(coverage)
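The linking step relies on DatasetSeries.insert to add the new coverage to each parent series; a minimal sketch with a hypothetical identifier:

    series = models.DatasetSeries.objects.get(identifier="MY_SERIES")
    series.insert(coverage)  # the coverage now appears in the series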
Example #22
    def handle_with_cache(self, cache, *args, **kwargs):
        metadata_component = MetadataComponent(env)
        datas = kwargs["data"]
        semantics = kwargs.get("semantics")
        metadatas = kwargs["metadata"]
        range_type_name = kwargs["range_type_name"]

        if range_type_name is None:
            raise CommandError("No range type name specified.")
        range_type = models.RangeType.objects.get(name=range_type_name)

        metadata_keys = set((
            "identifier", "extent", "size", "projection",
            "footprint", "begin_time", "end_time", "coverage_type",
        ))

        all_data_items = []
        retrieved_metadata = {}

        retrieved_metadata.update(
            self._get_overrides(**kwargs)
        )

        for metadata in metadatas:
            storage, package, format, location = self._get_location_chain(
                metadata
            )
            data_item = backends.DataItem(
                location=location, format=format or "", semantic="metadata",
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            with open(connect(data_item, cache)) as f:
                content = f.read()
                reader = metadata_component.get_reader_by_test(content)
                if reader:
                    values = reader.read(content)

                    format = values.pop("format", None)
                    if format:
                        data_item.format = format
                        data_item.full_clean()
                        data_item.save()

                    for key, value in values.items():
                        if key in metadata_keys:
                            retrieved_metadata.setdefault(key, value)

        if len(datas) < 1:
            raise CommandError("No data files specified.")

        if semantics is None:
            # TODO: check corner cases.
            # e.g: only one data item given but multiple bands in range type
            # --> bands[1:<bandnum>]
            if len(datas) == 1:
                if len(range_type) == 1:
                    semantics = ["bands[1]"]
                else:
                    semantics = ["bands[1:%d]" % len(range_type)]

            else:
                semantics = ["bands[%d]" % i for i in range(len(datas))]

        for data, semantic in zip(datas, semantics):
            storage, package, format, location = self._get_location_chain(data)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic=semantic,
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            try:
                # NOTE: gdal.Open only raises if gdal.UseExceptions() is
                # active; otherwise a failed open returns None
                ds = gdal.Open(connect(data_item, cache))
            except Exception:
                # not GDAL-readable: fall back to the raw text content
                with open(connect(data_item, cache)) as f:
                    ds = f.read()

            reader = metadata_component.get_reader_by_test(ds)
            if reader:
                values = reader.read(ds)

                format = values.pop("format", None)
                if format:
                    data_item.format = format
                    data_item.full_clean()
                    data_item.save()

                for key, value in values.items():
                    retrieved_metadata.setdefault(key, value)
            ds = None

        if len(metadata_keys - set(retrieved_metadata.keys())):
            raise CommandError(
                "Missing metadata keys %s."
                % ", ".join(metadata_keys - set(retrieved_metadata.keys()))
            )

        # replace any already registered dataset
        if kwargs["replace"]:
            try:
                # get a list of all collections the coverage was in.
                coverage = models.Coverage.objects.get(
                    identifier=retrieved_metadata["identifier"]
                )
                additional_ids = [
                    c.identifier
                    for c in models.Collection.objects.filter(
                        eo_objects__in=[coverage.pk]
                    )
                ]
                coverage.delete()

                self.print_msg(
                    "Replacing previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

                collection_ids = kwargs["collection_ids"] or []
                for identifier in additional_ids:
                    if identifier not in collection_ids:
                        collection_ids.append(identifier)
                kwargs["collection_ids"] = collection_ids
            except models.Coverage.DoesNotExist:
                self.print_msg(
                    "Could not replace previous dataset '%s'."
                    % retrieved_metadata["identifier"]
                )

        try:
            coverage_type = retrieved_metadata["coverage_type"]
            # TODO: allow types of different apps

            if len(coverage_type.split(".")) > 1:
                module_name, _, coverage_type = coverage_type.rpartition(".")
                module = import_module(module_name)
                CoverageType = getattr(module, coverage_type)
            else:
                CoverageType = getattr(models, coverage_type)
        except AttributeError:
            raise CommandError(
                "Type '%s' is not supported."
                % retrieved_metadata["coverage_type"]
            )

        try:
            coverage = CoverageType()
            coverage.range_type = range_type

            proj = retrieved_metadata.pop("projection")
            if isinstance(proj, int):
                retrieved_metadata["srid"] = proj
            else:
                definition, format = proj

                # Try to identify the SRID from the given input
                try:
                    sr = osr.SpatialReference(definition, format)
                    retrieved_metadata["srid"] = sr.srid
                except Exception, e:
                    prj = models.Projection.objects.get(
                        format=format, definition=definition
                    )
                    retrieved_metadata["projection"] = prj

            # TODO: bug in models for some coverages
            for key, value in retrieved_metadata.items():
                setattr(coverage, key, value)

            coverage.visible = kwargs["visible"]

            coverage.full_clean()
            coverage.save()

            for data_item in all_data_items:
                data_item.dataset = coverage
                data_item.full_clean()
                data_item.save()

            # link with collection(s)
            if kwargs["collection_ids"]:
                ignore_missing_collection = kwargs["ignore_missing_collection"]
                call_command("eoxs_collection_link",
                    collection_ids=kwargs["collection_ids"],
                    add_ids=[coverage.identifier],
                    ignore_missing_collection=ignore_missing_collection
                )
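The type-resolution rule of this variant, in isolation: dotted coverage types are imported from their module, bare names are looked up on the models module. A sketch:

    from importlib import import_module

    def resolve_coverage_type(name, default_module):
        if "." in name:
            module_name, _, name = name.rpartition(".")
            return getattr(import_module(module_name), name)
        return getattr(default_module, name)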
Example #23
    def handle_with_cache(self, cache, *args, **kwargs):
        metadata_component = MetadataComponent(env)
        datas = kwargs["data"]
        semantics = kwargs.get("semantics")
        metadatas = kwargs["metadata"]
        range_type_name = kwargs["range_type_name"]
        polygon_mask_cloud = kwargs["pm_cloud"]
        polygon_mask_snow = kwargs["pm_snow"]
        wms_view = kwargs["md_wms_view"]
        wms_alias = kwargs["md_wms_alias"]

        if range_type_name is None:
            raise CommandError("No range type name specified.")
        range_type = models.RangeType.objects.get(name=range_type_name)

        metadata_keys = set((
            "identifier", "extent", "size", "projection",
            "footprint", "begin_time", "end_time", "coverage_type",
        ))

        all_data_items = []
        retrieved_metadata = {}

        retrieved_metadata.update(
            self._get_overrides(**kwargs)
        )

        #----------------------------------------------------------------------
        # meta-data

        vector_masks_src = []

        for metadata in metadatas:
            storage, package, format, location = self._get_location_chain(
                metadata
            )
            data_item = backends.DataItem(
                location=location, format=format or "", semantic="metadata", 
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            with open(connect(data_item, cache)) as f:
                content = etree.parse(f)
                reader = metadata_component.get_reader_by_test(content)
                if reader:
                    values = reader.read(content)

                    format = values.pop("format", None)
                    if format:
                        data_item.format = format
                        data_item.full_clean()
                        data_item.save()

                    for key, value in values.items():
                        if key in metadata_keys:
                            retrieved_metadata.setdefault(key, value)

                    vector_masks_src = values.get("vmasks", [])

        #----------------------------------------------------------------------
        # polygon masks

        def _get_geometry(file_name):
            """ load geometry from a feature collection """
            # TODO: improve the feature selection
            ds = ogr.Open(file_name)
            ly = ds.GetLayer(0)
            ft = ly.GetFeature(0)
            g0 = ft.GetGeometryRef()
            g0.TransformTo(SR_WGS84) # geometries always stored in WGS84
            return geos.GEOSGeometry(buffer(g0.ExportToWkb()), srid=4326)

        if polygon_mask_cloud is not None:
            vector_masks_src.append({
                    'type': 'CLOUD',
                    'subtype': None,
                    'mask': _get_geometry(polygon_mask_cloud),
                })

        if polygon_mask_snow is not None:
            vector_masks_src.append({
                    'type': 'SNOW',
                    'subtype': None,
                    'mask': _get_geometry(polygon_mask_snow),
                })

        #----------------------------------------------------------------------
        # handle vector masks

        vector_masks = []
        VMASK_TYPE = dict((v, k) for (k, v) in models.VectorMask.TYPE_CHOICES)

        for vm_src in vector_masks_src:
            if vm_src["type"] not in VMASK_TYPE:
                raise CommandError("Invalid mask type '%s'! Allowed "
                    "mask-types are: %s", vm_src["type"],
                        "|".join(VMASK_TYPE.keys()))

            vm = models.VectorMask()
            vm.type = VMASK_TYPE[vm_src["type"]]
            vm.subtype = vm_src["subtype"]
            vm.geometry = vm_src["mask"]

            # TODO: improve the semantic handling
            if vm.subtype:
                vm.semantic = ("%s_%s" % (
                    vm_src["type"], vm_src["subtype"].replace(" ", "_")
                )).lower()
            else:
                vm.semantic = vm_src["type"].lower()

            vector_masks.append(vm)

        #----------------------------------------------------------------------
        # meta-data

        metadata_items = []

        # prerendered WMS view
        if wms_view is not None:
            metadata_items.append(
                models.MetadataItem(semantic="wms_view", value=wms_view)
            )

        # alias of the WMS view
        if wms_alias is not None:
            metadata_items.append(
                models.MetadataItem(semantic="wms_alias", value=wms_alias)
            )

        #----------------------------------------------------------------------
        # coverage 

        if len(datas) < 1:
            raise CommandError("No data files specified.")

        if semantics is None:
            # TODO: check corner cases.
            # e.g: only one data item given but multiple bands in range type
            # --> bands[1:<bandnum>]
            if len(datas) == 1:
                if len(range_type) == 1:
                    semantics = ["bands[1]"]
                else:
                    semantics = ["bands[1:%d]" % len(range_type)]
            
            else:
                semantics = ["bands[%d]" % i for i in range(len(datas))]


        for data, semantic in zip(datas, semantics):
            storage, package, format, location = self._get_location_chain(data)
            data_item = backends.DataItem(
                location=location, format=format or "", semantic=semantic, 
                storage=storage, package=package,
            )
            data_item.full_clean()
            data_item.save()
            all_data_items.append(data_item)

            # TODO: other opening methods than GDAL
            ds = gdal.Open(connect(data_item, cache))
            reader = metadata_component.get_reader_by_test(ds)
            if reader:
                values = reader.read(ds)

                format = values.pop("format", None)
                if format:
                    data_item.format = format
                    data_item.full_clean()
                    data_item.save()

                for key, value in values.items():
                    if key in metadata_keys:
                        retrieved_metadata.setdefault(key, value)
            ds = None

        if len(metadata_keys - set(retrieved_metadata.keys())):
            raise CommandError(
                "Missing metadata keys %s." 
                % ", ".join(metadata_keys - set(retrieved_metadata.keys()))
            )

        try:
            # TODO: allow types of different apps
            CoverageType = getattr(models, retrieved_metadata["coverage_type"])
        except AttributeError:
            raise CommandError(
                "Type '%s' is not supported."
                % retrieved_metadata["coverage_type"]
            )

        try:
            coverage = CoverageType()
            coverage.range_type = range_type
            
            proj = retrieved_metadata.pop("projection")
            if isinstance(proj, int):
                retrieved_metadata["srid"] = proj
            else:
                definition, format = proj

                # Try to identify the SRID from the given input
                try:
                    sr = osr.SpatialReference(definition, format)
                    retrieved_metadata["srid"] = sr.srid
                except Exception, e:
                    prj = models.Projection.objects.get(
                        format=format, definition=definition
                    )
                    retrieved_metadata["projection"] = prj

            # TODO: bug in models for some coverages
            for key, value in retrieved_metadata.items():
                setattr(coverage, key, value)

            coverage.visible = kwargs["visible"]

            coverage.full_clean()
            coverage.save()

            for data_item in all_data_items:
                data_item.dataset = coverage
                data_item.full_clean()
                data_item.save()

            for vm in vector_masks:
                vm.coverage = coverage 
                vm.full_clean()
                vm.save()

            for md in metadata_items:
                md.eo_object = coverage
                md.full_clean()
                md.save()

            # link with collection(s)
            if kwargs["collection_ids"]:
                ignore_missing_collection = kwargs["ignore_missing_collection"]
                call_command("eoxs_collection_link",
                    collection_ids=kwargs["collection_ids"], 
                    add_ids=[coverage.identifier],
                    ignore_missing_collection=ignore_missing_collection
                )
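The VMASK_TYPE lookup above inverts Django's choices tuples so that mask-type names map back to their stored keys; the pattern in isolation (the choice values here are hypothetical):

    TYPE_CHOICES = ((1, "CLOUD"), (2, "SNOW"))
    VMASK_TYPE = dict((v, k) for (k, v) in TYPE_CHOICES)
    # {"CLOUD": 1, "SNOW": 2}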