# Example 1
    def __init__(self,
                 layer_dict,
                 formula,
                 zoom=None,
                 geom=None,
                 acres=True,
                 grouping='auto'):
        """
        Configure the aggregator and precompute its tile index range.

        Args:
            layer_dict (dict): Mapping of formula variable names to
                RasterLayer ids (the values are looked up via ``id__in``).
            formula (str): Algebra expression to evaluate over the layers.
            zoom (int, optional): Tile zoom level. Defaults to the smallest
                ``metadata__max_zoom`` among the input layers.
            geom (optional): Geometry to clip the aggregation to. Transformed
                in place to web mercator if it has a different SRID.
            acres (bool): Whether pixel counts should later be scaled to
                acres (used downstream, e.g. in value_count).
            grouping: One of 'auto', 'discrete', 'continuous', an integer-like
                Legend id, or any other value passed through unchanged
                (presumably legend JSON; interpreted downstream).

        Raises:
            RasterAggregationException: If grouping parses as an integer but
                no Legend with that id exists.
        """
        # Set defining parameter for this aggregator
        self.layer_dict = layer_dict
        self.formula = formula
        self.geom = geom
        self.acres = acres
        # Raster geometry reference; populated elsewhere (used for acre
        # scaling in value_count) — starts unset.
        self.rastgeom = None

        # Get layers from input dict
        self.layers = RasterLayer.objects.filter(id__in=layer_dict.values())

        # Compute zoom if not provided
        if zoom is None:
            zoom = min(self.layers.values_list('metadata__max_zoom',
                                               flat=True))
        self.zoom = zoom

        # Compute tilerange for this area and the given zoom level
        if geom:
            # Transform geom to web mercator
            if geom.srid != WEB_MERCATOR_SRID:
                geom.transform(WEB_MERCATOR_SRID)

            # Clip against max extent for limiting nr of tiles.
            # This is important for requests on large areas for small rasters.
            max_extent = MultiPolygon([
                Polygon.from_bbox(lyr.extent()) for lyr in self.layers
            ]).envelope
            max_extent = geom.intersection(max_extent)

            # Abort if there is no spatial overlay
            if max_extent.empty:
                self.tilerange = None
                return
            else:
                # Compute tile index range for geometry and given zoom level
                self.tilerange = tile_index_range(max_extent.extent, zoom)
        else:
            # Get index range set for the input layers
            index_ranges = [
                tile_index_range(lyr.extent(), zoom) for lyr in self.layers
            ]

            # Compute intersection of index ranges: max of the lower bounds,
            # min of the upper bounds (ranges presumably ordered as
            # (xmin, ymin, xmax, ymax) — TODO confirm tile_index_range layout).
            self.tilerange = [
                max([dat[0] for dat in index_ranges]),
                max([dat[1] for dat in index_ranges]),
                min([dat[2] for dat in index_ranges]),
                min([dat[3] for dat in index_ranges])
            ]

        # Auto determine grouping based on input data
        if grouping == 'auto':
            all_discrete = all(
                [lyr.datatype in ['ca', 'ma'] for lyr in self.layers])
            grouping = 'discrete' if all_discrete else 'continuous'
        elif grouping in ('discrete', 'continuous'):
            pass
        else:
            # Interpret anything else as a Legend id if it is integer-like.
            try:
                legend_id = int(grouping)
                grouping = Legend.objects.get(id=legend_id)
            except ValueError:
                # Not an integer: keep the raw value (interpreted downstream
                # as legend JSON).
                pass
            except ObjectDoesNotExist:
                raise RasterAggregationException(
                    'Invalid legend ID found in grouping value for valuecount.'
                )
        self.grouping = grouping
# Example 2
    def value_count(self):
        """
        Compute aggregate statistics for a layers dictionary, potentially for
        an algebra expression and clipped by a geometry.

        The grouping parameter specifies how to group the pixel values for the
        aggregation count.

        Allowed are the following options:

        * 'auto' (the default). The output will be grouped by unique values if all
          input rasters are discrete, otherwise a numpy histogram is used.
        * 'discrete' groups the data by unique values
        * 'continuous' groups the data in a numpy histogram
        * If an integer value is passed to the argument, it is interpreted as a
          legend_id. The data will be grouped using the legend expressions. For
          instance, use grouping=23 for grouping the output with legend 23.

        Returns:
            dict: Maps stringified pixel value (or histogram bin tuple, or
            legend key) to the aggregated count, scaled to acres when
            requested and a raster geometry is available.

        Raises:
            RasterAggregationException: If the grouping value does not yield
                a usable legend colormap.
        """
        results = Counter({})

        for result_data in self.tiles():
            # compressed() copies the unmasked pixels, so compute it once per
            # tile instead of once per use (the legend branch previously
            # recomputed it for every colormap entry).
            pixels = result_data.compressed()

            if self.grouping == 'discrete':
                # Compute unique counts for discrete input data.
                unique_values, unique_counts = numpy.unique(
                    pixels, return_counts=True)
                values = dict(zip(unique_values, unique_counts))

            elif self.grouping == 'continuous':
                # Group continuous data through a numpy histogram, keyed by
                # the (lower, upper) bin edges.
                counts, bins = numpy.histogram(pixels)
                values = {
                    (bins[i], bins[i + 1]): counts[i]
                    for i in range(len(bins) - 1)
                }

            else:
                # If input is not a legend, interpret input as legend json data.
                if not isinstance(self.grouping, Legend):
                    self.grouping = Legend(json=self.grouping)

                # Try getting a colormap from the input. Catch Exception
                # rather than using a bare except so SystemExit and
                # KeyboardInterrupt still propagate.
                try:
                    colormap = self.grouping.colormap
                except Exception:
                    raise RasterAggregationException(
                        'Invalid grouping value found for valuecount.')

                # Use colormap to compute value counts.
                formula_parser = FormulaParser()
                values = {}
                for key, color in colormap.items():
                    try:
                        # Try to use the key as number directly.
                        selector = pixels == float(key)
                    except ValueError:
                        # Otherwise use it as numpy expression directly.
                        selector = formula_parser.evaluate({'x': pixels}, key)
                    values[key] = numpy.sum(selector)

            # Accumulate this tile's counts into the overall result.
            results += Counter(values)

        # Transform pixel count to acres if requested.
        scaling_factor = 1
        if self.acres and self.rastgeom and len(results):
            # 1 square meter = 0.000247105381 acres; scale holds pixel size
            # in web mercator meters.
            scaling_factor = abs(
                self.rastgeom.scale.x * self.rastgeom.scale.y) * 0.000247105381

        # Stringify keys, collapsing whole-number numpy floats to ints
        # (e.g. 2.0 -> '2') so output keys are stable across dtypes.
        results = {
            str(int(k) if isinstance(k, numpy.float64) and int(k) == k else k):
            v * scaling_factor
            for k, v in results.items()
        }

        return results