Example #1
    def test06_spatial_filter(self):
        "Testing the Layer.spatial_filter property."
        ds = DataSource(get_ds_file('cities', 'shp'))
        lyr = ds[0]

        # When not set, it should be None.
        self.assertEqual(None, lyr.spatial_filter)

        # Must be set to an OGRGeometry or a 4-tuple.
        self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo')

        # Setting the spatial filter with a tuple/list giving the extent of
        # a buffer centered around Pueblo.
        self.assertRaises(ValueError, lyr._set_spatial_filter, list(range(5)))
        filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
        lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
        self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Pueblo', feats[0].get('Name'))

        # Setting the spatial filter with an OGRGeometry for a buffer centered
        # around Houston.
        filter_geom = OGRGeometry('POLYGON((-96.363151 28.763374,-94.363151 28.763374,-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))')
        lyr.spatial_filter = filter_geom
        self.assertEqual(filter_geom, lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Houston', feats[0].get('Name'))

        # Clearing the spatial filter by setting it to None. The Layer
        # should now report all 3 features.
        lyr.spatial_filter = None
        self.assertEqual(3, len(lyr))
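
Outside of a test, the same property drives ordinary layer iteration; a minimal usage sketch, assuming a cities shapefile with a Name field like the fixture above:

from django.contrib.gis.gdal import DataSource

ds = DataSource('cities.shp')  # hypothetical path to a shapefile
lyr = ds[0]
# Only features intersecting this extent (xmin, ymin, xmax, ymax) are yielded.
lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
names = [feat.get('Name') for feat in lyr]  # ['Pueblo']
lyr.spatial_filter = None  # clear the filter to iterate over all features again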
Example #2
    def test_polygons(self):
        "Testing Polygon objects."

        # Testing `from_bbox` class method
        bbox = (-180, -90, 180, 90)
        p = OGRGeometry.from_bbox(bbox)
        self.assertEqual(bbox, p.extent)

        prev = OGRGeometry("POINT(0 0)")
        for p in self.geometries.polygons:
            poly = OGRGeometry(p.wkt)
            self.assertEqual(3, poly.geom_type)
            self.assertEqual("POLYGON", poly.geom_name)
            self.assertEqual(p.n_p, poly.point_count)
            self.assertEqual(p.n_i + 1, len(poly))

            # Testing area & centroid.
            self.assertAlmostEqual(p.area, poly.area, 9)
            x, y = poly.centroid.tuple
            self.assertAlmostEqual(p.centroid[0], x, 9)
            self.assertAlmostEqual(p.centroid[1], y, 9)

            # Testing equivalence
            self.assertEqual(poly, OGRGeometry(p.wkt))
            self.assertNotEqual(poly, prev)

            if p.ext_ring_cs:
                ring = poly[0]
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)
                self.assertEqual(len(p.ext_ring_cs), ring.point_count)

            for r in poly:
                self.assertEqual("LINEARRING", r.geom_name)
Example #3
    def test_covering_geom_rasterization(self):
        geom = OGRGeometry.from_bbox(self.rast.extent)
        geom.srid = 3086
        result = rasterize(geom, self.rast)

        self.assertEqual(result.bands[0].data().ravel().tolist(), [1, 1, 1, 1])
        self.assertEqual(result.geotransform, self.rast.geotransform)
        self.assertEqual(result.srs.wkt, self.rast.srs.wkt)
Example #4
    def test_rasterize_tiny_geom(self):
        geom = OGRGeometry.from_bbox((5e5, 4e5, 5e5 + 1e-3, 4e5 + 1e-3))
        geom.srid = 3086
        # Test the default configuration for small geometries (not all touched).
        result = rasterize(geom, self.rast)
        self.assertEqual(result.bands[0].data().ravel().tolist(), [0, 0, 0, 0])
        # Switch the all_touched option on.
        result = rasterize(geom, self.rast, all_touched=True)
        self.assertEqual(result.bands[0].data().ravel().tolist(), [1, 0, 0, 0])
Example #5
    def test_rasterize_merge_algorithm_add(self):
        geom = OGRGeometry.from_bbox((500000.0, 399800.0, 500200.0, 399900.0))
        geom.srid = 3086
        result = rasterize(geom, self.rast)
        self.assertEqual(result.bands[0].data().ravel().tolist(), [0, 0, 1, 1])
        # Rasterize again onto the previous result, adding the burn values.
        result = rasterize(geom, result, add=True)
        if GDAL_VERSION < (2, 1, 0):
            # The additive merge algorithm requires GDAL >= 2.1.
            self.assertEqual(result.bands[0].data().ravel().tolist(), [0, 0, 1, 1])
        else:
            self.assertEqual(result.bands[0].data().ravel().tolist(), [0, 0, 2, 2])
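
The three rasterization tests above each exercise one option of the rasterize helper under test; a hedged consolidation that uses only the keywords demonstrated in this suite (rasterize and rast stand in for the helper and the small test raster, self.rast, above):

geom = OGRGeometry.from_bbox((500000.0, 399800.0, 500200.0, 399900.0))
geom.srid = 3086
plain = rasterize(geom, rast)                      # default burn value of 1
touched = rasterize(geom, rast, all_touched=True)  # also burn barely-touched pixels
burned = rasterize(geom, rast, burn_value=99)      # custom burn value
summed = rasterize(geom, burned, add=True)         # add onto an existing result (GDAL >= 2.1)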
Example #6
    def test_tile_index_range(self):
        bounds = tile_bounds(43, 67, 8)
        geom = OGRGeometry.from_bbox(bounds)
        # With the default tolerance of 0, the tiles touching the edge are
        # included.
        idx = tile_index_range(geom.extent, 11)
        self.assertEqual(idx[2] - idx[0], 2 ** 3)
        self.assertEqual(idx[3] - idx[1], 2 ** 3)
        # With a larger tolerance, only the strictly overlapping tiles are
        # included.
        idx = tile_index_range(geom.extent, 11, tolerance=1e-3)
        self.assertEqual(idx[2] - idx[0], 2 ** 3 - 1)
        self.assertEqual(idx[3] - idx[1], 2 ** 3 - 1)
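
The exponents follow from the zoom difference: one tile at zoom 8 covers 2 ** (11 - 8) = 8 tiles per axis at zoom 11, so the inclusive index ranges above span 2 ** 3, and one less once the tolerance drops the tiles that merely touch the boundary.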
Example #7
    def list(self, request, aggregationlayer, x, y, z, frmt, *args, **kwargs):
        # Select which agglayer to use for this tile.
        lyr = get_object_or_404(AggregationLayer, pk=aggregationlayer)

        # Compute the tile boundary corner coordinates.
        bounds_coords = tile_bounds(int(x), int(y), int(z))

        # Create a geometry with a 1% buffer around the tile. This buffered
        # tile boundary will be used for clipping the geometry. The overflow
        # will visually dissolve the polygons on the frontend visualization.
        bounds = OGRGeometry.from_bbox(bounds_coords)
        bounds.srid = WEB_MERCATOR_SRID
        bounds = bounds.geos
        bounds_buffer = bounds.buffer((bounds_coords[2] - bounds_coords[0]) / 100)

        # Get the intersection of the aggregation areas and the tile boundary.
        # Use the buffer to clip the aggregation areas.
        result = AggregationArea.objects.filter(
            aggregationlayer=lyr,
            geom__intersects=bounds,
        ).annotate(
            intersection=Intersection('geom', bounds_buffer)
        ).only('id', 'name')

        # Render the intersection as a vector tile in one of the two available formats.
        if frmt == 'json':
            result = ['{{"geometry": {0}, "properties": {{"id": {1}, "name": "{2}"}}}}'.format(dat.intersection.geojson, dat.id, dat.name) for dat in result]
            result = ','.join(result)
            result = '{"type": "FeatureCollection","features":[' + result + ']}'
            return HttpResponse(result, content_type="application/json")
        elif frmt == 'pbf':
            features = [
                {
                    "geometry": bytes(dat.intersection.wkb),
                    "properties": {
                        "id": dat.id,
                        "name": dat.name,
                        "attributes": dat.attributes,
                    },
                } for dat in result
            ]
            data = [
                {
                    "name": lyr.name,
                    "features": features,
                },
            ]
            vtile = mapbox_vector_tile.encode(data, quantize_bounds=bounds_coords)
            return HttpResponse(vtile, content_type='application/x-protobuf')
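
The PBF branch above delegates encoding to the mapbox_vector_tile package; a minimal sketch of the encoder in isolation, with a made-up layer name and geometry (which geometry encodings are accepted, e.g. WKT, WKB, or shapely objects, depends on the package version; the view above passes WKB bytes):

import mapbox_vector_tile

tile = mapbox_vector_tile.encode(
    [{
        'name': 'demo',
        'features': [{
            'geometry': 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))',
            'properties': {'id': 1, 'name': 'example'},
        }],
    }],
    # Feature coordinates are quantized into the tile grid against these bounds.
    quantize_bounds=(0.0, 0.0, 1.0, 1.0),
)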
Example #8
def transfer_existing_orthophoto_extent_values(apps, schema_editor):
    Task = apps.get_model('app', 'Task')

    for t in Task.objects.all():
        print("Checking {}".format(t))
        orthophoto_path = assets_path(t.project.id, t.id, "odm_orthophoto", "odm_orthophoto_4326.tif")
        if os.path.exists(orthophoto_path):
            print("Migrating {}".format(orthophoto_path))

            raster = GDALRaster(orthophoto_path)
            geom = OGRGeometry.from_bbox(raster.extent)
            t.orthophoto_extent = GEOSGeometry(geom.wkt)
            t.save()

            os.remove(orthophoto_path)
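
The migration uses the raster-footprint pattern that recurs throughout these examples: read a raster's extent, turn it into an OGR polygon, and store it as a GEOS geometry. A minimal standalone sketch, with path as a placeholder for a real GeoTIFF:

from django.contrib.gis.gdal import GDALRaster, OGRGeometry
from django.contrib.gis.geos import GEOSGeometry

raster = GDALRaster(path)                      # `path` is hypothetical
extent = OGRGeometry.from_bbox(raster.extent)  # extent is (xmin, ymin, xmax, ymax)
footprint = GEOSGeometry(extent.wkt, srid=raster.srid)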
Example #9
def pixel_value_from_point(raster, point, band=0):
    """
    Returns the pixel value at the coordinate of the input point, read from
    the selected band.

    The input can be a point or a tuple; if it is a tuple, it is assumed to
    hold coordinates in the reference system of the raster.
    """
    if isinstance(point, (tuple, list)):
        point = OGRGeometry('POINT({0} {1})'.format(*point))
        point.srid = raster.srid
    elif not point.srs or not raster.srs:
        raise ValueError('Both the point and the raster are required to have a reference system specified.')
    elif point.srs != raster.srs:
        # Ensure the projection of the point is the same as of the raster.
        point.transform(raster.srid)

    # Return None if the point does not intersect the raster extent.
    bbox = OGRGeometry.from_bbox(raster.extent)
    bbox.srs = raster.srs

    if not point.intersects(bbox):
        return

    # Compute position of point relative to raster origin.
    offset = (abs(raster.origin.x - point.coords[0]), abs(raster.origin.y - point.coords[1]))

    # Compute pixel index value based on offset.
    offset_index = [int(offset[0] / abs(raster.scale.x)), int(offset[1] / abs(raster.scale.y))]

    # If the point is exactly on the boundary, the offset_index is rounded to
    # a pixel index over the edge of the pixel. The index needs to be reduced
    # by one pixel for those cases.
    if offset_index[0] == raster.width:
        offset_index[0] -= 1

    if offset_index[1] == raster.height:
        offset_index[1] -= 1

    return raster.bands[band].data(offset=offset_index, size=(1, 1))[0, 0]
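
A hedged usage sketch for the helper above, built on GDALRaster's in-memory dict constructor (NumPy is required, since Band.data() returns an array):

from django.contrib.gis.gdal import GDALRaster

rast = GDALRaster({
    'srid': 4326,
    'width': 2, 'height': 2,
    'origin': [0, 0], 'scale': [1, -1],
    'bands': [{'data': [1, 2, 3, 4]}],
})
# Tuple input is interpreted in the raster's reference system;
# (0.5, -1.5) falls in the second row, first column.
value = pixel_value_from_point(rast, (0.5, -1.5))  # -> 3.0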
Example #10
    def test06_spatial_filter(self):
        "Testing the Layer.spatial_filter property."
        ds = DataSource(get_ds_file('cities', 'shp'))
        lyr = ds[0]

        # When not set, it should be None.
        self.assertIsNone(lyr.spatial_filter)

        # Must be set to an OGRGeometry or a 4-tuple.
        with self.assertRaises(TypeError):
            lyr._set_spatial_filter('foo')

        # Setting the spatial filter with a tuple/list giving the extent of
        # a buffer centered around Pueblo.
        with self.assertRaises(ValueError):
            lyr._set_spatial_filter(list(range(5)))
        filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
        lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
        self.assertEqual(OGRGeometry.from_bbox(filter_extent),
                         lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Pueblo', feats[0].get('Name'))

        # Setting the spatial filter with an OGRGeometry for a buffer centered
        # around Houston.
        filter_geom = OGRGeometry(
            'POLYGON((-96.363151 28.763374,-94.363151 28.763374,'
            '-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))')
        lyr.spatial_filter = filter_geom
        self.assertEqual(filter_geom, lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Houston', feats[0].get('Name'))

        # Clearing the spatial filter by setting it to None. The Layer
        # should now report all 3 features.
        lyr.spatial_filter = None
        self.assertEqual(3, len(lyr))
Example #11
    def test_vector_tile_endpoint_json(self):
        # Get the url for a tile.
        self.url = reverse('vectortiles-list', kwargs={'aggregationlayer': self.agglayer.id, 'z': 11, 'x': 552, 'y': 859, 'frmt': 'json'})
        # Request the tile and check the response.
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        bounds = tile_bounds(552, 859, 11)
        bounds = OGRGeometry.from_bbox(bounds)
        bounds.srid = WEB_MERCATOR_SRID
        result = json.loads(response.content.decode())
        self.assertEqual(
            'St Petersburg',
            result['features'][0]['properties']['name'],
        )
        self.assertEqual(
            'Coverall',
            result['features'][1]['properties']['name'],
        )
        self.assertEqual(
            [-9220429.22801057, 3228174.60948196],
            result['features'][0]['geometry']['coordinates'][0][0][0],
        )
Example #12
    def test_polygons(self):
        "Testing Polygon objects."

        # Testing `from_bbox` class method
        bbox = (-180, -90, 180, 90)
        p = OGRGeometry.from_bbox(bbox)
        self.assertEqual(bbox, p.extent)

        prev = OGRGeometry('POINT(0 0)')
        for p in self.geometries.polygons:
            poly = OGRGeometry(p.wkt)
            self.assertEqual(3, poly.geom_type)
            self.assertEqual('POLYGON', poly.geom_name)
            self.assertEqual(p.n_p, poly.point_count)
            self.assertEqual(p.n_i + 1, len(poly))
            msg = 'Index out of range when accessing rings of a polygon: %s.'
            with self.assertRaisesMessage(IndexError, msg % len(poly)):
                poly.__getitem__(len(poly))

            # Testing area & centroid.
            self.assertAlmostEqual(p.area, poly.area, 9)
            x, y = poly.centroid.tuple
            self.assertAlmostEqual(p.centroid[0], x, 9)
            self.assertAlmostEqual(p.centroid[1], y, 9)

            # Testing equivalence
            self.assertEqual(poly, OGRGeometry(p.wkt))
            self.assertNotEqual(poly, prev)

            if p.ext_ring_cs:
                ring = poly[0]
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)
                self.assertEqual(len(p.ext_ring_cs), ring.point_count)

            for r in poly:
                self.assertEqual('LINEARRING', r.geom_name)
Example #14
    def bbox(self):
        return OGRGeometry(
            OGRGeometry.from_bbox(self.bounding_box.split(",")).json,
            srs=SpatialReference("WGS84"),
        )
Example #15
    def process(self):
        """
        This method contains the logic for processing tasks asynchronously
        from a background thread or from the scheduler. Here tasks that are
        ready to be processed execute some logic. This could be communication
        with a processing node or executing a pending action.
        """

        try:
            if self.auto_processing_node and self.status not in [status_codes.FAILED, status_codes.CANCELED]:
                # No processing node assigned and need to auto assign
                if self.processing_node is None:
                    # Assign first online node with lowest queue count
                    self.processing_node = ProcessingNode.find_best_available_node()
                    if self.processing_node:
                        self.processing_node.queue_count += 1 # Doesn't have to be accurate, it will get overridden later
                        self.processing_node.save()

                        logger.info("Automatically assigned processing node {} to {}".format(self.processing_node, self))
                        self.save()

                # Processing node assigned, but is offline and no errors
                if self.processing_node and not self.processing_node.is_online():
                    # Detach processing node, will be processed at the next tick
                    logger.info("Processing node {} went offline, reassigning {}...".format(self.processing_node, self))
                    self.uuid = ''
                    self.processing_node = None
                    self.save()

            if self.processing_node:
                # Need to process some images (UUID not yet set and task doesn't have pending actions)?
                if not self.uuid and self.pending_action is None and self.status is None:
                    logger.info("Processing... {}".format(self))

                    images = [image.path() for image in self.imageupload_set.all()]

                    # This takes a while
                    uuid = self.processing_node.process_new_task(images, self.name, self.options)

                    # Refresh task object before committing change
                    self.refresh_from_db()
                    self.uuid = uuid
                    self.save()

                    # TODO: log process has started processing

            if self.pending_action is not None:
                if self.pending_action == pending_actions.CANCEL:
                    # Do we need to cancel the task on the processing node?
                    logger.info("Canceling {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to cancel the task on the processing node
                        # We don't care if this fails (we tried)
                        try:
                            self.processing_node.cancel_task(self.uuid)
                            self.status = None
                        except ProcessingException:
                            logger.warning("Could not cancel {} on processing node. We'll proceed anyway...".format(self))
                            self.status = status_codes.CANCELED

                        self.pending_action = None
                        self.save()
                    else:
                        raise ProcessingError("Cannot cancel a task that has no processing node or UUID")

                elif self.pending_action == pending_actions.RESTART:
                    logger.info("Restarting {}".format(self))
                    if self.processing_node:

                        # Check if the UUID is still valid; since processing nodes purge
                        # results after a set amount of time, the UUID might have been eliminated.
                        uuid_still_exists = False

                        if self.uuid:
                            try:
                                info = self.processing_node.get_task_info(self.uuid)
                                uuid_still_exists = info['uuid'] == self.uuid
                            except ProcessingException:
                                pass

                        if uuid_still_exists:
                            # Good to go
                            try:
                                self.processing_node.restart_task(self.uuid)
                            except ProcessingError as e:
                                # Something went wrong
                                logger.warning("Could not restart {}, will start a new one".format(self))
                                self.uuid = ''
                        else:
                            # Task has been purged (or processing node is offline)
                            # Process this as a new task
                            # Removing its UUID will cause the scheduler
                            # to process this the next tick
                            self.uuid = ''

                        self.console_output = ""
                        self.processing_time = -1
                        self.status = None
                        self.last_error = None
                        self.pending_action = None
                        self.save()
                    else:
                        raise ProcessingError("Cannot restart a task that has no processing node")

                elif self.pending_action == pending_actions.REMOVE:
                    logger.info("Removing {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to delete the resources on the processing node
                        # We don't care if this fails, as resources on processing nodes
                        # are expected to be purged on their own after a set amount of time anyway
                        try:
                            self.processing_node.remove_task(self.uuid)
                        except ProcessingException:
                            pass

                    # What's more important is that we delete our task properly here
                    self.delete()

                    # Stop right here!
                    return

            if self.processing_node:
                # Need to update status (first time, queued or running?)
                if self.uuid and self.status in [None, status_codes.QUEUED, status_codes.RUNNING]:
                    # Update task info from processing node
                    info = self.processing_node.get_task_info(self.uuid)

                    self.processing_time = info["processingTime"]
                    self.status = info["status"]["code"]

                    current_lines_count = len(self.console_output.split("\n")) - 1
                    self.console_output += self.processing_node.get_task_console_output(self.uuid, current_lines_count)

                    if "errorMessage" in info["status"]:
                        self.last_error = info["status"]["errorMessage"]

                    # Has the task just been canceled, failed, or completed?
                    if self.status in [status_codes.FAILED, status_codes.COMPLETED, status_codes.CANCELED]:
                        logger.info("Processing status: {} for {}".format(self.status, self))

                        if self.status == status_codes.COMPLETED:
                            assets_dir = self.assets_path("")

                            # Remove previous assets directory
                            if os.path.exists(assets_dir):
                                logger.info("Removing old assets directory: {} for {}".format(assets_dir, self))
                                shutil.rmtree(assets_dir)

                            os.makedirs(assets_dir)

                            logger.info("Downloading all.zip for {}".format(self))

                            # Download all assets
                            zip_stream = self.processing_node.download_task_asset(self.uuid, "all.zip")
                            zip_path = os.path.join(assets_dir, "all.zip")
                            with open(zip_path, 'wb') as fd:
                                for chunk in zip_stream.iter_content(4096):
                                    fd.write(chunk)

                            logger.info("Done downloading all.zip for {}".format(self))

                            # Extract from zip
                            with zipfile.ZipFile(zip_path, "r") as zip_h:
                                zip_h.extractall(assets_dir)

                            logger.info("Extracted all.zip for {}".format(self))

                            # Populate *_extent fields
                            extent_fields = [
                                (os.path.realpath(self.assets_path("odm_orthophoto", "odm_orthophoto.tif")),
                                 'orthophoto_extent'),
                                (os.path.realpath(self.assets_path("odm_dem", "dsm.tif")),
                                 'dsm_extent'),
                                (os.path.realpath(self.assets_path("odm_dem", "dtm.tif")),
                                 'dtm_extent'),
                            ]

                            for raster_path, field in extent_fields:
                                if os.path.exists(raster_path):
                                    # Read extent and SRID
                                    raster = GDALRaster(raster_path)
                                    extent = OGRGeometry.from_bbox(raster.extent)

                                    # It will be implicitly transformed into the SRID of the model’s field
                                    # self.field = GEOSGeometry(...)
                                    setattr(self, field, GEOSGeometry(extent.wkt, srid=raster.srid))

                                    logger.info("Populated extent field with {} for {}".format(raster_path, self))

                            self.update_available_assets_field()
                            self.save()
                        else:
                            # FAILED, CANCELED
                            self.save()
                    else:
                        # Still waiting...
                        self.save()

        except ProcessingError as e:
            self.set_failure(str(e))
        except (ConnectionRefusedError, ConnectionError) as e:
            logger.warning("{} cannot communicate with processing node: {}".format(self, str(e)))
        except ProcessingTimeout as e:
            logger.warning("{} timed out with error: {}. We'll try reprocessing at the next tick.".format(self, str(e)))
Example #16
    def process(self):
        """
        This method contains the logic for processing tasks asynchronously
        from a background thread or from a worker. Here tasks that are
        ready to be processed execute some logic. This could be communication
        with a processing node or executing a pending action.
        """

        try:
            if self.pending_action == pending_actions.RESIZE:
                resized_images = self.resize_images()
                self.resize_gcp(resized_images)
                self.pending_action = None
                self.save()

            if self.auto_processing_node and self.status not in [status_codes.FAILED, status_codes.CANCELED]:
                # No processing node assigned and need to auto assign
                if self.processing_node is None:
                    # Assign first online node with lowest queue count
                    self.processing_node = ProcessingNode.find_best_available_node()
                    if self.processing_node:
                        self.processing_node.queue_count += 1 # Doesn't have to be accurate, it will get overridden later
                        self.processing_node.save()

                        logger.info("Automatically assigned processing node {} to {}".format(self.processing_node, self))
                        self.save()

                # Processing node assigned, but is offline and no errors
                if self.processing_node and not self.processing_node.is_online():
                    # If we are queued up
                    # detach processing node, and reassignment
                    # will be processed at the next tick
                    if self.status == status_codes.QUEUED:
                        logger.info("Processing node {} went offline, reassigning {}...".format(self.processing_node, self))
                        self.uuid = ''
                        self.processing_node = None
                        self.status = None
                        self.save()

                    elif self.status == status_codes.RUNNING:
                        # Task was running and processing node went offline
                        # It could have crashed due to low memory
                        # or perhaps it went offline due to network errors.
                        # We can't easily differentiate between the two, so we need
                        # to notify the user because if it crashed due to low memory
                        # the user might need to take action (or be stuck in an infinite loop)
                        raise ProcessingError("Processing node went offline. This could be due to insufficient memory or a network error.")

            if self.processing_node:
                # Need to process some images (UUID not yet set and task doesn't have pending actions)?
                if not self.uuid and self.pending_action is None and self.status is None:
                    logger.info("Processing... {}".format(self))

                    images = [image.path() for image in self.imageupload_set.all()]

                    # This takes a while
                    uuid = self.processing_node.process_new_task(images, self.name, self.options)

                    # Refresh task object before committing change
                    self.refresh_from_db()
                    self.uuid = uuid
                    self.save()

                    # TODO: log process has started processing

            if self.pending_action is not None:
                if self.pending_action == pending_actions.CANCEL:
                    # Do we need to cancel the task on the processing node?
                    logger.info("Canceling {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to cancel the task on the processing node
                        # We don't care if this fails (we tried)
                        try:
                            self.processing_node.cancel_task(self.uuid)
                        except ProcessingException:
                            logger.warning("Could not cancel {} on processing node. We'll proceed anyway...".format(self))

                        self.status = status_codes.CANCELED
                        self.pending_action = None
                        self.save()
                    else:
                        raise ProcessingError("Cannot cancel a task that has no processing node or UUID")

                elif self.pending_action == pending_actions.RESTART:
                    logger.info("Restarting {}".format(self))
                    if self.processing_node:

                        # Check if the UUID is still valid, as processing nodes purge
                        # results after a set amount of time, the UUID might have been eliminated.
                        uuid_still_exists = False

                        if self.uuid:
                            try:
                                info = self.processing_node.get_task_info(self.uuid)
                                uuid_still_exists = info['uuid'] == self.uuid
                            except ProcessingException:
                                pass

                        need_to_reprocess = False

                        if uuid_still_exists:
                            # Good to go
                            try:
                                self.processing_node.restart_task(self.uuid, self.options)
                            except ProcessingError as e:
                                # Something went wrong
                                logger.warning("Could not restart {}, will start a new one".format(self))
                                need_to_reprocess = True
                        else:
                            need_to_reprocess = True

                        if need_to_reprocess:
                            logger.info("{} needs to be reprocessed".format(self))

                            # Task has been purged (or processing node is offline)
                            # Process this as a new task
                            # Removing its UUID will cause the scheduler
                            # to process this the next tick
                            self.uuid = ''

                            # We also remove the "rerun-from" parameter if it's set
                            self.options = list(filter(lambda d: d['name'] != 'rerun-from', self.options))

                        self.console_output = ""
                        self.processing_time = -1
                        self.status = None
                        self.last_error = None
                        self.pending_action = None
                        self.save()
                    else:
                        raise ProcessingError("Cannot restart a task that has no processing node")

                elif self.pending_action == pending_actions.REMOVE:
                    logger.info("Removing {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to delete the resources on the processing node
                        # We don't care if this fails, as resources on processing nodes
                        # are expected to be purged on their own after a set amount of time anyway
                        try:
                            self.processing_node.remove_task(self.uuid)
                        except ProcessingException:
                            pass

                    # What's more important is that we delete our task properly here
                    self.delete()

                    # Stop right here!
                    return

            if self.processing_node:
                # Need to update status (first time, queued or running?)
                if self.uuid and self.status in [None, status_codes.QUEUED, status_codes.RUNNING]:
                    # Update task info from processing node
                    info = self.processing_node.get_task_info(self.uuid)

                    self.processing_time = info["processingTime"]
                    self.status = info["status"]["code"]

                    current_lines_count = len(self.console_output.split("\n"))
                    console_output = self.processing_node.get_task_console_output(self.uuid, current_lines_count)
                    if len(console_output) > 0:
                        self.console_output += console_output + '\n'

                    if "errorMessage" in info["status"]:
                        self.last_error = info["status"]["errorMessage"]

                    # Has the task just been canceled, failed, or completed?
                    if self.status in [status_codes.FAILED, status_codes.COMPLETED, status_codes.CANCELED]:
                        logger.info("Processing status: {} for {}".format(self.status, self))

                        if self.status == status_codes.COMPLETED:
                            assets_dir = self.assets_path("")

                            # Remove previous assets directory
                            if os.path.exists(assets_dir):
                                logger.info("Removing old assets directory: {} for {}".format(assets_dir, self))
                                shutil.rmtree(assets_dir)

                            os.makedirs(assets_dir)

                            logger.info("Downloading all.zip for {}".format(self))

                            # Download all assets
                            zip_stream = self.processing_node.download_task_asset(self.uuid, "all.zip")
                            zip_path = os.path.join(assets_dir, "all.zip")
                            with open(zip_path, 'wb') as fd:
                                for chunk in zip_stream.iter_content(4096):
                                    fd.write(chunk)

                            logger.info("Done downloading all.zip for {}".format(self))

                            # Extract from zip
                            with zipfile.ZipFile(zip_path, "r") as zip_h:
                                zip_h.extractall(assets_dir)

                            logger.info("Extracted all.zip for {}".format(self))

                            # Populate *_extent fields
                            extent_fields = [
                                (os.path.realpath(self.assets_path("odm_orthophoto", "odm_orthophoto.tif")),
                                 'orthophoto_extent'),
                                (os.path.realpath(self.assets_path("odm_dem", "dsm.tif")),
                                 'dsm_extent'),
                                (os.path.realpath(self.assets_path("odm_dem", "dtm.tif")),
                                 'dtm_extent'),
                            ]

                            for raster_path, field in extent_fields:
                                if os.path.exists(raster_path):
                                    # Read extent and SRID
                                    raster = GDALRaster(raster_path)
                                    extent = OGRGeometry.from_bbox(raster.extent)

                                    # It will be implicitly transformed into the SRID of the model’s field
                                    # self.field = GEOSGeometry(...)
                                    setattr(self, field, GEOSGeometry(extent.wkt, srid=raster.srid))

                                    logger.info("Populated extent field with {} for {}".format(raster_path, self))

                            self.update_available_assets_field()
                            self.save()

                            from app.plugins import signals as plugin_signals
                            plugin_signals.task_completed.send_robust(sender=self.__class__, task_id=self.id)
                        else:
                            # FAILED, CANCELED
                            self.save()
                    else:
                        # Still waiting...
                        self.save()

        except ProcessingError as e:
            self.set_failure(str(e))
        except (ConnectionRefusedError, ConnectionError) as e:
            logger.warning("{} cannot communicate with processing node: {}".format(self, str(e)))
        except ProcessingTimeout as e:
            logger.warning("{} timed out with error: {}. We'll try reprocessing at the next tick.".format(self, str(e)))
Example #17
    def test_burn_value_option(self):
        geom = OGRGeometry.from_bbox((500000.0, 399800.0, 500200.0, 399900.0))
        geom.srid = 3086
        result = rasterize(geom, self.rast, burn_value=99)
        self.assertEqual(result.bands[0].data().ravel().tolist(),
                         [0, 0, 99, 99])
Example #18
    def test_half_covering_geom_rasterization(self):
        geom = OGRGeometry.from_bbox((500000.0, 399800.0, 500200.0, 399900.0))
        geom.srid = 3086
        result = rasterize(geom, self.rast)
        self.assertEqual(result.bands[0].data().ravel().tolist(), [0, 0, 1, 1])
Example #19
    def extract_assets_and_complete(self):
        """
        Extracts assets/all.zip, populates task fields where required, and
        assures Cloud Optimized GeoTIFFs (COGs). Raises a zipfile.BadZipFile
        exception if the archive is corrupted.
        :return:
        """
        assets_dir = self.assets_path("")
        zip_path = self.assets_path("all.zip")

        # Extract from zip
        with zipfile.ZipFile(zip_path, "r") as zip_h:
            zip_h.extractall(assets_dir)

        logger.info("Extracted all.zip for {}".format(self))

        # Populate *_extent fields
        extent_fields = [
            (os.path.realpath(
                self.assets_path("odm_orthophoto",
                                 "odm_orthophoto.tif")), 'orthophoto_extent'),
            (os.path.realpath(self.assets_path("odm_dem",
                                               "dsm.tif")), 'dsm_extent'),
            (os.path.realpath(self.assets_path("odm_dem",
                                               "dtm.tif")), 'dtm_extent'),
        ]

        for raster_path, field in extent_fields:
            if os.path.exists(raster_path):
                # Make sure this is a Cloud Optimized GeoTIFF
                # if not, it will be created
                try:
                    assure_cogeo(raster_path)
                except IOError as e:
                    logger.warning(
                        "Cannot create Cloud Optimized GeoTIFF for %s (%s). This will result in degraded visualization performance."
                        % (raster_path, str(e)))

                # Read extent and SRID
                raster = GDALRaster(raster_path)
                extent = OGRGeometry.from_bbox(raster.extent)

                # Make sure PostGIS supports it
                with connection.cursor() as cursor:
                    cursor.execute(
                        "SELECT SRID FROM spatial_ref_sys WHERE SRID = %s",
                        [raster.srid])
                    if cursor.rowcount == 0:
                        raise NodeServerError(
                            gettext(
                                "Unsupported SRS %(code)s. Please make sure you picked a supported SRS."
                            ) % {'code': str(raster.srid)})

                # It will be implicitly transformed into the SRID of the model’s field
                # self.field = GEOSGeometry(...)
                setattr(self, field, GEOSGeometry(extent.wkt,
                                                  srid=raster.srid))

                logger.info("Populated extent field with {} for {}".format(
                    raster_path, self))

        self.update_available_assets_field()
        self.running_progress = 1.0
        self.console_output += gettext("Done!") + "\n"
        self.status = status_codes.COMPLETED
        self.save()

        from app.plugins import signals as plugin_signals
        plugin_signals.task_completed.send_robust(sender=self.__class__,
                                                  task_id=self.id)
Example #21
    def get_bbox(self, obj):
        # obj.bbox apparently stores (xmin, xmax, ymin, ymax); reorder into
        # the (xmin, ymin, xmax, ymax) layout that from_bbox expects.
        bbox = obj.bbox
        geom = OGRGeometry.from_bbox([bbox[0], bbox[2], bbox[1], bbox[3]])
        return json.loads(geom.json)
Example #22
    def process(self):
        """
        This method contains the logic for processing tasks asynchronously
        from a background thread or from a worker. Here tasks that are
        ready to be processed execute some logic. This could be communication
        with a processing node or executing a pending action.
        """

        try:
            if self.pending_action == pending_actions.RESIZE:
                resized_images = self.resize_images()
                self.refresh_from_db()
                self.resize_gcp(resized_images)
                self.pending_action = None
                self.save()

            if self.auto_processing_node and self.status not in [
                    status_codes.FAILED, status_codes.CANCELED
            ]:
                # No processing node assigned and need to auto assign
                if self.processing_node is None:
                    # Assign first online node with lowest queue count
                    self.processing_node = ProcessingNode.find_best_available_node(
                    )
                    if self.processing_node:
                        self.processing_node.queue_count += 1  # Doesn't have to be accurate, it will get overridden later
                        self.processing_node.save()

                        logger.info(
                            "Automatically assigned processing node {} to {}".
                            format(self.processing_node, self))
                        self.save()

                # Processing node assigned, but is offline and no errors
                if self.processing_node and not self.processing_node.is_online(
                ):
                    # If we are queued up
                    # detach processing node, and reassignment
                    # will be processed at the next tick
                    if self.status == status_codes.QUEUED:
                        logger.info(
                            "Processing node {} went offline, reassigning {}..."
                            .format(self.processing_node, self))
                        self.uuid = ''
                        self.processing_node = None
                        self.status = None
                        self.save()

                    elif self.status == status_codes.RUNNING:
                        # Task was running and processing node went offline
                        # It could have crashed due to low memory
                        # or perhaps it went offline due to network errors.
                        # We can't easily differentiate between the two, so we need
                        # to notify the user because if it crashed due to low memory
                        # the user might need to take action (or be stuck in an infinite loop)
                        raise ProcessingError(
                            "Processing node went offline. This could be due to insufficient memory or a network error."
                        )

            if self.processing_node:
                # Need to process some images (UUID not yet set and task doesn't have pending actions)?
                if not self.uuid and self.pending_action is None and self.status is None:
                    logger.info("Processing... {}".format(self))

                    images = [
                        image.path() for image in self.imageupload_set.all()
                    ]

                    # Track upload progress, but limit the number of DB updates
                    # to every 2 seconds (and always record the 100% progress)
                    last_update = 0

                    def callback(progress):
                        nonlocal last_update
                        if time.time() - last_update >= 2 or (
                                progress >= 1.0 - 1e-6
                                and progress <= 1.0 + 1e-6):
                            Task.objects.filter(pk=self.id).update(
                                upload_progress=progress)
                            last_update = time.time()

                    # This takes a while
                    uuid = self.processing_node.process_new_task(
                        images, self.name, self.options, callback)

                    # Refresh task object before committing change
                    self.refresh_from_db()
                    self.uuid = uuid
                    self.save()

                    # TODO: log process has started processing

            if self.pending_action is not None:
                if self.pending_action == pending_actions.CANCEL:
                    # Do we need to cancel the task on the processing node?
                    logger.info("Canceling {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to cancel the task on the processing node
                        # We don't care if this fails (we tried)
                        try:
                            self.processing_node.cancel_task(self.uuid)
                        except ProcessingException:
                            logger.warning(
                                "Could not cancel {} on processing node. We'll proceed anyway..."
                                .format(self))

                        self.status = status_codes.CANCELED
                        self.pending_action = None
                        self.save()
                    else:
                        raise ProcessingError(
                            "Cannot cancel a task that has no processing node or UUID"
                        )

                elif self.pending_action == pending_actions.RESTART:
                    logger.info("Restarting {}".format(self))
                    if self.processing_node:

                        # Check if the UUID is still valid, as processing nodes purge
                        # results after a set amount of time, the UUID might have been eliminated.
                        uuid_still_exists = False

                        if self.uuid:
                            try:
                                info = self.processing_node.get_task_info(
                                    self.uuid)
                                uuid_still_exists = info['uuid'] == self.uuid
                            except ProcessingException:
                                pass

                        need_to_reprocess = False

                        if uuid_still_exists:
                            # Good to go
                            try:
                                self.processing_node.restart_task(
                                    self.uuid, self.options)
                            except ProcessingError as e:
                                # Something went wrong
                                logger.warning(
                                    "Could not restart {}, will start a new one"
                                    .format(self))
                                need_to_reprocess = True
                        else:
                            need_to_reprocess = True

                        if need_to_reprocess:
                            logger.info(
                                "{} needs to be reprocessed".format(self))

                            # Task has been purged (or processing node is offline)
                            # Process this as a new task
                            # Removing its UUID will cause the scheduler
                            # to process this the next tick
                            self.uuid = ''

                            # We also remove the "rerun-from" parameter if it's set
                            self.options = list(
                                filter(lambda d: d['name'] != 'rerun-from',
                                       self.options))
                            self.upload_progress = 0

                        self.console_output = ""
                        self.processing_time = -1
                        self.status = None
                        self.last_error = None
                        self.pending_action = None
                        self.running_progress = 0
                        self.save()
                    else:
                        raise ProcessingError(
                            "Cannot restart a task that has no processing node"
                        )

                elif self.pending_action == pending_actions.REMOVE:
                    logger.info("Removing {}".format(self))
                    if self.processing_node and self.uuid:
                        # Attempt to delete the resources on the processing node
                        # We don't care if this fails, as resources on processing nodes
                        # are expected to be purged on their own after a set amount of time anyway
                        try:
                            self.processing_node.remove_task(self.uuid)
                        except ProcessingException:
                            pass

                    # What's more important is that we delete our task properly here
                    self.delete()

                    # Stop right here!
                    return

            if self.processing_node:
                # Need to update status (first time, queued or running?)
                if self.uuid and self.status in [
                        None, status_codes.QUEUED, status_codes.RUNNING
                ]:
                    # Update task info from processing node
                    info = self.processing_node.get_task_info(self.uuid)

                    self.processing_time = info["processingTime"]
                    self.status = info["status"]["code"]

                    current_lines_count = len(self.console_output.split("\n"))
                    console_output = self.processing_node.get_task_console_output(
                        self.uuid, current_lines_count)
                    if len(console_output) > 0:
                        self.console_output += "\n".join(console_output) + '\n'

                        # Update running progress
                        for line in console_output:
                            for line_match, value in self.TASK_OUTPUT_MILESTONES.items(
                            ):
                                if line_match in line:
                                    self.running_progress = value
                                    break

                    if "errorMessage" in info["status"]:
                        self.last_error = info["status"]["errorMessage"]

                    # Has the task just been canceled, failed, or completed?
                    if self.status in [
                            status_codes.FAILED, status_codes.COMPLETED,
                            status_codes.CANCELED
                    ]:
                        logger.info("Processing status: {} for {}".format(
                            self.status, self))

                        if self.status == status_codes.COMPLETED:
                            assets_dir = self.assets_path("")

                            # Remove previous assets directory
                            if os.path.exists(assets_dir):
                                logger.info(
                                    "Removing old assets directory: {} for {}".
                                    format(assets_dir, self))
                                shutil.rmtree(assets_dir)

                            os.makedirs(assets_dir)

                            logger.info(
                                "Downloading all.zip for {}".format(self))

                            # Download all assets
                            zip_stream = self.processing_node.download_task_asset(
                                self.uuid, "all.zip")
                            zip_path = os.path.join(assets_dir, "all.zip")
                            with open(zip_path, 'wb') as fd:
                                for chunk in zip_stream.iter_content(4096):
                                    fd.write(chunk)

                            logger.info(
                                "Done downloading all.zip for {}".format(self))

                            # Extract from zip
                            with zipfile.ZipFile(zip_path, "r") as zip_h:
                                zip_h.extractall(assets_dir)

                            logger.info(
                                "Extracted all.zip for {}".format(self))

                            # Populate *_extent fields
                            extent_fields = [
                                (os.path.realpath(
                                    self.assets_path("odm_orthophoto",
                                                     "odm_orthophoto.tif")),
                                 'orthophoto_extent'),
                                (os.path.realpath(
                                    self.assets_path("odm_dem", "dsm.tif")),
                                 'dsm_extent'),
                                (os.path.realpath(
                                    self.assets_path("odm_dem", "dtm.tif")),
                                 'dtm_extent'),
                            ]

                            for raster_path, field in extent_fields:
                                if os.path.exists(raster_path):
                                    # Read extent and SRID
                                    raster = GDALRaster(raster_path)
                                    extent = OGRGeometry.from_bbox(
                                        raster.extent)

                                    # It will be implicitly transformed into the SRID of the model’s field
                                    # self.field = GEOSGeometry(...)
                                    setattr(
                                        self, field,
                                        GEOSGeometry(extent.wkt,
                                                     srid=raster.srid))

                                    logger.info(
                                        "Populated extent field with {} for {}"
                                        .format(raster_path, self))

                            self.update_available_assets_field()
                            self.running_progress = 1.0
                            self.save()

                            from app.plugins import signals as plugin_signals
                            plugin_signals.task_completed.send_robust(
                                sender=self.__class__, task_id=self.id)
                        else:
                            # FAILED, CANCELED
                            self.save()
                    else:
                        # Still waiting...
                        self.save()

        except ProcessingError as e:
            self.set_failure(str(e))
        except (ConnectionRefusedError, ConnectionError) as e:
            logger.warning(
                "{} cannot communicate with processing node: {}".format(
                    self, str(e)))
        except ProcessingTimeout as e:
            logger.warning(
                "{} timed out with error: {}. We'll try reprocessing at the next tick."
                .format(self, str(e)))
Example #23
    print(extent)
    bl = extent[:2]
    tr = extent[2:]

    tlt = deg2num(bl[0], tr[1], 14)
    trt = deg2num(tr[0], tr[1], 14)
    tlb = deg2num(bl[0], bl[1], 14)

    xs = range(tlt[0], trt[0]+1)
    ys = range(tlt[1], tlb[1]+1)

    matrix = []
    for xt in xs:
        for yt in ys:
            matrix.append((xt, yt))

    for foo in matrix:
        # 0.02197265625 is the tile width in degrees at zoom 14 (360 / 2 ** 14).
        bbox = (foo[0] * 0.02197265625 - 180, num2deg(foo[1] + 1), (foo[0] + 1) * 0.02197265625 - 180, num2deg(foo[1]))
        print(bbox)
        dens = OGRGeometry.from_bbox(bbox)
        dens.srid = 4326
        d = TaskCell()
        d.x = foo[0]
        d.y = foo[1]
        d.geom = dens.wkt
        d.task = t
        d.save()
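
deg2num and num2deg above are presumably variants of the standard OSM slippy-map helpers, with longitude passed first and num2deg fixed at zoom 14 and returning only a latitude, which is what the call sites require; a sketch under those assumptions:

import math

def deg2num(lon_deg, lat_deg, zoom):
    # Standard slippy-map tile index for a WGS84 coordinate
    # (longitude first here, matching the calls above).
    lat_rad = math.radians(lat_deg)
    n = 2 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * n)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return (xtile, ytile)

def num2deg(ytile, zoom=14):
    # Latitude of the top edge of tile row `ytile`.
    n = 2.0 ** zoom
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
    return math.degrees(lat_rad)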
    

Example #24

#bl = sys.argv[1].split(",")[:2]
#ur = sys.argv[1].split(",")[2:]

#print bl
#print ur

blah = "-72.6775034,19.6069557,-72.5595274,19.6865932"


bblah = [float(i) for i in blah.split(",")]

Density.objects.all().delete()

foo = OGRGeometry.from_bbox(bblah)
#print foo
#foo.srid = 900913

#foo.transform(4326)
print(foo)

#import ipdb; ipdb.set_trace()


# From this, figure out what the polygons would be at tile zoom 15.
# From this, a grid model can be created that keeps track of the number of nodes per tile.

# zoom level 0 is a single 1x1 tile (256)
# zoom level 1 will have 4 tiles
# zoom level 2 will have 16 tiles
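
A quick check on that progression: the tile pyramid has 4 ** z tiles at zoom level z (2 ** z per axis), so zoom 0 has 1 tile, zoom 1 has 4, zoom 2 has 16, and zoom 15 has 4 ** 15 (about 1.07e9) tiles, each spanning 360 / 2 ** 15, or roughly 0.011 degrees of longitude.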