def test_func_no_wrapping():
    # Build one SELECT exercising all three ST_Buffer wrapping behaviors.
    query = select([
        func.ST_Buffer(Point.geom),                     # default: wrapped in ST_AsEWKB
        func.ST_Buffer(Point.geom, type_=Geometry),     # explicit Geometry: still wrapped
        func.ST_Buffer(Point.geom, type_=RawGeometry),  # RawGeometry opts out of wrapping
    ])

    # The rendered SQL must wrap the first two calls but not the third.
    expected = (
        "SELECT "
        "ST_AsEWKB(ST_Buffer(point.geom)) AS \"ST_Buffer_1\", "
        "ST_AsEWKB(ST_Buffer(point.geom)) AS \"ST_Buffer_2\", "
        "ST_Buffer(point.geom) AS \"ST_Buffer_3\" \n"
        "FROM point"
    )
    assert str(query) == expected
def query(self, source, target, core, column, pkey):
    """Build the length-of-LOIs-within-buffer query.

    Buffers each target geometry by ``self.within`` (computed in EPSG:3857
    so the distance is in meters), clips the lines-of-interest (LOIs) from
    ``source`` against each buffer, and sums the clipped lengths per target
    row under the label ``self.feature_name``.

    Args:
        source: table of candidate lines; read via its ``WKT``,
            ``self.source_id`` and ``self.source_column`` columns.
        target: table whose rows are buffered.
        core: database core providing ``ST_GeoFromText``.
        column: name of the WKT geometry column on ``target``.
        pkey: primary-key column name used for grouping and the final join.

    Returns:
        A SQLAlchemy ``select`` with one row per target, excluding the
        internal ``__len__``/``__geom__``/``__buffer__`` working columns.

    Raises:
        ValueError: if ``core`` is a ``BigQueryCore``.
    """
    # ST_Buffer is not yet implemented so BigQueryCore won't work
    # (groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ)
    if isinstance(core, BigQueryCore):
        # Implicit concatenation instead of a backslash continuation inside
        # the literal, which leaked the continuation's whitespace into the
        # rendered error message.
        raise ValueError(
            "The LengthOf feature is currently incompatible with "
            "BigQueryCore because ST_Buffer is not yet implemented")

    # Get all lines-of-interests (LOIs) of fclass `on`
    lois = select(
        [source.c[self.source_id], source.c.WKT],
        source.c[self.source_column] == self.source_filter,
    ).cte("lois")

    # Create a buffer `within` a distance/radius around each centroid.
    # The point has to be converted to EPSG:3857 so that meters can be
    # used instead of decimal degrees for EPSG:4326.
    buff = select([
        target,
        func.ST_Buffer(core.ST_GeoFromText(target.c[column]),
                       self.within).label("__buffer__"),
    ]).cte("buff")

    # Clip the LOIs with the buffers then calculate the length of all
    # LOIs inside each buffer.  The WHERE (ST_Intersects) keeps only
    # LOI/buffer pairs that actually overlap.
    clip = select(
        [
            buff,
            func.ST_Intersection(
                core.ST_GeoFromText(lois.c.WKT),
                func.ST_Transform(buff.c["__buffer__"], 4326),
            ).label("__geom__"),
            # Length is measured in EPSG:3857 so the result is in meters.
            func.ST_Length(
                func.ST_Intersection(
                    func.ST_Transform(core.ST_GeoFromText(lois.c.WKT), 3857),
                    buff.c["__buffer__"],
                )).label("__len__"),
        ],
        func.ST_Intersects(
            core.ST_GeoFromText(lois.c.WKT),
            func.ST_Transform(buff.c["__buffer__"], 4326),
        ),
    ).cte("clip")

    # Sum the length of all LOIs inside each buffer
    sum_length = (select([
        clip.c[pkey],
        func.sum(clip.c["__len__"]).label(self.feature_name),
    ]).select_from(clip).group_by(clip.c[pkey]).cte("sum_length"))

    # Join the sums back to the buffered targets, dropping the internal
    # working columns from the final projection.
    query = select(
        [
            col for col in sum_length.columns
            if col.key not in ("__len__", "__geom__", "__buffer__")
        ],
        sum_length.c[pkey] == buff.c[pkey],
    )
    return query
def get_by_sujeto(db_session: Session, *, sujeto: SujetoInDB) -> Optional[Location]:
    """Find the name of the stored Location whose buffered center contains
    the sujeto's coordinates, preferring the smallest buffer; fall back to a
    Google place lookup when no location matches."""
    if sujeto.latlng is None:
        return None

    # POINT takes "lng lat", so reverse the (lat, lng) pair from the geom.
    lng_lat = crud.coordinates.get_latlng_from_geom(sujeto.latlng)[::-1]
    point_32631 = func.ST_Transform(
        func.ST_GeomFromEWKT('SRID=4326;POINT({} {})'.format(*lng_lat)),
        32631)

    # Buffer each location's center by its radius in EPSG:32631 (meters).
    buffered_center = func.ST_Buffer(
        func.ST_Transform(Location.center, 32631), Location.radius)

    location = (
        db_session.query(Location)
        .filter(func.ST_Contains(buffered_center, point_32631))
        .order_by(func.ST_Transform(Location.center, 32631)
                  .ST_Buffer(Location.radius).ST_Area().asc())
        .first())

    if location is None:
        return google.get_sujeto_place(sujeto)
    return location.name
def __init__(self, trip=None, srid='0'):
    '''This function needs to modify the input data that it is provided.
    Special considerations need to be made for geometry fields and
    remapping of keys to avoid collisions.

    :param trip: dict of trip data; required (a copy is taken so the
        caller's dict is not mutated by the key pops below).
    :param srid: spatial reference id; accepted for interface
        compatibility (not read in this constructor).
    :raises ValueError: if no trip object is supplied.
    '''
    # Validate BEFORE touching `trip`: previously `trip.copy()` ran first,
    # so a missing trip raised AttributeError instead of this ValueError.
    if trip is None:
        raise ValueError("A trip object must be supplied")
    trip = trip.copy()

    # Maps the serialized event `type` string to its mapped class.
    self.event_types = {
        'speeding': SpeedingEvent,
        'hard_accel': HardAccelerationEvent,
        'hard_brake': HardBrakeEvent
    }

    trip.pop('user')

    # this path will be used to find speeding_event substrings
    path_linestring = SpatialQueries.points_to_projected_line(trip['path'])
    trip['geom_path'] = path_linestring

    # paths are stored with a 20 M buffer as a polygon to account for gps
    # inaccuracies. This is just a very rough way to do this, more care
    # would be needed for a robust solution, such as greater buffer size,
    # or simply checking that a high percentage of points are within a smaller
    # buffer.
    trip['geom'] = func.ST_Buffer(path_linestring, 20)

    # Rename `id` so it cannot collide with the mapped primary key.
    trip_id_string = trip.pop('id')
    trip['trip_id_string'] = trip_id_string

    drive_events = trip.pop('drive_events')
    super(Trip, self).__init__(**trip)

    # Instantiate each drive event with its respective class
    # and then append it to its respective list on the trip mapped object
    for event in drive_events:
        cls = self.event_types[event.pop('type')]
        lst = getattr(self, cls.__tablename__)
        lst.append(cls(trip['trip_id_string'], event, path_linestring))
def test_ST_Buffer(self):
    from sqlalchemy.sql import select, func
    from geoalchemy2 import WKBElement, WKTElement

    lake_id = self._create_one()

    # Three equivalent ways of issuing ST_Buffer must all return WKB.
    stmt = select([func.ST_Buffer(Lake.__table__.c.geom, 2)])
    r1 = session.execute(stmt).scalar()
    ok_(isinstance(r1, WKBElement))

    lake = session.query(Lake).get(lake_id)
    r2 = session.execute(lake.geom.ST_Buffer(2)).scalar()
    ok_(isinstance(r2, WKBElement))

    r3 = session.query(Lake.geom.ST_Buffer(2)).scalar()
    ok_(isinstance(r3, WKBElement))

    # All three spellings produce identical geometry bytes.
    ok_(r1.data == r2.data == r3.data)

    # The buffer is usable in a filter: the origin lies within it.
    r4 = session.query(Lake).filter(
        func.ST_Within(WKTElement('POINT(0 0)', srid=4326),
                       Lake.geom.ST_Buffer(2))).one()
    ok_(isinstance(r4, Lake))
    eq_(r4.id, lake_id)
def calculate_summary(self, product_name: str, time: Range) -> TimePeriodOverview:
    """
    Create a summary of the given product/time range.
    """
    log = self.log.bind(product_name=product_name, time=time)
    log.debug("summary.query")

    begin_time, end_time, where_clause = self._where(product_name, time)

    # Per-SRID aggregation: union footprints within each SRID group, then
    # transform each group's union to the target SRID so the groups can be
    # combined in the second query below.
    select_by_srid = (select((
        func.ST_SRID(DATASET_SPATIAL.c.footprint).label("srid"),
        func.count().label("dataset_count"),
        func.ST_Transform(
            func.ST_Union(DATASET_SPATIAL.c.footprint),
            self._target_srid(),
            type_=Geometry(),
        ).label("footprint_geometry"),
        func.sum(DATASET_SPATIAL.c.size_bytes).label("size_bytes"),
        func.max(DATASET_SPATIAL.c.creation_time).label(
            "newest_dataset_creation_time"),
    )).where(where_clause).group_by("srid").alias("srid_summaries"))

    # Union all srid groups into one summary.
    result = self._engine.execute(
        select((
            func.sum(
                select_by_srid.c.dataset_count).label("dataset_count"),
            func.array_agg(select_by_srid.c.srid).label("srids"),
            func.sum(select_by_srid.c.size_bytes).label("size_bytes"),
            # ST_Buffer(geom, 0): presumably to repair invalid geometries
            # before the final union — confirm against PostGIS guidance.
            func.ST_Union(
                func.ST_Buffer(select_by_srid.c.footprint_geometry, 0),
                type_=Geometry(srid=self._target_srid()),
            ).label("footprint_geometry"),
            func.max(select_by_srid.c.newest_dataset_creation_time).label(
                "newest_dataset_creation_time"),
            func.now().label("summary_gen_time"),
        )))
    rows = result.fetchall()
    log.debug("summary.query.done", srid_rows=len(rows))

    # The aggregate query always collapses to exactly one row.
    assert len(rows) == 1
    row = dict(rows[0])

    # SUM returns NULL (None) when there were no matching datasets.
    row["dataset_count"] = int(
        row["dataset_count"]) if row["dataset_count"] else 0
    if row["footprint_geometry"] is not None:
        row["footprint_crs"] = self._get_srid_name(
            row["footprint_geometry"].srid)
        row["footprint_geometry"] = geo_shape.to_shape(
            row["footprint_geometry"])
    else:
        row["footprint_crs"] = None
    # Map the collected SRIDs to human-readable CRS names.
    row["crses"] = None
    if row["srids"] is not None:
        row["crses"] = {self._get_srid_name(s) for s in row["srids"]}
    del row["srids"]

    # Convert from Python Decimal
    if row["size_bytes"] is not None:
        row["size_bytes"] = int(row["size_bytes"])

    has_data = row["dataset_count"] > 0

    log.debug("counter.calc")

    # Initialise all requested days as zero
    day_counts = Counter({
        d.date(): 0
        for d in pd.date_range(begin_time, end_time, closed="left")
    })
    region_counts = Counter()
    if has_data:
        # Per-day dataset counts, bucketed in the configured time zone.
        day_counts.update(
            Counter({
                day.date(): count
                for day, count in self._engine.execute(
                    select([
                        func.date_trunc(
                            "day",
                            DATASET_SPATIAL.c.center_time.op(
                                "AT TIME ZONE")(self.grouping_time_zone),
                        ).label("day"),
                        func.count(),
                    ]).where(where_clause).group_by("day"))
            }))

        # Per-region dataset counts over the same time range.
        region_counts = Counter({
            item: count
            for item, count in self._engine.execute(
                select([
                    DATASET_SPATIAL.c.region_code.label("region_code"),
                    func.count(),
                ]).where(where_clause).group_by("region_code"))
        })

    summary = TimePeriodOverview(
        **row,
        timeline_period="day",
        time_range=Range(begin_time, end_time),
        timeline_dataset_counts=day_counts,
        region_dataset_counts=region_counts,
        # TODO: filter invalid from the counts?
        footprint_count=row["dataset_count"] or 0,
    )

    log.debug(
        "summary.calc.done",
        dataset_count=summary.dataset_count,
        footprints_missing=summary.dataset_count - summary.footprint_count,
    )
    return summary