def test_spatial_join_query_using_index_on_polygons(self):
    query_window_rdd = PolygonRDD(self.sc, polygon_rdd_input_location, polygon_rdd_start_offset,
                                  polygon_rdd_end_offset, polygon_rdd_splitter, True)
    object_rdd = PointRDD(sparkContext=self.sc, InputLocation=point_rdd_input_location,
                          Offset=point_rdd_offset, splitter=point_rdd_splitter, carryInputData=False)
    object_rdd.analyze()
    object_rdd.spatialPartitioning(join_query_partitionin_type)
    query_window_rdd.spatialPartitioning(object_rdd.getPartitioner())

    for _ in range(each_query_loop_times):
        # .count() forces the lazy join to execute so each loop actually runs the query
        result_size = JoinQuery.SpatialJoinQuery(object_rdd, query_window_rdd, True, False).count()
def test_spatial_join_query_and_build_index_on_polygons_on_the_fly(self):
    query_window_rdd = PolygonRDD(self.sc, polygon_rdd_input_location, polygon_rdd_start_offset,
                                  polygon_rdd_end_offset, polygon_rdd_splitter, True)
    object_rdd = PointRDD(sparkContext=self.sc, InputLocation=point_rdd_input_location,
                          Offset=point_rdd_offset, splitter=point_rdd_splitter, carryInputData=False)
    object_rdd.analyze()
    object_rdd.spatialPartitioning(join_query_partitionin_type)
    query_window_rdd.spatialPartitioning(object_rdd.getPartitioner())

    for _ in range(each_query_loop_times):
        join_params = JoinParams(False, polygon_rdd_index_type, JoinBuildSide.LEFT)
        result_size = JoinQuery.spatialJoin(query_window_rdd, object_rdd, join_params).count()
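
# The two test methods above rely on module-level fixtures defined outside this snippet
# (input file locations, splitters, partitioning type, loop count). The block below is a
# minimal sketch of what they might look like; every path and value is a placeholder
# assumption, not the test module's real configuration.
from sedona.core.enums import FileDataSplitter, GridType, IndexType

polygon_rdd_input_location = "tests/resources/polygons.csv"   # hypothetical path
polygon_rdd_start_offset = 0                                  # first coordinate column
polygon_rdd_end_offset = 8                                    # last coordinate column
polygon_rdd_splitter = FileDataSplitter.CSV
polygon_rdd_index_type = IndexType.RTREE

point_rdd_input_location = "tests/resources/points.csv"       # hypothetical path
point_rdd_offset = 0
point_rdd_splitter = FileDataSplitter.CSV

join_query_partitionin_type = GridType.QUADTREE               # grid used to co-partition both RDDs
each_query_loop_times = 1                                     # how many times each query is repeated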
def test_indexed_rdd_assignment(self):
    # first pass: partition and index the point RDD, persist both sides, then run the distance join
    object_rdd = PointRDD(self.sc, point_rdd_input_location, point_rdd_offset, point_rdd_splitter, True)
    query_window_rdd = CircleRDD(object_rdd, 0.1)
    object_rdd.analyze()
    object_rdd.spatialPartitioning(GridType.QUADTREE)
    object_rdd.buildIndex(IndexType.QUADTREE, True)
    query_window_rdd.spatialPartitioning(object_rdd.getPartitioner())
    object_rdd.buildIndex(IndexType.RTREE, False)

    object_rdd.indexedRDD.persist(StorageLevel.MEMORY_ONLY)
    query_window_rdd.jvmSpatialPartitionedRDD.persist(StorageLevel.MEMORY_ONLY)
    query_window_rdd.jvmSpatialPartitionedRDD.count()
    object_rdd.indexedRDD.count()

    import time

    start = time.time()
    for _ in range(each_query_loop_times):
        result_size = JoinQuery.DistanceJoinQuery(object_rdd, query_window_rdd, True, True).count()
    diff = time.time() - start

    # second pass: rebuild the RDDs with the same partitioning and indexing, but without persisting
    object_rdd = PointRDD(self.sc, point_rdd_input_location, point_rdd_offset, point_rdd_splitter, True)
    query_window_rdd = CircleRDD(object_rdd, 0.1)
    object_rdd.analyze()
    object_rdd.spatialPartitioning(GridType.QUADTREE)
    object_rdd.buildIndex(IndexType.QUADTREE, True)
    query_window_rdd.spatialPartitioning(object_rdd.getPartitioner())
    object_rdd.buildIndex(IndexType.RTREE, False)

    start1 = time.time()
    for _ in range(each_query_loop_times):
        result_size = JoinQuery.DistanceJoinQuery(object_rdd, query_window_rdd, True, True).count()
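
# Not part of the test above: a small, self-contained helper sketching the timing pattern the
# test applies by hand (run a query N times, then compare a persisted and a non-persisted pass).
# The name average_runtime is an illustrative assumption, not an existing Sedona API.
import time

def average_runtime(run_query, repeats):
    """Run the zero-argument callable `repeats` times and return the mean elapsed seconds."""
    start = time.time()
    for _ in range(repeats):
        run_query()
    return (time.time() - start) / repeats

# usage sketch, mirroring the distance join in the test above:
# cached_secs = average_runtime(
#     lambda: JoinQuery.DistanceJoinQuery(object_rdd, query_window_rdd, True, True).count(),
#     each_query_loop_times)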
def rdd_filesave_join():
    logger.info("\t - RDD file save join start")
    full_start_time = datetime.now()

    # ----------------------------------------------------------
    # get spark session and context
    # ----------------------------------------------------------

    start_time = datetime.now()

    spark = create_spark_session()
    sc = spark.sparkContext
    sedona_version = pkg_resources.get_distribution("sedona").version

    logger.info("\t - PySpark {} session initiated with Apache Sedona {}: {}"
                .format(sc.version, sedona_version, datetime.now() - start_time))

    # ----------------------------------------------------------
    # create GNAF PointRDD from CSV file
    # ----------------------------------------------------------

    start_time = datetime.now()

    offset = 0  # The point long/lat fields start at column 0
    carry_other_attributes = True  # include non-geo columns

    point_rdd = PointRDD(sc, os.path.join(output_path, gnaf_csv_file_path),
                         offset, FileDataSplitter.CSV, carry_other_attributes)
    point_rdd.analyze()

    # add partitioning and indexing
    point_rdd.spatialPartitioning(GridType.KDBTREE)
    point_rdd.buildIndex(IndexType.RTREE, True)

    # set Spark storage type - set to MEMORY_AND_DISK if low on memory
    point_rdd.indexedRDD.persist(StorageLevel.MEMORY_ONLY)

    logger.info("\t\t - GNAF RDD created: {}".format(datetime.now() - start_time))

    # ----------------------------------------------------------
    # get boundary tags using a spatial join
    # ----------------------------------------------------------

    for bdy in bdy_list:
        start_time = datetime.now()

        # load boundaries
        # create geometries from WKT strings into new DataFrame
        bdy_df = spark.read.parquet(os.path.join(output_path, bdy["name"])) \
            .withColumn("geom", f.expr("st_geomFromWKT(wkt_geom)")) \
            .drop("wkt_geom")

        # create bdy rdd
        bdy_rdd = Adapter.toSpatialRdd(bdy_df, "geom")
        bdy_rdd.analyze()

        bdy_df.unpersist()

        bdy_rdd.spatialPartitioning(point_rdd.getPartitioner())
        bdy_rdd.spatialPartitionedRDD.persist(StorageLevel.MEMORY_ONLY)  # no need to persist(?) - used once

        # run the join - returns a PairRDD with 1 boundary to 1-N points
        # e.g. [Geometry: Polygon userData: WA32 TANGNEY WA, [Geometry: Point userData: GAWA_146792426 WA, ...]]
        result_pair_rdd = JoinQuery.SpatialJoinQueryFlat(point_rdd, bdy_rdd, True, True)

        # jim = result_pair_rdd.take(10)
        # for row in jim:
        #     print(row)

        result_pair_rdd.saveAsTextFile(
            os.path.join(output_path, "rdd_file_save_gnaf_with_{}".format(bdy["name"])))

        # # flat map values to have one point to bdy matched pair
        # flat_mapped_rdd = result_pair_rdd.flatMapValues(lambda x: x)
        #
        # # map values to create RDD row of gnaf & bdy IDs, plus state data
        # mapped_rdd = flat_mapped_rdd.map(
        #     lambda x: [x[1].getUserData().split("\t")[0],
        #                x[0].getUserData().split("\t")[0],
        #                x[0].getUserData().split("\t")[1]]
        # )
        #
        # # convert result to a dataframe of the following schema
        # schema = t.StructType([t.StructField("gnaf_pid", t.StringType(), False),
        #                        t.StructField(bdy["id_field"], t.StringType(), False),
        #                        t.StructField(bdy["name_field"], t.StringType(), False)])
        #
        # join_df = spark.createDataFrame(mapped_rdd, schema)
        #
        # # save result to disk
        # join_df.write \
        #     .option("compression", "gzip") \
        #     .mode("overwrite") \
        #     .parquet(os.path.join(output_path, "rdd_file_save_gnaf_with_{}".format(bdy["name"])))

        logger.info("\t\t - GNAF points bdy tagged with {}: {}".format(bdy["name"], datetime.now() - start_time))

    # cleanup
    spark.stop()

    logger.info("\t - RDD file save join done: {}".format(datetime.now() - full_start_time))
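
# rdd_filesave_join() leans on helpers defined elsewhere in this script: create_spark_session(),
# plus the module-level bdy_list, output_path and gnaf_csv_file_path. The block below is an
# assumption about their shape, with placeholder values, not the script's actual definitions.
from pyspark.sql import SparkSession
from sedona.utils import KryoSerializer, SedonaKryoRegistrator

output_path = "/tmp/sedona_output"          # placeholder
gnaf_csv_file_path = "/tmp/gnaf_light.csv"  # placeholder

# one dict per boundary layer: Parquet folder name plus the ID/name columns carried through the join
bdy_list = [
    {"name": "commonwealth_electorates", "id_field": "ce_pid", "name_field": "electorate"},
    {"name": "local_government_areas", "id_field": "lga_pid", "name_field": "lga_name"},
]

def create_spark_session():
    # assumed to mirror the session built inline in main() below; the real helper presumably
    # also handles Sedona JAR upload and SQL function registration
    return (SparkSession.builder
            .master("local[*]")
            .appName("rdd_filesave_join")
            .config("spark.serializer", KryoSerializer.getName)
            .config("spark.kryo.registrator", SedonaKryoRegistrator.getName)
            .getOrCreate())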
def main():
    start_time = datetime.now()

    # ----------------------------------------------------------
    # copy gnaf tables from Postgres to a CSV file - a one off
    #   - export required fields only and no header
    # ----------------------------------------------------------

    pg_conn = pg_pool.getconn()
    pg_cur = pg_conn.cursor()

    sql = """COPY (
                 SELECT longitude, latitude, gnaf_pid, locality_pid, locality_name, postcode, state
                 FROM gnaf_202008.{}
             ) TO STDOUT WITH CSV"""

    # address principals
    with open(gnaf_csv_file_path, 'w') as csv_file:
        pg_cur.copy_expert(sql.format("address_principals"), csv_file)

    # append address aliases
    with open(gnaf_csv_file_path, 'a') as csv_file:
        pg_cur.copy_expert(sql.format("address_aliases"), csv_file)

    pg_cur.close()
    pg_pool.putconn(pg_conn)

    logger.info("\t - GNAF points exported to CSV: {}".format(datetime.now() - start_time))
    start_time = datetime.now()

    # ----------------------------------------------------------
    # create Spark session and context
    # ----------------------------------------------------------

    # upload Apache Sedona JARs
    upload_jars()

    spark = (SparkSession.builder
             .master("local[*]")
             .appName("query")
             .config("spark.sql.session.timeZone", "UTC")
             .config("spark.sql.debug.maxToStringFields", 100)
             .config("spark.serializer", KryoSerializer.getName)
             .config("spark.kryo.registrator", SedonaKryoRegistrator.getName)
             .config("spark.cores.max", num_processors)
             .config("spark.sql.adaptive.enabled", "true")
             .config("spark.driver.memory", "8g")
             .getOrCreate())

    # Register Apache Sedona UDTs and UDFs
    SedonaRegistrator.registerAll(spark)

    # # set Sedona spatial indexing and partitioning config in Spark session
    # # (no effect on the "small" spatial join query in this script. Will improve bigger queries)
    # spark.conf.set("sedona.global.index", "true")
    # spark.conf.set("sedona.global.indextype", "rtree")
    # spark.conf.set("sedona.join.gridtype", "kdbtree")

    sc = spark.sparkContext

    logger.info("\t - PySpark {} session initiated: {}".format(sc.version, datetime.now() - start_time))
    start_time = datetime.now()

    # ----------------------------------------------------------
    # create GNAF PointRDD from CSV file
    # ----------------------------------------------------------

    offset = 0  # The point long/lat fields start at column 0
    carry_other_attributes = True  # include non-geo columns

    point_rdd = PointRDD(sc, os.path.join(output_path, gnaf_csv_file_path),
                         offset, FileDataSplitter.CSV, carry_other_attributes)
    point_rdd.analyze()

    # add partitioning and indexing
    point_rdd.spatialPartitioning(GridType.KDBTREE)
    point_rdd.buildIndex(IndexType.RTREE, True)

    # set Spark storage type - set to MEMORY_AND_DISK if low on memory
    point_rdd.indexedRDD.persist(StorageLevel.MEMORY_ONLY)

    logger.info("\t - GNAF RDD created: {}".format(datetime.now() - start_time))

    # ----------------------------------------------------------
    # get boundary tags using a spatial join
    # ----------------------------------------------------------

    for bdy in bdy_list:
        bdy_tag(spark, point_rdd, bdy)

    # point_rdd.unpersist()  # no such method on a SpatialRDD

    # ----------------------------------------------------------
    # merge boundary tag dataframes with GNAF records
    #   - required because spatial joins are INNER JOIN only,
    #     need to add untagged GNAF points
    # ----------------------------------------------------------

    start_time = datetime.now()

    # create gnaf dataframe and SQL view
    gnaf_df = spark.read \
        .option("header", False) \
        .option("inferSchema", True) \
        .csv(gnaf_csv_file_path) \
        .drop("_C0") \
        .drop("_C1") \
        .withColumnRenamed("_C2", "gnaf_pid") \
        .withColumnRenamed("_C3", "locality_pid") \
        .withColumnRenamed("_C4", "locality_name") \
        .withColumnRenamed("_C5", "postcode") \
        .withColumnRenamed("_C6", "state")

    # gnaf_df.printSchema()
    # gnaf_df.show(10, False)

    gnaf_df.createOrReplaceTempView("pnt")

    # add bdy tags, one bdy type at a time
    for bdy in bdy_list:
        gnaf_df = join_bdy_tags(spark, bdy)
        gnaf_df.createOrReplaceTempView("pnt")

    # # add point geoms for output to Postgres - in the PostGIS specific EWKT format
    # final_df = gnaf_df.withColumn("geom", f.expr("concat('SRID=4326;POINT (', longitude, ' ', latitude, ')')")) \
    #     .drop("longitude") \
    #     .drop("latitude")
    #
    # # final_df.printSchema()
    # # final_df.show(10, False)

    logger.info("\t - Boundary tags merged: {}".format(datetime.now() - start_time))

    # output result to Postgres
    export_to_postgres(gnaf_df, "testing2.gnaf_with_bdy_tags",
                       os.path.join(output_path, "temp_gnaf_with_bdy_tags"), True)

    # cleanup
    spark.stop()

    # delete intermediate bdy tag files and GNAF csv file
    for bdy in bdy_list:
        shutil.rmtree(os.path.join(output_path, "gnaf_with_{}".format(bdy["name"])))
    os.remove(gnaf_csv_file_path)