from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from sklearn.cluster import DBSCAN


def get_destinations(dfs, roam_dist=110, earth_radius=6372.795 * 1000):
    """
    Applies DBSCAN to extract the unique stop locations from a pyspark DataFrame.

    :param dfs: DataFrame with ['userId', 'state', 'latitude', 'longitude', 'begin', 'end'].
                Coordinates are in degrees.
    :param roam_dist: The stop location size in meters.
    :param earth_radius: The radius of the earth in meters.
    :return: (pyspark DataFrame) ['userId', 'state', 'begin', 'end', 'clusterId',
             'clusterLatitude', 'clusterLongitude']
    """

    @pandas_udf(
        "userId string, state string, latitude double, longitude double, "
        "begin timestamp, end timestamp, clusterId integer",
        PandasUDFType.GROUPED_MAP)
    def cluster_stops(df):
        """
        Applies DBSCAN to the stops of a single (userId, state) group.

        :param df: pandas DataFrame with 'latitude' and 'longitude' in radians.
        :return: (pandas DataFrame) the input columns plus 'clusterId'.
        """
        db = DBSCAN(eps=roam_dist / earth_radius, min_samples=1,
                    algorithm='ball_tree', metric='haversine')
        df["clusterId"] = db.fit_predict(df[['latitude', 'longitude']])
        return df

    dfs = dfs.withColumn('latitude', F.radians('latitude'))
    dfs = dfs.withColumn('longitude', F.radians('longitude'))
    stops_dfs = dfs.groupby('userId', 'state').apply(cluster_stops)
    stops_dfs = stops_dfs.withColumn('latitude', F.degrees('latitude'))
    stops_dfs = stops_dfs.withColumn('longitude', F.degrees('longitude'))

    w = Window.partitionBy('userId', 'clusterId')
    stops_dfs = stops_dfs.withColumn('clusterLatitude', F.mean('latitude').over(w))
    stops_dfs = stops_dfs.withColumn('clusterLongitude', F.mean('longitude').over(w))
    stops_dfs = stops_dfs.drop('latitude').drop('longitude')
    return stops_dfs

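# Minimal usage sketch for get_destinations (not part of the original snippet).
# Assumes an active SparkSession plus scikit-learn and pyarrow on the workers;
# the sample user, coordinates and timestamps below are made up for illustration.
from datetime import datetime
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

sample = spark.createDataFrame(
    [("u1", "NY", 40.7580, -73.9855, datetime(2020, 3, 1, 9, 0), datetime(2020, 3, 1, 9, 40)),
     ("u1", "NY", 40.7585, -73.9850, datetime(2020, 3, 1, 18, 0), datetime(2020, 3, 1, 18, 30))],
    ["userId", "state", "latitude", "longitude", "begin", "end"])

# Points closer than roam_dist end up in the same clusterId and share a cluster centroid
stops = get_destinations(sample, roam_dist=110)
stops.show()
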
def add_solar_features(df):
    return (
        df
        .withColumn('declination_angle',
                    radians(-23.45 * cos(((2 * pi) / 365) * (dayofyear('date') + 10))))
        .withColumn('diff_local_time_UTC', timezone_from_date('date'))
        .withColumn('d', (2 * pi * dayofyear('date')) / 365)
        .withColumn('equation_of_time',
                    -7.655 * sin(col('d')) + 9.873 * sin(2 * col('d') + 3.588))
        .drop('d')
        .withColumn('time_correction',
                    4 * (col('loc_long') - (15 * col('diff_local_time_UTC')))
                    + col('equation_of_time'))
        .withColumn('local_solar_hour', col('hour') + 0.5 + col('time_correction') / 60)
        .withColumn('hour_angle', 0.2618 * (col('local_solar_hour') - 12))
        .drop('diff_local_time_UTC', 'equation_of_time', 'time_correction', 'local_solar_hour')
        .withColumn('solar_elevation',
                    degrees(asin(sin('declination_angle') * sin(radians('loc_lat'))
                                 + cos('declination_angle') * cos(radians('loc_lat'))
                                 * cos('hour_angle'))))
        .drop('declination_angle', 'hour_angle'))

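# timezone_from_date() is used above but not defined in this snippet. A minimal
# sketch of what such a helper could look like, assuming Python 3.9+ (zoneinfo)
# and a single fixed IANA timezone ("Europe/Rome" is a placeholder); the actual
# helper may instead derive the zone from the record's location.
from datetime import datetime, time
from zoneinfo import ZoneInfo

from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType


def timezone_from_date(date_col, zone="Europe/Rome"):
    """Return the UTC offset of `zone` on the given date, in hours (DST included)."""

    @F.udf(DoubleType())
    def _offset(d):
        if d is None:
            return None
        # Evaluate the offset at local noon so DST transitions on that day are unambiguous
        dt = datetime.combine(d, time(hour=12), tzinfo=ZoneInfo(zone))
        return dt.utcoffset().total_seconds() / 3600.0

    return _offset(date_col)
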
def calculate_bearing_degrees(latitude_1, longitude_1, latitude_2, longitude_2):
    """Initial bearing (forward azimuth) in degrees from point 1 to point 2, inputs in degrees."""
    diff_longitude = F.radians(longitude_2 - longitude_1)
    r_latitude_1 = F.radians(latitude_1)
    r_latitude_2 = F.radians(latitude_2)
    # Standard forward-azimuth formula: bearing = atan2(y, x)
    y = F.sin(diff_longitude) * F.cos(r_latitude_2)
    x = (F.cos(r_latitude_1) * F.sin(r_latitude_2)
         - F.sin(r_latitude_1) * F.cos(r_latitude_2) * F.cos(diff_longitude))
    return F.degrees(F.atan2(y, x))

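# Quick usage sketch for calculate_bearing_degrees (not part of the original snippet);
# the column names lat1/lon1/lat2/lon2 and the sample coordinates are illustrative only.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()

pairs = spark.createDataFrame(
    [(40.7128, -74.0060, 51.5074, -0.1278)],  # New York -> London
    ["lat1", "lon1", "lat2", "lon2"])

pairs.select(
    calculate_bearing_degrees(
        F.col("lat1"), F.col("lon1"), F.col("lat2"), F.col("lon2")
    ).alias("bearing_deg")).show()
# Bearings are returned in (-180, 180]; add 360 to negative values for a 0-360 compass bearing.
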
def main():
    # Parameters for the algorithm
    roam_dist = 100  # meters
    min_stay = 10  # minutes

    # Parameters for the paths
    input_path = '/path_to_parquet'  # parquet file
    output_path = '/path_to_parquet_out'  # parquet file

    spark = SparkSession \
        .builder \
        .appName("Stop locations") \
        .getOrCreate()

    # Read data
    source_df = spark.read.parquet(input_path)
    source_df = source_df.select(
        'user_id', 'timestamp',
        F.radians('latitude').alias('lat'),
        F.radians('longitude').alias('lon')).orderBy('timestamp')
    source_df.cache()

    # Filter out points that carry no information (e.g. positions equal to their
    # neighbours within a time distance of less than min_stay, or isolated jumps
    # farther than roam_dist from both neighbours)
    w = Window.partitionBy(['user_id']).orderBy('timestamp')
    source_df = source_df.select(
        "user_id", "timestamp", "lat", "lon",
        F.lead("lat", 1).over(w).alias("next_lat"),
        F.lead("lon", 1).over(w).alias("next_lon"))
    # Haversine distance to the next point (EARTH_RADIUS in meters, defined at module level)
    dist_df = source_df.withColumn(
        "distance_next",
        EARTH_RADIUS * 2 * F.asin(
            F.sqrt(
                F.pow(F.sin((col("next_lat") - col("lat")) / 2.0), 2) +
                F.cos("lat") * F.cos("next_lat") *
                F.pow(F.sin((col("next_lon") - col("lon")) / 2.0), 2))))
    dist_df = dist_df.withColumn("distance_prev", F.lag("distance_next").over(w))
    exclude_df = dist_df.where(
        ((col("distance_next") < 5) & (col("distance_prev") < 5)) |
        ((col("distance_next") > roam_dist) & (col("distance_prev") > roam_dist)))
    df = source_df.join(exclude_df, ['user_id', 'timestamp'],
                        "left_anti").select("user_id", "timestamp", "lat", "lon")

    # Transform to RDD, in order to apply the function get_stop_location
    # RDD that contains: (user_id, [[timestamp, lat, lon]])
    df_rdd = df.orderBy(['user_id', 'timestamp']).rdd.map(tuple)
    df_rdd = df_rdd.map(lambda x: (x[0], [[x[1], x[2], x[3]]]))
    # RDD that contains: (user_id, [[timestamp, lat, lon], ..., [timestamp, lat, lon]]), sorted by timestamp
    grouped_rdd = df_rdd.reduceByKey(lambda x, y: x + y)
    stop_locations_rdd = grouped_rdd.map(lambda x: (
        x[0],
        get_stop_location(x[1], min_stay_duration=min_stay, roaming_distance=roam_dist)))
    stop_locations_rdd = stop_locations_rdd.flatMapValues(lambda x: x).map(
        lambda x: (x[0], x[1][0], x[1][1], x[1][2], x[1][3]))

    # Output schema
    schema = StructType([
        StructField('user_id', StringType(), False),
        StructField('lat', DoubleType(), False),
        StructField('lon', DoubleType(), False),
        StructField('from', TimestampType(), False),
        StructField('to', TimestampType(), False)
    ])
    result_df = spark.createDataFrame(stop_locations_rdd, schema)
    result_df = result_df.withColumn('lat', F.degrees('lat'))
    result_df = result_df.withColumn('lon', F.degrees('lon'))
    result_df.write.save(output_path)

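# The distance_next expression in the main() above is the haversine formula written
# with Spark column functions; EARTH_RADIUS and get_stop_location are assumed to be
# defined at module level. A plain-Python reference for the same formula (the
# 6371008.8 m mean radius is an assumed value, not necessarily the script's constant):
import math

EARTH_RADIUS = 6371008.8  # mean Earth radius in metres (assumed value)


def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in metres between two points given in radians.

    Mirrors the Spark expression:
    2 * R * asin(sqrt(sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)))
    """
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return EARTH_RADIUS * 2 * math.asin(math.sqrt(a))


# Example: 0.001 degrees of latitude along a meridian is roughly 111 m
print(haversine_m(math.radians(45.000), math.radians(7.0),
                  math.radians(45.001), math.radians(7.0)))
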
def main(): """Main function""" # Get args args = get_args() # Azure credentials sas_token = args.sas storage_account_name = args.storage container_in = args.container_in container_out = args.container_out azure_accounts = list() azure_accounts.append({ "storage": storage_account_name, "sas": sas_token, "container": container_in }) azure_accounts.append({ "storage": storage_account_name, "sas": sas_token, "container": container_out }) # VM cores = args.vm_cores ram = args.vm_ram shuffle_partitions = args.shuffle_partitions # Geohash file path geohash_path = args.geohashpath # Date, country, prefix country = args.country date_string = args.date prefix = args.prefix # Set date variables day_time = datetime.strptime(date_string, "%Y-%m-%d") year = day_time.year month = day_time.month day = day_time.day # stop config seconds = 60 accuracy = args.accuracy roam_dist = args.roam_dist min_stay = args.min_stay overlap_hours = args.overlap_hours # Path in - path out blob_in = f"wasbs://{container_in}@{storage_account_name}.blob.core.windows.net/preprocessed/{country}/" path_out = f"stoplocation-v{VERSION}_r{roam_dist}-s{min_stay}-a{accuracy}-h{overlap_hours}/{country}" if prefix: path_out = f"stoplocation-v{VERSION}_prefix_r{roam_dist}-s{min_stay}-a{accuracy}-h{overlap_hours}/{country}" # config spark conf = getSparkConfig(cores, ram, shuffle_partitions, azure_accounts) # Create spark session sc = SparkContext(conf=conf).getOrCreate() sqlContext = SQLContext(sc) spark = sqlContext.sparkSession # Init azure client blob_service_client = BlobServiceClient.from_connection_string( CONN_STRING.format(storage_account_name, sas_token)) # build keys, date is mandatory, prefix opt partition_key = "year={}/month={}/day={}".format(year, month, day) if prefix: partition_key = "year={}/month={}/day={}/prefix={}".format( year, month, day, prefix) blob_base = "{}/{}".format(path_out, partition_key) # # check for skip # TODO # skip = False print("process " + partition_key + " to " + blob_base) start_time = time.time() local_dir = LOCAL_PATH + partition_key print("write temp to " + local_dir) # cleanup local if exists if (os.path.isdir(local_dir)): map(os.unlink, (os.path.join(local_dir, f) for f in os.listdir(local_dir))) # TODO cleanup remote if exists # Output schema schema = ArrayType( StructType([ #StructField('device_type', IntegerType(), False), StructField('serial', IntegerType(), False), StructField('latitude', DoubleType(), False), StructField('longitude', DoubleType(), False), StructField('begin', TimestampType(), False), StructField('end', TimestampType(), False), StructField('personal_area', BooleanType(), False), StructField('distance', DoubleType(), False), StructField('geohash6', StringType(), False), StructField('after_stop_distance', DoubleType(), False) ])) spark_get_stop_location = udf( lambda z: get_stop_location(z, roam_dist, min_stay), schema) # Geohash file print("read geohash parquet") csv_time = time.time() dfs_us_states = spark.read.format("parquet").load(geohash_path) # states = [s.STUSPS for s in dfs_us_states.select( # 'STUSPS').distinct().collect()] dfs_us_states = dfs_us_states.select( col('STUSPS').alias('state'), col('geohash').alias('geohash5')) dfs_us_states = dfs_us_states.drop_duplicates(subset=['geohash5']) # Input dataset print("read dataset table") read_time = time.time() # dfs = spark.read.format("parquet").load(blob_in) # # apply partition filter # dfs_partition = dfs.where( # f"(year = {year} AND month = {month} AND day = {day} AND prefix = '{prefix}')") # read only 
partition to reduce browse time dfs_cur_partition = spark.read.format("parquet").load( f"{blob_in}/{partition_key}") # lit partition filters as data dfs_cur_partition = dfs_cur_partition.withColumn('year', F.lit(year)) dfs_cur_partition = dfs_cur_partition.withColumn('month', F.lit(month)) dfs_cur_partition = dfs_cur_partition.withColumn('day', F.lit(day)) if prefix: dfs_cur_partition = dfs_cur_partition.withColumn( 'prefix', F.lit(prefix)) # read next day for overlap next_day = day_time + timedelta(days=1) next_partition_key = "year={}/month={}/day={}".format( next_day.year, next_day.month, next_day.day) if prefix: next_partition_key = "year={}/month={}/day={}/prefix={}".format( next_day.year, next_day.month, next_day.day, prefix) dfs_next_partition = spark.read.format("parquet").load( f"{blob_in}/{next_partition_key}") dfs_next_partition = dfs_next_partition.where( F.hour("timestamp") <= (overlap_hours - 1)) # lit partition filters as data dfs_next_partition = dfs_next_partition.withColumn('year', F.lit(next_day.year)) dfs_next_partition = dfs_next_partition.withColumn('month', F.lit(next_day.month)) dfs_next_partition = dfs_next_partition.withColumn('day', F.lit(next_day.day)) if prefix: dfs_next_partition = dfs_next_partition.withColumn( 'prefix', F.lit(prefix)) # union with overlap dfs_partition = dfs_cur_partition.unionAll(dfs_next_partition) print("process with spark") spark_time = time.time() # select columns dfs_partition = dfs_partition.select( 'prefix', 'userID', 'timestamp', 'latitude', 'longitude', (F.when(col('opt1') == 'PERSONAL_AREA', True).otherwise(False)).alias('personal_area'), 'accuracy') # keep only data with required accuracy dfs_partition = dfs_partition.where((col('accuracy') <= accuracy) & (col('accuracy') >= 0)) # stats - enable only for debug! # num_inputs = dfs_partition.count() # print(f"read {num_inputs} rows from "+partition_key) # Lowering the granularity to 1 minutes # explicitely convert to timestamp #dfs_partition = dfs_partition.withColumn('timestamp', col('timestamp').cast('timestamp')) seconds_window = F.unix_timestamp( 'timestamp') - F.unix_timestamp('timestamp') % seconds w = Window().partitionBy('userID', seconds_window).orderBy('accuracy') dfs_partition = dfs_partition.withColumn( 'rn', F.row_number().over(w).cast('int')).where(col('rn') == 1).drop('rn') # Radians lat/lon dfs_partition = dfs_partition.withColumn('latitude', F.radians('latitude')).withColumn( 'longitude', F.radians('longitude')) # Groups GPS locations into chucks. 
A chunk is formed by groups of points that are distant no more than roam_dist w = Window.partitionBy(['prefix', 'userID']).orderBy('timestamp') dfs_partition = dfs_partition.withColumn('next_lat', F.lead('latitude', 1).over(w)) dfs_partition = dfs_partition.withColumn('next_lon', F.lead('longitude', 1).over(w)) # Haversine distance dfs_partition = dfs_partition.withColumn( 'distance_next', EARTH_RADIUS * 2 * F.asin( F.sqrt( F.pow(F.sin((col('next_lat') - col('latitude')) / 2.0), 2) + F.cos('latitude') * F.cos('next_lat') * F.pow(F.sin((col('next_lon') - col('longitude')) / 2.0), 2)))) dfs_partition = dfs_partition.withColumn( 'distance_prev', F.lag('distance_next', default=0).over(w)) # Chunks dfs_partition = dfs_partition.withColumn( 'chunk', F.when(col('distance_prev') > roam_dist, 1).otherwise(0)) windowval = (Window.partitionBy( 'prefix', 'userID').orderBy('timestamp').rangeBetween(Window.unboundedPreceding, 0)) dfs_partition = dfs_partition.withColumn( 'chunk', F.sum('chunk').over(windowval).cast('int')) # Remove chunks of the next day w = Window.partitionBy(['prefix', 'userID', 'chunk']) dfs_partition = dfs_partition.withColumn( 'min_timestamp', F.dayofmonth(F.min('timestamp').over(w))) dfs_partition = dfs_partition.where( col('min_timestamp') == day).drop('min_timestamp') # Get the stops result_df = dfs_partition.groupBy('prefix', 'userID', 'chunk').agg( F.array_sort( F.collect_list( F.struct('timestamp', 'latitude', 'longitude', 'distance_prev', 'personal_area'))).alias('gpsdata'), F.sum('distance_prev').alias('dist_sum')) result_df = result_df.withColumn('gpsdata', spark_get_stop_location('gpsdata')) result_df = result_df.select('userID', 'chunk', F.explode_outer('gpsdata').alias('e'), 'dist_sum') result_df = result_df.select( 'userID', 'chunk', col('e.latitude').alias('latitude'), col('e.longitude').alias('longitude'), col('e.begin').alias('begin'), col('e.end').alias('end'), col('e.personal_area').alias('personal_area'), col('e.geohash6').alias('geohash6'), col('e.serial').alias('serial'), col('e.distance').alias('stop_distance'), col('e.after_stop_distance').alias('after_stop_distance'), 'dist_sum') result_df = result_df.fillna(0, subset=['after_stop_distance']) # Remove all those stop that start the next day result_df = result_df.where((col('begin').isNull()) | (F.dayofmonth('begin') != next_day.day)) result_df = result_df.withColumn( 'isStop', F.when(col('serial').isNotNull(), 1).otherwise(0)) result_df = result_df.withColumn( 'dist_sum', F.when(col('isStop') == 1, col('stop_distance')).otherwise(col('dist_sum'))) windowval = (Window.partitionBy('userId').orderBy( 'chunk', 'serial').rowsBetween(Window.currentRow, Window.unboundedFollowing)) result_df = result_df.withColumn('isStop_cum', F.sum('isStop').over(windowval)) result_df = result_df.groupBy('userId', 'isStop_cum').agg( F.first('latitude', ignorenulls=True).alias('latitude'), F.first('longitude', ignorenulls=True).alias('longitude'), F.first('begin', ignorenulls=True).alias('begin'), F.first('end', ignorenulls=True).alias('end'), F.first('personal_area', ignorenulls=True).alias('personal_area'), F.first('geohash6', ignorenulls=True).alias('geohash6'), F.sum('dist_sum').alias('prev_travelled_distance'), F.sum('after_stop_distance').alias('after_stop_distance')) # compute next distance, which is null if it's the last windowval = Window.partitionBy('userId').orderBy(F.desc('isStop_cum')) result_df = result_df.withColumn( 'next_travelled_distance', F.lead('prev_travelled_distance').over(windowval)) result_df = 
result_df.withColumn( 'next_travelled_distance', F.when((col('next_travelled_distance').isNull()) & (col('after_stop_distance') > 0), col('after_stop_distance')).otherwise( col('next_travelled_distance'))) # Drop nulls result_df = result_df.dropna(subset=['latitude']).drop('isStop_cum') # Transform latitude and longitude back to degrees result_df = result_df.withColumn('latitude', F.degrees('latitude')) result_df = result_df.withColumn('longitude', F.degrees('longitude')) # US states result_df = result_df.withColumn( "geohash5", F.expr("substring(geohash6, 1, length(geohash6)-1)")) result_df = result_df.join(F.broadcast(dfs_us_states), on="geohash5", how="inner").drop('geohash5') # lit partition data - enable only if added to partitionBy # result_df = result_df.withColumn('year', F.lit(year)) # result_df = result_df.withColumn('month', F.lit(month)) # result_df = result_df.withColumn('day', F.lit(day)) # write out_partitions = len(US_STATES) result_df.repartition(out_partitions, "state").write.partitionBy( "state").format('parquet').mode("overwrite").save(local_dir + "/") # stats - enable only for debug! # num_records = result_df.count() # print(f"written {num_records} rows to "+local_dir) # if num_records == 0: # raise Exception("Zero rows output") print("upload local data to azure") upload_time = time.time() # upload parts over states for state in US_STATES: print(f"upload files for {state}") state_dir = local_dir + "/state=" + state state_key = f"{partition_key}/state={state}/" if (os.path.isdir(state_dir)): files = [ filename for filename in os.listdir(state_dir) if filename.startswith("part-") ] if len(files) > 0: for file_local in files: file_path = state_dir + "/" + file_local part_num = int(file_local.split('-')[1]) part_key = '{:05d}'.format(part_num) # fix name as static hash to be reproducible filename_hash = hashlib.sha1( str.encode(state_key + part_key)).hexdigest() blob_key = "{}/state={}/part-{}-{}.snappy.parquet".format( blob_base, state, part_key, filename_hash) print("upload " + file_path + " to " + container_out + ":" + blob_key) blob_client = blob_service_client.get_blob_client( container_out, blob_key) with open(file_path, "rb") as data: blob_client.upload_blob(data, overwrite=True) # cleanup os.remove(file_path) else: print(f"no files to upload for {state}") else: print(f"missing partition for {state}") print("--- {} seconds elapsed ---".format(int(time.time() - start_time))) print() stop_time = time.time() spark.stop() end_time = time.time() print("Done in {} seconds (csv:{} read:{} spark:{} upload:{} stop:{})". format(int(end_time - start_time), int(read_time - csv_time), int(spark_time - read_time), int(upload_time - spark_time), int(stop_time - upload_time), int(end_time - stop_time))) print('Done.')
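# getSparkConfig() is called in the main() above but not shown here. A minimal sketch
# of what it might do, assuming the hadoop-azure (wasbs) connector with per-container
# SAS keys passed through spark.hadoop.* properties; the local master and the
# memory/partition tuning below are illustrative assumptions, not the script's
# actual configuration.
from pyspark import SparkConf


def getSparkConfig(cores, ram, shuffle_partitions, azure_accounts):
    """Hypothetical sketch: build a SparkConf for local execution with WASB SAS access."""
    conf = SparkConf().setAppName("stop-locations")
    conf.setMaster("local[{}]".format(cores))          # assumption: local mode sized by vm_cores
    conf.set("spark.driver.memory", str(ram))          # assumption: ram passed as e.g. "8g"
    conf.set("spark.sql.shuffle.partitions", str(shuffle_partitions))
    # Register a SAS token for each container so Spark can read/write wasbs:// paths
    for account in azure_accounts:
        conf.set(
            "spark.hadoop.fs.azure.sas.{}.{}.blob.core.windows.net".format(
                account["container"], account["storage"]),
            account["sas"])
    return conf
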
from df_tools import *
from histfile import *
from tools import *
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
import sys

nside = int(sys.argv[1])

spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("nside{}.parquet".format(nside))

# Angular offsets from the cell centre (theta_c, phi_c), in arcminutes
df = df.withColumn(
    "dx",
    F.degrees(
        F.sin((df["theta"] + df["theta_c"]) / 2) * (df["phi"] - df["phi_c"])) * 60)
df = df.withColumn("dy", F.degrees(df["theta"] - df["theta_c"]) * 60)
# df = df.withColumn("r", F.hypot(df["dx"], df["dy"]))

# Cartesian unit vectors for the point and for the cell centre
df = df.withColumn("x", F.sin(df["theta"]) * F.cos(df["phi"])) \
       .withColumn("y", F.sin(df["theta"]) * F.sin(df["phi"])) \
       .withColumn("z", F.cos(df["theta"])) \
       .drop("theta", "phi")
df = df.withColumn("xc", F.sin(df["theta_c"]) * F.cos(df["phi_c"])) \
       .withColumn("yc", F.sin(df["theta_c"]) * F.sin(df["phi_c"])) \
       .withColumn("zc",