Esempio n. 1
0
def sucursal(event=None, context=None):
	"""Find branches (sucursales) near the coordinates in the JSON request body.

	Expects a JSON payload with "lat" and "long" keys. The point is mapped to
	an H3 cell at resolution 9 and matched against the seven stored quadrant
	columns of every branch; branches of company 0 always match. Returns a
	JSON list of matching branches.
	"""
	data = request.get_json()
	lat = data["lat"]
	long = data["long"]

	h3_address_9 = h3.geo_to_h3(float(lat), float(long), 9)
	# Resolution 8 is computed for the pending coarser-fallback search (TODO below).
	h3_address_8 = h3.geo_to_h3(float(lat), float(long), 8)

	cur = mysql.connection.cursor()
	# Parameterized query: never interpolate request-derived data into SQL
	# (the previous `sql % data` form was injectable).
	sql = '''SELECT e.emp_nombrecorto,s.emp_run,s.suc_empresa_id,s.suc_nombre,e.emp_logo_url,e.emp_color_fondo,s.suc_id FROM Sucursales s JOIN Empresas e ON s.emp_run = e.emp_run WHERE e.emp_run=0 or suc_9_cuadrante1=%s OR suc_9_cuadrante2=%s OR suc_9_cuadrante3=%s OR suc_9_cuadrante4=%s OR suc_9_cuadrante5=%s OR suc_9_cuadrante6=%s OR suc_9_cuadrante7=%s'''
	params = (h3_address_9,) * 7
	cur.execute(sql, params)
	rv = cur.fetchall()

	lista = [
		{'suc_id': row[6], 'emp_color_fondo': row[5], 'emp_logo_url': row[4],
		 'suc_nombre': row[3], 'suc_empresa': row[2], 'emp_run': row[1],
		 'emp_nombrecorto': row[0]}
		for row in rv
	]
	# TODO: if no rows are found, retry at resolution 8 (h3_address_8); if that
	# also returns nothing, respond with a JSON error saying there is no branch
	# of this store at this position.
	return jsonify(lista)
Esempio n. 2
0
def preprocess2(data1, json_filename):
    """Clean taxi-trip rows from *data1* and append them to preprocessed1.csv.

    A row is skipped when any coordinate is exactly 0, when the trip duration
    is <= 60 s or > 3 h, or when either endpoint's resolution-9 H3 cell is not
    in the cell set loaded from *json_filename* (the Manhattan mask).

    :param data1: pandas DataFrame of raw trip records.
    :param json_filename: JSON file listing the allowed H3 cells.
    """
    h3_coordinates = read_json(json_filename)
    count = 0
    final_data = []
    # Context manager guarantees the CSV file is flushed and closed even if a
    # row raises (the original left the handle open on error).
    with open("preprocessed1.csv", "a") as prep_file:
        prep = csv.writer(prep_file)
        prep.writerow([
            'tpep_pickup_datetime', 'tpep_dropoff_datetime', 'pickup-zone',
            'dropoff-zone'
        ])
        #### Preprocessing starts from here
        for index, row in data1.iterrows():
            # Periodic progress message; `count` tracks skipped rows.
            if count == 10000:
                print(str(index), " items preprocessed")
                count = 0

            ### check if the latitude and longitude is correct
            if (row['pickup_latitude'] == 0) or (row['pickup_longitude'] == 0) \
                    or (row['dropoff_latitude'] == 0) \
                    or (row['dropoff_longitude'] == 0):
                count += 1
                continue

            ### get the date and drop implausible durations (<=1 min or >3 h)
            now = dt.datetime.strptime(row['tpep_pickup_datetime'],
                                       '%Y-%m-%d %H:%M:%S')
            then = dt.datetime.strptime(row['tpep_dropoff_datetime'],
                                        '%Y-%m-%d %H:%M:%S')
            diff = (then - now).total_seconds()
            if diff <= 60 or diff > 10800:
                count += 1
                continue

            ### get the zone co-ordinates
            pickup_h3 = h3.geo_to_h3(row['pickup_latitude'],
                                     row['pickup_longitude'], 9)
            dropoff_h3 = h3.geo_to_h3(row['dropoff_latitude'],
                                      row['dropoff_longitude'], 9)

            ### check the co-ordinates are inside the Manhattan cell set
            if (pickup_h3 not in h3_coordinates
                    or dropoff_h3 not in h3_coordinates):
                count += 1
                continue
            final_data.append([
                row['tpep_pickup_datetime'], row['tpep_dropoff_datetime'],
                pickup_h3, dropoff_h3
            ])
        prep.writerows(final_data)
def add_hexs_and_prepare_bulk_request(df, dataformat='raw'):
    """
    Apply geo_to_h3 to a chunk of tweets and prepare bulk request
    :param df: dataframe of tweets (a chunk of the tweets collection)
    :param dataformat: 'raw' when lat/lon live in flat columns; any other value
        assumes the Mongo 'location' shape with a "coordinates" pair
    :return: request job for bulk insert

    Example:
    [InsertOne({'_id': ObjectId('5e10ced16e7ccd7b44e9ee07'), 'hex': {'9': '89dd6876033ffff'}}),

    """
    if dataformat == 'raw':
        # Resolution-9 H3 cell per tweet; the Series name becomes the joined
        # column name below.
        df2 = df.apply(lambda row: h3.geo_to_h3(row['lat'], row['lon'], 9),
                       axis=1)
        df2.name = '9'

        # Same point at the finer resolution 10.
        df3 = df.apply(lambda row: h3.geo_to_h3(row['lat'], row['lon'], 10),
                       axis=1)
        df3.name = '10'
    else:  #coordinates have been reshaped to location shape in mongo

        # apply geo_to_h3
        # NOTE(review): this reads coordinates as (lat, lon); GeoJSON stores
        # (lon, lat) -- confirm the Mongo documents' ordering.
        resolution = 9
        df2 = df['location'].apply(lambda row: h3.geo_to_h3(
            row["coordinates"][0], row["coordinates"][1], resolution))
        df2.name = '9'

        resolution = 10
        df3 = df['location'].apply(lambda row: h3.geo_to_h3(
            row["coordinates"][0], row["coordinates"][1], resolution))
        df3.name = '10'

    # join (concatenating) tweets with new data
    df4 = pd.concat([df, df2, df3], axis=1)

    # Builds the $set dict and wraps it in an UpdateOne request; the list of
    # requests is what the Mongo bulk API consumes.
    def f(x):
        return UpdateOne({'_id': ObjectId(x['_id'])},
                         {'$set': {
                             'hex': {
                                 '9': x['9'],
                                 '10': x['10']
                             }
                         }})

    # return list of requests
    return list(df4.apply(f, axis=1))
def add_user_attributes(users, props, visits, std=False):
    r"""
    Assign per-user features derived from the properties each user visited.

    Parameters
    ----------
    users: DataFrame
        Users dataframe; merged on its index (assumed to be the user id --
        verify against callers).
    props: DataFrame
        Properties dataframe, indexed by property id.
    visits: DataFrame
        Visits dataframe linking "id_usuario" to "id_entidad".
    std: bool
        Whether to also add the standard deviation of each feature.
    """
    # Label every visit with the attributes of the visited property.
    visits_labeled = visits.merge(props,
                                  left_on="id_entidad",
                                  right_index=True)

    features = list(visits_labeled.columns)
    features.remove("id_entidad")
    features.remove("id_usuario")
    users_labeled = users
    for feat in features:
        # Per-user mean of the feature across all visited properties.
        grouped_visits_labeled = visits_labeled.groupby(
            "id_usuario")[feat].apply(np.mean).to_frame()
        users_labeled = users_labeled.merge(grouped_visits_labeled,
                                            right_index=True,
                                            left_index=True)
        if std:
            grouped_visits_labeled = visits_labeled.groupby(
                "id_usuario")[feat].apply(np.std).to_frame()
            # suffixes=(None, '_std') keeps the mean column unsuffixed;
            # requires pandas >= 1.0.
            users_labeled = users_labeled.merge(grouped_visits_labeled,
                                                right_index=True,
                                                left_index=True,
                                                suffixes=(None, '_std'))
    users = users_labeled
    visits = visits_labeled
    # H3 cell of each user's location at three resolutions (coarse to fine).
    users["6h3"] = users.apply(lambda x: h3.geo_to_h3(x[
        'ubicacion_latitud'], x['ubicacion_longitud'], 6),
                               axis=1)
    users["8h3"] = users.apply(lambda x: h3.geo_to_h3(x[
        'ubicacion_latitud'], x['ubicacion_longitud'], 8),
                               axis=1)
    users["10h3"] = users.apply(lambda x: h3.geo_to_h3(x[
        'ubicacion_latitud'], x['ubicacion_longitud'], 10),
                                axis=1)
    return users
Esempio n. 5
0
 def test_h3_get_resolution(self):
     """An address built at resolution r must report resolution r."""
     for resolution in range(16):
         address = h3.geo_to_h3(37.3615593, -122.0553238, resolution)
         reported = h3.h3_get_resolution(address)
         self.assertEqual(reported, resolution,
                          'Got the expected H3 resolution back')
Esempio n. 6
0
def main(data_file, polygon_file, resolution, output_file):
    """Render a folium choropleth of sighting counts per H3 hexagon.

    :param data_file: CSV with latitude/longitude/date columns.
    :param polygon_file: open file handle with GeoJSON polygons of US states.
    :param resolution: H3 resolution used both for indexing and polyfill.
    :param output_file: path where the folium HTML map is saved.
    """
    # Drop rows with missing latitude up front.
    data = pd.read_csv(data_file).query("~latitude.isnull()")
    # Index the data by h3. Not the most efficient way to do it but it's fast
    # enough IMO.
    data.loc[:, "h3_index"] = [
        h3.geo_to_h3(row.latitude, row.longitude, resolution)
        for _, row in data.iterrows()
    ]

    # Read in the US states polygons.
    us_states = shape(json.load(polygon_file))
    state_hexes = set()

    # Polyfill each state and add it to the big list of h3 indexes.
    for geometry in us_states:
        state_hexes |= h3.polyfill(mapping(geometry),
                                   resolution,
                                   geo_json_conformant=True)

    # Union with the data cells so sightings outside the polygons still show.
    all_hexes = state_hexes | set(data.h3_index)
    # Now reindex the counted sightings by hex address and fill the empties
    # with zeros.
    grouped_sightings = (data.groupby("h3_index").agg({
        "date": "count"
    }).reindex(list(all_hexes), fill_value=0))

    geo_json = {"type": "FeatureCollection", "features": []}

    # One polygon feature per hexagon, carrying its sighting count.
    for h3_address, row in grouped_sightings.iterrows():
        hexagon = h3.h3_to_geo_boundary(h3_address, geo_json=True)
        geo_json["features"].append({
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [hexagon]
            },
            "properties": {
                "hex_address": h3_address,
                "count": int(row.date),
            },
        })

    # Now it's map time.
    map_center = [data["latitude"].mean(), data["longitude"].mean()]
    colormap = branca.colormap.linear.YlOrRd_09.scale(
        grouped_sightings.date.min(), grouped_sightings.date.max())
    m = folium.Map(location=map_center, zoom_start=5, tiles="cartodbpositron")

    folium.GeoJson(
        geo_json,
        tooltip=folium.GeoJsonTooltip(["hex_address", "count"]),
        style_function=lambda x: {
            "fillColor": colormap(x["properties"]["count"]),
            "color": "gray",
            "weight": 0.1,
            "fillOpacity": 0.5,
        },
    ).add_to(m)
    colormap.add_to(m)
    m.save(output_file)
Esempio n. 7
0
def main(polygon_file, data_file, resolution, output_file):
    """Write a CSV of all H3 hexagons covering the US plus all sighting cells.

    :param polygon_file: open file handle with GeoJSON polygons of US states.
    :param data_file: CSV with latitude/longitude columns of sightings.
    :param resolution: H3 resolution used for indexing and polyfill.
    :param output_file: open writable handle; receives hex_address/hex_geojson.
    """
    logger.info("Reading 👣 sightings.")
    data = pd.read_csv(data_file).query("~latitude.isnull()")
    data.loc[:, "h3_index"] = [
        h3.geo_to_h3(row.latitude, row.longitude, resolution)
        for _, row in data.iterrows()
    ]

    logger.info("Reading US polygon.")
    us_states = shape(json.load(polygon_file))
    # Seed with the cells that actually contain sightings.
    us_hexes = set(data.h3_index)

    logger.info("Polyfilling the USA.")
    for geometry in tqdm(us_states, total=len(us_states)):
        us_hexes |= h3.polyfill(mapping(geometry),
                                resolution,
                                geo_json_conformant=True)
    logger.info(f"Writing to {output_file.name}.")
    writer = csv.DictWriter(output_file,
                            fieldnames=["hex_address", "hex_geojson"])
    writer.writeheader()
    # One row per hexagon: its address and its boundary serialized as GeoJSON.
    for us_hex in tqdm(us_hexes, total=len(us_hexes)):
        writer.writerow({
            "hex_address":
            us_hex,
            "hex_geojson":
            json.dumps(h3.h3_to_geo_boundary(us_hex, geo_json=True)),
        })
Esempio n. 8
0
def counts_by_hexagon(df, resolution, latlong):
    '''Use h3.geo_to_h3 to index each data point into the spatial index of the specified resolution.
      Use h3.h3_to_geo_boundary to obtain the geometries of these hexagons.

    :param df: DataFrame containing the coordinate columns named in *latlong*.
    :param resolution: H3 resolution (0-15).
    :param latlong: two-element sequence of (latitude column, longitude column).
    :return: DataFrame with one row per hexagon: hex_id, value (point count)
        and a GeoJSON-style polygon in the geometry column.
    '''
    # Work on an explicit copy: df[latlong] yields a slice whose later
    # hex_id assignment would trigger SettingWithCopyWarning and could
    # silently fail to write.
    df = df[latlong].copy()
    df["hex_id"] = df.apply(
        lambda row: h3.geo_to_h3(row[latlong[0]], row[latlong[1]], resolution),
        axis=1)

    # Count points per hexagon.
    df_aggreg = df.groupby(by="hex_id").size().reset_index()
    df_aggreg.columns = ["hex_id", "value"]

    # GeoJSON-style polygon boundary for each hexagon.
    df_aggreg["geometry"] = df_aggreg.hex_id.apply(
        lambda x: {
            "type": "Polygon",
            "coordinates":
            [h3.h3_to_geo_boundary(h3_address=x, geo_json=True)]
        })

    return df_aggreg
Esempio n. 9
0
def add_h3_ids_to_points(df: pd.DataFrame, h3_max: int,
                         h3_min: int) -> pd.DataFrame:
    """Add Uber H3 ids to the point geometries in a Spatially Enabled DataFrame.
    :param df: Spatially Enabled DataFrame with point geometries to be aggregated.
    :param h3_max: Integer maximum H3 grid level defining the smallest geographic hex area - must be larger than the minimum.
    :param h3_min: Integer minimum H3 grid level defining the largest geographic hex area - must be smaller than the maximum.
    :return: Pandas DataFrame with Uber H3 ids added for all the resolutions between the maximum and minimum.
    """
    assert h3_max > h3_min

    # get a list of zoom levels and ensure the H3 levels are sorted from highest to lowest resolution
    h3_lvl_lst = _get_h3_range_lst(h3_min, h3_max)
    h3_lvl_lst.sort(reverse=True)

    # calculate the highest resolution H3 id for each location
    # NOTE: centroid is (x, y) = (lon, lat) while geo_to_h3 wants (lat, lon),
    # hence the [1], [0] ordering.
    first_level = h3_lvl_lst[0]
    df[_h3_col(
        first_level)] = df.SHAPE.swifter.apply(lambda geom: h3.geo_to_h3(
            geom.centroid[1], geom.centroid[0], first_level))

    # use the highest resolution H3 id to get progressively lower resolution
    # H3 id's -- h3_to_parent on the existing id avoids re-hashing geometries
    for h3_lvl in h3_lvl_lst[1:]:
        df[_h3_col(h3_lvl)] = df[_h3_col(first_level)].swifter.apply(
            lambda first_val: h3.h3_to_parent(first_val, h3_lvl))

    return df
Esempio n. 10
0
def _df_to_h3(df, h3_level=8, aggfunc=np.sum):
    """
    Aggregates point data to corresponding h3 polygons
    For more on h3 see https://uber.github.io/h3/#/
    Parameters
    ----------
    df : pd.DataFrame of lat/long data to be aggregated, or GeoDataFrame with valid point geometry
    h3_level : resolution of h3_tiles. Default is arbitrary
    aggfunc : function, str, list or dict to aggregate numeric cols to h3 tile as per pd.DataFrame.agg(aggfunc)

    Returns
    -------
    H3DataFrame of the dataframe aggregated to h3 tiles, with index 'id' of h3 tile code
    """
    df = _validate_point_data(df)

    # Normalize a GeoDataFrame to a flat latitude/longitude frame first.
    if isinstance(df, GeoDataFrame):
        df = gpdf_to_latlong_df(df)

    # Utility for h3.geo_to_h3 to work on dataframe row
    lat_lng_to_h3 = lambda row, h3_level: h3.geo_to_h3(row['latitude'], row[
        'longitude'], h3_level)
    df['id'] = df.apply(lat_lng_to_h3, args=(h3_level, ), axis=1)

    # Coordinates are now folded into 'id'; drop them before aggregating.
    df = df.drop(columns=['latitude', 'longitude'])
    df = df.groupby('id').agg(aggfunc).reset_index()

    # Utility for h3.h3_to_geo_boundary to return shapely.geometry.Polygon
    h3_to_polygon = lambda h3_address: Polygon(
        h3.h3_to_geo_boundary(h3_address, geo_json=True))
    df['geometry'] = df.id.apply(h3_to_polygon)

    return PorygonDataFrame(df.set_index('id'))
Esempio n. 11
0
def display_hexagon(surf, x, y, l_color=RED):
    """Outline the resolution-8 H3 hexagon containing (x, y) on *surf*."""
    # h3 expects (lat, lng), i.e. (y, x).
    cell = h3.geo_to_h3(y, x, 8)
    boundary = h3.h3_to_geo_boundary(cell)
    screen_points = return_adj_coord(boundary)
    pygame.draw.polygon(surf, l_color, screen_points, 2)
Esempio n. 12
0
def create_city_hexagons(lat=49.83826, lon=24.02324):
    """
    For Lviv, coordinates: 49.83826, 24.02324.
    Function for dividing a city into regions and saving the boundary
    coordinates of each region to a DataFrame.
    Coordinates must be a city center from OpenStreetMap.

    Takes:
        lat: float value of latitude
        lon: float value of longitude
    Return:
        DataFrame with columns hexagon_id, latitude1..6, longitude1..6 --
        one row per hexagon within 6 rings of the center cell.
    """
    center = h3.geo_to_h3(lat, lon, 8)
    rings = h3.k_ring_distances(center, 6)
    # Flatten the per-distance rings into one list of cell addresses.
    # (The original used `hex` as the loop variable, shadowing the builtin.)
    addresses = [cell for ring in rings for cell in ring]

    # One list per output column instead of twelve hand-unrolled variables.
    columns = {"hexagon_id": []}
    for i in range(1, 7):
        columns[f"latitude{i}"] = []
    for i in range(1, 7):
        columns[f"longitude{i}"] = []

    for index, address in enumerate(addresses):
        # Boundary vertices come back as (lat, lng) pairs; hexagons have 6.
        boundary = h3.h3_to_geo_boundary(address)
        columns["hexagon_id"].append(index + 1)
        for i in range(6):
            columns[f"latitude{i + 1}"].append(boundary[i][0])
            columns[f"longitude{i + 1}"].append(boundary[i][1])

    # dict insertion order preserves the original column layout:
    # hexagon_id, latitude1..6, longitude1..6.
    return pd.DataFrame(columns)
Esempio n. 13
0
def get_nyc_h3coord():
    """Return the set of resolution-8 H3 cells over a 100x100 NYC grid."""
    xs, ys = get_linspace(left_top, right_bottom, 100)
    # geo_to_h3 takes (lat, lng) = (y, x); duplicates collapse in the set.
    return {h3.geo_to_h3(y, x, 8) for x in xs for y in ys}
Esempio n. 14
0
def sucursal(event=None, context=None):
    """Look up one branch of a given company at the request's coordinates.

    Expects a JSON body with "empresa", "lat" and "long". The point is mapped
    to a resolution-9 H3 cell and matched against the branch quadrant column.
    Returns the company and branch ids of the first match.
    """
    data = request.get_json()
    empresa = data["empresa"]
    lat = data["lat"]
    long = data["long"]

    h3_address_9 = h3.geo_to_h3(float(lat), float(long), 9)
    # Resolution 8 is kept for the pending coarser-fallback lookup (TODO below).
    h3_address_8 = h3.geo_to_h3(float(lat), float(long), 8)

    cur = mysql.connection.cursor()
    # Parameterized query: never interpolate request-derived data into SQL
    # (the previous `sql % data` form was injectable).
    sql = '''SELECT emp_run, suc_empresa_id FROM Sucursales WHERE emp_run=%s and suc_cuadrante_9 = %s'''
    params = (empresa, h3_address_9)
    cur.execute(sql, params)
    rv = cur.fetchall()

    # TODO: if nothing was found, retry at resolution 8 (h3_address_8); if that
    # also fails, return a JSON error saying there is no branch of this store
    # at this position.

    return jsonify({'empresa': rv[0][0], 'sucursal': rv[0][1]})
Esempio n. 15
0
 def test_h3_is_valid(self):
     """Spot-check h3_is_valid on known-good, known-bad and generated ids."""
     valid_cases = [
         ('85283473fffffff', 'H3 Address is considered an address'),
         ('850dab63fffffff', 'H3 Address from Java test also valid'),
     ]
     for address, message in valid_cases:
         self.assertTrue(h3.h3_is_valid(address), message)
     invalid_cases = [
         ('lolwut', 'Random string is not considered an address'),
         ('5004295803a88', 'H3 0.x Addresses are not considered valid'),
     ]
     for address, message in invalid_cases:
         self.assertFalse(h3.h3_is_valid(address), message)
     # Addresses produced by geo_to_h3 must validate at every resolution.
     for res in range(16):
         self.assertTrue(h3.h3_is_valid(h3.geo_to_h3(37, -122, res)),
                         'H3 Address is considered an address')
Esempio n. 16
0
def hexify(df, lat_col, lon_col, levels=(6, 8)) -> Optional[pandas.DataFrame]:
    """Add one H3 index column per resolution in the closed range *levels*.

    Columns are named ``hex_index_<r>``. Returns None (after logging) when
    h3-py is not installed.
    """
    try:
        from h3 import h3
    except ImportError:  # pragma: no cover
        logger.error(
            'h3-py must be installed for geo hashing capabilities. Exiting.'
            'Install it with: pip install scikit-hts[geo]')
        return
    low, high = levels
    for resolution in range(low, high + 1):
        df[f'hex_index_{resolution}'] = df.apply(
            lambda row: h3.geo_to_h3(row[lat_col], row[lon_col], resolution),
            1)
    return df
Esempio n. 17
0
def get_uber_tokens(dt):
    """Convert trajectories of (lat, lng) points to resolution-10 H3 tokens.

    Returns the per-trajectory token sequences and the deduplicated token
    vocabulary as a list.
    """
    traj_tokens = []
    key_set = set()
    for i, items in tqdm(enumerate(dt)):
        traj = []
        for item in items:
            lat, lng = item[0], item[1]
            # Skip padded / missing coordinates encoded as exact zeros.
            if lat == 0. or lng == 0.:
                continue
            token = h3.geo_to_h3(lat, lng, 10)
            traj.append(token)
            key_set.add(token)
        traj_tokens.append(traj)
    return traj_tokens, list(key_set)
Esempio n. 18
0
def counts_by_hexagon(df, resolution, latlong, filter_variable=None):
    '''Use h3.geo_to_h3 to index each data point into the spatial index of the specified resolution.
      Use h3.h3_to_geo_boundary to obtain the geometries of these hexagons.

      Aggregates vehicle counts and mean fares per hexagon and optionally
      filters rows via the module-level widgets hex_filter_select /
      hex_filter_no -- NOTE(review): these globals must exist wherever this
      function runs; confirm against the surrounding app.
      Returns the (possibly filtered) df and the per-hexagon aggregate.'''

    print('1st')
    df["hex_id"] = df.apply(
        lambda row: h3.geo_to_h3(row[latlong[0]], row[latlong[1]], resolution),
        axis=1)
    # NOTE(review): `df.hex_no = ...` attaches an attribute, not a column; the
    # real column only exists after the `df['hex_no'] = ...` assignment below.
    df.hex_no = pd.Categorical(df.hex_id)
    df['hex_no'] = df.hex_no.codes
    df['hex_no'] = df['hex_no'].astype(str)
    if filter_variable and hex_filter_select.value != 'All Hexes':
        if hex_filter_select.value == 'Filter by Number':
            # Keep only hexes whose (stringified) categorical code was listed.
            hex_filter_list = hex_filter_no.value.split(',')
            df = df[df['hex_no'].isin(hex_filter_list)]

    # Count rides and average the fare per hexagon.
    df_aggreg = df.groupby(by=["hex_id", "hex_no"]).agg({
        'vehicle_id':
        'count',
        'object_data-fare':
        'mean'
    }).reset_index()
    print(len(df_aggreg))
    df_aggreg.columns = ["hex_id", "hex_no", "value", "average_fare"]

    if filter_variable and hex_filter_select.value != 'All Hexes':
        if hex_filter_select.value == 'Filter by Threshold':
            # Drop hexes at or below the count threshold, then restrict the
            # raw frame to the surviving hexes.
            hex_filter_threshold = int(hex_filter_no.value)
            df_aggreg = df_aggreg[df_aggreg['value'] > hex_filter_threshold]
            hex_th_filtered_list = list(set(df_aggreg['hex_no']))
            df = df[df['hex_no'].isin(hex_th_filtered_list)]

    # GeoJSON-style polygon boundary for each hexagon.
    df_aggreg["geometry"] = df_aggreg.hex_id.apply(
        lambda x: {
            "type": "Polygon",
            "coordinates":
            [h3.h3_to_geo_boundary(h3_address=x, geo_json=True)]
        })
    """
    df_aggreg["center"] =  df_aggreg.hex_id.apply(lambda x: 
                                                           {    "type" : "Polygon",
                                                                 "coordinates": 
                                                                [h3.h3_to_geo(h3_address=x)]
                                                            }
                                                        )
    """

    return df, df_aggreg
def get_hex(point, resolution=HEX_LEVEL):
    """
    Convert Shapely Point coordinates into an H3 Uber Hex index code.
    Args:
        point (obj): shapely Point object
        resolution (int): the Uber Hex resolution/size - an int from 0-15.
    Returns:
        hex_code (str): the Uber hex code relating to the lat-lng coordinates
    """
    # Shapely stores (x, y) = (lng, lat); geo_to_h3 wants (lat, lng).
    return h3.geo_to_h3(point.y, point.x, resolution)
Esempio n. 20
0
def define_h3(lat, lng, resolution):
    """UDF for h3 hash retrieval.

    Attributes:
        lat: latitude column value (None for missing rows).
        lng: longitude column value (None for missing rows).
        resolution: desired h3 resolution.

    Returns:
        The H3 cell index, or None when either coordinate is missing.
    """
    # Compare against None explicitly: 0.0 (equator / prime meridian) is a
    # valid coordinate but falsy, so the previous truthiness check silently
    # returned None for such rows.
    if lat is not None and lng is not None:
        h3_feature = h3.geo_to_h3(lat, lng, resolution)
    else:
        h3_feature = None
    return h3_feature
Esempio n. 21
0
def hex_ecoregions(
        ecoregions: numpy.array,
        transform: rasterio.Affine) -> t.Dict[h3.H3Index, t.Counter[int]]:
    """Accumulate per-H3-cell area totals of each ecoregion code in a raster.

    :param ecoregions: 2-D raster of ecoregion codes.
    :param transform: affine transform mapping (col, row) pixels to (lon, lat).
    :return: mapping of H3 cell (at module RESOLUTION) to a Counter of
        ecoregion code -> accumulated area.
    """
    c: t.Dict[h3.H3Index, t.Counter[int]] = t.DefaultDict(t.Counter)
    for y, row in enumerate(ecoregions):
        # Latitude of this raster row; pixel area shrinks with cos(latitude).
        (_, lat) = transform * (0, y)
        area = numpy.cos(lat * numpy.pi / 180) * SQUARE_OF_15_ARCSEC
        for x, eco in enumerate(row):
            (lon, lat) = transform * (x, y)
            index: h3.H3Index = h3.geo_to_h3(lat, lon, RESOLUTION)
            c[index][int(
                eco
            )] += area  # eco is a numpy type that sqlalchemy does not understand as int
    return c
Esempio n. 22
0
    def geo_to_tile(
        self,
        lat: float,
        lon: float,
        resolution: Union[int, None] = None,
        area_km: Union[float, None] = None,
    ) -> Tile:
        """Map coordinate pair (lat, lon) and resolution to a Tile object.

        The Tile object has nice properties. Let's say that `tile` is an
        object from the Tile class.

        ```
        print(tile) --> Tile: grid_type "s2", resolution 14, key 94d28d8b
        tile.geometry.shapely --> to access the shapely object
        tile.geometry.wkt     --> to access the wkt string
        tile.geometry.geojson --> to access the geojson object
        tile.parent           --> to access the tile parent id
        tile.children         --> to access the tile children id
        ```
        Parameters
        ----------
        lat : float
        lon : float
        resolution : int
            Grid system resolution/zoom/size

        Returns
        -------
        Tile
            Tile object wrapping the computed tile id

        Raises
        ------
        ValueError
            If this instance's grid_type is not one of the supported systems.
        """

        resolution = self._checks_resolution_option(resolution, area_km, lat)

        if self.grid_type == "s2":
            tile_id = s2.geo_to_s2(lat, lon, resolution)
        elif self.grid_type == "h3":
            tile_id = h3.geo_to_h3(lat, lon, resolution)
        elif self.grid_type in ("bing", "quadtree"):
            tile_id = quadtree.geo_to_tile(lat, lon, resolution)
        else:
            # Previously an unknown grid_type fell through to an obscure
            # NameError on tile_id; fail with an explicit message instead.
            raise ValueError(f"Unsupported grid_type: {self.grid_type!r}")

        return self.id_to_tile(tile_id)
Esempio n. 23
0
def _h3_bin_from_rupture(
    rupture: Union[SimpleRupture, NonParametricProbabilisticRupture,
                   ParametricProbabilisticRupture],
    h3_res: int = 3,
) -> str:
    """
    Returns the hexadecimal string that is the index of the `h3` spatial bin
    which contains the hypocenter of the `rupture` at the given level of
    resolution (`res`).

    See the documentation for `h3` (https://uber.github.io/h3/) for more
    information.
    """
    hypocenter = rupture.hypocenter
    return h3.geo_to_h3(hypocenter.latitude, hypocenter.longitude, h3_res)
Esempio n. 24
0
    def decorated(*args, **kwargs):
        """Resolve one of the mutually exclusive geo parameters into WKT.

        Pops 'geoframe', 'country', 'polygon' or 'point' from kwargs, converts
        whichever is present into kwargs['wkt'] (plus kwargs['distance'] for a
        point) and delegates to the wrapped function ``f``.  Returns a
        (message, 400) tuple on invalid input.
        """
        geoframe = kwargs.pop('geoframe', None)
        country = kwargs.pop('country', None)
        polygon = kwargs.pop('polygon', None)
        point = kwargs.pop('point', None)

        # The four geo parameters are mutually exclusive.
        number_of_parameter = sum(x is not None
                                  for x in (geoframe, country, polygon, point))
        if number_of_parameter > 1:
            return ("'geoframe', 'country', 'polygon' and "
                    "'point' are mutually exclusive", 400)

        # Parse parameter geoframe
        if geoframe is not None:
            try:
                logger.debug('Try parsing geoframe')
                kwargs['wkt'] = bounding_box_to_wkt(*geoframe)
            except ValueError:
                return 'Invalid geoparam', 400
        # parse parameter country
        elif country is not None:
            logger.debug('Try parsing country')
            try:
                kwargs['wkt'] = get_country_wkt(country.upper())
            except CountryNotFound:
                return 'Unknown country code.', 400
        # parse parameter polygon
        elif polygon is not None:
            try:
                logger.debug('Try parsing polygon')
                kwargs['wkt'] = polygon_to_wkt(polygon)
            except RESTParamError as err:
                return str(err), 400
        # parse parameter point
        elif point is not None:
            try:
                logger.debug('Try to parse point')
                # Snap the point to the center of its H3 cell; the incoming
                # point is (lon, lat) while geo_to_h3 takes (lat, lon).
                point = h3.h3_to_geo(
                    h3.geo_to_h3(point[1], point[0],
                                 emissionsapi.db.resolution))
                kwargs['wkt'] = f'POINT({point[1]} {point[0]})'
                # Take a radius from 0.01 decimal degree which are approx.
                # 1113 meter
                kwargs['distance'] = 0.01
            except KeyError:
                return 'Invalid point', 400
        return f(*args, **kwargs)
Esempio n. 25
0
def latlong_to_geojson_string_h3_geometry(latitude_dd: float,
                                          longitude_dd: float,
                                          resolution: int = 8) -> str:
    """Converts a lat long point in decimal degrees to a geojson string with H3 hexagon geometry.

    Args:
        latitude_dd (float): [latitude in decimal degrees]
        longitude_dd (float): [longitude in decimal degrees]
        resolution (int, optional): [H3 resolution/ APERTURE_SIZE]. Defaults to 8.

    Returns:
        [type]: geojson string
    """
    # H3 cell containing the point at the requested resolution.
    h = h3.geo_to_h3(lat=latitude_dd, lng=longitude_dd, resolution=resolution)

    # Hexagon boundary as a GeoJSON Polygon geometry.
    h_geom = {
        "type": "Polygon",
        "coordinates": [h3.h3_to_geo_boundary(h=h, geo_json=True)]
    }

    # Single-feature collection: id = hex id, resolution kept as a property.
    feature = geojson.feature.Feature(
        geometry=h_geom,
        id=h,
        properties={"resolution": int(resolution)})
    feat_collection = geojson.feature.FeatureCollection([feature])

    return json.dumps(feat_collection)
Esempio n. 26
0
 def fit(self, X, y):
     """Count positive samples per H3 cell at self.resolution.

     X is an Nx2 latitude/longitude DataFrame and y a boolean mask selecting
     the rows to aggregate.  Stores the per-cell counts in self.hex_frame and
     returns self for chaining.
     """
     # X is a Nx2 lat/lon data frame.
     # y is boolean numpy array.
     self.hex_frame = (
         pd.DataFrame(
             {
                 "latitude": X[y]["latitude"],
                 "longitude": X[y]["longitude"],
                 # assumes X's column order is (latitude, longitude), so
                 # x[0]=lat, x[1]=lon -- TODO confirm against callers.
                 "h3": np.apply_along_axis(
                     lambda x: h3.geo_to_h3(x[0], x[1], self.resolution),
                     axis=1,
                     arr=X[y],
                 ),
             }
         )
         .groupby("h3")
         .agg({"h3": "count"})
     )
     return self
Esempio n. 27
0
def geo_hex_features(df: pd.DataFrame, points: List[Tuple[str, str]],
                     res_list: List[int]):
    """Calculate uber hex for hexagon resolutions from res_list.

    Args:
        df - dataframe
        points (list) - point column names like [Lat, Long]
        res_list (list) - list of integer resolutons
    Returns:
        df - new dataframe with hex feature columns
    """
    # Never mutate the caller's frame.
    out = df.copy()

    for resolution in res_list:
        column = f"{points[0]}_{points[1]}_hex_{resolution}"
        out[column] = [
            h3.geo_to_h3(lat, lng, resolution)
            for lat, lng in out[points].values
        ]

    return out
Esempio n. 28
0
def point_to_h3(dataframe, resolution=1):
    """Convert longitude and latitude in pandas dataframe into h3 indices and
    add them as additional column.

    :param dataframe: a pandas dataframe as returned from load_ncfile()
    :type dataframe: pandas.core.frame.DataFrame
    :param resolution: Resolution of the h3 grid
    :type resolution: uint
    :return: the dataframe including the h3 indices
    :rtype: pandas.core.frame.DataFrame
    """
    # Fill the new 'h3' column row-wise from the coordinate columns.
    cells = []
    for lat, lon in zip(dataframe['latitude'], dataframe['longitude']):
        cells.append(h3.geo_to_h3(lat, lon, resolution))
    dataframe['h3'] = cells

    return dataframe
Esempio n. 29
0
File: geo.py Progetto: UDST/udtk
def h3_from_row(row, res, x_col, y_col):
    """
    Return the H3 index for a DataFrame row holding point coordinates.

    Parameters:
    row (pandas.Series):
        pandas DataFrame row
    res (int):
        h3 resolution level
    x_col (str):
        column name with the x coordinate (longitude)
    y_col (str):
        column name with the y coordinate (latitude)

    Returns:
    str : H3 index id
    """
    # geo_to_h3 takes (lat, lng), i.e. the y column first.
    latitude = row[y_col]
    longitude = row[x_col]
    return h3.geo_to_h3(latitude, longitude, res=res)
Esempio n. 30
0
def parse_locations(api_result, type_loc):
    """Turn Places-API results into quoted pseudo-CSV rows.

    Also assembles a dict keyed by the stringified coordinates (kept for
    parity with the original implementation; it is not returned).
    """
    locations = {}
    csv_locations = []
    for entry in api_result:
        geometry = entry["geometry"]["location"]
        coords = (geometry["lat"], geometry["lng"])
        # Resolution-8 H3 cell used as the area identifier.
        areaId = h3.geo_to_h3(coords[0], coords[1], 8)
        name = entry["name"]
        addr = entry["formatted_address"]
        locations[str(coords)] = {
            "name": name,
            "addr": addr,
            "areaId": areaId,
            "type": type_loc
        }
        csv_locations.append(
            f'{coords[0]},{coords[1]},"{areaId}","{name}","{addr}","{type_loc}"')
    return csv_locations