Example #1
def loads():
    logger.info(f'ENV {ENV}')

    # test fixture: pin the timestamp for reproducible runs, e.g.
    # time_ = datetime.datetime(2021, 1, 10, 10, 30, 0, 0)
    # day_before = time_ - datetime.timedelta(days=1)

    now_ = datetime.datetime.now()
    neighborhood_df, cells_df = neighborhood(time_=now_)

    now_ = datetime.datetime.now()
    load_rets(time_=now_)  # ok

    now_ = datetime.datetime.now()
    neighborhood_df, cells_df = load_terrains(
        time_=now_,
        neighborhood_df=neighborhood_df,
        cells_df=cells_df,
    )

    now_ = datetime.datetime.now()
    load_overshooters(
        time_=now_,
        neighborhood_df=neighborhood_df,
        cells_df=cells_df,
    )
Example #2
def rets_data(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")

    query_ = f'''
    select x.datetimeid, x.node, x.devicename, x.deviceno, x.tilt,
    x.subname, x.subunitno, y.localcellid, y.eci, y.cellname
    from (select
    ret.dateid as datetimeid,
    ret.node as node,
    ret.devicename as devicename,
    ret.deviceno as deviceno,
    sub.tilt as tilt,
    sub.subname as subname,
    sub.subunitno as subunitno
    from ret
    inner join retsubunit sub on
    date(ret.dateid) = date(sub.dateid) and
    ret.node = sub.node and
    ret.deviceno = sub.deviceno
    where date(ret.dateid) = current_date) as x
    inner join lcellreference as y
    on (x.node = y.node
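    -- a RET deviceno matches the local cell id either directly or offset by 10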
    and (x.deviceno = y.localcellid or x.deviceno = y.localcellid + 10)
    and STR_TO_DATE(y.dateid, '%Y-%m-%d') = '{period}');
    '''

    return pd_sql(time_=time_, query_=query_)
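A note on the f-string interpolation above: since period feeds straight into the SQL text, a parameterized query is safer. A minimal sketch, assuming a mysql.connector connection cnx and the period computed above are available (MySQL Connector/Python uses the pyformat paramstyle, so literal % signs in STR_TO_DATE must be doubled when params are passed):

query_ = '''
select distinct * from lcellreference as l
where STR_TO_DATE(l.dateid, '%%Y-%%m-%%d') = %(period)s;
'''
df = pd.read_sql(query_, cnx, params={'period': period})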
Example #3
def pd_sql(time_=None, query_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    cnx = None
    try:
        cnx = mysql.connector.connect(
            user=user_,
            password=password_,
            host=host_,
            database=database_,
            use_pure=True,
        )
        df = pd.read_sql(query_, cnx)
        logger.info(f'df.shape {df.shape}')
        return df
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            logger.error("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            logger.error("Database does not exist")
        else:
            logger.error(err)
    finally:
        # close the connection whether the query succeeded or failed
        if cnx is not None:
            cnx.close()
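A minimal usage sketch, assuming the module-level connection settings (user_, password_, host_, database_) are configured:

import datetime

df = pd_sql(time_=datetime.datetime.now(), query_='select 1 as ok;')
if df is not None:
    logger.info(df.head())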
Example #4
def overshooting_intensity(neighborhood_df=pd.DataFrame(),
                           ta_df=pd.DataFrame()):
    global TA_COLUMNS, TA_INDEX
    logger.info(f'overshooting_intensity:')

    if neighborhood_df.empty:
        # neighborhood() needs a timestamp and returns (neighborhood_df, cells_df)
        neighborhood_df, _ = neighborhood(time_=datetime.datetime.now())
        neighborhood_df.reset_index(inplace=True)

    l = ['CELLNAME', 'distance_']
    t = ['CELLNAME']
    overshooters_intensity_df = neighborhood_df[l].groupby(t).mean()
    overshooters_intensity_df.reset_index(inplace=True)
    overshooters_intensity_df[
        'distance_index'] = overshooters_intensity_df.apply(
            distance_percentaje_index, axis=1)

    if ta_df.empty:
        ta_df = get_ta_df()

    l = [
        'L_RA_TA_UE_Index0',
        'L_RA_TA_UE_Index1',
        'L_RA_TA_UE_Index2',
        'L_RA_TA_UE_Index3',
        'L_RA_TA_UE_Index4',
        'L_RA_TA_UE_Index5',
        'L_RA_TA_UE_Index6',
        'L_RA_TA_UE_Index7',
        'L_RA_TA_UE_Index8',
        'L_RA_TA_UE_Index9',
        'L_RA_TA_UE_Index10',
        'L_RA_TA_UE_Index11',
    ]

    ra_ta_df = ta_df[l]
    cumulative_df = ra_ta_df.cumsum(axis=1, skipna=True)

    l = ['Cell_Name']
    ta_df_ = pd.concat([ta_df[l], cumulative_df], axis=1)

    merged_df = pd.merge(overshooters_intensity_df,
                         ta_df_,
                         how="inner",
                         left_on='CELLNAME',
                         right_on='Cell_Name').drop_duplicates()

    # merged_df['overshooting'] = merged_df.apply(_overshooting_intensity, axis=1)

    # ---------------------------------------
    merged_df['cum_sum_out'] = merged_df.apply(cum_sum_out, axis=1)
    merged_df['overs_intensity'] = merged_df.apply(_overshooting_intensity,
                                                   axis=1)
    # ---------------------------------------

    new_merged_df = merged_df.drop(['Cell_Name'], axis=1)

    return neighborhood_df, new_merged_df
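A toy illustration of the row-wise cumulative sum used above: after cumsum, each L_RA_TA_UE_IndexN column holds the count of UE samples at that timing-advance bin or below (the values here are made up):

demo = pd.DataFrame([{'L_RA_TA_UE_Index0': 5,
                      'L_RA_TA_UE_Index1': 3,
                      'L_RA_TA_UE_Index2': 2}])
print(demo.cumsum(axis=1, skipna=True))  # 5, 8, 10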
Example #5
def load_rets(time_=None):
    logger.info(f'ENV {ENV}')

    if not time_:
        return

    list_ = [
        {
            'datetimeid': time_,
            'node': 'MBTS-AIS_3G_003',
            'cellname': 'AIS_4G_003_3',
            'eci': 2816002,
            'devicename': 'RET82',
            'deviceno': 2,
            'tilt': 30,
            'subname': 'RET82',
            'subunitno': 1,
            'localcellid': 2,
        },
        {
            'datetimeid': time_,
            'node': 'MBTS-ARA_3G_013',
            'cellname': 'ARA_4G_013_3',
            'eci': 2304258,
            'devicename': 'RET82R_S3',
            'deviceno': 2,
            'tilt': 40,
            'subname': 'RET82R_S3',
            'subunitno': 1,
            'localcellid': 2,
        },
        {
            'datetimeid': time_,
            'node': 'MBTS-ARA_3G_013',
            'cellname': 'ARA_4G_013_3',
            'eci': 2304258,
            'devicename': 'RET82L_S3',
            'deviceno': 12,
            'tilt': 40,
            'subname': 'RET82L_S3',
            'subunitno': 1,
            'localcellid': 2,
        },
    ]

    if ENV == 'sim':
        df = pd.DataFrame.from_dict(list_)
    elif ENV == 'prod':
        df = rets_data(time_=time_)
    else:
        logger.error(f'unknown ENV {ENV}')
        return

    engine = get_engine()
    session = get_session(engine=engine)
    df.to_sql('rets', con=engine, if_exists='append', index=False)
    session.commit()
    session.close()
Example #6
def get_period_data(time_=None):
    logger.info(f'ENV {ENV}')

    if ENV == 'sim':
        period = time_.strftime("%Y%m%d")
        df = pd.read_json(f'./data/_prs_lte_hour_{period}.json')
    elif ENV == 'prod':
        df = kpi_data(time_=time_)

    return df
Example #7
def neighborhood(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    cells_df = get_cells_df(time_=time_)

    l = ['SITE', 'LAT', 'LON']
    sites_df = cells_df[l].drop_duplicates()

    sites_df['key'] = 1
    # self-merge on a constant key produces the cartesian product of sites
    merged_df = pd.merge(sites_df, sites_df, on='key').drop(columns='key')

    merged_df = merged_df[merged_df['SITE_x'] != merged_df['SITE_y']]

    merged_df['distance_'] = haversine_distance(merged_df['LAT_x'].values,
                                                merged_df['LON_x'].values,
                                                merged_df['LAT_y'].values,
                                                merged_df['LON_y'].values)

    merged_df['bearing_'] = bearing(merged_df['LAT_x'].values,
                                    merged_df['LON_x'].values,
                                    merged_df['LAT_y'].values,
                                    merged_df['LON_y'].values)

    # logger.info(f'neighborhood: merged_df.columns {merged_df.columns}')
    l = ['SITE_x', 'SITE_y', 'distance_', 'bearing_']
    merged_df = merged_df[l]

    merged_df = merged_df[merged_df['distance_'] <= KM]

    l = ['SITE', 'CELLNAME', 'AZIMUTH']
    merged_df = pd.merge(cells_df[l],
                         merged_df,
                         how="inner",
                         left_on='SITE',
                         right_on='SITE_x')

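    # note: this bearing-sector test does not wrap around 0°/360°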
    merged_df = merged_df[(merged_df['bearing_'] > merged_df['AZIMUTH'] - D)
                          & (merged_df['bearing_'] < merged_df['AZIMUTH'] + D)]

    l = ['CELLNAME', 'distance_']
    merged_df.sort_values(by=l, inplace=True)

    l = ['CELLNAME']
    neighborhood_df = merged_df.groupby(l).head(N_DISTANCE)

    l = ['CELLNAME', 'AZIMUTH', 'SITE_x', 'SITE_y', 'distance_', 'bearing_']

    logger.info(f'neighborhood_df[l].shape {neighborhood_df[l].shape}')
    return neighborhood_df[l], cells_df
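A toy sketch of the constant-key idiom neighborhood() relies on: merging a frame with itself on a dummy key yields the cartesian product of site pairs, from which self-pairs are dropped (the sample sites and coordinates are made up):

sites = pd.DataFrame({'SITE': ['A', 'B'],
                      'LAT': [0.0, 1.0],
                      'LON': [0.0, 1.0]})
sites['key'] = 1
pairs = pd.merge(sites, sites, on='key').drop(columns='key')
pairs = pairs[pairs['SITE_x'] != pairs['SITE_y']]
print(pairs[['SITE_x', 'SITE_y']])  # (A, B) and (B, A)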
Example #8
def haversine_distance(lat1, lon1, lat2, lon2):
    logger.info(f'haversine_distance:')

    km_constant = 6372.795477598  # mean Earth radius in km
    lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2
    c = 2 * np.arcsin(np.sqrt(a))
    km = km_constant * c  # great-circle distance in kilometres
    return km
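A quick sanity check: one degree of longitude along the equator is roughly 111.2 km.

print(haversine_distance(np.array([0.0]), np.array([0.0]),
                         np.array([0.0]), np.array([1.0])))  # ~[111.23]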
Example #9
def overshooting(neighborhood_df=pd.DataFrame(), ta_df=pd.DataFrame()):
    global TA_COLUMNS, TA_INDEX
    logger.info(f'overshooting:')

    if neighborhood_df.empty:
        # neighborhood() needs a timestamp and returns (neighborhood_df, cells_df)
        neighborhood_df, _ = neighborhood(time_=datetime.datetime.now())
        neighborhood_df.reset_index(inplace=True)

    # Detecting cells' overshooting

    l = ['CELLNAME', 'distance_']
    t = ['CELLNAME']
    overshooters_df = neighborhood_df[l].groupby(t).mean()
    overshooters_df.reset_index(inplace=True)

    if ta_df.empty:
        ta_df = get_ta_df()

    l = [
        'Cell_Name',
        'L_RA_TA_UE_Index0',
        'L_RA_TA_UE_Index1',
        'L_RA_TA_UE_Index2',
        'L_RA_TA_UE_Index3',
        'L_RA_TA_UE_Index4',
        'L_RA_TA_UE_Index5',
        'L_RA_TA_UE_Index6',
        'L_RA_TA_UE_Index7',
        'L_RA_TA_UE_Index8',
        'L_RA_TA_UE_Index9',
        'L_RA_TA_UE_Index10',
        'L_RA_TA_UE_Index11',
    ]
    TA_COLUMNS = l
    TA_INDEX = {i: f for i, f in enumerate(l)}

    ta_df = ta_df[l].drop_duplicates().copy()
    ta_df['ta_'] = ta_df.apply(ta_percentaje_distance, axis=1)

    l = ['Cell_Name', 'ta_']
    t = ['CELLNAME', 'ta_', 'distance_']
    overshooters_df = pd.merge(overshooters_df,
                               ta_df[l],
                               how="inner",
                               left_on='CELLNAME',
                               right_on='Cell_Name')[t].drop_duplicates()

    # a cell overshoots when its TA-derived reach exceeds the mean
    # distance to the neighbors it faces
    overshooters_df['overshooter'] = (overshooters_df['ta_'] >
                                      overshooters_df['distance_'])

    return neighborhood_df, overshooters_df
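A worked illustration of the flag computed above, with made-up numbers:

demo = pd.DataFrame({'CELLNAME': ['X'], 'ta_': [14.4], 'distance_': [6.1]})
demo['overshooter'] = demo['ta_'] > demo['distance_']
print(demo)  # X is flagged: its signal reaches well past its neighbors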
Example #10
def get_cells_df(time_=None):
    logger.info(f'ENV {ENV}')

    if ENV == 'sim':
        df = pd.read_csv("./data/lcellreference_2020_12_30.csv")
    elif ENV == 'prod':
        if not time_:
            logger.info(f'time_ {time_}')
            return
        df = cells_data(time_=time_)

    return df
Example #11
def get_ta_df(time_=None):
    logger.info(f'ENV {ENV}')

    if ENV == 'sim':
        df = pd.read_csv("./data/prs_lte_hour_2020_12_30.csv")
    elif ENV == 'prod':
        if not time_:
            logger.info(f'time_ {time_}')
            return
        df = ta_data(time_=time_)

    return df
Example #12
def all_enabler(time_=None):
    '''
    The purpose of this function is to enable all cells, except those
    whose name matches the pattern _MM_.
    '''
    logger.debug(f'ENV {ENV} time_ {time_}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    df = cells_data(time_=time_)
    mask = ~df['CELLNAME'].str.contains("_MM_", regex=False)
    cellnames = df[mask]['CELLNAME'].drop_duplicates().tolist()
    logger.debug(f'len(cellnames) {len(cellnames)}')

    enabler(cellnames=cellnames)
Example #13
def cells_data(time_=None):
    logger.debug(f'ENV {ENV} time_ {time_}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")
    logger.debug(f'period {period}')

    query_ = f'''
    select distinct * from lcellreference as l
    where STR_TO_DATE(l.dateid, '%Y-%m-%d') = '{period}';
    '''

    return pd_sql(time_=time_, query_=query_)
Example #14
def trx_updater(commands=None, sent_=None):
    '''
    This function receives a list of dictionaries with the responses to
    the tilt-change commands executed on the NBI. If a command succeeded,
    the rets and transactions tables are updated.
    '''
    logger.debug(f"ENV {ENV}")

    if not commands:
        return

    engine = get_engine()
    session = get_session(engine=engine)

    for command in commands:
        result = command['data']['result']
        logger.debug(f"result {result}")
        executed_time_stamp_str = command['data']['executed_time_stamp']
        executed_time_stamp = datetime.datetime.strptime(
            executed_time_stamp_str, '%Y-%m-%d %H:%M:%S')
        object_id = command['object_id']
        trx = session.query(Transaction).filter(
            Transaction.id == object_id).first()
        if not trx:
            # no matching transaction: note that the remaining commands
            # in this batch are left unprocessed
            session.commit()
            session.close()
            return

        trx.sent = sent_
        if result:
            trx.oldtilt = trx.newtilt
            trx.success = executed_time_stamp
            ret_updater(node=trx.node,
                        deviceno=trx.deviceno,
                        tilt=trx.newtilt,
                        session=session)
        else:
            logger.info(f"result {result}")
            trx.failure = executed_time_stamp

    session.commit()
    session.close()
Example #15
def load_overshooters():
    logger.info(f'load_overshooters:')
    neighborhood_df, overshooters_df = overshooting()
    overshooters_dict = overshooters_df.to_dict('index')
    engine = get_engine()
    session = get_session(engine=engine)

    now_ = datetime.datetime.now()
    for index, dict_ in overshooters_dict.items():
        obj_ = Overshooter(
            date_time=now_,
            cell_name=dict_['CELLNAME'],
            time_advanced=int(dict_['ta_']),
            average_distance=int(dict_['distance_']),
            is_overshooter=dict_['overshooter'],
        )
        session.add(obj_)
    session.commit()
    session.close()
Example #16
def bearing(lat1, lon1, lat2, lon2):
    logger.info(f'bearing:')

    lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])

    dlon = lon2 - lon1

    x = np.sin(dlon) * np.cos(lat2)
    y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) *
                                       np.cos(dlon))

    initial_bearing = np.arctan2(x, y)

    # np.arctan2 returns values from -180° to +180°, which is not what we
    # want for a compass bearing, so normalize to the 0°-360° range
    initial_bearing = np.degrees(initial_bearing)
    bearing = (initial_bearing + 360) % 360
    return bearing
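A quick sanity check: due east from the origin should come out as a compass bearing of about 90°.

print(bearing(np.array([0.0]), np.array([0.0]),
              np.array([0.0]), np.array([1.0])))  # ~[90.0]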
Example #17
def load_overshooters(time_=None,
                      neighborhood_df=pd.DataFrame(),
                      cells_df=pd.DataFrame()):
    logger.info(f'load_overshooters:')

    if not time_:
        return

    list_ = [
        {
            'datetimeid': time_,
            'cellname': 'AIS_4G_003_3',
            'ta_calculated': 14.4,
            'average_distance': 2.43982546537283,
            'overshooter': True,
            'intensity': 'High',
        },
        {
            'datetimeid': time_,
            'cellname': 'ARA_4G_013_3',
            'ta_calculated': 14.4,
            'average_distance': 6.14587256200947,
            'overshooter': True,
            'intensity': 'High',
        },
    ]

    if ENV == 'sim':
        df = pd.DataFrame.from_dict(list_)
    elif ENV == 'prod':
        df = overshooters(time_=time_,
                          neighborhood_df=neighborhood_df,
                          cells_df=cells_df)
    else:
        logger.error(f'unknown ENV {ENV}')
        return

    engine = get_engine()
    session = get_session(engine=engine)
    df.to_sql('overshooters', con=engine, if_exists='append', index=False)
    session.commit()
    session.close()
Example #18
def ta_data(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")

    query_ = f'''
    select distinct * from prs_lte_hour p
    where STR_TO_DATE(p.dateid_date, '%Y-%m-%d')
     between '{period}' and '{period}'
      and p.dateid_hour = '20';
    '''

    return pd_sql(time_=time_, query_=query_)
Example #19
def load_terrains(time_=None,
                  neighborhood_df=pd.DataFrame(),
                  cells_df=pd.DataFrame()):
    logger.info(f'ENV {ENV}')

    if not time_:
        return

    # keys match the terrain_df columns read in the loop below
    list_ = [
        {
            'datetimeid': time_,
            'CELLNAME': 'AIS_4G_003_3',
            'HILL': False,
            'HEIGHT_DIFF': 0,
        },
        {
            'datetimeid': time_,
            'CELLNAME': 'ARA_4G_013_3',
            'HILL': False,
            'HEIGHT_DIFF': 0,
        },
    ]

    if ENV == 'sim':
        terrain_df = pd.DataFrame.from_dict(list_)
    elif ENV == 'prod':
        terrain_df, neighborhood_df, cells_df = check_terrain(
            time_=time_, neighborhood_df=neighborhood_df, cells_df=cells_df)

    logger.info(f'terrain_df.shape {terrain_df.shape}')
    logger.info(f'terrain_df.columns {terrain_df.columns}')

    terrains_dict = terrain_df.to_dict('index')
    engine = get_engine()
    session = get_session(engine=engine)

    now_ = datetime.datetime.now()
    for index, dict_ in terrains_dict.items():
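        # slope encoding: 0 for flat terrain, otherwise the sign of the
        # mean height difference between the serving site and its neighbors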
        is_plain_ = not dict_['HILL']
        if is_plain_:
            slope_ = 0
        else:
            slope_ = 1 if int(dict_['HEIGHT_DIFF']) > 0 else -1
        obj_ = Terrain(datetimeid=now_,
                       cellname=dict_['CELLNAME'],
                       slope=slope_,
                       is_plain=is_plain_)
        session.add(obj_)
    session.commit()
    session.close()

    return neighborhood_df, cells_df
Example #20
def nbi_simulator(time_=None, session_=None, trx_=None):
    logger.debug(f"time_ {time_}")

    if not time_ or not session_ or not trx_:
        return

    logger.info(f"trx_ \n{trx_}")
    logger.info(f"ENV {ENV}")

    trx_.sent = datetime.datetime.now()

    nbi_response = failure_percentage()
    logger.info(f"nbi_response {nbi_response}")

    if nbi_response:
        trx_.success = datetime.datetime.now()
    else:
        trx_.failure = datetime.datetime.now()

    session_.commit()
Example #21
def overshooters(time_=None,
                 neighborhood_df=pd.DataFrame(),
                 cells_df=pd.DataFrame()):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return
    # ------------------------------------------
    # fixed timestamps were needed to move the tests forward, e.g.
    # now_ = datetime.datetime.now()

    if neighborhood_df.empty:
        neighborhood_df, cells_df = neighborhood(time_)

    day_before = time_ - datetime.timedelta(days=1)
    ta_df = get_ta_df(time_=day_before)

    # neighborhood_df.reset_index(inplace=True)
    # cells_df.reset_index(inplace=True)
    # ta_df.reset_index(inplace=True)
    # ------------------------------------------

    neighborhood_df, overshooters_df = overshooting(
        neighborhood_df=neighborhood_df, ta_df=ta_df)
    # neighborhood_df.to_excel(r'data/neighborhood_df.xlsx', index = False)
    # overshooters_df.to_excel(r'data/overshooters_df.xlsx', index = False)

    # neighborhood_df.reset_index(inplace=True)
    # overshooters_df.reset_index(inplace=True)

    neighborhood_df, overshooters_intensity_df = overshooting_intensity(
        neighborhood_df=neighborhood_df, ta_df=ta_df)
    # neighborhood_df.to_excel(r'data/neighborhood_df.xlsx', index = False)
    overshooters_intensity_df.to_excel(r'data/overshooters_intensity_df.xlsx',
                                       index=False)

    # neighborhood_df.reset_index(inplace=True)
    # overshooters_intensity_df.reset_index(inplace=True)

    intensity_df = overshooters_intensity_df.drop(['distance_'], axis=1)
    # intensity_df.reset_index(inplace=True)

    merged_df = pd.merge(overshooters_df,
                         intensity_df,
                         how="inner",
                         left_on='CELLNAME',
                         right_on='CELLNAME').drop_duplicates()
    # merged_df.to_excel(r'data/merged_df.xlsx', index = False)
    # merged_df.reset_index(inplace=True)

    # ------------------------------------------------
    l = ['CELLNAME', 'ta_', 'distance_', 'overshooter', 'overs_intensity']
    overshooters_df = merged_df[l].drop_duplicates()

    l = [
        'cellname', 'ta_calculated', 'average_distance', 'overshooter',
        'intensity'
    ]
    overshooters_df.columns = l

    overshooters_df['datetimeid'] = cells_df.iloc[0]['Dateid']
    # ------------------------------------------------

    # return overshooters_df, intensity_df, merged_df
    return overshooters_df
Example #22
def get_prs_lte_hour_df():
    logger.info(f'get_prs_lte_hour_df:')
    return pd.read_csv("./data/prs_lte_hour_2020_12_30.csv")
Example #23
def get_ret_join_data(period=None):
    logger.info(f'get_ret_join_data:')
    return pd.read_json(f'./data/ret_join_data_{period}.json')
Example #24
def check_terrain(time_=None,
                  neighborhood_df=pd.DataFrame(),
                  cells_df=pd.DataFrame()):
    logger.info(f'check_terrain:')

    if not time_:
        return

    if neighborhood_df.empty:
        neighborhood_df, cells_df = neighborhood(time_)

    if cells_df.empty:
        cells_df = get_cells_df(time_=time_)

    # Adding GEO_HEIGHT_x to neighborhood
    l = ['SITE', 'GEO_HEIGHT']
    aux = pd.merge(neighborhood_df,
                   cells_df[l],
                   how="inner",
                   left_on='SITE_x',
                   right_on='SITE').drop_duplicates()

    l = ['SITE_x', 'GEO_HEIGHT']
    aux = aux[l][aux['SITE_x'] == aux['SITE']].drop_duplicates()

    l = ['SITE_x', 'GEO_HEIGHT_x']
    aux.columns = l

    neighborhood_df_ = pd.merge(neighborhood_df,
                                aux,
                                how="inner",
                                left_on='SITE_x',
                                right_on='SITE_x').drop_duplicates()

    # Adding GEO_HEIGHT_y to neighborhood
    l = ['SITE', 'GEO_HEIGHT']
    aux = pd.merge(neighborhood_df_,
                   cells_df[l],
                   how="inner",
                   left_on='SITE_y',
                   right_on='SITE').drop_duplicates()

    l = ['SITE_y', 'GEO_HEIGHT']
    aux = aux[l][aux['SITE_y'] == aux['SITE']].drop_duplicates()

    l = ['SITE_y', 'GEO_HEIGHT_y']
    aux.columns = l

    neighborhood_df_ = pd.merge(neighborhood_df_,
                                aux,
                                how="inner",
                                left_on='SITE_y',
                                right_on='SITE_y').drop_duplicates()

    # Detecting cells' terrain

    # neighborhood_df_['HEIGHT_DIFF'] = np.abs((neighborhood_df_['GEO_HEIGHT_x'] - neighborhood_df_['GEO_HEIGHT_y']).values)

    neighborhood_df_['HEIGHT_DIFF'] = (neighborhood_df_['GEO_HEIGHT_x'] -
                                       neighborhood_df_['GEO_HEIGHT_y']).values

    l = ['CELLNAME', 'SITE_x', 'SITE_y', 'HEIGHT_DIFF']
    t = ['CELLNAME', 'SITE_x']
    # SITE_y is non-numeric, so average the numeric columns only
    terrain_df = neighborhood_df_[l].groupby(t).mean(numeric_only=True)
    terrain_df['HILL'] = np.abs(terrain_df['HEIGHT_DIFF']) > TERRAIN_DELTA
    terrain_df.reset_index(inplace=True)
    # neighborhood_df.reset_index(inplace=True)
    return terrain_df, neighborhood_df, cells_df
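A toy illustration of the hill test above, assuming TERRAIN_DELTA is a height threshold in metres (the value used here is made up):

demo = pd.DataFrame({'CELLNAME': ['X', 'X'],
                     'SITE_x': ['A', 'A'],
                     'HEIGHT_DIFF': [35.0, 10.0]})
g = demo.groupby(['CELLNAME', 'SITE_x']).mean(numeric_only=True)
g['HILL'] = np.abs(g['HEIGHT_DIFF']) > 20  # assumed TERRAIN_DELTA of 20 m
print(g)  # mean diff 22.5 m -> HILL True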