Example #1
def main():
    print("Time: %s" % (datetime.datetime.now().strftime("%X")))

    try:
        server_id = mem.get("server_config")["gsmio"]["server_id"]
    except KeyError:
        raise ValueError(">> Unable to determine server_id from memory")

    DF_GSM_MODULES = mem.get("df_gsm_modules")

    df_gsm_modules_in_server = DF_GSM_MODULES[DF_GSM_MODULES["gsm_server_id"]
                                              == server_id]
    gsm_id_list = list(df_gsm_modules_in_server["gsm_id"])

    SCREEN_PATH = "/usr/bin/screen"
    PYTHON_PATH = "/usr/bin/python"
    SERVER_PATH = "/home/pi/centralserver/gsm/gsmserver.py"

    process_list = []
    for gsm_id in gsm_id_list:
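        # `screen -S g<id> -d -m <cmd>` starts a detached screen session
        # named g<id>; the grep pattern is what count_processes matches below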
        string_to_match_in_grep = "gsmserver.py -g%d" % gsm_id
        command_to_run = "%s -S g%d -d -m %s %s -g%d" % (
            SCREEN_PATH, gsm_id, PYTHON_PATH, SERVER_PATH, gsm_id)
        proc = GsmProcess(string_to_match_in_grep, command_to_run)
        process_list.append(proc)

    for proc in process_list:
        proc_count = count_processes(proc)
        if proc_count <= 3:
            print("Execute: %s" % (proc.cmd_str))
            execute_cmd(proc.cmd_str, False)
        else:
            print("Script ok:", proc.pattern_str)
Example #2
def main():
    """Writes in rainfall_gauges information on available rain gauges 
     for rainfall alert analysis

    """

    start = datetime.now()
    qdb.print_out(start)

    if not qdb.does_table_exist('rainfall_gauges'):
        # create a rainfall_gauges table if it doesn't exist yet
        qdb.create_rainfall_gauges()

    senslope = mem.get('df_dyna_rain_gauges')
    senslope = senslope.loc[senslope.has_rain == 1, :]
    senslope.loc[:, 'data_source'] = 'senslope'

    noah = noah_gauges()

    # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
    all_gauges = pd.concat([senslope, noah], sort=False)
    all_gauges.loc[:, 'gauge_name'] = all_gauges.loc[:, 'gauge_name'].apply(
        lambda x: str(x))
    all_gauges.loc[:, 'date_activated'] = pd.to_datetime(
        all_gauges.loc[:, 'date_activated'])
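    # gauge names in memory but not yet written to the rainfall_gauges table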
    written_gauges = mem.get('df_rain_gauges')
    not_written = (set(all_gauges['gauge_name'])
                   - set(written_gauges['gauge_name']))

    new_gauges = all_gauges.loc[all_gauges.gauge_name.isin(not_written), :]
    new_gauges = new_gauges.loc[new_gauges.date_deactivated.isnull(), :]
    new_gauges = new_gauges.loc[:, [
        'gauge_name', 'data_source', 'longitude', 'latitude', 'date_activated'
    ]]
    if len(new_gauges) != 0:
        data_table = sms.DataTable('rainfall_gauges', new_gauges)
        db.df_write(data_table)

    deactivated = written_gauges.loc[
        ~written_gauges.date_deactivated.isnull(), :]

    deactivated_gauges = all_gauges.loc[
        (~all_gauges.date_deactivated.isnull())
        & (~all_gauges.gauge_name.isin(not_written))
        & (~all_gauges.gauge_name.isin(deactivated.gauge_name)), :]
    date_deactivated = pd.to_datetime(
        deactivated_gauges.loc[:, 'date_deactivated'])
    deactivated_gauges.loc[:, 'date_deactivated'] = date_deactivated
    deactivated_gauges = deactivated_gauges.loc[:, [
        'gauge_name', 'data_source', 'longitude', 'latitude', 'date_activated'
    ]]
    if len(deactivated_gauges) != 0:
        data_table = sms.DataTable('rainfall_gauges', deactivated_gauges)
        db.df_write(data_table)

    qdb.print_out('runtime = %s' % (datetime.now() - start))
Example #3
def main():
    tsm_details = memory.get('DF_TSM_SENSORS')
    accelerometers = memory.get('DF_ACCELEROMETERS')

    # keep only accelerometers on active TSM sensors
    dfa = accelerometers.merge(tsm_details, how='inner', on='tsm_id')
    dfa = dfa[dfa.date_deactivated.isnull()]
    # dfa = dfa[dfa.accel_id >= 1240]

    for i in dfa.accel_id:
        try:
            drift_detection(acc_id=i)
            print(i)
        except TypeError:
            pass
Example #4
def main(time_now=None):
    # a datetime.now() default would be evaluated only once, at import time;
    # resolve the timestamp at call time instead
    if time_now is None:
        time_now = datetime.now()
    conn = mem.get('DICT_DB_CONNECTIONS')
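    # data tags for validation (tag_type 0) whose marker alert level is 0,
    # observed within the past 1.5 days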
    query = "select site_code, ts, marker_name from  "
    query += "  (select data_id from {analysis}.marker_data_tags "
    query += "  where tag_type = 0 "
    query += "  ) tag "
    query += "inner join (select data_id, alert_level from {analysis}.marker_alerts) sub1 using (data_id) "
    query += "inner join {analysis}.marker_data using (data_id) "
    query += "inner join {analysis}.marker_observations mo using (mo_id) "
    query += "inner join {common}.sites using (site_id) "
    query += "inner join (select marker_id, marker_name from {analysis}.view_marker_history) sub2 using (marker_id)"
    query += "where alert_level = 0 "
    query += "and mo.ts >= '{ts}' "
    query = query.format(analysis=conn['analysis']['schema'],
                         common=conn['common']['schema'],
                         ts=time_now - timedelta(1.5))
    tags = db.df_read(query, resource='sensor_analysis')
    tags.loc[:, 'ts'] = tags.loc[:, 'ts'].astype(str)

    if len(tags) != 0:
        msg = 'Validate measurements with displacement of 1cm and more:\n'
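        # one line per measurement: '<site_code>: <ts>: <marker_name>'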
        msg += '\n'.join(list(map(lambda x: ': '.join(x), tags.values)))
        msg += '\n\nEdit data tag info for confirmed movement or unreliable measurement.'
        msg += '\n\nFor repositioned markers, add event to marker history: reposition event with ts of marker observation above. Adding reposition event will also delete the validating data tag'
    else:
        msg = ''

    return msg
Example #5
def get_site_names():
    sites = mem.get('df_sites')
    special = ['hin', 'mca', 'msl', 'msu']
    # default name: 'barangay, municipality', lowercased, with 'city' and
    # periods stripped, e.g. 'Poblacion' + 'Santa City' -> 'poblacion, santa'
    non_special = ~sites.site_code.isin(special)
    sites.loc[non_special, 'name'] = sites.loc[
        non_special, ['barangay', 'municipality']].apply(
        lambda row: ', '.join(row.values).lower()
        .replace('city', '').replace('.', '').strip(), axis=1)
    # 'hin' and 'mca' use their municipality; 'msl' and 'msu' use their sitio
    sites.loc[sites.site_code.isin(special[0:2]), 'name'] = sites.loc[
        sites.site_code.isin(special[0:2]), 'municipality'].apply(
        lambda x: x.lower())
    sites.loc[sites.site_code.isin(special[2:4]), 'name'] = sites.loc[
        sites.site_code.isin(special[2:4]), 'sitio'].apply(
        lambda x: x.lower().replace(' ', ' | '))
    return sites
Example #6
def get_surficial_trigger(start_ts, end_ts, resource='sensor_analysis'):
    conn = mem.get('DICT_DB_CONNECTIONS')
    query = "SELECT trigger_id, ts, site_id, alert_status, ts_updated, "
    query += "trigger_sym_id, alert_symbol, alert_level, site_code FROM "
    query += "  (SELECT * FROM {}.operational_triggers ".format(
        conn['analysis']['schema'])
    query += "  WHERE ts >= '{}' ".format(start_ts)
    query += "  AND ts_updated <= '{}' ".format(end_ts)
    query += "  ) AS trig "
    query += "INNER JOIN "
    query += "  (SELECT * FROM {}.operational_trigger_symbols ".format(
        conn['analysis']['schema'])
    query += "  WHERE alert_level > 0 "
    query += "  ) AS sym "
    query += "USING (trigger_sym_id) "
    query += "INNER JOIN "
    query += "  (SELECT * FROM {}.trigger_hierarchies ".format(
        conn['analysis']['schema'])
    query += "  WHERE trigger_source = 'surficial' "
    query += "  ) AS hier "
    query += "USING (source_id) "
    query += "INNER JOIN {}.alert_status USING (trigger_id) ".format(
        conn['analysis']['schema'])
    query += "INNER JOIN {}.sites USING (site_id) ".format(
        conn['common']['schema'])
    query += "ORDER BY ts DESC "
    df = db.df_read(query, resource=resource)
    return df
Example #7
def earthquake(site_id, ts):
    """Insert values to earthquake_events, earthquake_alerts, and 
    operational_triggers to (re)trigger subsurface alert.
    
    Args:
        site_id (int): ID of site to compute earthquake analysis for.
        ts (datetime): Timestamp of alert trigger.
    """

    # writes to earthquake_events; defaults epicenter to site coordinates, depth to 0, and magnitude to 4.3
    sites = eq.get_sites()
    earthquake_events = sites.loc[sites.site_id == site_id,
                                  ['latitude', 'longitude', 'province']]
    earthquake_events.loc[:, 'ts'] = ts
    earthquake_events.loc[:, 'magnitude'] = 4.3
    earthquake_events.loc[:, 'depth'] = 0
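    # critical_distance: radius within which a magnitude-4.3 event can trigger alerts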
    earthquake_events.loc[:, 'critical_distance'] = np.round(eq.get_crit_dist(4.3), decimals=2)
    earthquake_events.loc[:, 'issuer'] = 'TOPSSOFTWAREINFRA'
    earthquake_events.loc[:, 'processed'] = 1
    eq_id = int(db.df_write(
        data_table=sms.DataTable("earthquake_events", earthquake_events),
        resource='sensor_data', last_insert=True)[0][0])
    
    # writes to earthquake_alerts
    earthquake_alerts = pd.DataFrame({'eq_id': [eq_id], 'site_id': [site_id], 'distance': [0]})
    db.df_write(data_table = sms.DataTable("earthquake_alerts", earthquake_alerts), resource='sensor_data')
    
    # writes to operational_triggers
    trigger_symbol = mem.get('df_trigger_symbols')
    trigger_sym_id = trigger_symbol.loc[
        (trigger_symbol.trigger_source == 'earthquake')
        & (trigger_symbol.alert_level == 1), 'trigger_sym_id'].values[0]
    operational_trigger = pd.DataFrame({'site_id': [site_id],
                                        'trigger_sym_id': [trigger_sym_id],
                                        'ts': [ts], 'ts_updated': [ts]})
    qdb.alert_to_db(operational_trigger, 'operational_triggers')
    
    # details for trigger tech info
    earthquake_events.loc[:, 'distance'] = 0

    return earthquake_events
Example #8
def check_name_of_number(number):
    """
    - The process of querying the mobile number  to check the cellphone number logger name.

    :param number: Cellphone number.
    :type number: int

    Returns:
        str: Query output for success and return False if fails.

    Example Output::

        >>> x = check_name_of_number('639173082161')
        agbta

    """
    conn = mem.get('DICT_DB_CONNECTIONS')
    query = ("select logger_name from {}.loggers where "
             "logger_id = (select logger_id from {}.logger_mobile "
             "where sim_num like '%{}' order by date_activated desc limit 1)".
             format(conn['common']['schema'], conn['gsm_pi']['schema'],
                    number))
    result = dynadb.read(query, 'check_name_of_number',
                         resource='sms_analysis')
    if len(result) != 0:
        return result[0][0]
    else:
        return None
Example #9
def get_site_code(text):
    """
    - The processing of getting data of site code.

    :param text: Sms line of message for soms .
    :type text: str

    Returns:
       list: List data output for success parsing and it break
       if fails.
    """
    err_val = 0
    site_id = 0

    # site code: first three letters of the first word in the message
    site_code_match = re.split(" ", text, maxsplit=1)[0].lower()[0:3]
    df_sites = mem.get('df_sites')
    site_code = adjust_site_code(site_code_match)
    try:
        site_id = df_sites.loc[df_sites.site_code == site_code.lower(),
                               'site_id'].values[0]
    except IndexError:
        print("No site_code record for %s" % (site_code))
        err_val = SURFICIAL_PARSER_ERROR_VALUE["site_code"]

    return {
        "value": {
            'site_id': site_id,
            'site_code': site_code
        },
        "match": str(site_code_match),
        "err_val": err_val
    }
Example #10
def get_name_of_staff(number):
    conn = mem.get('DICT_DB_CONNECTIONS')
    query  = "select user_id, nickname, gsm_id from {}.users ".format(conn['common']['schema'])
    query += "inner join {}.user_mobiles using (user_id) ".format(conn['gsm_pi']['schema'])
    query += "inner join {}.mobile_numbers using (mobile_id) ".format(conn['gsm_pi']['schema'])
    query += "where sim_num = '%s'" % (number)

    # first matching (user_id, nickname, gsm_id) row;
    # raises IndexError if the number is not registered
    return dbio.read(query=query, resource="sms_data")[0]
Example #11
def rainfall(site_id, ts, rain_id, rain_alert='both'):
    """Insert values to rainfall_alerts and operational_triggers tables 
    to (re)trigger rainfall alert
    
    Args:
        site_id (int): ID of site to compute rainfall analysis for.
        ts (datetime): Timestamp of alert trigger.
        rain_id (int): ID of rain gauge to use as data source.
        rain_alert (str: {'a', 'b', 'x', None}, default None): Type of rainfall alert. 
                a: 1-day cumulative rainfall threshold  exceeded
                b: 3-day cumulative rainfall threshold  exceeded
                x: 
                None: both 1-day and 3-day cumulative rainfall threshold  exceeded    
    """

    # 4 nearest rain gauges of each site with threshold and distance from site
    gauges = rain.rainfall_gauges()
    df = gauges.loc[(gauges.site_id == site_id) & (gauges.rain_id == rain_id),
                    ['site_id', 'rain_id', 'threshold_value']]
    df = df.rename(columns={'threshold_value': 'threshold'})
    df.loc[:, 'ts'] = ts
    # rainfall cumulative based on alert level
    if str(rain_alert) == '0':
        df.loc[:, 'rain_alert'] = 0
        df.loc[:, 'cumulative'] = 0
    else:
        if rain_alert != 'x':
            # exceed the 3-day threshold by 20%
            df.loc[:, 'rain_alert'] = 'b'
            df.loc[:, 'cumulative'] = 1.2 * df.loc[:, 'threshold']
        else:
            # near-threshold: 80% of the 3-day threshold
            df.loc[:, 'rain_alert'] = 'x'
            df.loc[:, 'cumulative'] = 0.80 * df.loc[:, 'threshold']
        if rain_alert == 'a' or rain_alert == 'both':
            # the 1-day threshold is half the 3-day threshold
            dfa = df.copy()
            dfa.loc[:, ['cumulative', 'threshold']] = dfa.loc[
                :, ['cumulative', 'threshold']].div(2)
            dfa.loc[:, 'rain_alert'] = 'a'
            if rain_alert == 'a':
                df = dfa.copy()
            else:
                # DataFrame.append was removed in pandas 2.0
                df = pd.concat([df, dfa], ignore_index=True)
        qdb.write_rain_alert(df)

    # writes to operational_triggers
    trigger_symbol = mem.get('df_trigger_symbols')
    if str(rain_alert) in ['0', 'x']:
        alert_level = 0
    else:
        alert_level = 1
    trigger_sym_id = trigger_symbol.loc[
        (trigger_symbol.trigger_source == 'rainfall')
        & (trigger_symbol.alert_level == alert_level),
        'trigger_sym_id'].values[0]
    operational_trigger = pd.DataFrame({'site_id': [site_id],
                                        'trigger_sym_id': [trigger_sym_id],
                                        'ts': [ts], 'ts_updated': [ts]})
    qdb.alert_to_db(operational_trigger, 'operational_triggers')

    return df
Example #12
def all_site_coord():
    """Retrieves coordinates of sites from memcache

    Returns:
        dataframe: Record of coordinates of sites.
    
    """

    df = mem.get('df_dyna_rain_gauges')[['site_id', 'latitude', 'longitude']]
    df = df.dropna()
    df = df.drop_duplicates('site_id')
    df = df.sort_values('site_id')
    return df
Example #13
def get_event_releases(start, end, mysql=True, to_csv=False):
    """Gets events overlapping with timestamp range given.

    Args:
        start (datetime): Start of timestamp range.
        end (datetime): End of timestamp range.
        mysql (bool): Gets data from mysql if True, else gets data from csv.
                      Optional. Default: True.
        to_csv (bool): Writes the result to csv if True. Optional.
                       Default: False.

    Returns:
        DataFrame: Events overlapping with timestamp range given. 
                   Columns: site_id, site_code, event_id, validity, pub_sym_id, alert_symbol

    """

    if mysql:
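        # pad the range so events overlapping its edges are included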
        start = start-timedelta(3)
        end = end+timedelta(3)
        conn = mem.get('DICT_DB_CONNECTIONS')
        query =  "select site_id, site_code, event_id, validity, pub_sym_id, alert_symbol, data_ts, release_time "
        query += "from {common}.sites "
        query += "inner join {website}.monitoring_events using(site_id) "
        query += "left join {website}.monitoring_event_alerts using(event_id) "
        query += "left join "
        query += "	(SELECT * FROM {website}.monitoring_releases "
        query += "	left join {website}.monitoring_triggers using(release_id) "
        query += "	left join {website}.internal_alert_symbols using(internal_sym_id) "
        query += "    ) as trig "
        query += "using(event_alert_id) "
        query += "where ((ts_start >= '{start}' and ts_start <= '{end}') "
        query += "or (validity >= '{start}' and validity <= '{end}') "
        query += "or (ts_start <= '{start}' and validity >= '{end}')) "
        query += "and pub_sym_id != 1 "
        query += "and active = 1 "
        query += "order by event_id, data_ts"
        query = query.format(start=start, end=end, common=conn['common']['schema'], website=conn['website']['schema'])
        df = db.df_read(query, resource='ops')
        if len(df) != 0:
            df.loc[:, 'data_ts'] = df.data_ts.apply(lambda x: round_data_ts(pd.to_datetime(x)))
            df_grp = df.groupby('event_id', as_index=False)
            df = df_grp.apply(event_with_eq, mysql=mysql).reset_index(drop=True)
            df_grp = df.groupby(['event_id', 'pub_sym_id'], as_index=False)
            df = df_grp.apply(event_with_moms, mysql=mysql).reset_index(drop=True)
            if to_csv:
                df.to_csv(output_path+'/input_output/event_releases.csv', index=False)
    else:
        df = pd.read_csv(output_path+'/input_output/event_releases.csv')
    return df
Example #14
def to_mysql(df):
    """Writes in rainfall_priorities the distance of 4 active nearby
    rain gauges from the site.
    
    Args:
        df (dataframe): Record of 4 nearby rain gauges with 
        its distance from the site.
    """
    
    written_df = mem.get('df_rain_priorities').loc[
        :, ['priority_id', 'site_id', 'rain_id']]
    combined = pd.merge(df, written_df, on=['site_id', 'rain_id'], how='left')
    
    if len(combined) > 0:
        qdb.write_rain_priorities(combined)
Example #15
def main(site_code=''):
    """Writes in rainfall_priorities information on nearest rain gauges
    from the project sites for rainfall alert analysis

    """

    start = datetime.now()
    qdb.print_out(start)
    
    coord = mem.get('df_sites')
    if site_code == '':
        try:
            site_code = sys.argv[1].lower()
            site_code = site_code.replace(' ', '').split(',')
        except IndexError:
            # no command-line argument given; process all sites
            pass
    else:
        site_code = site_code.replace(' ', '').split(',')
    if site_code != '':
        coord = coord.loc[coord.site_code.isin(site_code), :]
    
    coord = coord.loc[coord.active == 1, ['site_id', 'latitude', 'longitude']]

    rg_coord = mem.get('df_rain_gauges')
    rg_coord = rg_coord[rg_coord.date_deactivated.isnull()]
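    # find the nearest active rain gauges for each site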
    site_coord = coord.groupby('site_id', as_index=False)
    nearest_rg = site_coord.apply(get_distance, rg_coord=rg_coord)
    nearest_rg['distance'] = np.round(nearest_rg.distance, 2)
    nearest_rg = nearest_rg.reset_index(drop=True)
    
    if not qdb.does_table_exist('rainfall_priorities'):
        # create a rainfall_priorities table if it doesn't exist yet
        qdb.create_rainfall_priorities()
        
    to_mysql(nearest_rg)
    
    qdb.print_out('runtime = %s' % (datetime.now() - start))
Example #16
def main(start, end):
    conn = mem.get('DICT_DB_CONNECTIONS')
    query  = "SELECT ts_sms, site_code FROM "
    query += "  (SELECT inbox_id, ts_sms, user_id FROM "
    query += "    (SELECT * FROM {gsm_pi}.smsinbox_users "
    query += "    WHERE ts_sms BETWEEN '{start}' AND '{end}' "
    query += "    ) sms "
    query += "  INNER JOIN "
    query += "    {gsm_pi}.user_mobiles "
    query += "  USING (mobile_id) "
    query += "  INNER JOIN "
    query += "    {common}.users "
    query += "  USING (user_id) "
    query += "  ) AS msg "
    query += "INNER JOIN "
    query += "  (SELECT user_id, site_code, org_name FROM "
    query += "    {common}.user_organizations "
    query += "  INNER JOIN "
    query += "    {common}.sites "
    query += "  USING (site_id) "
    query += "  ) AS site_org "
    query += "USING (user_id) "
    query += "INNER JOIN "
    query += "  (SELECT * FROM "
    query += "    {gsm_pi}.smsinbox_user_tags "
    query += "  INNER JOIN "
    query += "    (SELECT tag_id, tag FROM {gsm_pi}.sms_tags "
    query += "    WHERE tag = '#EwiResponse') ack "
    query += "  USING (tag_id) "
    query += "  ) AS tags "
    query += "USING (inbox_id) "
    query += "ORDER BY inbox_id DESC "
    query = query.format(start=start, end=end, common=conn['common']['schema'], gsm_pi=conn['gsm_pi']['schema'])
    ACK = db.df_read(query, resource='sms_analysis')
    
    # isocalendar()[2] is the ISO weekday (1 = Monday ... 7 = Sunday)
    ACK.loc[:, 'day'] = ACK.loc[:, 'ts_sms'].map(
        lambda x: pd.to_datetime(x).isocalendar()[2])
    ACK.loc[:, 'hour'] = ACK.ts_sms.dt.hour
    
    # count #EwiResponse messages per hour and per weekday, by site and overall
    site_hour_ACK = ACK[['ts_sms', 'hour', 'site_code']].groupby(
        ['hour', 'site_code']).count().reset_index()
    hour_ACK = ACK[['ts_sms', 'hour']].groupby(['hour']).count().reset_index()
    site_day_ACK = ACK[['ts_sms', 'day', 'site_code']].groupby(
        ['day', 'site_code']).count().reset_index()
    site_day_ACK = site_day_ACK.sort_values('day')
    site_day_ACK.loc[:, 'day'] = site_day_ACK.day.map(
        {1: 'M', 2: 'T', 3: 'W', 4: 'Th', 5: 'F', 6: 'Sa', 7: 'Su'})
    day_ACK = ACK[['ts_sms', 'day']].groupby(['day']).count().reset_index()
    day_ACK = day_ACK.sort_values('day')
    day_ACK.loc[:, 'day'] = day_ACK.day.map(
        {1: 'M', 2: 'T', 3: 'W', 4: 'Th', 5: 'F', 6: 'Sa', 7: 'Su'})

    return site_hour_ACK, hour_ACK, site_day_ACK, day_ACK
Example #17
def get_sites_no_markers(mysql=True, to_csv=False):
    if mysql:
        conn = mem.get('DICT_DB_CONNECTIONS')
        query = "select site_id, site_code from {common}.sites  "
        query += "where site_id not in  "
        query += "	(select site_id from {analysis}.site_markers  "
        query += "	where in_use = 1) "
        query += "and active = 1"
        query = query.format(common=conn['common']['schema'], analysis=conn['analysis']['schema'])
        df = db.df_read(query, resource='sensor_analysis')
        df = df.loc[df.site_code != 'msu', :]
        if to_csv:
            df.to_csv(output_path+'/input_output/site_no_marker.csv', index=False)
    else:
        df = pd.read_csv(output_path+'/input_output/site_no_marker.csv')
    return df
Example #18
def get_gndmeas(start, end, mysql=True, to_csv=False):
    if mysql:
        conn = mem.get('DICT_DB_CONNECTIONS')
        query  = "select site_code, ts from "
        query += "  (select * from {analysis}.marker_observations "
        query += "  where ts between '{start}' and '{end}' "
        query += "  ) obs "
        query += "inner join {common}.sites using (site_id) "
        query = query.format(start=start, end=end, common=conn['common']['schema'], analysis=conn['analysis']['schema'])
        df = db.df_read(query, resource='sensor_analysis')
        if to_csv:
            df.to_csv(output_path + '/gndmeas.csv', index=False)
    else:
        df = pd.read_csv(output_path + '/gndmeas.csv')

    return df
Example #19
def main():
    """Updates data of NOAH rain gauges.
        
    """

    start_time = datetime.now()
    qdb.print_out(start_time)

    # get the list of NOAH rain gauge IDs
    gauges = mem.get('df_rain_props')

    gauges = gauges[gauges.data_source == 'noah'].drop_duplicates('rain_id')
    noah_gauges = gauges.groupby('rain_id')
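    # update the rainfall data table of each NOAH gauge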
    noah_gauges.apply(update_single_table)

    qdb.print_out('runtime = %s' % (datetime.now() - start_time))
Example #20
def to_mysql(df):
    """Writes in rainfall_priorities the distance of 4 active nearby
    rain gauges from the site.
    
    Args:
        df (dataframe): Record of 4 nearby rain gauges with 
        its distance from the site.

    """
    written_df = mem.get('df_rain_priorities')
    # sandwich df between two copies of written_df so that previously written
    # (site_id, rain_id) pairs appear more than once and get dropped by
    # keep=False, leaving only pairs not yet in rainfall_priorities
    # (DataFrame.append was removed in pandas 2.0, hence pd.concat)
    combined = pd.concat([written_df, df, written_df],
                         ignore_index=True, sort=False)
    combined = combined.drop_duplicates(['site_id', 'rain_id'], keep=False)

    if len(combined) > 0:
        data_table = sms.DataTable('rainfall_priorities', combined)
        db.df_write(data_table)
Example #21
def get_gsm_modules(reset_val=False):
    """
    - Gets the GSM modules information.

    :param reset_val: Trigger value to refresh the GSM information. Optional.
    :type reset_val: bool, default False

    Returns:
       dict: GSM module info on success.

    Example Output::

        {1: {'network': 'globe', 'module': 0, 'port': 'dummy', 'num': '639171234567', 
        'pwr_on_pin': 1, 'ring_pin': 0, 'id': 1, 'name': 'simulate'}, 2: {'network': 
        'globe', 'module': 1, 'port': None, 'num': '639176321023', 'pwr_on_pin': 0, 
        'ring_pin': 0, 'id': 2, 'name': 'globe'}, 3: {'network': 'smart', 'module': 1, 
        'port': None, 'num': '639988448687', 'pwr_on_pin': 0, 'ring_pin': 0, 'id': 3, 
        'name': 'smart'}, 4: {'network': 'globe', 'module': 2, 'port': '/dev/globeport', 
        'num': '639175388301', 'pwr_on_pin': 11, 'ring_pin': 15, 'id': 4, 
        'name': 'globe1'}, 5: {'network': 'smart', 'module': 2, 'port': '/dev/smartport',
        'num': '639088125640', 'pwr_on_pin': 31, 'ring_pin': 33, 'id': 5, 'name': 'smart1'},
        6: {'network': 'globe', 'module': 2, 'port': '/dev/smartport', 'num': '639175963573',
        'pwr_on_pin': 31, 'ring_pin': 33, 'id': 6, 'name': 'globe2'}, 7: {'network': 'smart',
        'module': 2, 'port': '/dev/ttyUSB1', 'num': '639088125638', 'pwr_on_pin': 29,
        'ring_pin': 33, 'id': 7, 'name': 'smart1'}}
    """
    try:
        DF_GSM_MODULES = mem.get("DF_GSM_MODULES")
    except AttributeError:
        raise AttributeError("No DF_GSM_MODULES in memory. Initialize first.")

    # backwards compatibility
    DF_GSM_MODULES.rename(columns={
        "network_type": "network",
        "gsm_name": "name",
        "gsm_sim_num": "num",
        "ser_port": "port",
        "gsm_id": "id",
        "module_type": "module"
    }, inplace=True)

    gsm_modules = DF_GSM_MODULES.to_dict(orient='index')

    return gsm_modules
Example #22
def get_rain_recipients(mysql=True, to_csv=False):
    if mysql:
        conn = mem.get('DICT_DB_CONNECTIONS')
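        # rain info recipients: active users with active mobile numbers,
        # excluding users that have web accounts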
        query = "SELECT mobile_id, sim_num, user_id, rain_site_id, rain_site_code, fullname, province, all_sites, mm_values, percentage, date_start, date_end FROM "
        query += "{common}.rain_info_recipients "
        query += "	LEFT JOIN "
        query += "(SELECT user_id, CONCAT(first_name, ' ', last_name) AS fullname, status AS user_status, ewi_recipient "
        query += "FROM {common}.users "
        query += ") users  "
        query += "	USING (user_id) "
        query += "	LEFT JOIN "
        query += "{gsm_pi}.user_mobiles  "
        query += "	USING (user_id) "
        query += "	LEFT JOIN "
        query += "{gsm_pi}.mobile_numbers "
        query += "	USING (mobile_id) "
        query += "	LEFT JOIN "
        query += "(SELECT user_id, site_id AS rain_site_id, site_code AS rain_site_code, province FROM  "
        query += "	{common}.user_organizations "
        query += "		INNER JOIN  "
        query += "	{common}.sites  "
        query += "		USING (site_id) "
        query += ") AS site_org  "
        query += "	USING (user_id) "
        query += "	LEFT JOIN "
        query += "{gsm_pi}.user_ewi_restrictions  "
        query += "	USING (user_id) "
        query += "WHERE user_id NOT IN ( "
        query += "	SELECT user_fk_id user_id "
        query += "    FROM {common}.user_accounts) "
        query += "AND ewi_recipient = 1 "
        query += "AND user_status = 1 "
        query += "AND status = 1 "
        query += "ORDER BY fullname, sim_num"
        query = query.format(common=conn['common']['schema'],
                             gsm_pi=conn['gsm_pi']['schema'])
        df = db.df_read(query, resource='sms_analysis')
        if to_csv:
            df.to_csv(output_path + '/input_output/ewi_recipient.csv',
                      index=False)
    else:
        df = pd.read_csv(output_path + '/input_output/ewi_recipient.csv')
    return df
Example #23
def subsurface(site_id, ts, alert_level):
    """Insert values to node_alerts, tsm_alerts, and operational_triggers
    to (re)trigger subsurface alert.
    
    Args:
        site_id (int): ID of site to compute subsurface analysis for.
        ts (datetime): Timestamp of alert trigger.
        alert_level (int: {0, 2, 3}): Subsurface alert level.
    """
    
    # get tsm_id
    query = "SELECT tsm_id FROM tsm_sensors "
    query += "where site_id = {} ".format(site_id)
    query += "and (date_deactivated is null or date_deactivated > '{}')".format(ts)
    tsm_id = db.df_read(query, resource='sensor_data').values.flatten()
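    # pick one of the site's active TSM sensors at random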
    tsm_id = random.choice(tsm_id)
    
    # writes to node_alerts; defaults to node 1 and vel_alert
    ts_list = pd.date_range(end=ts, freq='30min', periods=4)
    node_alerts = pd.DataFrame({'ts': ts_list, 'node_id': [1]*len(ts_list),
                                'tsm_id': [tsm_id]*len(ts_list),
                                'disp_alert': [0]*len(ts_list),
                                'vel_alert': [alert_level]*len(ts_list)})
    db.df_write(data_table = sms.DataTable("node_alerts", node_alerts), resource='sensor_data')
    
    # writes to tsm_alerts
    tsm_alerts = pd.DataFrame({'ts': [ts], 'tsm_id': [tsm_id],
                               'alert_level': [alert_level],
                               'ts_updated': [ts]})
    db.df_write(data_table = sms.DataTable("tsm_alerts", tsm_alerts), resource='sensor_data')

    # writes to operational_triggers
    trigger_symbol = mem.get('df_trigger_symbols')
    trigger_sym_id = trigger_symbol.loc[
        (trigger_symbol.trigger_source == 'subsurface')
        & (trigger_symbol.alert_level == alert_level),
        'trigger_sym_id'].values[0]
    operational_trigger = pd.DataFrame({'site_id': [site_id],
                                        'trigger_sym_id': [trigger_sym_id],
                                        'ts': [ts], 'ts_updated': [ts]})
    qdb.alert_to_db(operational_trigger, 'operational_triggers')

    # details for trigger tech info
    tsm_alerts.loc[:, 'node_id'] = 1
    tsm_alerts.loc[:, 'disp_alert'] = 0
    tsm_alerts.loc[:, 'vel_alert'] = alert_level
    
    return tsm_alerts
Example #24
def get_mobile(nickname_list=''):
    conn = mem.get('DICT_DB_CONNECTIONS')
    query  = "SELECT mobile_id, nickname, gsm_id FROM "
    query += "  (SELECT user_id, nickname FROM {}.users ".format(conn['common']['schema'])
    query += "  WHERE status = 1) u "
    query += "INNER JOIN "
    query += "  (SELECT user_id, team_code FROM "
    query += "  {}.user_team_members ".format(conn['common']['schema'])
    query += "  INNER JOIN {}.user_teams ".format(conn['common']['schema'])
    query += "  USING (team_id) "
    query += "  WHERE team_code IN ('admin', 'CT', 'MT') "
    query += "  ) team "
    query += "USING (user_id) "
    query += "INNER JOIN {}.user_mobiles ".format(conn['gsm_pi']['schema'])
    query += "USING (user_id) "
    query += "INNER JOIN {}.mobile_numbers ".format(conn['gsm_pi']['schema'])
    query += "USING (mobile_id)"
    df = db.df_read(query, resource='sms_analysis')
    if nickname_list != '':
        df = df.loc[df.nickname.isin(nickname_list)]
    return df
Example #25
def rainfall_gauges(end=None):
    """Check top 4 rain gauges to be used in rainfall analysis.
    
    Args:
        end (datetime): Timestamp of alert and plot to be computed. Optional.
                        Defaults to current timestamp.

    Returns:
        dataframe: Top 4 rain gauges per site.
    
    """

    # resolve the default timestamp at call time (a datetime.now() default
    # would be evaluated only once, at import)
    if end is None:
        end = datetime.now()

    gauges = mem.get('df_rain_props')

    # prefix gauge names by data source: 'rain_noah_' for noah,
    # 'rain_' for senslope
    gauges['gauge_name'] = gauges.data_source.map(
        {'noah': 'rain_noah_', 'senslope': 'rain_'}) + gauges.gauge_name
    # keep the 4 gauges nearest to each site
    gauges = gauges.sort_values('distance')
    gauges = gauges.groupby('site_id').head(4)
    gauges = gauges.sort_values(['site_id', 'distance'])

    return gauges
Example #26
def logger_response(sms, log_type, log=False):
    """
    - Logs the id of the matched expression to table logger_response.

    :param sms: Data info of the sms message.
    :param log_type: Type of response to log.
    :param log: Switch the logging of the response on or off.
    :type sms: obj
    :type log_type: str
    :type log: bool, default False

    """
    conn = mem.get('DICT_DB_CONNECTIONS')
    if log:
        query = ("INSERT INTO %s.logger_response (`logger_Id`, `inbox_id`, `log_type`)"
         "values((Select logger_id from %s.logger_mobile where sim_num = %s order by"
          " date_activated desc limit 1),'%s','%s')" 
         % (conn['analysis']['schema'],conn['common']['schema'],sms.sim_num,sms.inbox_id,log_type))
                    
        dbio.write(query, resource="sensor_analysis")
        print ('>> Log response')
    else:
        return False
Example #27
def check_alerts():
    conn = mem.get('DICT_DB_CONNECTIONS')
    ts_now = dt.now().strftime("%Y-%m-%d %H:%M:%S")
    common = conn['common']['schema']
    analysis = conn['analysis']['schema']
    query = ("SELECT stat_id, site_id, site_code, trigger_source, "
             "alert_symbol, ts_last_retrigger FROM "
             "(SELECT stat_id, ts_last_retrigger, site_id, "
             "trigger_source, alert_symbol FROM "
             "(SELECT stat_id, ts_last_retrigger, site_id, "
             "trigger_sym_id FROM "
             "(SELECT * FROM {}.alert_status "
             "WHERE ts_set < '{}' "
             "and ts_ack is NULL "
             ") AS stat "
             "INNER JOIN "
             "{}.operational_triggers AS op "
             "USING (trigger_id) "
             ") AS trig "
             "INNER JOIN "
             "(SELECT trigger_sym_id, trigger_source, "
             "alert_level, alert_symbol FROM "
             "{}.operational_trigger_symbols "
             "INNER JOIN "
             "{}.trigger_hierarchies "
             "USING (source_id) "
             ") as sym "
             "USING (trigger_sym_id)) AS alert "
             "INNER JOIN "
             "(SELECT * FROM {}.sites WHERE active = 1) s "
             "USING (site_id)").format(analysis,ts_now,analysis,analysis,analysis,common)

    alert_msgs = dbio.read(query=query, resource="sensor_analysis")

    print("alert messages:", alert_msgs)

    return alert_msgs
Example #28
from datetime import timedelta
import os
import pandas as pd
import re
import sys

# make local packages importable when this module is run as a script
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import dynadb.db as db
import volatile.memory as mem

output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
conn = mem.get('DICT_DB_CONNECTIONS')


def get_sms_recipients(mysql=True, to_csv=False):
    if mysql:
        query = "SELECT mobile_id, sim_num, user_id, fullname, site_id, org_name, alert_level, start_ewi_recipient FROM "
        query += "    {gsm_pi}.mobile_numbers "
        query += "  LEFT JOIN "
        query += "    {gsm_pi}.user_mobiles "
        query += "  USING (mobile_id) "
        query += "  LEFT JOIN "
        query += "    (select user_id, CONCAT(first_name, ' ', last_name) AS fullname, status AS user_status, ewi_recipient, start_ewi_recipient from {common}.users) users "
        query += "  USING (user_id) "
        query += "LEFT JOIN "
        query += "  (SELECT user_id, site_id, site_code, org_name, primary_contact FROM "
        query += "    {common}.user_organizations "
        query += "  INNER JOIN "
        query += "    {common}.sites "
        query += "  USING (site_id) "
        query += "  ) AS site_org "
Example #29
# scipy moved window functions to scipy.signal.windows (scipy >= 1.1)
from scipy.signal.windows import gaussian
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
import time as mytime

#### Import local codes
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import analysis.analysislib as lib
import analysis.querydb as qdb
import volatile.memory as mem

#### Open config files
sc = mem.get('server_config')

#### Create directory
output_path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../../..'))


def gaussian_weighted_average(series, sigma=3, width=39):
    """ Computes for rolling weighted average and variance using
    a gaussian signal filter.
    
    Parameters
    ---------------
    series: Array
        Series to be averaged
    sigma: Float
Example #30
def main(site_code='', end='', Print=True, write_to_db=True,
         print_plot=False, save_plot=True, days='', is_command_line_run=True):
    """Computes alert and plots rainfall data.
    
    Args:
        site_code (list): Site codes to compute rainfall analysis for. Optional.
                          Defaults to empty string which will compute alert
                          and plot for all sites.
        Print (bool): To print plot and summary of alerts. Optional. Defaults to
                      True.
        end (datetime): Timestamp of alert and plot to be computed. Optional.
                        Defaults to current timestamp.

    Returns:
        str: Json format of cumulative rainfall and alert per site.
    
    """

    start_time = datetime.now()
    qdb.print_out(start_time)

    if site_code == '':
        if is_command_line_run:
            site_code = sys.argv[1].lower()
            site_code = site_code.replace(' ', '').split(',')
    else:
        site_code = site_code.replace(' ', '').split(',')
            
    if end == '':
        try:
            end = pd.to_datetime(sys.argv[2])
        except (IndexError, ValueError):
            end = datetime.now()
    else:
        end = pd.to_datetime(end)

    output_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../../..'))
    
    sc = mem.server_config()

    #creates directory if it doesn't exist
    if (sc['rainfall']['print_plot'] or sc['rainfall']['print_summary_alert']) and Print:
        if not os.path.exists(output_path+sc['fileio']['rainfall_path']):
            os.makedirs(output_path+sc['fileio']['rainfall_path'])

    # setting monitoring window
    if days != '':
        sc['rainfall']['rt_window_length'] = days
    end, start, offsetstart = get_rt_window(
        float(sc['rainfall']['rt_window_length']),
        float(sc['rainfall']['roll_window_length']), end=end)
    tsn = end.strftime("%Y-%m-%d_%H-%M-%S")

    # 4 nearest rain gauges of each site with threshold and distance from site
    gauges = rainfall_gauges()
    if site_code != '':
        gauges = gauges[gauges.site_code.isin(site_code)]
    gauges['site_id'] = gauges['site_id'].apply(lambda x: float(x))

    trigger_symbol = mem.get('df_trigger_symbols')
    trigger_symbol = trigger_symbol[trigger_symbol.trigger_source == 'rainfall']
    trigger_symbol['trigger_sym_id'] = trigger_symbol[
        'trigger_sym_id'].apply(lambda x: float(x))
    site_props = gauges.groupby('site_id')
    
    summary = site_props.apply(ra.main, end=end, sc=sc,
                               trigger_symbol=trigger_symbol,
                               write_to_db=write_to_db)
    summary = summary.reset_index(drop=True)[['site_id', 'site_code',
                                              '1D cml', 'half of 2yr max',
                                              '3D cml', '2yr max',
                                              'DataSource', 'alert']]
                    
    if Print:
        if sc['rainfall']['print_summary_alert']:
            summary.to_csv(output_path+sc['fileio']['rainfall_path'] +
                        'SummaryOfRainfallAlertGenerationFor'+tsn+'.csv',
                        sep=',', mode='w', index=False)
        if sc['rainfall']['print_plot'] or print_plot:
            rain_data = site_props.apply(rp.main, offsetstart=offsetstart,
                                         tsn=tsn, save_plot=save_plot, sc=sc,
                                         start=start, output_path=output_path,
                                         end=end).reset_index(drop=True)
            summary = pd.merge(summary, rain_data, on='site_id',
                               validate='1:1')
    
    summary_json = summary.to_json(orient="records")
    
    qdb.print_out("runtime = %s" %(datetime.now()-start_time))
    
    return summary_json