def main(start, end, sched, mysql=False):
    sched = sched.loc[(sched.gndmeas == 1) | (sched.moms == 1), :]

    inbox_tag = smstags.inbox_tag(start, end, mysql=mysql)
    inbox_tag = inbox_tag.loc[inbox_tag.tag.isin(['#GroundMeas', '#CantSendGroundMeas', '#GroundObs']), :]
    outbox_tag = smstags.outbox_tag(start, end, mysql=mysql)
    outbox_tag = outbox_tag.loc[outbox_tag.tag.isin(['#GroundObsReminder', '#GroundMeasReminder']), :]
    
    monitoring_ipr = pd.read_excel(output_path + 'monitoring_ipr.xlsx', sheet_name=None)
    downtime = ipr_lib.system_downtime(mysql=mysql)
    sched = ipr_lib.remove_downtime(sched, downtime, meas_reminder=True)
    
    for name in monitoring_ipr.keys():
        indiv_ipr = monitoring_ipr[name]
        indiv_ipr.columns = indiv_ipr.columns.astype(str)
        for ts in indiv_ipr.columns[5:]:
            ts = pd.to_datetime(ts)
            ts_end = ts + timedelta(0.5)
            shift_release = sched.loc[(sched.data_ts >= ts) & (sched.data_ts < ts_end), :].copy()
            if len(shift_release) != 0:
                # reminder timestamp: two hours before the scheduled data_ts
                shift_release.loc[:, 'ts_reminder'] = shift_release.data_ts - timedelta(hours=2)
                indiv_release = shift_release.reset_index().groupby('index', as_index=False)
                shift_release = indiv_release.apply(check_sending, inbox_tag=inbox_tag, outbox_tag=outbox_tag).reset_index(drop=True)
                indiv_ipr.loc[indiv_ipr.Output2 == 'Ground meas reminder', str(ts)] = np.mean(shift_release.reminder)
            else:
                indiv_ipr.loc[indiv_ipr.Output2 == 'Ground meas reminder', str(ts)] = np.nan
        monitoring_ipr[name] = indiv_ipr
    
    with pd.ExcelWriter(output_path + 'monitoring_ipr.xlsx') as writer:
        for sheet_name, xlsxdf in monitoring_ipr.items():
            xlsxdf.to_excel(writer, sheet_name=sheet_name, index=False)
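
# A standalone sketch of the per-row groupby/apply pattern used above:
# reset_index() + groupby('index') runs a function once per schedule row and
# collects the one-row frames it returns.  check_sending itself is defined
# elsewhere in this module; flag_row below is only a hypothetical stand-in
# with made-up logic, and the toy frame is illustrative.
import pandas as pd
from datetime import timedelta

toy = pd.DataFrame({'data_ts': pd.to_datetime(['2021-04-01 07:30',
                                               '2021-04-01 11:30'])})
toy['ts_reminder'] = toy.data_ts - timedelta(hours=2)

def flag_row(df, cutoff):
    # one-row frame in, one-row frame out with an added 'reminder' column
    df = df.copy()
    df['reminder'] = (df.ts_reminder >= cutoff).astype(int)
    return df

per_row = toy.reset_index().groupby('index', as_index=False)
out = per_row.apply(flag_row, cutoff=pd.Timestamp('2021-04-01 06:00')).reset_index(drop=True)
print(out)
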
Example #2
def main(start, end, sched, site_names, eval_df, mysql=True, to_csv=False):

    sent_start = start - timedelta(hours=0.25)
    sent_end = end + timedelta(hours=4)
    per_release = sched.groupby('data_ts', as_index=False)
    sched = per_release.apply(sending_sched)
    
    recipients = qdb.get_sms_recipients(mysql=mysql, to_csv=to_csv)
    sent = qdb.get_sms_sent(sent_start, sent_end, site_names, mysql=mysql, to_csv=to_csv)
    sent = sent.loc[~sent.ts_sent.isnull(), :]
    sched = sms.ewi_sched(sched, recipients, sent, site_names)
    
    monitoring_ipr = pd.read_excel(output_path + 'monitoring_ipr.xlsx', sheet_name=None)
    
    downtime = ipr_lib.system_downtime(mysql=mysql)
    sched = ipr_lib.remove_downtime(sched, downtime)
    
    
    for name in monitoring_ipr.keys():
        indiv_ipr = monitoring_ipr[name]
        indiv_ipr.columns = indiv_ipr.columns.astype(str)
        for ts in indiv_ipr.columns[5:]:
            ts = pd.to_datetime(ts)
            sending_status = sched.loc[(sched.data_ts >= ts) & (sched.data_ts < ts+timedelta(0.5)), :]
            # ewi sms timeliness
            if len(sending_status) == 0:
                # no scheduled
                grade_t = np.nan
            elif len(sending_status.loc[sending_status.ts_written.isnull(), :]) == 0 and all(sending_status.ts_written <= sending_status.ts_due):
                # all sent on time
                grade_t = 1
            else:
                # deduct delay_deduction per delay_min minutes of delay;
                # an unwritten SMS takes the full deduction
                grade_t = np.round(np.average(1 - sending_status.apply(
                    lambda row: np.where(
                        row.ts_written is pd.NaT, 1,
                        delay_deduction * max(0, (row.ts_written - row.ts_due).total_seconds())
                        / (60 * delay_min)),
                    axis=1)), 2)
            indiv_ipr.loc[indiv_ipr.Output2.str.contains('EWI SMS', na=False), str(ts)] = grade_t
            # ewi sms quality
            if ts >= pd.to_datetime('2021-04-01') and len(sending_status) != 0:
                shift_eval = eval_df.loc[
                    (eval_df.shift_ts >= ts) &
                    (eval_df.shift_ts <= ts + timedelta(1)) &
                    ((eval_df['evaluated_MT'] == name) |
                     (eval_df['evaluated_CT'] == name) |
                     (eval_df['evaluated_backup'] == name)
                     ), :].drop_duplicates('shift_ts', keep='last')[0:1]
                # weighted deductions: alert errors count in full,
                # timestamp errors one third, typos one thirtieth
                deduction = (np.nansum(shift_eval[['routine_sms_alert', 'sms_alert']].values)
                             + np.nansum(shift_eval[['routine_sms_ts', 'sms_ts']].values) / 3
                             + np.nansum(shift_eval[['routine_sms_typo', 'sms_typo']].values) / 30)
                indiv_ipr.loc[indiv_ipr.Output1 == 'EWI SMS',
                              str(ts)] = np.round(
                                  (len(sending_status) - deduction) / len(sending_status), 2)
        monitoring_ipr[name] = indiv_ipr
    
    with pd.ExcelWriter(output_path + 'monitoring_ipr.xlsx') as writer:
        for sheet_name, xlsxdf in monitoring_ipr.items():
            xlsxdf.to_excel(writer, sheet_name=sheet_name, index=False)
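
# A standalone sketch of the per-SMS timeliness grade computed above.
# delay_deduction and delay_min are module-level constants in the original
# code; the values below are illustrative assumptions only.
import pandas as pd

delay_deduction = 0.1   # assumed: fraction deducted per delay_min minutes of delay
delay_min = 30          # assumed: minutes per deduction unit

def sms_timeliness_grade(ts_written, ts_due):
    # an unwritten SMS contributes a grade of 0; otherwise deduct
    # proportionally to the delay past ts_due
    if pd.isnull(ts_written):
        return 0.0
    delay_s = max(0, (ts_written - ts_due).total_seconds())
    return 1 - delay_deduction * delay_s / (60 * delay_min)

ts_due = pd.Timestamp('2021-04-01 12:30')
print(sms_timeliness_grade(pd.Timestamp('2021-04-01 12:25'), ts_due))  # 1.0
print(sms_timeliness_grade(pd.Timestamp('2021-04-01 13:30'), ts_due))  # 0.8
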
Example #3
def main(start, end, sched, eval_df, mysql=True):
    downtime = ipr_lib.system_downtime(mysql=mysql)
    sched = ipr_lib.remove_downtime(sched, downtime)
    # non-raising releases: ts_start is add_start minutes after data_ts and
    # ts_due is another `due` minutes later; raising releases: ts_start is
    # data_ts itself and ts_due is due_r minutes after data_ts
    raising = sched.raising == 1
    sched.loc[~raising, 'ts_start'] = sched.loc[~raising, 'data_ts'] + timedelta(minutes=add_start)
    sched.loc[~raising, 'ts_due'] = sched.loc[~raising, 'ts_start'] + timedelta(minutes=due)
    sched.loc[raising, 'ts_start'] = sched.loc[raising, 'data_ts']
    sched.loc[raising, 'ts_due'] = sched.loc[raising, 'data_ts'] + timedelta(minutes=due_r)

    releases = qdb.get_web_releases(start, end, mysql=mysql)
    sched = pd.merge(sched, releases, how='left', on=['site_code', 'data_ts'])
    # timeliness grade: deduct delay_deduction per delay_min-minute block that
    # the release falls outside the [ts_start, ts_due] window, with the total
    # deduction capped at 1; releases that were never posted get a grade of 0
    sched.loc[:, 'grade_t'] = sched.apply(
        lambda row: 1 - min(
            1,
            delay_deduction * np.ceil(
                max(max(0, (row.ts_release - row.ts_due).total_seconds()),
                    max(0, (row.ts_start - row.ts_release).total_seconds())) /
                (60 * delay_min))),
        axis=1)
    sched.loc[sched.ts_release.isnull(), 'grade_t'] = 0

    monitoring_ipr = pd.read_excel(output_path + 'monitoring_ipr.xlsx',
                                   sheet_name=None)

    for name in monitoring_ipr.keys():
        indiv_ipr = monitoring_ipr[name]
        indiv_ipr.columns = indiv_ipr.columns.astype(str)
        for ts in indiv_ipr.columns[5:]:
            ts = pd.to_datetime(ts)
            ts_end = ts + timedelta(0.5)
            shift_release = sched.loc[(sched.data_ts >= ts) &
                                      (sched.data_ts < ts_end), :]
            if len(shift_release) != 0:
                grade = np.round(np.average(shift_release.grade_t), 2)
                indiv_ipr.loc[indiv_ipr.Output2 == 'EWI web release',
                              str(ts)] = grade
            else:
                indiv_ipr.loc[indiv_ipr.Output2 == 'EWI web release',
                              str(ts)] = np.nan
            if ts >= pd.to_datetime('2021-04-01') and len(shift_release) != 0:
                shift_eval = eval_df.loc[
                    (eval_df.shift_ts >= ts) &
                    (eval_df.shift_ts <= ts + timedelta(1)) &
                    ((eval_df['evaluated_MT'] == name) |
                     (eval_df['evaluated_CT'] == name) |
                     (eval_df['evaluated_backup'] == name)
                     ), :].drop_duplicates('shift_ts', keep='last')[0:1]
                deduction = np.nansum(shift_eval[[
                    'routine_web_alert_ts', 'web_alert_ts'
                ]].values) / 3 + np.nansum(shift_eval[[
                    'routine_web_alert_level', 'web_alert_level'
                ]].values)
                indiv_ipr.loc[indiv_ipr.Output1 == 'web release',
                              str(ts)] = np.round(
                                  (len(shift_release) - deduction) /
                                  len(shift_release), 2)
            else:
                indiv_ipr.loc[indiv_ipr.Output1 == 'web release',
                              str(ts)] = np.nan
        monitoring_ipr[name] = indiv_ipr

    with pd.ExcelWriter(output_path + 'monitoring_ipr.xlsx') as writer:
        for sheet_name, xlsxdf in monitoring_ipr.items():
            xlsxdf.to_excel(writer, sheet_name=sheet_name, index=False)
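
# A standalone sketch of the web-release timeliness grade computed above:
# deductions are stepped per delay_min-minute block outside the
# [ts_start, ts_due] window and capped at 1.  delay_deduction and delay_min
# are module-level constants in the original code; the values below are
# illustrative assumptions only.
import numpy as np
import pandas as pd

delay_deduction = 0.1   # assumed
delay_min = 30          # assumed

def web_release_grade(ts_release, ts_start, ts_due):
    # a release that was never posted gets a grade of 0
    if pd.isnull(ts_release):
        return 0.0
    late = max(0, (ts_release - ts_due).total_seconds())
    early = max(0, (ts_start - ts_release).total_seconds())
    blocks = np.ceil(max(late, early) / (60 * delay_min))
    return 1 - min(1, delay_deduction * blocks)

ts_start = pd.Timestamp('2021-04-01 12:00')
ts_due = pd.Timestamp('2021-04-01 12:30')
print(web_release_grade(pd.Timestamp('2021-04-01 12:10'), ts_start, ts_due))  # 1.0
print(web_release_grade(pd.Timestamp('2021-04-01 13:15'), ts_start, ts_due))  # 0.8
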
Example #4
def main(start, end, sched, eval_df, mysql=False):
    monitoring_ipr = pd.read_excel(output_path + 'monitoring_ipr.xlsx',
                                   sheet_name=None)

    downtime = ipr_lib.system_downtime(mysql=mysql)
    sched = ipr_lib.remove_downtime(sched, downtime)

    for name in monitoring_ipr.keys():
        indiv_ipr = monitoring_ipr[name]
        indiv_ipr.columns = indiv_ipr.columns.astype(str)
        for ts in indiv_ipr.columns[5:]:
            ts = pd.to_datetime(ts)
            ts_end = ts + timedelta(0.5)
            shift_release = sched.loc[(sched.data_ts > ts) &
                                      (sched.data_ts <= ts_end), :]
            event_shift_release = shift_release.loc[shift_release.event ==
                                                    1, :]

            shift_eval = eval_df.loc[
                (eval_df.shift_ts >= ts) &
                (eval_df.shift_ts <= ts + timedelta(1)) &
                ((eval_df['evaluated_MT'] == name) |
                 (eval_df['evaluated_CT'] == name) |
                 (eval_df['evaluated_backup'] == name)), :].drop_duplicates(
                     'shift_ts', keep='last')[0:1]
            deduction = np.nansum(shift_eval[[
                'routine_tag', 'surficial_tag', 'response_tag', 'rain_tag',
                'call_log', 'fyi_tag', 'aim_tag', 'aim_surficial_tag',
                'aim_response_tag'
            ]].values)
            if len(shift_release) != 0:
                indiv_ipr.loc[
                    indiv_ipr.Output1.str.contains('narrative', na=False),
                    str(ts)] = max(
                        0,
                        np.round((15 * len(set(shift_release.site_code)) -
                                  deduction) /
                                 (15 * len(set(shift_release.site_code))), 2))
                if len(event_shift_release) != 0:
                    indiv_ipr.loc[indiv_ipr.Output2 == 'EoSR',
                                  str(ts)] = np.round(
                                      (len(set(shift_release.site_code)) -
                                       0.1 * np.nansum(shift_eval['eosr'])) /
                                      len(set(shift_release.site_code)), 2)
                    indiv_ipr.loc[
                        indiv_ipr.Output2 == 'narratives',
                        str(ts)] = int((
                            (shift_eval.mt_narrative == 'No').values.all()) & (
                                (shift_eval.eosr_info == 'Yes').values.all()))
                    indiv_ipr.loc[indiv_ipr.Output2 == 'plot attachment',
                                  str(ts)] = np.round(
                                      (len(set(shift_release.site_code)) -
                                       0.25 * np.nansum(shift_eval['plot'])) /
                                      len(set(shift_release.site_code)), 2)
                    indiv_ipr.loc[
                        indiv_ipr.Output2 == 'subsurface analysis',
                        str(ts)] = np.round(
                            (3 * len(set(shift_release.site_code)) -
                             0.75 * np.nansum(shift_eval['eosr_subsurface'])) /
                            (3 * len(set(shift_release.site_code))), 2)
                    indiv_ipr.loc[
                        indiv_ipr.Output2 == 'surficial analysis',
                        str(ts)] = np.round(
                            (3 * len(set(shift_release.site_code)) -
                             0.75 * np.nansum(shift_eval['eosr_surficial'])) /
                            (3 * len(set(shift_release.site_code))), 2)
                    indiv_ipr.loc[
                        indiv_ipr.Output2 == 'rainfall analysis',
                        str(ts)] = np.round(
                            (3 * len(set(shift_release.site_code)) -
                             0.75 * np.nansum(shift_eval['eosr_rain'])) /
                            (3 * len(set(shift_release.site_code))), 2)
                    moms_release = shift_release.loc[shift_release.moms ==
                                                     1, :]
                    if len(moms_release) != 0:
                        indiv_ipr.loc[
                            indiv_ipr.Output2 == 'moms analysis',
                            str(ts)] = np.round(
                                (3 * len(
                                    set(shift_release.loc[shift_release.moms ==
                                                          1, 'site_code'])) -
                                 0.75 * np.nansum(shift_eval['eosr_moms'])) /
                                (3 * len(
                                    set(shift_release.loc[shift_release.moms ==
                                                          1, 'site_code']))),
                                2)
                    eq_release = shift_release.loc[shift_release.EQ == 1, :]
                    if len(eq_release) != 0:
                        indiv_ipr.loc[
                            indiv_ipr.Output2 == 'eq analysis',
                            str(ts)] = np.round(
                                (3 * len(
                                    set(shift_release.loc[shift_release.EQ ==
                                                          1, 'site_code'])) -
                                 0.75 * np.nansum(shift_eval['eosr_eq'])) /
                                (3 * len(
                                    set(shift_release.loc[shift_release.EQ ==
                                                          1, 'site_code']))),
                                2)
            else:
                indiv_ipr.loc[indiv_ipr.Output2 == 'gintag',
                              str(ts)] = max(
                                  0, np.round((15 - deduction) / 15, 2))
        monitoring_ipr[name] = indiv_ipr

    with pd.ExcelWriter(output_path + 'monitoring_ipr.xlsx') as writer:
        for sheet_name, xlsxdf in monitoring_ipr.items():
            xlsxdf.to_excel(writer, sheet_name=sheet_name, index=False)
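
# A standalone sketch of the normalization pattern used above: each graded
# item has a fixed point budget per site and loses weighted points per issue
# flagged in the shift evaluation; the narrative grade is additionally
# floored at zero.  The point values and counts below are illustrative.
import numpy as np

def normalized_grade(points_per_site, n_sites, weighted_deduction):
    total = points_per_site * n_sites
    return max(0, np.round((total - weighted_deduction) / total, 2))

# e.g. narratives: 15 points per site, 4 sites, 6 points of tag deductions
print(normalized_grade(15, 4, 6))            # 0.9
# e.g. EoSR surficial analysis: 3 points per site, 0.75 per flagged issue
print(normalized_grade(3, 4, 0.75 * 2))      # 0.88
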
Example #5
def main(start, end, sched, site_names, eval_df, mysql=True, to_csv=False):

    sent_start = start - timedelta(hours=0.25)
    sent_end = end + timedelta(hours=4)
    sched.loc[:, 'ts_due'] = sched.data_ts + timedelta(minutes=add_start + due)

    rain_recipients = qdb.get_rain_recipients(mysql=mysql, to_csv=to_csv)
    rain_sent = qdb.get_rain_sent(sent_start,
                                  sent_end,
                                  mysql=mysql,
                                  to_csv=to_csv)
    rain_sent = rain_sent.loc[~rain_sent.ts_sent.isnull(), :]
    rain_sched = raininfo.ewi_sched(sched, rain_recipients, rain_sent,
                                    site_names)

    monitoring_ipr = pd.read_excel(output_path + 'monitoring_ipr.xlsx',
                                   sheet_name=None)

    downtime = ipr_lib.system_downtime(mysql=mysql)
    rain_sched = ipr_lib.remove_downtime(rain_sched, downtime)

    for name in monitoring_ipr.keys():
        indiv_ipr = monitoring_ipr[name]
        indiv_ipr.columns = indiv_ipr.columns.astype(str)
        for ts in indiv_ipr.columns[5:]:
            ts = pd.to_datetime(ts)
            sending_status = rain_sched.loc[(rain_sched.data_ts >= ts) & (
                rain_sched.data_ts < ts + timedelta(0.5)), :]
            # ewi bulletin timeliness
            if len(sending_status) == 0:
                # no scheduled
                grade_t = np.nan
            elif len(sending_status.loc[
                    sending_status.ts_written.isnull(), :]) == 0 and all(
                        sending_status.ts_written <= sending_status.ts_due):
                # all sent on time
                grade_t = 1
            else:
                grade_t = np.round(
                    np.average(1 - sending_status.apply(lambda row: np.where(
                        row.ts_written is pd.NaT, 1,
                        delay_deduction * max(0, (row.ts_written - row.ts_due).
                                              total_seconds()) /
                        (60 * delay_min)),
                                                        axis=1)), 2)
            indiv_ipr.loc[indiv_ipr.Output2 == 'Rainfall info',
                          str(ts)] = grade_t
            if ts >= pd.to_datetime('2021-04-01') and len(sending_status) != 0:
                shift_eval = eval_df.loc[
                    (eval_df.shift_ts >= ts) &
                    (eval_df.shift_ts <= ts + timedelta(1)) &
                    ((eval_df['evaluated_MT'] == name) |
                     (eval_df['evaluated_CT'] == name) |
                     (eval_df['evaluated_backup'] == name)
                     ), :].drop_duplicates('shift_ts', keep='last')[0:1]
                unsent = len(
                    set(sending_status.loc[sending_status.ts_written.isnull(),
                                           'data_ts']))
                # half a point per flagged detail error and per unsent data_ts,
                # plus 0.05 per typo
                deduction = np.nansum(0.5 * (shift_eval['rain_det'] + unsent) +
                                      0.05 * shift_eval['rain_typo'])
                # if the evaluation sheet carries no detail or typo flags,
                # fall back to counting the unsent rows directly
                if len(sending_status) != 0 and np.nansum(
                        shift_eval['rain_det'] + shift_eval['rain_typo']) == 0:
                    deduction = len(sending_status.loc[
                        sending_status.ts_written.isnull(), :])
                indiv_ipr.loc[indiv_ipr.Output1 == 'Rainfall info',
                              str(ts)] = max(
                                  0,
                                  np.round((len(
                                      sending_status.drop_duplicates(
                                          ['data_ts', 'site_code'])) -
                                            deduction) / len(
                                                sending_status.drop_duplicates(
                                                    ['data_ts', 'site_code'])),
                                           2))
        monitoring_ipr[name] = indiv_ipr

    with pd.ExcelWriter(output_path + 'monitoring_ipr.xlsx') as writer:
        for sheet_name, xlsxdf in monitoring_ipr.items():
            xlsxdf.to_excel(writer, sheet_name=sheet_name, index=False)
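
# A standalone sketch of the rainfall-info quality deduction used above:
# 0.5 per flagged detail error and per unsent data_ts, plus 0.05 per typo,
# normalized against the number of unique (data_ts, site_code) bulletins
# for the shift.  All counts below are illustrative.
import numpy as np

def rain_info_grade(n_bulletins, n_unsent, n_wrong_detail, n_typo):
    deduction = 0.5 * (n_wrong_detail + n_unsent) + 0.05 * n_typo
    return max(0, np.round((n_bulletins - deduction) / n_bulletins, 2))

print(rain_info_grade(n_bulletins=10, n_unsent=1, n_wrong_detail=1, n_typo=2))  # 0.89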