def main(name='', custom_end=''):
    """Generate the column-level sensor alert for one site column.

    Reads the site name from sys.argv[1] when *name* is empty, evaluates
    per-node alerts (node_alert2) over the processing window ending at
    *custom_end*, aggregates them to a column alert, escalates via A.main()
    on L2/L3, otherwise writes the alert to the database.

    Returns:
        (column_level_alert DataFrame, monitoring proc data)
    """
    if name == '':
        name = sys.argv[1].lower()
    start = datetime.now()  # wall clock, used only for the runtime printout
    print "=========================== {} {} =========================".format(str(name), custom_end)
    window, config = rtw.getwindow(end=custom_end)
    col = q.GetSensorList(name)
    monitoring = g.genproc(col[0], window, config, config.io.column_fix)
    lgd = q.GetLastGoodDataFromDb(monitoring.colprops.name)
    # velocities restricted to the monitoring window, ordered by timestamp
    monitoring_vel = monitoring.vel[window.start:window.end]
    monitoring_vel = monitoring_vel.reset_index().sort_values('ts', ascending=True)
    nodal_dv = monitoring_vel.groupby('id')
    # per-node alert evaluation, then column-level aggregation
    alert = nodal_dv.apply(node_alert2, colname=monitoring.colprops.name,
                           num_nodes=monitoring.colprops.nos,
                           T_disp=config.io.t_disp, T_velL2=config.io.t_vell2,
                           T_velL3=config.io.t_vell3, k_ac_ax=config.io.k_ac_ax,
                           lastgooddata=lgd, window=window, config=config)
    alert = column_alert(alert, config.io.num_nodes_to_check, config.io.k_ac_ax)
    # drop nodes flagged as not working for this site
    not_working = q.GetNodeStatus(1).loc[q.GetNodeStatus(1).site == name].node.values
    for i in not_working:
        alert = alert.loc[alert.id != i]
    # site alert = worst column alert; otherwise the minimum modal value
    if 'L3' in list(alert.col_alert.values):
        site_alert = 'L3'
    elif 'L2' in list(alert.col_alert.values):
        site_alert = 'L2'
    else:
        site_alert = min(getmode(list(alert.col_alert.values)))
    column_level_alert = pd.DataFrame({'timestamp': [window.end],
                                       'site': [monitoring.colprops.name],
                                       'source': ['sensor'],
                                       'alert': [site_alert],
                                       'updateTS': [window.end]})
    print column_level_alert
    if site_alert in ('L2', 'L3'):
        # escalate for adjusted/filtered re-evaluation.
        # NOTE(review): A.main's return value is discarded here, unlike the
        # sibling main() variants which keep it — confirm this is intended.
        A.main(monitoring.colprops.name, custom_end)
    else:
        alert_toDB(column_level_alert, 'column_level_alert', window)
        write_site_alert(monitoring.colprops.name, window)
    #######################
    # plot decision driven by the latest public alert for the 3-letter site code
    query = "SELECT * FROM senslopedb.site_level_alert WHERE site = '%s' and source = 'public' ORDER BY updateTS DESC LIMIT 1" % monitoring.colprops.name[0:3]
    public_alert = q.GetDBDataFrame(query)
    if public_alert.alert.values[0] != 'A0' or RoundTime(pd.to_datetime(public_alert.timestamp.values[0])) == RoundTime(window.end):
        plot_time = ['07:30:00', '19:30:00']
        if str(window.end.time()) in plot_time:
            print "Plotter.main(monitoring, window, config)"
        elif RoundTime(pd.to_datetime(public_alert.timestamp.values[0])) == RoundTime(window.end):
            print "Plotter.main(monitoring, window, config)"
    #######################
    print 'run time =', datetime.now() - start
    return column_level_alert, monitoring
to_fill = io.io.to_fill to_smooth = io.io.to_smooth output_path = (__file__) output_file_path = (__file__) proc_file_path = (__file__) CSVFormat = '.csv' PrintProc = io.io.printproc T_disp = io.io.t_disp T_velL2 = io.io.t_vell2 T_velL3 = io.io.t_vell3 k_ac_ax = io.io.k_ac_ax num_nodes_to_check = io.io.num_nodes_to_check colarrange = io.io.alerteval_colarrange.split(',') summary = pd.DataFrame() node_status = qdb.GetNodeStatus(1) last_target = 5 for i in range(0,last_target): try: sites,custom_end = ffd.aim(i) sensorlist = qdb.GetSensorList(sites) for s in sensorlist: last_col=sensorlist[-1:] last_col=last_col[0] last_col=last_col.name # getting current column properties colname,num_nodes,seg_len= s.name,s.nos,s.seglen
def main(name='', end='', end_mon=False):
    """Column-level alert main for the 'noadjfilt' (no adjustment/filter) source.

    name:    site/column name; read from sys.argv[1] when empty.
    end:     window end timestamp; when empty, taken from sys.argv[2] if
             parseable (and not more than 30 min in the future), else now().
    end_mon: unused in this variant — kept for signature parity with the
             sibling main() implementations.

    Returns the column_level_alert DataFrame.
    """
    start = datetime.now()
    if name == '':
        name = sys.argv[1].lower()
    if end == '':
        try:
            end = pd.to_datetime(sys.argv[2])
            # reject timestamps more than 30 minutes in the future
            if end > start + timedelta(hours=0.5):
                print 'invalid timestamp'
                return
        # NOTE(review): bare except — this also silently swallows a missing
        # sys.argv[2] (IndexError) and falls back to the current time
        except:
            end = datetime.now()
    else:
        end = pd.to_datetime(end)
    window, config = rtw.getwindow(end)
    col = q.GetSensorList(name)
    monitoring = g.genproc(col[0], window, config, config.io.column_fix)
    lgd = q.GetLastGoodDataFromDb(monitoring.colprops.name)
    monitoring_vel = monitoring.disp_vel[window.start:window.end]
    monitoring_vel = monitoring_vel.reset_index().sort_values('ts', ascending=True)
    nodal_dv = monitoring_vel.groupby('id')
    alert = nodal_dv.apply(node_alert2, colname=monitoring.colprops.name,
                           num_nodes=monitoring.colprops.nos,
                           T_disp=config.io.t_disp, T_velL2=config.io.t_vell2,
                           T_velL3=config.io.t_vell3, k_ac_ax=config.io.k_ac_ax,
                           lastgooddata=lgd, window=window, config=config)
    # seed the column alert with -1 ("no data") for every node
    alert['col_alert'] = -1
    col_alert = pd.DataFrame({'id': range(1, monitoring.colprops.nos + 1),
                              'col_alert': [-1] * monitoring.colprops.nos})
    node_col_alert = col_alert.groupby('id', as_index=False)
    # NOTE(review): the apply() return value is discarded — column_alert
    # presumably mutates `alert` in place through the alert= kwarg; confirm
    node_col_alert.apply(column_alert, alert=alert,
                         num_nodes_to_check=config.io.num_nodes_to_check,
                         k_ac_ax=config.io.k_ac_ax,
                         T_velL2=config.io.t_vell2, T_velL3=config.io.t_vell3)
    # translate numeric alert codes to their label form
    alert['node_alert'] = alert['node_alert'].map({-1: 'ND', 0: 'L0', 1: 'L2', 2: 'L3'})
    alert['col_alert'] = alert['col_alert'].map({-1: 'ND', 0: 'L0', 1: 'L2', 2: 'L3'})
    # drop nodes flagged as not working for this site
    not_working = q.GetNodeStatus(1).loc[q.GetNodeStatus(1).site == name].node.values
    for i in not_working:
        alert = alert.loc[alert.id != i]
    # site alert = worst column alert; otherwise the minimum modal value
    if 'L3' in list(alert.col_alert.values):
        site_alert = 'L3'
    elif 'L2' in list(alert.col_alert.values):
        site_alert = 'L2'
    else:
        site_alert = min(getmode(list(alert.col_alert.values)))
    column_level_alert = pd.DataFrame({'timestamp': [window.end],
                                       'site': [monitoring.colprops.name],
                                       'source': ['noadjfilt'],
                                       'alert': [site_alert],
                                       'updateTS': [window.end]})
    if site_alert in ('L2', 'L3'):
        # escalation replaces the alert frame with A.main's result
        column_level_alert = A.main(monitoring.colprops.name, window.end)
    alert_toDB(column_level_alert, 'column_level_alert', window)
    write_site_alert(monitoring.colprops.name, window)
    print column_level_alert
    print 'run time =', datetime.now() - start
    return column_level_alert
def trending_alertgen(trending_alert, monitoring, lgd, window, config): endTS = pd.to_datetime(trending_alert['timestamp'].values[0]) monitoring_vel = monitoring.vel[endTS - timedelta(3):endTS] monitoring_vel = monitoring_vel.reset_index().sort_values('ts', ascending=True) nodal_dv = monitoring_vel.groupby('id') alert = nodal_dv.apply(node_alert2, colname=monitoring.colprops.name, num_nodes=monitoring.colprops.nos, T_disp=config.io.t_disp, T_velL2=config.io.t_vell2, T_velL3=config.io.t_vell3, k_ac_ax=config.io.k_ac_ax, lastgooddata=lgd, window=window, config=config) alert = column_alert(alert, config.io.num_nodes_to_check, config.io.k_ac_ax) alert['timestamp'] = endTS palert = alert.loc[(alert.col_alert == 'L2') | (alert.col_alert == 'L3')] if len(palert) != 0: palert['site'] = monitoring.colprops.name palert = palert[[ 'timestamp', 'site', 'disp_alert', 'vel_alert', 'col_alert' ]].reset_index() palert = palert[[ 'timestamp', 'site', 'id', 'disp_alert', 'vel_alert', 'col_alert' ]] engine = create_engine('mysql://' + q.Userdb + ':' + q.Passdb + '@' + q.Hostdb + ':3306/' + q.Namedb) for i in palert.index: try: palert.loc[palert.index == i].to_sql( name='node_level_alert_noFilter', con=engine, if_exists='append', schema=q.Namedb, index=False) except: print 'data already written in senslopedb.node_level_alert_noFilter' alert['TNL'] = alert['col_alert'].values if len(palert) != 0: for i in palert['id'].values: query = "SELECT * FROM senslopedb.node_level_alert_noFilter WHERE site = '%s' and timestamp >= '%s' and id = %s" % ( monitoring.colprops.name, endTS - timedelta(hours=3), i) nodal_palertDF = q.GetDBDataFrame(query) if len(nodal_palertDF) >= 3: palert_index = alert.loc[alert.id == i].index[0] alert.loc[palert_index]['TNL'] = max( getmode(list(nodal_palertDF['col_alert'].values))) else: alert.loc[palert_index]['TNL'] = 'L0' not_working = q.GetNodeStatus(1).loc[q.GetNodeStatus( 1).site == monitoring.colprops.name]['node'].values for i in not_working: alert = 
alert.loc[alert.id != i] if 'L3' in alert['TNL'].values: site_alert = 'L3' elif 'L2' in alert['TNL'].values: site_alert = 'L2' else: site_alert = min(getmode(list(alert['TNL'].values))) alert_index = trending_alert.loc[trending_alert.timestamp == endTS].index[0] trending_alert.loc[alert_index] = [ endTS, monitoring.colprops.name, 'sensor', site_alert ] return trending_alert
def genproc(col, window, config, fixpoint, realtime=False, comp_vel=True):
    """Process raw accelerometer data for one column into displacement (and
    optionally velocity) time series.

    Args:
        col: column/sensor properties (name, nos = number of nodes, seglen).
        window: processing window (offsetstart, start, end, numpts).
        config: configuration object (config.io.* holds tuning parameters).
        fixpoint: fix point passed to the noise-profiling step.
        realtime: selects the realtime fill/smooth parameters when True.
        comp_vel: when True, also compute per-node instantaneous velocities.

    Returns a procdata object wrapping the resampled monitoring frame, the
    displacement/velocity frame, and the noise-profiling outputs.
    """
    monitoring = q.GetRawAccelData(col.name, window.offsetstart, window.end)
    # identify the node ids with no data at start of monitoring window
    NodesNoInitVal = GetNodesWithNoInitialData(monitoring, col.nos,
                                               window.offsetstart)
    # get last good data prior to the monitoring window (LGDPM)
    lgdpm = pd.DataFrame()
    for node in NodesNoInitVal:
        temp = q.GetSingleLGDPM(col.name, node,
                                window.offsetstart.strftime("%Y-%m-%d %H:%M"))
        lgdpm = lgdpm.append(temp, ignore_index=True)
    monitoring = monitoring.append(lgdpm)
    try:
        monitoring = flt.applyFilters(monitoring)
        LastGoodData = q.GetLastGoodData(monitoring, col.nos)
        q.PushLastGoodData(LastGoodData, col.name)
        LastGoodData = q.GetLastGoodDataFromDb(col.name)
    # best-effort: on any filtering/DB failure fall back to stored LGD
    except:
        LastGoodData = q.GetLastGoodDataFromDb(col.name)
#        print 'error'
#    if len(LastGoodData)<col.nos: print col.name, " Missing nodes in LastGoodData"
    monitoring = monitoring.loc[monitoring.id <= col.nos]
    invalid_nodes = q.GetNodeStatus(1)
    # BUG FIX: this filter was previously computed with a hardcoded site
    # ('oslb') and its result discarded, after which isin() was applied to
    # the whole node-status DataFrame (membership against column labels),
    # so invalid nodes were never actually removed. Filter by this column's
    # site and keep only the node ids, matching the sibling genproc.
    invalid_nodes = invalid_nodes[invalid_nodes.site == col.name]['node'].values
    monitoring = monitoring.loc[~monitoring.id.isin(invalid_nodes)]
    # assigns timestamps from LGD to be timestamp of offsetstart
    monitoring.loc[(monitoring.ts < window.offsetstart) |
                   (pd.isnull(monitoring.ts)), ['ts']] = window.offsetstart
    # convert accelerometer axes to linear xz/xy displacements
    monitoring['xz'], monitoring['xy'] = accel_to_lin_xz_xy(
        col.seglen, monitoring.x.values, monitoring.y.values,
        monitoring.z.values)
    monitoring = monitoring.drop(['x', 'y', 'z'], axis=1)
    monitoring = monitoring.drop_duplicates(['ts', 'id'])
    monitoring = monitoring.set_index('ts')
    monitoring = monitoring[['name', 'id', 'xz', 'xy']]
    # add NaN placeholder rows for nodes with no data at all in the window
    nodes_noval = GetNodesWithNoData(monitoring, col.nos)
    nodes_nodata = pd.DataFrame({'name': [0] * len(nodes_noval),
                                 'id': nodes_noval,
                                 'xy': [np.nan] * len(nodes_noval),
                                 'xz': [np.nan] * len(nodes_noval),
                                 'ts': [window.offsetstart] * len(nodes_noval)})
    nodes_nodata = nodes_nodata.set_index('ts')
    monitoring = monitoring.append(nodes_nodata)
    max_min_df, max_min_cml = err.cml_noise_profiling(monitoring, config,
                                                      fixpoint, col.nos)
    # resamples xz and xy values per node using forward fill
    monitoring = monitoring.groupby('id').apply(
        resamplenode, window=window).reset_index(level=1).set_index('ts')
    nodal_proc_monitoring = monitoring.groupby('id')
    if not realtime:
        to_smooth = config.io.to_smooth
        to_fill = config.io.to_fill
    else:
        to_smooth = config.io.rt_to_smooth
        to_fill = config.io.rt_to_fill
    filled_smoothened = nodal_proc_monitoring.apply(
        fill_smooth, offsetstart=window.offsetstart, end=window.end,
        roll_window_numpts=window.numpts, to_smooth=to_smooth,
        to_fill=to_fill)
    filled_smoothened = filled_smoothened[['xz', 'xy', 'name']].reset_index()
    monitoring = filled_smoothened.set_index('ts')
    if comp_vel == True:
        # elapsed time in days since the first sample, for velocity fitting
        filled_smoothened['td'] = (filled_smoothened.ts.values -
                                   filled_smoothened.ts.values[0])
        filled_smoothened['td'] = filled_smoothened['td'].apply(
            lambda x: x / np.timedelta64(1, 'D'))
        nodal_filled_smoothened = filled_smoothened.groupby('id')
        disp_vel = nodal_filled_smoothened.apply(
            node_inst_vel, roll_window_numpts=window.numpts,
            start=window.start)
        disp_vel = disp_vel[['ts', 'xz', 'xy', 'vel_xz', 'vel_xy',
                             'name']].reset_index()
        disp_vel = disp_vel[['ts', 'id', 'xz', 'xy', 'vel_xz', 'vel_xy',
                             'name']]
        disp_vel = disp_vel.set_index('ts')
        disp_vel = disp_vel.sort_values('id', ascending=True)
    else:
        disp_vel = monitoring
    return procdata(col, monitoring.sort(), disp_vel.sort(), max_min_df,
                    max_min_cml)
def main(name='', end=datetime.now(), end_mon=False): if name == '': name = sys.argv[1].lower() window, config = rtw.getwindow(end) col = q.GetSensorList(name) monitoring = g.genproc(col[0], window, config, config.io.column_fix) lgd = q.GetLastGoodDataFromDb(monitoring.colprops.name) monitoring_vel = monitoring.vel[window.start:window.end] monitoring_vel = monitoring_vel.reset_index().sort_values('ts', ascending=True) nodal_dv = monitoring_vel.groupby('id') alert = nodal_dv.apply(node_alert2, colname=monitoring.colprops.name, num_nodes=monitoring.colprops.nos, T_disp=config.io.t_disp, T_velL2=config.io.t_vell2, T_velL3=config.io.t_vell3, k_ac_ax=config.io.k_ac_ax, lastgooddata=lgd, window=window, config=config) alert = column_alert(alert, config.io.num_nodes_to_check, config.io.k_ac_ax) not_working = q.GetNodeStatus(1).loc[q.GetNodeStatus(1).site == name].node.values for i in not_working: alert = alert.loc[alert.id != i] if 'L3' in list(alert.col_alert.values): site_alert = 'L3' elif 'L2' in list(alert.col_alert.values): site_alert = 'L2' else: site_alert = min(getmode(list(alert.col_alert.values))) column_level_alert = pd.DataFrame({ 'timestamp': [window.end], 'site': [monitoring.colprops.name], 'source': ['sensor'], 'alert': [site_alert], 'updateTS': [window.end] }) if site_alert in ('L2', 'L3'): column_level_alert = A.main(monitoring.colprops.name, window.end) alert_toDB(column_level_alert, 'column_level_alert', window) print column_level_alert write_site_alert(monitoring.colprops.name, window) ####################### if monitoring.colprops.name == 'mesta': colname = 'msu' elif monitoring.colprops.name == 'messb': colname = 'msl' else: colname = monitoring.colprops.name[0:3] query = "SELECT * FROM senslopedb.site_level_alert WHERE site = '%s' and source = 'public' and timestamp <= '%s' and updateTS >= '%s' ORDER BY updateTS DESC LIMIT 1" % ( colname, window.end, window.end - timedelta(hours=0.5)) public_alert = q.GetDBDataFrame(query) if 
public_alert.alert.values[0] != 'A0': plot_time = ['07:30:00', '19:30:00'] if str(window.end.time()) in plot_time or end_mon: plotter.main(monitoring, window, config, plotvel_start=window.end - timedelta(hours=3), plotvel_end=window.end, realtime=False) elif RoundTime(pd.to_datetime( public_alert.timestamp.values[0])) == RoundTime(window.end): plotter.main(monitoring, window, config, plotvel_start=window.end - timedelta(hours=3), plotvel_end=window.end, realtime=False) ####################### return column_level_alert
def genproc(col, window, config, fixpoint, realtime=False, comp_vel=True):
    """Process raw accelerometer data for one column into displacement,
    depth, net distance and (optionally) velocity time series.

    col: column/sensor properties (name, nos = number of nodes, seglen).
    window: processing window (offsetstart, start, end, numpts).
    config: configuration object (config.io.* holds tuning parameters).
    fixpoint: fix point passed to the noise-profiling step.
    realtime: selects the realtime fill/smooth parameters when True.
    comp_vel: when True, also compute per-node instantaneous velocities.

    Returns a procdata object wrapping the displacement/velocity frame and
    the noise-profiling outputs.
    """
    monitoring = q.GetRawAccelData(col.name, window.offsetstart, window.end)
    monitoring = flt.applyFilters(monitoring)
    try:
        LastGoodData = q.GetLastGoodData(monitoring, col.nos)
        q.PushLastGoodData(LastGoodData, col.name)
        LastGoodData = q.GetLastGoodDataFromDb(col.name)
    # best-effort: on any failure fall back to the stored last good data
    except:
        LastGoodData = q.GetLastGoodDataFromDb(col.name)
    # identify the node ids with no data at start of monitoring window
    NodesNoInitVal = GetNodesWithNoInitialData(monitoring, col.nos,
                                               window.offsetstart)
    # get last good data prior to the monitoring window (LGDPM)
    if len(NodesNoInitVal) != 0:
        lgdpm = q.GetSingleLGDPM(col.name, NodesNoInitVal, window.offsetstart)
        if len(lgdpm) != 0:
            lgdpm = flt.applyFilters(lgdpm)
            # keep only the most recent row per node id
            lgdpm = lgdpm.sort_index(ascending=False).drop_duplicates('id')
        if len(lgdpm) != 0:
            monitoring = monitoring.append(lgdpm)
    monitoring = monitoring.loc[monitoring.id <= col.nos]
    # assigns timestamps from LGD to be timestamp of offsetstart
    monitoring.loc[(monitoring.ts < window.offsetstart) |
                   (pd.isnull(monitoring.ts)), ['ts']] = window.offsetstart
    # remove readings from nodes flagged invalid for this site
    invalid_nodes = q.GetNodeStatus(1)
    invalid_nodes = invalid_nodes[invalid_nodes.site == col.name]
    if len(invalid_nodes) != 0:
        stat = invalid_nodes.groupby('node', as_index=False)
        monitoring = stat.apply(remove_invalid, df=monitoring)
    # add zero placeholder rows for nodes with no data at all in the window
    nodes_noval = GetNodesWithNoData(monitoring, col.nos)
    nodes_nodata = pd.DataFrame({'name': [0] * len(nodes_noval),
                                 'id': nodes_noval,
                                 'x': [0] * len(nodes_noval),
                                 'y': [0] * len(nodes_noval),
                                 'z': [0] * len(nodes_noval),
                                 'ts': [window.offsetstart] * len(nodes_noval)})
    monitoring = monitoring.append(nodes_nodata)
    max_min_df, max_min_cml = err.cml_noise_profiling(monitoring, config,
                                                      fixpoint, col.nos)
    # convert accelerometer axes to linear xz/xy displacements
    monitoring['xz'], monitoring['xy'] = accel_to_lin_xz_xy(
        col.seglen, monitoring.x.values, monitoring.y.values,
        monitoring.z.values)
    monitoring = monitoring.drop_duplicates(['ts', 'id'])
    monitoring = monitoring.set_index('ts')
    # resamples xz and xy values per node using forward fill
    monitoring = monitoring.groupby('id').apply(
        resamplenode, window=window).reset_index(level=1).set_index('ts')
    nodal_proc_monitoring = monitoring.groupby('id')
    if not realtime:
        to_smooth = config.io.to_smooth
        to_fill = config.io.to_fill
    else:
        to_smooth = config.io.rt_to_smooth
        to_fill = config.io.rt_to_fill
    filled_smoothened = nodal_proc_monitoring.apply(
        fill_smooth, offsetstart=window.offsetstart, end=window.end,
        roll_window_numpts=window.numpts, to_smooth=to_smooth,
        to_fill=to_fill)
    filled_smoothened = filled_smoothened[['xz', 'xy', 'x', 'y', 'z',
                                           'name']].reset_index()
    # depth of each node: sign taken from x (x/|x|), magnitude from the
    # remaining segment length; x == 0 yields NaN, which the fillna below
    # replaces with the full segment length
    filled_smoothened['depth'] = filled_smoothened['x'] / np.abs(
        filled_smoothened['x']) * np.sqrt(
        col.seglen ** 2 - filled_smoothened['xz'] ** 2 -
        filled_smoothened['xy'] ** 2)
    filled_smoothened['depth'] = filled_smoothened['depth'].fillna(
        value=col.seglen)
    # resultant horizontal displacement magnitude
    filled_smoothened['net_dist'] = np.sqrt(
        (filled_smoothened['xz'] ** 2) + (filled_smoothened['xy'] ** 2))
    monitoring = filled_smoothened.set_index('ts')
    if comp_vel == True:
        # elapsed time in days since the first sample, for velocity fitting
        filled_smoothened['td'] = (filled_smoothened['ts'].values -
                                   filled_smoothened['ts'].values[0])
        filled_smoothened['td'] = filled_smoothened['td'].apply(
            lambda x: x / np.timedelta64(1, 'D'))
        nodal_filled_smoothened = filled_smoothened.groupby('id')
        disp_vel = nodal_filled_smoothened.apply(
            node_inst_vel, roll_window_numpts=window.numpts,
            start=window.start)
        disp_vel = disp_vel.reset_index(drop=True)
        disp_vel = disp_vel.set_index('ts')
        disp_vel = disp_vel.sort_values('id', ascending=True)
    else:
        disp_vel = monitoring
    return procdata(col, disp_vel.sort(), max_min_df, max_min_cml)
def worker(first_target, last_target):
    """Recreate, filter and plot alarm events for target indices
    [first_target, last_target).

    Pass 1 recomputes alerts for each target and collects candidate alarms
    in `summary`; pass 2 recomputes them with filtering enabled and splits
    the results into filtered-out (`s_f`) and surviving (`s_a`) alarms;
    passes 3 and 4 plot both groups via ffd.plotter.
    """
    #load all global variables?
    summary = pd.DataFrame()  # pass 1: candidate alarms
    s_f = pd.DataFrame()      # pass 2: alarms removed by filtering
    s_a = pd.DataFrame()      # pass 2: alarms surviving filtering
    io = cfg.config()
    num_roll_window_ops = io.io.num_roll_window_ops
    roll_window_length = io.io.roll_window_length
    data_dt = io.io.data_dt
    rt_window_length = io.io.rt_window_length
    roll_window_numpts = int(1 + roll_window_length / data_dt)
    col_pos_interval = io.io.col_pos_interval
    col_pos_num = io.io.num_col_pos
    to_fill = io.io.to_fill
    to_smooth = io.io.to_smooth
#    output_path = (__file__)
#    output_file_path = (__file__)
#    proc_file_path = (__file__)
    CSVFormat = '.csv'
#    PrintProc = io.io.printproc
    T_disp = io.io.t_disp
    T_velL2 = io.io.t_vell2
    T_velL3 = io.io.t_vell3
    k_ac_ax = io.io.k_ac_ax
    num_nodes_to_check = io.io.num_nodes_to_check
    colarrange = io.io.alerteval_colarrange.split(',')
    node_status = qdb.GetNodeStatus(1)
    # ---- pass 1: recreate alarms for each target ----
    for i in range(first_target, last_target):
#        try:
        sites, custom_end = ffd.aim(i)
        sensorlist = qdb.GetSensorList(sites)
        for s in sensorlist:
            last_col = sensorlist[-1:]
            last_col = last_col[0]
            last_col = last_col.name
            # getting current column properties
            colname, num_nodes, seg_len = s.name, s.nos, s.seglen
            # list of working nodes
            node_list = range(1, num_nodes + 1)
            not_working = node_status.loc[(node_status.site == colname) &
                                          (node_status.node <= num_nodes)]
            not_working_nodes = not_working['node'].values
            # NOTE(review): this inner loop reuses `i`, shadowing the outer
            # target index — harmless only because `i` is not read again
            # before the outer loop advances; confirm
            for i in not_working_nodes:
                node_list.remove(i)
            proc_monitoring, monwin = generate_proc(
                colname, num_nodes, seg_len, custom_end, roll_window_length,
                data_dt, rt_window_length, num_roll_window_ops)
            xz_series_list, xy_series_list = create_series_list(
                proc_monitoring, monwin, colname, num_nodes)
#            print "create_series_list tapos na"
            # create, fill and smooth dataframes from series lists
            xz = create_fill_smooth_df(xz_series_list, num_nodes, monwin,
                                       roll_window_numpts, to_fill, to_smooth)
            xy = create_fill_smooth_df(xy_series_list, num_nodes, monwin,
                                       roll_window_numpts, to_fill, to_smooth)
            # computing instantaneous velocity
            vel_xz, vel_xy = compute_node_inst_vel(xz, xy, roll_window_numpts)
            # computing cumulative displacements
            cs_x, cs_xz, cs_xy = compute_col_pos(xz, xy, monwin.index[-1],
                                                 col_pos_interval, col_pos_num,
                                                 seg_len)
            # processing dataframes for output
            xz, xy, xz_0off, xy_0off, vel_xz, vel_xy, vel_xz_0off, vel_xy_0off, cs_x, cs_xz, cs_xy, cs_xz_0, cs_xy_0 = df_to_out(
                colname, xz, xy, vel_xz, vel_xy, cs_x, cs_xz, cs_xy,
#                proc_file_path,
                CSVFormat)
            # Alert generation
#            alert_out=alert_generation(colname,xz,xy,vel_xz,vel_xy,num_nodes, T_disp, T_velL2, T_velL3, k_ac_ax,
#                                       num_nodes_to_check,custom_end,CSVFormat,colarrange)
            alert_out = alert_generation(colname, xz, xy, vel_xz, vel_xy,
                                         num_nodes, T_disp, T_velL2, T_velL3,
                                         k_ac_ax, num_nodes_to_check,
                                         custom_end, CSVFormat, colarrange)
            alert_out = alert_out.reset_index(level=['id'])
            alert_out = alert_out[['id', 'disp_alert', 'vel_alert',
                                   'node_alert', 'col_alert']]
            # keep only node 1 rows that look like alarms
            alert_out = alert_out[(alert_out['vel_alert'] > 0) |
                                  (alert_out.node_alert == 'l2')]
            alert_out = alert_out[alert_out.id == 1]
            alert_out['site'] = sites
            summary = pd.concat((summary, alert_out), axis=0)
#        except:
#            print "Error recreating alarm."
#            continue
    # ---- pass 2: re-run with filtering and split kept/removed alarms ----
    print "--------------------Filtering chenes----------------------"
    print "--------------------Store yung mga nafilter----------------------"
    for j in range(0, len(summary)):
#        try:
        sites, custom_end = time_site(j, summary)
#        print "custom_end -------------> %s" %str(custom_end)
        sensorlist = qdb.GetSensorList(sites)
        for s in sensorlist:
            last_col = sensorlist[-1:]
            last_col = last_col[0]
            last_col = last_col.name
            # getting current column properties
            colname, num_nodes, seg_len = s.name, s.nos, s.seglen
            # list of working nodes
            node_list = range(1, num_nodes + 1)
            not_working = node_status.loc[(node_status.site == colname) &
                                          (node_status.node <= num_nodes)]
            not_working_nodes = not_working['node'].values
            for i in not_working_nodes:
                node_list.remove(i)
#            proc_monitoring,monwin=generate_proc(colname, num_nodes, seg_len, custom_end,f=True)
            proc_monitoring, monwin = generate_proc(
                colname, num_nodes, seg_len, custom_end, roll_window_length,
                data_dt, rt_window_length, num_roll_window_ops, filt=True)
            xz_series_list, xy_series_list = create_series_list(
                proc_monitoring, monwin, colname, num_nodes)
            xz = create_fill_smooth_df(xz_series_list, num_nodes, monwin,
                                       roll_window_numpts, to_fill, to_smooth)
            xy = create_fill_smooth_df(xy_series_list, num_nodes, monwin,
                                       roll_window_numpts, to_fill, to_smooth)
            # computing instantaneous velocity
            vel_xz, vel_xy = compute_node_inst_vel(xz, xy, roll_window_numpts)
            # computing cumulative displacements
            cs_x, cs_xz, cs_xy = compute_col_pos(xz, xy, monwin.index[-1],
                                                 col_pos_interval, col_pos_num,
                                                 seg_len)
            # processing dataframes for output
            xz, xy, xz_0off, xy_0off, vel_xz, vel_xy, vel_xz_0off, vel_xy_0off, cs_x, cs_xz, cs_xy, cs_xz_0, cs_xy_0 = df_to_out(
                colname, xz, xy, vel_xz, vel_xy, cs_x, cs_xz, cs_xy,
#                proc_file_path,
                CSVFormat)
            # Alert generation
            alert_out = alert_generation(colname, xz, xy, vel_xz, vel_xy,
                                         num_nodes, T_disp, T_velL2, T_velL3,
                                         k_ac_ax, num_nodes_to_check,
                                         custom_end, CSVFormat, colarrange)
#            print alert_out
            alert_out = alert_out.reset_index(level=['id'])
            # a_out: node-1 rows that no longer alarm after filtering
            a_out = alert_out.copy()
            a_out = a_out[['id', 'disp_alert', 'vel_alert', 'node_alert',
                           'col_alert']]
            a_out = a_out[(a_out['vel_alert'] < 1.0) |
                          (a_out.node_alert == 'l0')]
            a_out = a_out[a_out.id == 1]
            a_out['site'] = sites
            s_f = pd.concat((s_f, a_out), axis=0)
            # b_out: node-1 rows that still alarm after filtering
            b_out = alert_out.copy()
            b_out = b_out[['id', 'disp_alert', 'vel_alert', 'node_alert',
                           'col_alert']]
            b_out = b_out[(b_out['vel_alert'] > 0.0) |
                          (b_out.node_alert == 'l2')]
            b_out = b_out[b_out.id == 1]
            b_out['site'] = sites
            s_a = pd.concat((s_a, b_out), axis=0)
#        except:
#            print "Error."
#            continue
    # ---- pass 3: plot the filtered-out alarms ----
    print "################# Drawing! Dahil drawing ka! ##################"
    print "################# Idrawing lahat ng nafilter! ##################"
    for k in range(0, len(s_f)):
        try:
            sites, custom_end = time_site(k, s_f)
            ce = custom_end.strftime("%y_%m_%d__%H_%M")
            fname = "FILTERED_" + str(sites) + "_" + ce + "_049_049"
            sensorlist = qdb.GetSensorList(sites)
            for s in sensorlist:
                last_col = sensorlist[-1:]
                last_col = last_col[0]
                last_col = last_col.name
                # getting current column properties
                colname, num_nodes, seg_len = s.name, s.nos, s.seglen
                # list of working nodes
#                node_list = range(1, num_nodes + 1)
#                not_working = node_status.loc[(node_status.site == colname) & (node_status.node <= num_nodes)]
#                not_working_nodes = not_working['node'].values
#                for i in not_working_nodes:
#                    node_list.remove(i)
                # importing proc_monitoring file of current column to dataframe
#                try:
#                print "proc_monitoring here: "
                proc_monitoring = generate_proc(
                    colname, num_nodes, seg_len, custom_end,
                    roll_window_length, data_dt, rt_window_length,
                    num_roll_window_ops, filt=True, for_plots=True)
#                print proc_monitoring
                proc_monitoring = proc_monitoring[proc_monitoring.id == 1]
                ffd.plotter(proc_monitoring, fname=fname)
        except:
            print "Error plotting Filtered."
    # ---- pass 4: plot the surviving alarms ----
    for k in range(0, len(s_a)):
        try:
            sites, custom_end = time_site(k, s_a)
            ce = custom_end.strftime("%y_%m_%d__%H_%M")
            sensorlist = qdb.GetSensorList(sites)
            for s in sensorlist:
                last_col = sensorlist[-1:]
                last_col = last_col[0]
                last_col = last_col.name
                # getting current column properties
                colname, num_nodes, seg_len = s.name, s.nos, s.seglen
                # list of working nodes
#                node_list = range(1, num_nodes + 1)
#                not_working = node_status.loc[(node_status.site == colname) & (node_status.node <= num_nodes)]
#                not_working_nodes = not_working['node'].values
#                for i in not_working_nodes:
#                    node_list.remove(i)
                # importing proc_monitoring file of current column to dataframe
#                try:
#                print "proc_monitoring here: "
                # NOTE(review): this call passes f=True where the sibling
                # loops pass filt=True — likely a typo'd keyword; confirm
                # against generate_proc's signature
                proc_monitoring = generate_proc(
                    colname, num_nodes, seg_len, custom_end,
                    roll_window_length, data_dt, rt_window_length,
                    num_roll_window_ops, f=True, for_plots=True)
#                print proc_monitoring
                proc_monitoring = proc_monitoring[proc_monitoring.id == 1]
                # NOTE(review): `fname` is never assigned in this loop — it
                # silently reuses the last FILTERED_* name from the previous
                # loop (or raises NameError if that loop was empty); a
                # dedicated alarm fname is almost certainly intended
                ffd.plotter(proc_monitoring, fname=fname)
        except:
            print "Error plotting Alarms."