async def alert_continuously(self, alert_interval_seconds):
    """Poll for alert matches forever, sleeping between checks.

    Awaits ``self.check_for_alert_match()`` once per cycle, then sleeps
    ``alert_interval_seconds`` seconds. A failure in a single check is
    logged and swallowed so one bad cycle never stops the polling loop.

    Args:
        alert_interval_seconds: delay between consecutive checks, in seconds.
    """
    while True:
        try:
            # Return value intentionally ignored: the loop polls forever.
            # (The original kept an unused `is_triggered` flag for a
            # commented-out `while is_triggered == False` exit condition
            # that was never enabled — dead code, removed.)
            await self.check_for_alert_match()
        except Exception:
            # Never let one failed check kill the loop; log and keep going.
            stacktrace = OsExpert.stacktrace()
            Log.e('Failed to run alert check, stacktrace:\n{}', stacktrace)
        await asyncio.sleep(alert_interval_seconds)
def watch_continuously(self, watch_interval_seconds):
    """Run the datafetch-API write-frequency check in an endless loop.

    Sleeps ``watch_interval_seconds`` seconds between checks and tracks how
    many checks in a row have failed; the streak resets after any success.

    Args:
        watch_interval_seconds: delay between consecutive checks, in seconds.
    """
    Log.i('continuous watching activated with interval of {} seconds', watch_interval_seconds)
    error_streak = 0
    while True:
        try:
            self.__verify_datafetch_apis_write_frequency()
        except Exception:
            error_streak += 1
            Log.e('fail during watcher check ({} consecutive errors)', error_streak)
            Log.d('stacktrace:\n{}', OsExpert.stacktrace())
        else:
            # A clean check ends the failure streak.
            error_streak = 0
        time.sleep(watch_interval_seconds)
# NOTE(review): the lines below are the tail of a larger routine whose `def`
# lies above this view; the first line continues a pandas HDFStore append
# call — presumably h5.append(job.uid, df_to_append, ...) — confirm against
# the full file. Nesting reconstructed; the original formatting was lost.
            df_to_append, format='table', data_columns=True)
        # Read the post-append row count back from the HDF5 storer for logging.
        row_count = h5.get_storer(job.uid).nrows
        Log.d('...h5 key {}, row count is {}', job.uid, row_count)
    except Exception as append_error:
        # NOTE(review): a bare re-raise makes this handler a no-op; presumably
        # a log/cleanup step was intended here — confirm before relying on it.
        raise append_error
    # Timing diagnostics: h5 append duration, then whole-subset duration.
    Log.d('...time spent adding to h5: {:.2f}s', time.time() - h5_process_start_time)
    row_processing_time = time.time() - subset_process_start_time
    Log.d('...total time spent on subset: {:.2f}s ({:.2f}s per row)', row_processing_time, row_processing_time / row_process_count)
    # Earliest transaction timestamp handled by this subset (set above this view).
    return transaction_min_timestamp

if __name__ == '__main__':
    # Script entry point: run the generator app's job feed until interrupted.
    file_dirpath = OsExpert.path_backstep(__file__)
    pd.options.display.float_format = '{:.2f}'.format
    try:
        app = GeneratorApp()
        app.feed_jobs_forever(
            job_changed_handler=lambda job: None #Log.w('dummy callback for job: {}', job.uid)
        )
    except KeyboardInterrupt:
        # Graceful exit on Ctrl-C.
        print('\n\nKeyboardInterrupt\n')
    except Exception as e:
        # Any other failure: log as critical, with the stacktrace at debug level.
        Log.c('app failed: {}', e)
        stacktrace = OsExpert.stacktrace()
        Log.d('stacktrace:\n{}', stacktrace)