class PagerDutyDaemon(Daemon):
    """Relay alerts from the outbound topic to PagerDuty.

    The actual forwarding is done by the PagerDutyMessage callback; this
    daemon only keeps the broker connection alive and emits heartbeats.
    """

    def run(self):
        self.running = True

        # Subscribe to the outbound topic; the callback handles delivery.
        self.mq = Messaging()
        self.mq.connect(callback=PagerDutyMessage(self.mq))
        self.mq.subscribe(destination=CONF.outbound_topic)  # TODO(nsatterl): use dedicated queue?

        while not self.shuttingdown:
            try:
                LOG.debug('Waiting for PagerDuty messages...')
                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                self.mq.send(Heartbeat(version=Version))
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class AlertaDaemon(Daemon):
    """Main alert-processing daemon.

    Consumes alerts from the message broker, dispatches them to a pool of
    worker threads via an internal queue, and periodically reports queue
    metrics to carbon and mongo.
    """

    alerta_opts = {
        'forward_duplicate': 'no',
    }

    def __init__(self, prog, **kwargs):
        config.register_opts(AlertaDaemon.alerta_opts)
        Daemon.__init__(self, prog, kwargs)

    def run(self):
        self.running = True
        self.queue = Queue.Queue()   # internal alert queue feeding the workers
        self.db = Mongo()            # mongo database
        self.carbon = Carbon()       # carbon metrics
        self.statsd = StatsD()       # graphite metrics

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=ServerMessage(self.mq, self.queue, self.statsd))
        self.mq.subscribe()

        # Start worker threads, tracking the ones that actually started so
        # shutdown can join every worker (previously only the last-created
        # worker was joined, and failed starts still received sentinels).
        workers = []
        LOG.debug('Starting %s worker threads...', CONF.server_threads)
        for i in range(CONF.server_threads):
            w = WorkerThread(self.mq, self.queue, self.statsd)
            try:
                w.start()
            except Exception as e:
                LOG.error('Worker thread #%s did not start: %s', i, e)
                continue
            workers.append(w)
            LOG.info('Started worker thread: %s', w.getName())

        while not self.shuttingdown:
            try:
                LOG.debug('Send heartbeat...')
                heartbeat = Heartbeat(version=Version, timeout=CONF.loop_every)
                self.mq.send(heartbeat)
                time.sleep(CONF.loop_every)
                LOG.info('Alert processing queue length is %d', self.queue.qsize())
                self.carbon.metric_send('alerta.alerts.queueLength', self.queue.qsize())
                self.db.update_queue_metric(self.queue.qsize())
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        # One shutdown sentinel per running worker, then wait for all of
        # them to drain and exit before dropping the broker connection.
        for w in workers:
            self.queue.put(None)
        for w in workers:
            w.join()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class LoggerDaemon(Daemon):
    """
    Index alerts in ElasticSearch using Logstash format so that
    logstash GUI and/or Kibana can be used as front-ends
    """

    def run(self):
        self.running = True

        # The LoggerMessage callback does the indexing; this loop only
        # keeps the connection alive and sends heartbeats.
        self.mq = Messaging()
        self.mq.connect(callback=LoggerMessage())
        self.mq.subscribe(destination=CONF.outbound_queue)

        while not self.shuttingdown:
            try:
                LOG.debug('Waiting for log messages...')
                time.sleep(30)

                LOG.debug('Send heartbeat...')
                self.mq.send(Heartbeat())
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class MailerDaemon(Daemon):
    """Hold alerts briefly, then email those still outstanding,
    rate-limited by a token bucket.

    The MailerMessage callback populates ``self.onhold``; this loop drains
    entries whose hold time has expired.
    """

    def run(self):
        self.running = True

        # Token bucket limits the outgoing mail rate.
        self.tokens = LeakyBucket(tokens=20, rate=30)
        self.tokens.start()

        self.onhold = dict()

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(
            callback=MailerMessage(self.mq, self.onhold, self.tokens))
        self.mq.subscribe(destination=CONF.outbound_topic)

        while not self.shuttingdown:
            try:
                LOG.debug('Send email messages...')
                for alertid in self.onhold.keys():
                    try:
                        mailAlert, hold_time = self.onhold[alertid]
                    except KeyError:
                        # Removed by the callback thread in the meantime.
                        continue

                    if time.time() <= hold_time:
                        continue  # still inside its hold window
                    if not self.tokens.get_token():
                        LOG.warning(
                            '%s : No tokens left, rate limiting this alert',
                            alertid)
                        continue

                    email = Mailer(mailAlert)
                    mail_to = CONF.mail_list.split(',')
                    # Per-alert override/addition via the 'mailto' tag.
                    if 'mailto' in mailAlert.tags:
                        mail_to.append(mailAlert.tags['mailto'])
                    email.send(mail_to=mail_to)

                    try:
                        del self.onhold[alertid]
                    except KeyError:
                        continue

                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                self.mq.send(Heartbeat(version=Version))
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        self.tokens.shutdown()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class MailerDaemon(Daemon):
    """Hold alerts briefly, then email those still outstanding,
    rate-limited by a token bucket.

    The MailerMessage callback populates ``self.onhold``; this loop drains
    entries whose hold time has expired.
    """

    def run(self):
        self.running = True

        # Token bucket limits the outgoing mail rate.
        self.tokens = LeakyBucket(tokens=20, rate=30)
        self.tokens.start()

        self.onhold = dict()

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=MailerMessage(self.mq, self.onhold, self.tokens))
        self.mq.subscribe(destination=CONF.outbound_topic)

        while not self.shuttingdown:
            try:
                LOG.debug('Send email messages...')
                for alertid in self.onhold.keys():
                    try:
                        (mailAlert, hold_time) = self.onhold[alertid]
                    except KeyError:
                        # Removed by the callback thread in the meantime.
                        continue

                    if time.time() > hold_time:
                        if not self.tokens.get_token():
                            LOG.warning('%s : No tokens left, rate limiting this alert', alertid)
                            continue

                        email = Mailer(mailAlert)
                        mail_to = CONF.mail_list.split(',')
                        # Extra recipients come from 'email:<address>' tags.
                        # Guard against a bare 'email...' tag with no ':'
                        # (previously tag.split(':')[1] raised IndexError).
                        for tag in mailAlert.tags:
                            if tag.startswith('email'):
                                parts = tag.split(':')
                                if len(parts) > 1:
                                    mail_to.append(parts[1])
                        email.send(mail_to=mail_to)

                        try:
                            del self.onhold[alertid]
                        except KeyError:
                            continue

                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                heartbeat = Heartbeat(version=Version)
                self.mq.send(heartbeat)
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        self.tokens.shutdown()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class NotifyDaemon(Daemon):
    """Notification daemon: re-reads its YAML config when the file changes
    and keeps the broker connection alive with heartbeats.
    """

    def run(self):
        self.running = True

        # Initialise alert config
        init_config()
        # Seed the config mtime used for change detection below.
        # (Previously config_mod_time was read before ever being assigned
        # in this function, raising UnboundLocalError on the first loop.)
        config_mod_time = os.path.getmtime(CONF.yaml_config)

        # Start token bucket thread
        _TokenThread = TokenTopUp()
        _TokenThread.start()

        # Start notify thread
        _NotifyThread = ReleaseThread()
        _NotifyThread.start()

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=NotifyMessage(self.mq))
        self.mq.subscribe(destination=CONF.outbound_topic)

        while not self.shuttingdown:
            try:
                # Read (or re-read) config as necessary
                if os.path.getmtime(CONF.yaml_config) != config_mod_time:
                    init_config()
                    config_mod_time = os.path.getmtime(CONF.yaml_config)

                LOG.debug('Waiting for email messages...')
                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                heartbeat = Heartbeat(version=Version)
                self.mq.send(heartbeat)
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True
                _TokenThread.shutdown()
                _NotifyThread.shutdown()

        LOG.info('Shutdown request received...')
        self.running = False

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class AlertaDaemon(Daemon):
    """Main alert-processing daemon.

    Consumes alerts from the message broker and dispatches them to a pool
    of worker threads via an internal queue.
    """

    def run(self):
        self.running = True
        self.queue = Queue.Queue()   # internal alert queue feeding the workers
        self.statsd = StatsD()       # graphite metrics

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=ServerMessage(self.mq, self.queue, self.statsd))
        self.mq.subscribe()

        # Start worker threads, tracking the ones that actually started so
        # shutdown can join every worker (previously only the last-created
        # worker was joined, and failed starts still received sentinels).
        workers = []
        LOG.debug('Starting %s worker threads...', CONF.server_threads)
        for i in range(CONF.server_threads):
            w = WorkerThread(self.mq, self.queue, self.statsd)
            try:
                w.start()
            except Exception as e:
                LOG.error('Worker thread #%s did not start: %s', i, e)
                continue
            workers.append(w)
            LOG.info('Started worker thread: %s', w.getName())

        while not self.shuttingdown:
            try:
                LOG.debug('Send heartbeat...')
                heartbeat = Heartbeat(version=Version, timeout=CONF.loop_every)
                self.mq.send(heartbeat)
                LOG.debug('Internal queue size is %s messages', self.queue.qsize())
                time.sleep(CONF.loop_every)
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        # One shutdown sentinel per running worker, then wait for all of
        # them to drain and exit before dropping the broker connection.
        for w in workers:
            self.queue.put(None)
        for w in workers:
            w.join()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class LoggerDaemon(Daemon):
    """
    Index alerts in ElasticSearch using Logstash format so that
    logstash GUI and/or Kibana can be used as front-ends
    """

    logger_opts = {
        'es_host': 'localhost',
        'es_port': 9200,
        'es_index': 'alerta-%Y.%m.%d',  # NB. Kibana config must match this index
    }

    def __init__(self, prog, **kwargs):
        config.register_opts(LoggerDaemon.logger_opts)
        Daemon.__init__(self, prog, kwargs)

    def run(self):
        self.running = True

        # The LoggerMessage callback does the indexing; this loop only
        # keeps the connection alive and sends heartbeats.
        self.mq = Messaging()
        self.mq.connect(callback=LoggerMessage(self.mq))
        self.mq.subscribe(destination=CONF.outbound_queue)

        while not self.shuttingdown:
            try:
                LOG.debug('Waiting for log messages...')
                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                self.mq.send(Heartbeat(version=Version))
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class PagerDutyDaemon(Daemon):
    """Relay alerts from the outbound topic to PagerDuty.

    The actual forwarding is done by the PagerDutyMessage callback; this
    daemon only keeps the broker connection alive and emits heartbeats.
    """

    pagerduty_opts = {
        'pagerduty_endpoint': 'https://events.pagerduty.com/generic/2010-04-15/create_event.json',
        'pagerduty_api_key': '',
    }

    def __init__(self, prog, **kwargs):
        config.register_opts(PagerDutyDaemon.pagerduty_opts)
        Daemon.__init__(self, prog, kwargs)

    def run(self):
        self.running = True

        # Subscribe to the outbound topic; the callback handles delivery.
        self.mq = Messaging()
        self.mq.connect(callback=PagerDutyMessage(self.mq))
        self.mq.subscribe(destination=CONF.outbound_topic)  # TODO(nsatterl): use dedicated queue?

        while not self.shuttingdown:
            try:
                LOG.debug('Waiting for PagerDuty messages...')
                time.sleep(CONF.loop_every)

                LOG.debug('Send heartbeat...')
                self.mq.send(Heartbeat(version=Version))
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class AlertaDaemon(Daemon):
    """Main alert-processing daemon.

    Consumes alerts from the message broker and hands them to a pool of
    alert-handler threads via an internal queue.
    """

    def run(self):
        self.running = True

        # Create internal queue
        self.queue = Queue.Queue()

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=ServerMessage(self.queue))
        self.mq.subscribe()

        # Start worker threads, tracking the ones that actually started so
        # shutdown can wait for them (previously sentinels were queued but
        # the workers were never joined, so the broker connection could be
        # dropped while alerts were still being processed).
        workers = []
        LOG.debug('Starting %s alert handler threads...', CONF.server_threads)
        for i in range(CONF.server_threads):
            w = WorkerThread(self.mq, self.queue)
            try:
                w.start()
            except Exception as e:
                LOG.error('Worker thread #%s did not start: %s', i, e)
                continue
            workers.append(w)
            LOG.info('Started alert handler thread: %s', w.getName())

        while not self.shuttingdown:
            try:
                time.sleep(0.1)
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        # One shutdown sentinel per running worker, then wait for all of
        # them to drain and exit before dropping the broker connection.
        for w in workers:
            self.queue.put(None)
        for w in workers:
            w.join()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()
class AlertaDaemon(Daemon):
    """Main alert-processing daemon.

    Consumes alerts from the message broker, dispatches them to a pool of
    worker threads via an internal queue, and periodically reports queue
    metrics to carbon and mongo.
    """

    alerta_opts = {
        'forward_duplicate': 'no',
    }

    def __init__(self, prog, **kwargs):
        config.register_opts(AlertaDaemon.alerta_opts)
        Daemon.__init__(self, prog, kwargs)

    def run(self):
        self.running = True
        self.queue = Queue.Queue()   # internal alert queue feeding the workers
        self.db = Mongo()            # mongo database
        self.carbon = Carbon()       # carbon metrics
        self.statsd = StatsD()       # graphite metrics

        # Connect to message queue
        self.mq = Messaging()
        self.mq.connect(callback=ServerMessage(self.mq, self.queue, self.statsd))
        self.mq.subscribe()

        # Start worker threads, tracking the ones that actually started so
        # shutdown can join every worker (previously only the last-created
        # worker was joined, and failed starts still received sentinels).
        workers = []
        LOG.debug('Starting %s worker threads...', CONF.server_threads)
        for i in range(CONF.server_threads):
            w = WorkerThread(self.mq, self.queue, self.statsd)
            try:
                w.start()
            except Exception as e:
                LOG.error('Worker thread #%s did not start: %s', i, e)
                continue
            workers.append(w)
            LOG.info('Started worker thread: %s', w.getName())

        while not self.shuttingdown:
            try:
                LOG.debug('Send heartbeat...')
                heartbeat = Heartbeat(version=Version, timeout=CONF.loop_every)
                self.mq.send(heartbeat)
                time.sleep(CONF.loop_every)
                LOG.info('Alert processing queue length is %d', self.queue.qsize())
                self.carbon.metric_send('alerta.alerts.queueLength', self.queue.qsize())
                self.db.update_queue_metric(self.queue.qsize())
            except (KeyboardInterrupt, SystemExit):
                self.shuttingdown = True

        LOG.info('Shutdown request received...')
        self.running = False

        # One shutdown sentinel per running worker, then wait for all of
        # them to drain and exit before dropping the broker connection.
        for w in workers:
            self.queue.put(None)
        for w in workers:
            w.join()

        LOG.info('Disconnecting from message broker...')
        self.mq.disconnect()