def __init__(self, config_options):
    if not boto3_available:
        print("Boto3 package is not available, cannot use SESAlerter.")
        return
    Alerter.__init__(self, config_options)
    try:
        from_addr = config_options["from"]
        to_addr = config_options["to"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if from_addr == "":
        raise RuntimeError("missing mail from address")
    if to_addr == "":
        raise RuntimeError("missing mail to address")
    self.from_addr = from_addr
    self.to_addr = to_addr
    self.support_catchup = True

    self.ses_client_params = {}
    if 'aws_region' in config_options:
        os.environ["AWS_DEFAULT_REGION"] = config_options['aws_region']
    if 'aws_access_key' in config_options and 'aws_secret_key' in config_options:
        self.ses_client_params['aws_access_key_id'] = config_options['aws_access_key']
        self.ses_client_params['aws_secret_access_key'] = config_options['aws_secret_key']

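# --- Hedged sketch (not in the original source): how a send method might use
# the fields stored above with boto3's SES client. `subject` and `body` are
# hypothetical parameters; the real alerter builds richer message text.
import boto3

def send_alert(self, subject, body):
    client = boto3.client('ses', **self.ses_client_params)
    client.send_email(
        Source=self.from_addr,
        Destination={'ToAddresses': [self.to_addr]},
        Message={'Subject': {'Data': subject},
                 'Body': {'Text': {'Data': body}}})
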
def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        username = config_options["username"]
        password = config_options["password"]
        target = config_options["target"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if 'sender' in config_options:
        sender = config_options["sender"]
        if len(sender) > 11:
            print("warning: truncating SMS sender name to 11 chars")
            sender = sender[:11]
    else:
        sender = "SmplMntr"
    api_host = 'www.bulksms.co.uk'
    if 'api_host' in config_options:
        api_host = config_options['api_host']
    self.username = username
    self.password = password
    self.target = target
    self.sender = sender
    self.api_host = api_host
    self.support_catchup = True

def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        mail_host = config_options["host"]
        from_addr = config_options["from"]
        to_addr = config_options["to"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if mail_host == "":
        raise RuntimeError("missing mailserver hostname")
    if from_addr == "":
        raise RuntimeError("missing mail from address")
    if to_addr == "":
        raise RuntimeError("missing mail to address")
    if "port" in config_options:
        try:
            mail_port = int(config_options["port"])
        except ValueError:
            raise RuntimeError("mail port is not an integer")
    else:
        mail_port = 25
    self.mail_host = mail_host
    self.mail_port = mail_port
    self.from_addr = from_addr
    self.to_addr = to_addr
    self.support_catchup = True

def __init__(self, config_options):
    if not requests_available:
        print("Requests package is not available, cannot use WeChatAlerter.")
        return
    Alerter.__init__(self, config_options)
    try:
        appid = config_options['appid']
        secret = config_options['secret']
        template = config_options['template']
        users = config_options['users']
        redis = config_options['redis']
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if appid == "":
        raise RuntimeError("missing wechat appid")
    if secret == "":
        raise RuntimeError("missing wechat secret")
    if template == "":
        raise RuntimeError("missing wechat template")
    if users == "":
        raise RuntimeError("missing wechat users")
    self.appid = appid
    self.secret = secret
    self.template = template
    self.users = users
    self.redis = redis

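# --- Hedged sketch (not in the original source): pushing a template message
# through the WeChat API. The configured redis handle is presumably for
# caching the short-lived access token; this sketch fetches a fresh one each
# time. Assumes self.users is a comma-separated list of OpenIDs and that
# `message` is a hypothetical parameter.
import requests

def send_alert(self, message):
    resp = requests.get('https://api.weixin.qq.com/cgi-bin/token',
                        params={'grant_type': 'client_credential',
                                'appid': self.appid,
                                'secret': self.secret})
    token = resp.json()['access_token']
    for user in self.users.split(','):
        requests.post(
            'https://api.weixin.qq.com/cgi-bin/message/template/send',
            params={'access_token': token},
            json={'touser': user.strip(),
                  'template_id': self.template,
                  'data': {'text': {'value': message}}})
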
def test_check_if_alert_under_threshold_no_alert(self):
    alerter = Alerter(alert_check_interval=120, high_traffic_threshold=10)
    loglines = generate_loglines(num_lines=100, sleep=0)
    has_alert = alerter.check_if_alert(loglines)
    self.assertFalse(has_alert)
    self.assertIsNone(alerter.last_alert)

def __init__(self, config_options):
    if not requests_available:
        print("Requests package is not available, cannot use SlackAlerter.")
        print("Try: pip install -r requirements.txt")
        return
    Alerter.__init__(self, config_options)
    try:
        url = config_options['url']
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if 'channel' in config_options:
        channel = config_options['channel']
    else:
        channel = None
    if 'username' in config_options:
        username = config_options['username']
    else:
        username = None
    if url == "":
        raise RuntimeError("missing url")
    self.url = url
    self.channel = channel
    self.username = username

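# --- Hedged sketch (not in the original source): posting to the configured
# Slack incoming webhook; channel and username overrides are only sent when
# configured. `message` is a hypothetical parameter.
import requests

def send_alert(self, message):
    payload = {'text': message}
    if self.channel:
        payload['channel'] = self.channel
    if self.username:
        payload['username'] = self.username
    requests.post(self.url, json=payload)
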
def test_check_if_alert_over_threshold_has_alert(self):
    alerter = Alerter(alert_check_interval=120, high_traffic_threshold=10)
    loglines_at_threshold = alerter.high_traffic_threshold * alerter.alert_check_interval
    loglines = generate_loglines(num_lines=loglines_at_threshold + 1, sleep=0)
    has_alert = alerter.check_if_alert(loglines)
    self.assertTrue(has_alert)
    self.assertIsNotNone(alerter.last_alert)
    self.assertEqual(alerter.last_alert.state, AlertState.HIGH_TRAFFIC)

def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        pushbullet_token = config_options["token"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if pushbullet_token == "":
        raise RuntimeError("missing pushbullet token")
    self.pushbullet_token = pushbullet_token
    self.support_catchup = True

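# --- Hedged sketch (not in the original source): pushing a note through the
# Pushbullet REST API with the stored token. `title` and `body` are
# hypothetical parameters.
import requests

def send_alert(self, title, body):
    requests.post('https://api.pushbullet.com/v2/pushes',
                  headers={'Access-Token': self.pushbullet_token},
                  json={'type': 'note', 'title': title, 'body': body})
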
def test_check_alert_recover_from_high_traffic(self):
    alerter = Alerter(alert_check_interval=120, high_traffic_threshold=10)
    loglines_at_threshold = alerter.high_traffic_threshold * alerter.alert_check_interval
    loglines = generate_loglines(num_lines=loglines_at_threshold + 1, sleep=0)
    alerter.check_if_alert(loglines)
    self.assertIsNotNone(alerter.last_alert)
    loglines = generate_loglines(num_lines=100, sleep=0)
    alerter.check_if_alert(loglines)
    self.assertIsNotNone(alerter.last_alert)
    self.assertEqual(alerter.last_alert.state, AlertState.RECOVERED)

def test_check_if_alert_no_duplicate_alerts_created(self):
    alerter = Alerter(alert_check_interval=120, high_traffic_threshold=10)
    loglines_at_threshold = alerter.high_traffic_threshold * alerter.alert_check_interval
    loglines = generate_loglines(num_lines=loglines_at_threshold + 1, sleep=0)
    # Generate first alert
    alerter.check_if_alert(loglines)
    first_alert = alerter.last_alert
    self.assertIsNotNone(alerter.last_alert)
    self.assertEqual(alerter.last_alert.state, AlertState.HIGH_TRAFFIC)
    # Try to generate a second alert; the original alert must be kept
    alerter.check_if_alert(loglines)
    self.assertEqual(first_alert.time, alerter.last_alert.time)

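# --- Hypothetical reconstruction (not in the original source) of the Alerter
# contract these tests exercise: len(loglines) is compared against
# threshold * interval, HIGH_TRAFFIC fires once, and dropping back under the
# threshold flips the state to RECOVERED. The real class lives elsewhere in
# the project and may differ.
import time
from collections import namedtuple
from enum import Enum

class AlertState(Enum):
    HIGH_TRAFFIC = 'high_traffic'
    RECOVERED = 'recovered'

Alert = namedtuple('Alert', ['state', 'time'])

class Alerter:
    def __init__(self, alert_check_interval, high_traffic_threshold):
        self.alert_check_interval = alert_check_interval
        self.high_traffic_threshold = high_traffic_threshold
        self.last_alert = None

    def check_if_alert(self, loglines):
        # Average requests/second over the window vs. the threshold.
        over = len(loglines) > (self.high_traffic_threshold *
                                self.alert_check_interval)
        in_high = (self.last_alert is not None and
                   self.last_alert.state is AlertState.HIGH_TRAFFIC)
        if over and not in_high:
            self.last_alert = Alert(AlertState.HIGH_TRAFFIC, time.time())
            return True
        if not over and in_high:
            self.last_alert = Alert(AlertState.RECOVERED, time.time())
        return False
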
def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        mail_host = config_options["host"]
        from_addr = config_options["from"]
        to_addr = config_options["to"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if mail_host == "":
        raise RuntimeError("missing mailserver hostname")
    if from_addr == "":
        raise RuntimeError("missing mail from address")
    if to_addr == "":
        raise RuntimeError("missing mail to address")
    if 'port' in config_options:
        try:
            mail_port = int(config_options["port"])
        except ValueError:
            raise RuntimeError("mail port is not an integer")
    else:
        mail_port = 25

    self.username = None
    self.password = None
    if 'username' in config_options:
        self.username = config_options['username']
    if 'password' in config_options:
        self.password = config_options['password']

    # Default to None so the attribute always exists, even when the 'ssl'
    # option is absent (the original only assigned it inside the branch).
    self.ssl = None
    if 'ssl' in config_options:
        if config_options['ssl'] == 'starttls':
            self.ssl = 'starttls'
        elif config_options['ssl'] == 'yes':
            print('Warning: ssl=yes for email alerter is untested')
            self.ssl = 'yes'

    self.mail_host = mail_host
    self.mail_port = mail_port
    self.from_addr = from_addr
    self.to_addr = to_addr
    self.support_catchup = True

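# --- Hedged sketch (not in the original source): delivering the mail with
# smtplib using the stored host/port/credentials and the ssl mode parsed
# above. `subject` and `body` are hypothetical parameters.
import smtplib
from email.mime.text import MIMEText

def send_alert(self, subject, body):
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = self.from_addr
    msg['To'] = self.to_addr
    if self.ssl == 'yes':
        server = smtplib.SMTP_SSL(self.mail_host, self.mail_port)
    else:
        server = smtplib.SMTP(self.mail_host, self.mail_port)
        if self.ssl == 'starttls':
            server.starttls()
    if self.username:
        server.login(self.username, self.password)
    try:
        server.sendmail(self.from_addr, [self.to_addr], msg.as_string())
    finally:
        server.quit()
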
def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        pushover_token = config_options["token"]
        pushover_user = config_options["user"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if pushover_token == "":
        raise RuntimeError("missing pushover token")
    if pushover_user == "":
        raise RuntimeError("missing pushover user")
    self.pushover_token = pushover_token
    self.pushover_user = pushover_user
    self.support_catchup = True

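# --- Hedged sketch (not in the original source): sending via the Pushover
# REST endpoint with the stored token and user key. `message` is a
# hypothetical parameter.
import requests

def send_alert(self, message):
    requests.post('https://api.pushover.net/1/messages.json',
                  data={'token': self.pushover_token,
                        'user': self.pushover_user,
                        'message': message})
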
def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    # config options:
    #   fail command string
    #   recover command string
    if 'fail_command' in config_options:
        self.fail_command = config_options['fail_command']
    else:
        self.fail_command = None
    if 'success_command' in config_options:
        self.success_command = config_options['success_command']
    else:
        self.success_command = None
    if 'catchup_command' in config_options:
        self.catchup_command = config_options['catchup_command']
    else:
        # Mirror the other two options so the attribute always exists
        self.catchup_command = None

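# --- Hedged sketch (not in the original source): how the stored command
# strings might be executed. The `monitor` argument, its `failed` flag, and
# the {name} token format are hypothetical; the real alerter substitutes its
# own monitor details into the command line.
import subprocess

def send_alert(self, name, monitor):
    command = self.fail_command if monitor.failed else self.success_command
    if command is None:
        return
    command = command.replace('{name}', name)  # hypothetical token format
    subprocess.call(command, shell=True)
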
def __init__(self, args):
    self.args = args
    self.__check_for_testing()
    self.stats = Stats(args['alert'], args['window'], args['test_window_end'])
    self.console = Console(self.stats, self.args)
    self.log_watcher = LogWatcher(self.stats, self.args['file'])
    self.alerter = Alerter(self.stats)
    threads = self.__setup_threads()
    self.__start_threads(threads)
    self.__keep_alive()

def __init__(self, config_options):
    if not requests_available:
        print("Requests package is not available, cannot use SlackAlerter.")
        return
    Alerter.__init__(self, config_options)
    try:
        url = config_options['url']
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if 'channel' in config_options:
        channel = config_options['channel']
    else:
        channel = None
    if url == "":
        raise RuntimeError("missing url")
    self.url = url
    self.channel = channel

def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        apikey = config_options["apikey"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    api_host = 'www.notifymyandroid.com'
    if 'api_host' in config_options:
        api_host = config_options['api_host']
    application = 'SimpleMonitor'
    if 'application' in config_options:
        application = config_options['application']
    self.apikey = apikey
    self.api_host = api_host
    self.application = application
    self.support_catchup = True

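# --- Hedged sketch (not in the original source): a notify call against the
# (now-defunct) Notify My Android public API, using the configured api_host.
# `event` and `description` are hypothetical parameters.
import requests

def send_alert(self, event, description):
    requests.post('https://%s/publicapi/notify' % self.api_host,
                  data={'apikey': self.apikey,
                        'application': self.application,
                        'event': event,
                        'description': description})
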
def __init__(self, config_options):
    if not requests_available:
        print("Requests package is not available, cannot use FortySixElksAlerter.")
        print("Try: pip install -r requirements.txt")
        return
    Alerter.__init__(self, config_options)
    try:
        username = config_options["username"]
        password = config_options["password"]
        target = config_options["target"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if 'sender' in config_options:
        sender = config_options["sender"]
        if sender[0] == '+' and sender[1:].isdigit():
            # sender is a phone number
            pass
        elif len(sender) < 3:
            raise RuntimeError("SMS sender name must be at least 3 chars long")
        elif len(sender) > 11:
            print("warning: truncating SMS sender name to 11 chars")
            sender = sender[:11]
    else:
        sender = "SmplMntr"
    api_host = 'api.46elks.com'
    if 'api_host' in config_options:
        api_host = config_options['api_host']
    self.username = username
    self.password = password
    self.target = target
    self.sender = sender
    self.api_host = api_host
    self.support_catchup = True

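# --- Hedged sketch (not in the original source): submitting the SMS through
# the 46elks REST API with HTTP basic auth and the configured api_host.
# `message` is a hypothetical parameter.
import requests

def send_alert(self, message):
    requests.post('https://%s/a1/SMS' % self.api_host,
                  auth=(self.username, self.password),
                  data={'from': self.sender,
                        'to': self.target,
                        'message': message})
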
def run(self):
    """Function which is called from the Daemon runner"""
    print("MACupdTracker starting on host %s, port %s" % (self.host, self.port))
    self.server = SocketServer.UDPServer((self.host, self.port), Handler)

    # Set variables
    self.server.TRACKER = {}
    self.server.MACLIST = self.MACLIST
    self.server.CONFIG = self.CONFIG

    # Set current log date
    now = datetime.now()
    self.server.LOG_DATE = now.strftime('%Y%m%d')

    # Read status if the current tracker logfile exists
    self.read_tracker_status()

    # Set logfile sizes
    self.set_logfile_sizes()

    # Instantiate alerter
    alerter = Alerter()
    self.server.alert = alerter.alert

    # Instantiate reporter
    reporter = Reporter(logdir=self.logdir, reportdir=self.reportdir,
                        maclist=self.MACLIST)
    self.server.report = reporter.report

    # Export functions to the handler
    self.server.dump_tracker_log = self.dump_tracker_log
    self.server.set_line = self.set_line
    self.server.rotate_logs = self.rotate_logs
    self.server.logdir = self.logdir
    self.rotate_logs()

    # Register atexit
    atexit.register(self.dump_atexit)

    # Start UDP handler forever...
    self.server.serve_forever()

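# --- Hedged sketch (not in the original source): a minimal Handler of the
# kind run() wires into the UDPServer. The real handler parses MAC-update
# datagrams; the set_line() signature is an assumption based on the
# attributes exported above.
class Handler(SocketServer.BaseRequestHandler):
    def handle(self):
        data, _socket = self.request  # a UDP request is (datagram, socket)
        line = data.decode('utf-8', 'replace').strip()
        self.server.set_line(line)  # hypothetical call into the tracker
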
def __init__(self, config_options):
    Alerter.__init__(self, config_options)
    try:
        username = config_options["username"]
        password = config_options["password"]
        target = config_options["target"]
    except KeyError:
        raise RuntimeError("Required configuration fields missing")
    if "sender" in config_options:
        sender = config_options["sender"]
        if len(sender) > 11:
            print("warning: truncating SMS sender name to 11 chars")
            sender = sender[:11]
    else:
        sender = "SmplMntr"
    self.username = username
    self.password = password
    self.target = target
    self.sender = urllib.quote(sender)
    self.support_catchup = True

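# --- Hedged sketch (not in the original source): a submission via the legacy
# BulkSMS EAPI. The path and parameter names are assumptions from the old
# HTTP API; self.sender is already URL-quoted in __init__, hence the
# hand-built query string. `message` is a hypothetical parameter.
import urllib
import requests

def send_alert(self, message):
    url = ('https://www.bulksms.co.uk/eapi/submission/send_sms/2/2.0'
           '?username=%s&password=%s&message=%s&msisdn=%s&sender=%s'
           % (self.username, self.password, urllib.quote(message),
              self.target, self.sender))
    requests.get(url)
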
def __init__(self, parent_pid, storage): """ Initialize the Analyzer """ super(Analyzer, self).__init__() self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH) self.daemon = True self.parent_pid = parent_pid self.current_pid = getpid() self.lock = Lock() self.exceptions = Manager().dict() self.anomaly_breakdown = Manager().dict() self.anomalous_metrics = Manager().list() self.storage = storage self.alerter = Alerter(storage)
class Analyzer(Thread):
    def __init__(self, parent_pid, storage):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.lock = Lock()
        self.exceptions = Manager().dict()
        self.anomaly_breakdown = Manager().dict()
        self.anomalous_metrics = Manager().list()
        self.storage = storage
        self.alerter = Alerter(storage)

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Discover assigned metrics
        keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = i * keys_per_processor
        assigned_min = assigned_max - keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        raw_assigned = self.redis_conn.mget(assigned_metrics)

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()
            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list=False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                anomalous, ensemble, datapoint, ts = run_selected_algorithm(timeseries)

                # If it's anomalous, add it to the list
                if anomalous:
                    base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
                    metric = [datapoint, base_name, ts]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except AttributeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Incomplete:
                exceptions['Incomplete'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Collate process-specific dicts to main dicts
        with self.lock:
            for key, value in anomaly_breakdown.items():
                if key not in self.anomaly_breakdown:
                    self.anomaly_breakdown[key] = value
                else:
                    self.anomaly_breakdown[key] += value

            for key, value in exceptions.items():
                if key not in self.exceptions:
                    self.exceptions[key] = value
                else:
                    self.exceptions[key] += value

    def send_mail(self, alert, metric):
        """
        Send an alert email to the appropriate recipient
        """
        msg = MIMEMultipart('alternative')
        msg['Subject'] = '[skyline alert] ' + metric[1]
        msg['From'] = settings.ALERT_SENDER
        msg['To'] = alert[1]
        link = '%s/render/?width=588&height=308&target=%s' % (settings.GRAPHITE_HOST, metric[1])
        body = 'Anomalous value: %s <br> Next alert in: %s seconds <a href="%s"><img src="%s"/></a>' % (metric[0], alert[2], link, link)
        msg.attach(MIMEText(body, 'html'))
        s = SMTP('127.0.0.1')
        s.sendmail(settings.ALERT_SENDER, alert[1], msg.as_string())
        s.quit()

    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break
                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Wait for the spawned processes to finish
            for p in pids:
                p.join()

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Process anomalous metrics
            for metric in self.anomalous_metrics:
                try:
                    last_save_key = 'last_save.%s.%s' % (metric[1], metric[2])
                    last_save = self.redis_conn.get(last_save_key)
                    if not last_save:
                        self.redis_conn.setex(last_save_key, settings.SKIP_FREQUENCY, packb(metric[0]))
                        self.storage.save(metric)
                        if settings.ENABLE_ALERTS:
                            last_alert_key = 'last_alert.' + metric[1]
                            last_alert = self.redis_conn.get(last_alert_key)
                            if not last_alert:
                                self.redis_conn.setex(last_alert_key, settings.SKIP_FREQUENCY, packb(metric[0]))
                                self.alerter.add(metric)
                except Exception as e:
                    logger.error("Failed processing anomaly, pid: %s, metric: %s, error: %s", getpid(), metric[1], e)

            # Send ready alerts
            if settings.ENABLE_ALERTS:
                try:
                    self.alerter.send_alerts()
                except Exception as e:
                    logger.error("Failed sending alerts, error: %s", e)

            # Log progress
            logger.info('seconds to run :: %.2f' % (time() - now))
            logger.info('total metrics :: %d' % len(unique_metrics))
            logger.info('total analyzed :: %d' % (len(unique_metrics) - sum(self.exceptions.values())))
            logger.info('total anomalies :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats :: %s' % self.exceptions)
            logger.info('anomaly breakdown :: %s' % self.anomaly_breakdown)

            # Log to Graphite
            if settings.GRAPHITE_HOST != '':
                host = settings.GRAPHITE_HOST.replace('http://', '')
                system('echo skyline.analyzer.run_time %.2f %s | nc -w 3 %s 2003' % ((time() - now), now, host))
                system('echo skyline.analyzer.total_analyzed %d %s | nc -w 3 %s 2003' % ((len(unique_metrics) - sum(self.exceptions.values())), now, host))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list=False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human
                logger.info('canary duration :: %.2f' % time_human)
                if settings.GRAPHITE_HOST != '':
                    host = settings.GRAPHITE_HOST.replace('http://', '')
                    system('echo skyline.analyzer.duration %.2f %s | nc -w 3 %s 2003' % (time_human, now, host))
                    system('echo skyline.analyzer.projected %.2f %s | nc -w 3 %s 2003' % (projected, now, host))

            # Reset counters
            self.anomalous_metrics[:] = []
            self.exceptions = Manager().dict()
            self.anomaly_breakdown = Manager().dict()

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)

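# --- Worked example (not in the original source) of the key-partitioning
# arithmetic in spin_process, assuming 10 metrics and
# settings.ANALYZER_PROCESSES = 3. Process indices start at 1 and the last
# process absorbs the remainder, which re-assigns a few keys already given
# to the previous process whenever the division is uneven.
from math import ceil

unique_metrics = ['metric.%d' % n for n in range(10)]
processes = 3
keys_per_processor = int(ceil(float(len(unique_metrics)) / processes))  # 4
for i in range(1, processes + 1):
    assigned_max = len(unique_metrics) if i == processes else i * keys_per_processor
    assigned_min = assigned_max - keys_per_processor
    print(i, list(range(assigned_min, assigned_max)))
# 1 [0, 1, 2, 3]
# 2 [4, 5, 6, 7]
# 3 [6, 7, 8, 9]   <- keys 6 and 7 are analyzed twice
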
import asyncio

from alerter import Alerter
from alerter.config import WEB_PORT
from alerter.Subscribers import subscribers
from alerter.Queues import NOTIFICATION_QUEUE
from alerter.sources.web_hook import WebHook
from alerter.pins.printer import PrinterPin
from alerter.pins.telegram import TelegramPin
from alerter.managers.telegram import TelegramManager

source_types = ["deploy"]

Alerter.register_manager(TelegramManager(subscribers, source_types))
Alerter.register_pin(PrinterPin())
Alerter.register_pin(TelegramPin())
Alerter.register_source(WebHook(NOTIFICATION_QUEUE))


async def get_tasks(loop):
    app = Alerter.create_web()
    server = loop.create_server(app.make_handler(), '127.0.0.1', WEB_PORT)
    tasks = [server]
    tasks.append(Alerter.notification_loop(NOTIFICATION_QUEUE, subscribers))
    tasks.extend(Alerter.get_tasks())
    await asyncio.gather(*tasks)


def main():
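    # --- Hedged sketch (not in the original source): the body of main() is
    # cut off above; a plausible completion runs the gathered tasks on the
    # default event loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(get_tasks(loop))


if __name__ == '__main__':
    main()
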
        TopNSectionsStatistic(n=top_n_value, statistic_delay=STATS_DELAY_INTERVAL)
    ]
    if 'response_codes' in requested_stats:
        default_stats += [
            TopNResponseStatusCodes(n=top_n_value, statistic_delay=STATS_DELAY_INTERVAL)
        ]
    if 'request_size' in requested_stats:
        default_stats += [
            AverageRequestSizeStatistic(statistic_delay=STATS_DELAY_INTERVAL)
        ]
    return default_stats


logkeep = LogKeep(ALERT_DELAY_INTERVAL)
consumer = LogConsumer(FILE_PATH, logkeep)
alerter = Alerter(ALERT_DELAY_INTERVAL, HIGH_TRAFFIC_THRESHOLD)
traffic_stats = create_traffic_statistics(TRAFFIC_STATS)
monitor = HTTPLogMonitor(consumer, logkeep, alerter, traffic_stats,
                         STATS_DELAY_INTERVAL)

try:
    print('Monitoring {}...'.format(FILE_PATH))
    monitor.run()
except KeyboardInterrupt:
    # Clean up the offset file generated by Pygtail
    consumer = None
    os.remove('{}.offset'.format(FILE_PATH))

class AutoscanApplication(object):
    """
    This is the Alerting Tool business-logic class. It applies the main
    business logic for generating the scans, the alerts and the
    notifications, and it orchestrates the interaction between the
    components of the application.
    """

    def __init__(self, config=None):
        self.config_mdb_atlas = config.get('mdb_atlas')
        self.config_email = config.get('email')
        self.config_sms = config.get('sms')
        self._discoverer = Discoverer(None, None)
        self._nmapper = Nmapper()
        self._alerter = Alerter()
        self._scan_db = MongoDBClient(
            self.config_mdb_atlas.get('scan_cluster_name'),
            self.config_mdb_atlas.get('scan_username'),
            self.config_mdb_atlas.get('scan_password'),
            self.config_mdb_atlas.get('scan_db'))
        self._running_event = None
        self._previous_scan = {'hosts_list': [], 'hosts_ports_scan': []}
        self._email_client = EmailClient(
            self.config_email.get('email_from_addr'),
            self.config_email.get('email_password'),
            self.config_email.get('email_smtp_server'),
            self.config_email.get('email_smtp_server_port'))
        self._sms_client = SmsClient(self.config_sms.get('sms_account_sid'),
                                     self.config_sms.get('sms_auth_token'))

    def catch_exceptions(cancel_on_failure=False):
        def catch_exceptions_decorator(job_func):
            @functools.wraps(job_func)
            def wrapper(*args, **kwargs):
                try:
                    return job_func(*args, **kwargs)
                except Exception:
                    # The original built the message string without printing it
                    print('Exception caught: ')
                    import traceback
                    print(traceback.format_exc())
                    if cancel_on_failure:
                        return schedule.CancelJob
            return wrapper
        return catch_exceptions_decorator

    @catch_exceptions(cancel_on_failure=False)
    def _job(self):
        """
        The job represents the steps executed every time the scheduler is
        triggered.
        """
        print("AutoscanApplication _job Discovering hosts: ")
        scan_result = self._create_scan_result()
        print("AutoscanApplication _job Saving the scan: ")
        self._save_scan_result(scan_result)
        print("AutoscanApplication _job Scan saved: ")
        alerts_result = self._alerter.inspect_generate_alert_output(
            self._previous_scan, scan_result, scan_result['scan_time'])
        print("AutoscanApplication _job Saving the alerts: ")
        ssls_result = alerts_result.pop('ssl_analysis', None)
        alerting_check = alerts_result.pop('alerting_check', None)
        self._save_alerts_result(alerts_result)
        print("AutoscanApplication _job Alerts saved: ")
        print("AutoscanApplication _job Saving the ssls: ")
        self._save_ssls_result(ssls_result)
        print("AutoscanApplication _job SSLs saved: ")
        self._previous_scan = scan_result
        self._verify_and_generate_alerts(self._alerting_config, alerting_check,
                                         scan_result['scan_time'])

    def _run_continuously(self, schedule, interval):
        """
        Continuously run, while executing pending jobs at each elapsed time
        interval.
        """
        cease_continuous_run = threading.Event()

        class ScheduleThread(threading.Thread):
            @classmethod
            def run(cls):
                print('ScheduleThread run:')
                while not cease_continuous_run.is_set():
                    print('ScheduleThread schedule: ' +
                          str(cease_continuous_run.is_set()))
                    print('ScheduleThread schedule run_pending:')
                    schedule.run_pending()
                    time.sleep(interval)

        continuous_thread = ScheduleThread()
        continuous_thread.start()
        return cease_continuous_run

    def _print_scans(self):
        discoveries = self._scan_db.collection(
            self.config_mdb_atlas.get('scans_coll'))
        cursor = discoveries.find()
        for document in cursor:
            print(document)

    def start(self, config):
        """
        Creates the job to be scheduled and returns control to the main
        thread of the Web Application.
        """
        print('AutoscanApplication start:')
        if config['network'] is not None:
            self._discoverer.network(config['network'])
        if config['mask'] is not None:
            self._discoverer.mask(config['mask'])
        schedule.every(config['schedule_interval']).minutes.do(
            self._job).tag('scan')
        self._running_event = self._run_continuously(schedule,
                                                     config['interval'])
        p_scan = self._find_last_scan()
        if p_scan.count():
            self._previous_scan = p_scan[0]
        self._alerting_config = config['alerting_config']
        return self._running_event.is_set()

    def stop(self):
        """
        Stop the scheduler from executing the next job. The current job
        cannot be stopped if it is still running.
        """
        print('AutoscanApplication stop:')
        result = True
        if self._running_event is not None:
            self._running_event.set()
            schedule.clear('scan')
            result = self._running_event.is_set()
        return result

    def _verify_and_generate_alerts(self, alerting_config, alerting_check,
                                    scan_time):
        """
        Compares the current alerting configuration against the alert checks
        produced by the Alerter class.
        """

        def _verify_email(email_ac, alerting_check):
            result = False
            if email_ac.get('missing_hosts', False) and alerting_check.get('missing_hosts'):
                result = True
            elif email_ac.get('new_hosts', False) and alerting_check.get('new_hosts'):
                result = True
            elif email_ac.get('missing_ports', False) and alerting_check.get('missing_ports'):
                result = True
            elif email_ac.get('new_ports', False) and alerting_check.get('new_ports'):
                result = True
            elif email_ac.get('vulns', False) and alerting_check.get('vulns'):
                result = True
            elif email_ac.get('ssl_issues', False) and alerting_check.get('ssl_issues'):
                result = True
            return result

        def _verify_sms(sms_ac, alerting_check):
            result = False
            if sms_ac.get('missing_hosts', False) and alerting_check.get('missing_hosts'):
                result = True
            elif sms_ac.get('new_hosts', False) and alerting_check.get('new_hosts'):
                result = True
            elif sms_ac.get('missing_ports', False) and alerting_check.get('missing_ports'):
                result = True
            elif sms_ac.get('new_ports', False) and alerting_check.get('new_ports'):
                result = True
            elif sms_ac.get('vulns', False) and alerting_check.get('vulns'):
                result = True
            elif sms_ac.get('ssl_issues', False) and alerting_check.get('ssl_issues'):
                result = True
            return result

        print("AutoscanApplication _verify_and_generate_alerts alerting_config: ",
              alerting_config)
        print("AutoscanApplication _verify_and_generate_alerts alerting_check: ",
              alerting_check)
        print("AutoscanApplication _job Checking for Sending Email: ")
        if _verify_email(email_ac=alerting_config.get('email'),
                         alerting_check=alerting_check):
            self._create_and_send_email(scan_time)
            print("AutoscanApplication _job Email Sent: ")
        else:
            print("AutoscanApplication _job Email NOT Sent: ")
        print("AutoscanApplication _job Checking for Sending SMS: ")
        if _verify_sms(sms_ac=alerting_config.get('sms'),
                       alerting_check=alerting_check):
            self._create_and_send_sms(scan_time)
            print("AutoscanApplication _job SMS Sent: ")
        else:
            print("AutoscanApplication _job SMS NOT Sent: ")

    def _create_and_send_sms(self, scan_time):
        """
        Generates a simple SMS body message and sends it to the SMS external
        subsystem.
        """
        body = ("Alerting Tool: new alerts for network topology, vulnerabilities "
                "or SSL certificates, please verify ASAP. Scan Time: " + str(scan_time))
        self._sms_client.send_sms_alert(self.config_sms.get('sms_to_number'),
                                        self.config_sms.get('sms_from_number'),
                                        body)

    def _create_and_send_email(self, scan_time):
        """
        Generates the HTML body and sends it to the SMTP external subsystem.
        """
        text_part = "AlertingTool Email Report from scan finished on: " + str(scan_time)
        subject = "AlertingTool Email Report " + str(scan_time)
        toaddr_list = [
            e.strip() for e in self.config_email.get('email_to_addr').split(',')
        ]
        html_part = ("<h1>Network Topology and Vulnerabilities Alert</h1> <br> " +
                     self.get_html_alert_result(str(scan_time)) +
                     "<h1>SSL Analysis Alert</h1> <br> " +
                     self.get_html_ssl_result(str(scan_time)))
        self._email_client.send_mail_alert(toaddr_list, text_part, html_part,
                                           subject)

    def get_html_scan_result(self, scan_time):
        data = self._get_scan_by_date(self._parse_string_to_datetime(scan_time))
        return json2html.convert(json=data, table_attributes='id="scan-table"')

    def get_html_alert_result(self, scan_time):
        data = self._get_alert_by_date(self._parse_string_to_datetime(scan_time))
        return json2html.convert(json=data, table_attributes='id="alerts-table"')

    def get_html_ssl_result(self, scan_time):
        data = self._get_ssl_by_date(self._parse_string_to_datetime(scan_time))
        return json2html.convert(json=data, table_attributes='id="ssls-table"')

    def get_scans_limit(self, limit):
        return self._get_last_scans_limit(limit)

    def get_alerts_limit(self, limit):
        return self._get_last_alerts_limit(limit)

    def get_ssls_limit(self, limit):
        return self._get_last_ssls_limit(limit)

    def _parse_string_to_datetime(self, string_time):
        return parser.parse(string_time)

    def _get_scan_by_date(self, scantime):
        return self._scan_db.collection(
            self.config_mdb_atlas.get('scans_coll')).find_one(
                {"scan_time": scantime}, {'_id': 0})

    def _get_alert_by_date(self, scantime):
        return self._scan_db.collection(
            self.config_mdb_atlas.get('alerts_coll')).find_one(
                {"scan_time": scantime}, {'_id': 0})

    def _get_ssl_by_date(self, scantime):
        return self._scan_db.collection(
            self.config_mdb_atlas.get('ssls_coll')).find_one(
                {"scan_time": scantime}, {'_id': 0})

    def _save_scan_result(self, scan_result):
        discoveries = self._scan_db.collection(
            self.config_mdb_atlas.get('scans_coll'))
        discoveries.insert_one(scan_result)

    def _save_alerts_result(self, alerts_result):
        alerts = self._scan_db.collection(
            self.config_mdb_atlas.get('alerts_coll'))
        alerts.insert_one(alerts_result)

    def _save_ssls_result(self, ssls_result):
        if ssls_result:
            ssls = self._scan_db.collection(
                self.config_mdb_atlas.get('ssls_coll'))
            ssls.insert_one(ssls_result)

    def _get_last_scans_limit(self, limit=1):
        print("autoscan_app _get_last_scans_limit")
        return self._scan_db.collection(
            self.config_mdb_atlas.get('scans_coll')).find().sort(
                "scan_time", -1).limit(limit)

    def _get_last_alerts_limit(self, limit=1):
        print("autoscan_app _get_last_alerts_limit")
        return self._scan_db.collection(
            self.config_mdb_atlas.get('alerts_coll')).find().sort(
                "scan_time", -1).limit(limit)

    def _get_last_ssls_limit(self, limit=1):
        print("autoscan_app _get_last_ssls_limit")
        return self._scan_db.collection(
            self.config_mdb_atlas.get('ssls_coll')).find().sort(
                "scan_time", -1).limit(limit)

    def _find_last_scan(self):
        print('AutoscanApplication _find_last_scan:')
        return self._scan_db.collection(
            self.config_mdb_atlas.get('scans_coll')).find().sort(
                "scan_time", -1).limit(1)

    def get_alert(self, scan_time=None):
        if not scan_time:
            return self._find_last_alert()

    def _find_last_alert(self):
        print('AutoscanApplication _find_last_alert')
        alerts = self._scan_db.collection(
            self.config_mdb_atlas.get('alerts_coll')).find(
                {}, {'_id': False, 'scan_time': False}).sort(
                    "scan_time", -1).limit(1)
        result = {"message": "no alerts"}
        if alerts.count():
            result = alerts[0]
        print(result)
        return dumps(result)

    def _create_scan_result(self):
        print('AutoscanApplication _create_scan_result:')
        ip_discovery = self._discoverer.discover()
        nmapper_result = []
        for ip in ip_discovery:
            scan_result = self._nmapper.scan_ports_per_host(ip)
            nmapper_result.append(scan_result)
        return {
            'scan_time': datetime.datetime.utcnow(),
            'hosts_list': ip_discovery,
            'hosts_ports_scan': nmapper_result
        }

class Bot():
    def __init__(self):
        ##self.crawl_site = 'https://coronamask.kr'
        self.json_file = '/root/maskbot/data/coronamask.json'
        # Mask-site info to crawl: {name: {content, link, sell_time}}
        self.mask_list = {}
        self.alerter = Alerter()

    # Crawling method
    def crawling(self, _time=60, _count=-1):
        # Load the mask info
        self.get_info_from_json()
        # Compute each sale time and collect the ones about 10 minutes away
        msg_list = [mask for mask in self.mask_list.values()
                    if self.is_time_to_alert(mask)]
        # Send the alert messages
        if msg_list:
            try:
                self.alerter.send_all_msgs(msg_list)
            except Exception as e:
                print(e)
            else:
                # Mark alerted entries so they are not alerted again
                for dic in msg_list:
                    n = dic['name']
                    self.mask_list[n]['alerted'] = True
                self.save_update_to_json()

    # Return True/False depending on whether it is time to send an alert
    def is_time_to_alert(self, mask):
        # No need to alert again if an alert was already sent
        if mask['alerted']:
            return False
        # Cannot alert if the sale time has not been set yet
        if not mask['sell_time']:
            return False
        # Current time in Korea
        KST = timezone('Asia/Seoul')
        now = datetime.datetime.utcnow()
        now = utc.localize(now).astimezone(KST)
        # Convert the sale time to a datetime object
        mask_time = datetime.datetime.strptime(mask['sell_time'],
                                               '%Y/%m/%d %H:%M')
        mask_time = KST.localize(mask_time)
        ##mask_time = mask_time.astimezone(KST)
        # Alert roughly 10 minutes before the sale starts
        diff = (mask_time - now).seconds // 60
        day_diff = (mask_time - now).days
        return (day_diff == 0) and (diff < 12)

    # Save mask_list to JSON, so guerrilla sales can be added by editing
    # the JSON file directly
    def save_update_to_json(self):
        with open(self.json_file, 'w', encoding='utf-8') as _json_file:
            json.dump(self.mask_list, _json_file, ensure_ascii=False,
                      indent="\t")

    # Load the coronamask JSON; guerrilla sales are entered manually
    def get_info_from_json(self):
        with open(self.json_file, encoding='utf-8') as masks:
            self.mask_list = json.load(masks)

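# --- Hedged sketch (not in the original source): a minimal driver loop.
# crawling() re-reads the JSON file on every call, so a plain sleep loop is
# enough to pick up manually added guerrilla sales.
import time

if __name__ == '__main__':
    bot = Bot()
    while True:
        bot.crawling()
        time.sleep(60)
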