def test_will_parse_basic(self):
    query = json.loads("""
    {
      "condition": "AND",
      "rules": [
        {
          "id": "column",
          "field": "column",
          "type": "string",
          "input": "text",
          "operator": "column_equal",
          "value": ["model_id", "5500"]
        }
      ]
    }
    """)

    network = Network()
    network.parse_query(query)

    # AND condition, single column condition
    assert len(network.conditions) == 2
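# Note the shape of the "column" rule above: for column_* operators the
# "value" is a [column name, expected value] pair that is matched against the
# result row's columns, not against a top-level field -- hence the two
# conditions counted (the AND group plus the column condition).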
def test_parse_error_unknown_operator(self):
    # A rule that names an operator the parser does not recognize
    query = json.loads("""
    {
      "condition": "OR",
      "rules": [
        {
          "id": "query_name",
          "field": "query_name",
          "type": "string",
          "input": "text",
          "operator": "BAD OPERATOR",
          "value": "foo"
        }
      ]
    }""")

    network = Network()
    exc = None
    try:
        network.parse_query(query)
    except Exception as e:
        exc = e

    assert isinstance(exc, ValueError)
    assert exc.args == ("Unsupported operator: BAD OPERATOR", )
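# Equivalently, under pytest (which the bare `assert` style above suggests),
# the expected failure can be written with pytest.raises -- a minimal sketch:
#
#   import pytest
#
#   with pytest.raises(ValueError) as excinfo:
#       network.parse_query(query)
#   assert excinfo.value.args == ("Unsupported operator: BAD OPERATOR", )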
def test_will_not_reuse_different_groups(self):
    # Different operators in each sub-group
    query = json.loads("""
    {
      "condition": "AND",
      "rules": [
        {
          "condition": "AND",
          "rules": [
            {
              "id": "query_name",
              "field": "query_name",
              "type": "string",
              "input": "text",
              "operator": "not_equal",
              "value": "asdf"
            }
          ]
        },
        {
          "condition": "AND",
          "rules": [
            {
              "id": "query_name",
              "field": "query_name",
              "type": "string",
              "input": "text",
              "operator": "equal",
              "value": "asdf"
            }
          ]
        }
      ]
    }""")

    network = Network()
    network.parse_query(query)

    counts = defaultdict(int)
    for condition in network.conditions.values():
        counts[condition.__class__.__name__] += 1

    # Top level, each sub-group (not reused), each condition
    assert counts == {
        'AndCondition': 3,
        'EqualCondition': 1,
        'NotEqualCondition': 1,
    }
def test_parse_error_no_rules_in_group(self):
    # A group with an empty "rules" list is invalid
    query = json.loads("""
    {
      "condition": "AND",
      "rules": [
      ]
    }""")

    network = Network()
    exc = None
    try:
        network.parse_query(query)
    except Exception as e:
        exc = e

    assert isinstance(exc, ValueError)
    assert exc.args == ("A group contains no rules", )
def test_will_reuse_identical_conditions(self):
    # Operators are equal in each condition
    query = json.loads("""
    {
      "condition": "AND",
      "rules": [
        {
          "condition": "AND",
          "rules": [
            {
              "id": "query_name",
              "field": "query_name",
              "type": "string",
              "input": "text",
              "operator": "equal",
              "value": "asdf"
            }
          ]
        },
        {
          "id": "query_name",
          "field": "query_name",
          "type": "string",
          "input": "text",
          "operator": "equal",
          "value": "asdf"
        }
      ]
    }""")

    network = Network()
    network.parse_query(query)

    counts = defaultdict(int)
    for condition in network.conditions.values():
        counts[condition.__class__.__name__] += 1

    # Top-level AND, nested AND group, and a single reused equal condition
    assert counts == {'AndCondition': 2, 'EqualCondition': 1}
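# Condition reuse is the property the two tests above pin down: parse_query()
# keys conditions on their structure, so structurally identical conditions
# share a single node.  A minimal sketch of the same property across repeated
# parses (an assumption extrapolated from these tests, not asserted elsewhere
# in this file):
#
#   network = Network()
#   network.parse_query(query)
#   before = len(network.conditions)
#   network.parse_query(query)
#   assert len(network.conditions) == before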
class RuleManager(object):
    def __init__(self, app=None):
        self.network = None
        self.last_update = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        self.load_alerters()

        # Save this instance on the app, so we have a way to get at it.
        app.rule_manager = self

    def load_alerters(self):
        """ Load the alerter plugin(s) specified in the app config. """
        from importlib import import_module
        from polylogyx.plugins import AbstractAlerterPlugin

        alerters = self.app.config.get('POLYLOGYX_ALERTER_PLUGINS', {})
        self.alerters = {}
        for name, (plugin, config) in alerters.items():
            package, classname = plugin.rsplit('.', 1)
            module = import_module(package)
            klass = getattr(module, classname, None)

            if klass is None:
                raise ValueError(
                    'Could not find a class named "{0}" in package "{1}"'.format(
                        classname, package))

            if not issubclass(klass, AbstractAlerterPlugin):
                raise ValueError(
                    '{0} is not a subclass of AbstractAlerterPlugin'.format(name))

            self.alerters[name] = klass(config)

    def should_reload_rules(self):
        """ Checks if we need to reload the set of rules. """
        from polylogyx.models import Rule

        if self.last_update is None:
            return True

        newest_rule = Rule.query.order_by(
            Rule.updated_at.desc()).limit(1).first()
        if newest_rule and self.last_update < newest_rule.updated_at:
            return True

        return False

    def load_ioc_intels(self):
        """ Cache all IOC intel rows for matching against incoming results. """
        from polylogyx.models import IOCIntel
        self.all_ioc_intels = list(IOCIntel.query.all())

    def load_rules(self):
        """ Load rules from the database. """
        from polylogyx.rules import Network
        from polylogyx.models import Rule

        if not self.should_reload_rules():
            return

        all_rules = list(Rule.query.filter(Rule.status != 'INACTIVE').all())

        self.network = Network()
        if not all_rules:
            return

        for rule in all_rules:
            # Verify the alerters
            for alerter in rule.alerters:
                if alerter not in self.alerters:
                    current_app.logger.error(
                        'No such alerter: "{0}"'.format(alerter))
                    # raise ValueError('No such alerter: "{0}"'.format(alerter))

            # Create the rule.
            try:
                self.network.parse_query(rule.conditions,
                                         alerters=rule.alerters,
                                         rule_id=rule.id)
            except Exception:
                current_app.logger.error(
                    'Failed to parse conditions for rule {0}'.format(rule.id))

        # Save the last updated date
        # Note: we do this here, and not in should_reload_rules, because it's
        # possible that we've reloaded a rule in between the two functions, and
        # thus we accidentally don't reload when we should.
        self.last_update = max(r.updated_at for r in all_rules)

    def check_for_ioc_matching(self, name, columns, node, uuid, capture_column):
        for intel in self.all_ioc_intels:
            if capture_column == intel.type and columns[capture_column] == intel.value:
                from polylogyx.utils import save_intel_alert
                save_intel_alert(data={}, source="ioc", query_name=name,
                                 severity=intel.severity, uuid=uuid,
                                 columns=columns, node_id=node['id'])
                break

    def check_for_iocs(self, name, columns, node, uuid):
        try:
            from polylogyx.constants import TO_CAPTURE_COLUMNS
            from polylogyx.models import ResultLogScan

            for capture_column in TO_CAPTURE_COLUMNS:
                if capture_column in columns and columns[capture_column]:
                    self.check_for_ioc_matching(name, columns, node, uuid,
                                                capture_column)
                    result_log_scan = ResultLogScan.query.filter(
                        ResultLogScan.scan_value == columns[capture_column]).first()
                    if not result_log_scan:
                        ResultLogScan.create(scan_value=columns[capture_column],
                                             scan_type=capture_column,
                                             reputations={})
                        break
        except Exception as e:
            current_app.logger.error(e)

    def handle_log_entry(self, entry, node):
        """ The actual entrypoint for handling input log entries. """
        from polylogyx.models import Rule, Settings
        from polylogyx.rules import RuleMatch

        self.load_rules()
        self.load_ioc_intels()

        to_trigger = []
        for result in entry:
            self.check_for_iocs(result['name'], result['columns'], node,
                                result['uuid'])
            alerts = self.network.process(result, node)
            if len(alerts) == 0:
                continue

            # `alerts` maps a rule id to its alerter names.  We convert these
            # into RuleMatch instances, which is what our alerters are
            # actually expecting.
            for rule_id, alerters in alerts.items():
                rule = Rule.get_by_id(rule_id)
                to_trigger.append((alerters,
                                   RuleMatch(rule=rule,
                                             result=result,
                                             node=node,
                                             alert_id=0)))

        # Now that we've collected all results, start triggering them.
        alert_aggr_duration_setting = Settings.query.filter(
            Settings.name == 'alert_aggregation_duration').first()
        if alert_aggr_duration_setting:
            alert_aggr_duration = int(alert_aggr_duration_setting.setting)
        else:
            alert_aggr_duration = 60

        for alerters, match in to_trigger:
            alert = self.save_in_db(match.result, match.node, match.rule,
                                    alert_aggr_duration)
            node['alert'] = alert
            for alerter in alerters:
                match = match._replace(alert_id=alert.id)
                self.alerters[alerter].handle_alert(node, match, None)

    def save_in_db(self, result_log_dict, node, rule, alert_aggr_duration):
        from polylogyx.models import Alerts, AlertLog

        # Aggregate into an existing alert for the same node and rule if one
        # was created within the aggregation window.
        existing_alert = Alerts.query.filter(
            Alerts.node_id == node['id']).filter(
            Alerts.rule_id == rule.id).filter(
            (dt.datetime.utcnow() - Alerts.created_at) <= dt.timedelta(
                seconds=alert_aggr_duration)).first()

        if existing_alert:
            AlertLog.create(name=result_log_dict['name'],
                            timestamp=result_log_dict['timestamp'],
                            action=result_log_dict['action'],
                            columns=result_log_dict['columns'],
                            alert_id=existing_alert.id,
                            result_log_uuid=result_log_dict['uuid'])
            db.session.commit()
            current_app.logger.info(
                'Aggregating the Alert with ID {0}..'.format(existing_alert.id))
            return existing_alert
        else:
            alerts_obj = Alerts(message=result_log_dict['columns'],
                                query_name=result_log_dict['name'],
                                result_log_uid=result_log_dict['uuid'],
                                node_id=node['id'],
                                rule_id=rule.id,
                                type=Alerts.RULE,
                                source="rule",
                                source_data={},
                                recon_queries=rule.recon_queries,
                                severity=rule.severity)
            alerts_obj = alerts_obj.save(alerts_obj)
            AlertLog.create(name=result_log_dict['name'],
                            timestamp=result_log_dict['timestamp'],
                            action=result_log_dict['action'],
                            columns=result_log_dict['columns'],
                            alert_id=alerts_obj.id,
                            result_log_uuid=result_log_dict['uuid'])
            db.session.commit()
            current_app.logger.info(
                'Creating a new Alert with ID {0}..'.format(alerts_obj.id))
            return alerts_obj
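# For reference, load_alerters() above expects POLYLOGYX_ALERTER_PLUGINS to
# map an alerter name to a (dotted class path, config dict) tuple.  A minimal
# sketch of one config entry -- the DebugAlerter path is an assumption, not
# taken from this file:
#
#   POLYLOGYX_ALERTER_PLUGINS = {
#       'debug': ('polylogyx.plugins.alerters.debug.DebugAlerter', {}),
#   }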
class RuleManager(object):
    def __init__(self, app=None):
        self.network = None
        self.last_update = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        self.load_alerters()

        # Save this instance on the app, so we have a way to get at it.
        app.rule_manager = self

    def load_alerters(self):
        """ Load the alerter plugin(s) specified in the app config. """
        from importlib import import_module
        from polylogyx.plugins import AbstractAlerterPlugin

        alerters = self.app.config.get('POLYLOGYX_ALERTER_PLUGINS', {})
        self.alerters = {}
        for name, (plugin, config) in alerters.items():
            package, classname = plugin.rsplit('.', 1)
            module = import_module(package)
            klass = getattr(module, classname, None)

            if klass is None:
                raise ValueError(
                    'Could not find a class named "{0}" in package "{1}"'.format(
                        classname, package))

            if not issubclass(klass, AbstractAlerterPlugin):
                raise ValueError(
                    '{0} is not a subclass of AbstractAlerterPlugin'.format(name))

            self.alerters[name] = klass(config)

    def should_reload_rules(self):
        """ Checks if we need to reload the set of rules. """
        from polylogyx.models import Rule

        if self.last_update is None:
            return True

        newest_rule = Rule.query.order_by(
            Rule.updated_at.desc()).limit(1).first()
        if newest_rule and self.last_update < newest_rule.updated_at:
            return True

        return False

    def load_rules(self):
        """ Load rules from the database. """
        from polylogyx.rules import Network
        from polylogyx.models import Rule

        if not self.should_reload_rules():
            return

        all_rules = list(Rule.query.filter(Rule.status != 'INACTIVE').all())

        self.network = Network()
        if not all_rules:
            return

        for rule in all_rules:
            # Verify the alerters
            for alerter in rule.alerters:
                if alerter not in self.alerters:
                    current_app.logger.error(
                        'No such alerter: "{0}"'.format(alerter))
                    # raise ValueError('No such alerter: "{0}"'.format(alerter))

            # Create the rule.
            try:
                self.network.parse_query(rule.conditions,
                                         alerters=rule.alerters,
                                         rule_id=rule.id)
            except Exception:
                current_app.logger.error(
                    'Failed to parse conditions for rule {0}'.format(rule.id))

        # Save the last updated date
        # Note: we do this here, and not in should_reload_rules, because it's
        # possible that we've reloaded a rule in between the two functions, and
        # thus we accidentally don't reload when we should.
        self.last_update = max(r.updated_at for r in all_rules)

    def handle_result_log_entry(self, entry):
        """ The actual entrypoint for handling stored result log entries. """
        from polylogyx.models import Node, Rule
        from polylogyx.rules import RuleMatch
        from polylogyx.utils import extract_result_logs

        self.load_rules()

        to_trigger = []
        for name, action, columns, timestamp, uuid, node_id in \
                extract_result_logs(entry):
            result = {
                'name': name,
                'action': action,
                'timestamp': timestamp,
                'columns': columns,
                # Carry the uuid with the result so each triggered alert
                # records its own result log uuid.
                'uuid': uuid,
            }
            node = Node.query.filter(Node.id == node_id).first().to_dict()
            alerts = self.network.process(result, node)
            if len(alerts) == 0:
                continue

            # `alerts` maps a rule id to its alerter names.  We convert these
            # into RuleMatch instances, which is what our alerters are
            # actually expecting.
            for rule_id, alerters in alerts.items():
                rule = Rule.get_by_id(rule_id)
                to_trigger.append((alerters,
                                   RuleMatch(rule=rule,
                                             result=result,
                                             node=node,
                                             alert_id=0)))

        # Now that we've collected all results, start triggering them.
        for alerters, match in to_trigger:
            alert = self.save_in_db(match.result['columns'],
                                    match.result['name'], match.node,
                                    match.rule, match.result['uuid'])
            node = match.node
            node['alert'] = alert
            for alerter in alerters:
                match = match._replace(alert_id=alert.id)
                self.alerters[alerter].handle_alert(node, match, None)

    def handle_log_entry(self, entry, node):
        """ The actual entrypoint for handling input log entries. """
        from polylogyx.models import Rule
        from polylogyx.rules import RuleMatch
        from polylogyx.utils import extract_results

        self.load_rules()

        to_trigger = []
        for name, action, columns, timestamp, uuid in extract_results(entry):
            result = {
                'name': name,
                'action': action,
                'timestamp': timestamp,
                'columns': columns,
                # Carry the uuid with the result so each triggered alert
                # records its own result log uuid.
                'uuid': uuid,
            }
            alerts = self.network.process(result, node)
            if len(alerts) == 0:
                continue

            # `alerts` maps a rule id to its alerter names.  We convert these
            # into RuleMatch instances, which is what our alerters are
            # actually expecting.
            for rule_id, alerters in alerts.items():
                rule = Rule.get_by_id(rule_id)
                to_trigger.append((alerters,
                                   RuleMatch(rule=rule,
                                             result=result,
                                             node=node,
                                             alert_id=0)))

        # Now that we've collected all results, start triggering them.
        for alerters, match in to_trigger:
            alert = self.save_in_db(match.result['columns'],
                                    match.result['name'], match.node,
                                    match.rule, match.result['uuid'])
            node['alert'] = alert
            for alerter in alerters:
                match = match._replace(alert_id=alert.id)
                self.alerters[alerter].handle_alert(node, match, None)

    def save_in_db(self, message, query_name, node, rule, uuid):
        from polylogyx.models import Alerts

        alerts_obj = Alerts(message=message,
                            query_name=query_name,
                            result_log_uid=uuid,
                            node_id=node['id'],
                            rule_id=rule.id,
                            type=Alerts.RULE,
                            source="rule",
                            source_data={},
                            recon_queries=rule.recon_queries,
                            severity=rule.severity)
        alerts_obj.save(alerts_obj)
        return alerts_obj
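# A minimal sketch of wiring the manager into a Flask app, following the
# init_app() contract above (the empty config value shown is an assumption):
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   app.config['POLYLOGYX_ALERTER_PLUGINS'] = {}
#   rule_manager = RuleManager(app)  # calls init_app(), sets app.rule_manager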
def test_will_alert(self, node):
    query = json.loads("""
    {
      "condition": "AND",
      "rules": [
        {
          "id": "query_name",
          "field": "query_name",
          "type": "string",
          "input": "text",
          "operator": "begins_with",
          "value": "packs/osx-attacks/"
        },
        {
          "id": "action",
          "field": "action",
          "type": "string",
          "input": "text",
          "operator": "equal",
          "value": "added"
        },
        {
          "id": "column",
          "field": "column",
          "type": "string",
          "input": "text",
          "operator": "column_equal",
          "value": ["name", "com.whitesmoke.uploader.plist"]
        }
      ]
    }""")

    network = Network()
    network.parse_query(query, alerters=['debug'], rule_id=1)
    network.parse_query(query, alerters=['debug'], rule_id=2)
    network.parse_query(query, alerters=['debug'], rule_id=3)

    now = dt.datetime.utcnow()

    # Should trigger the top-level alert, above
    bad_input = {
        'name': 'packs/osx-attacks/Whitesmoke',
        'action': 'added',
        'timestamp': now,
        'columns': {
            'path': '/LaunchAgents/com.whitesmoke.uploader.plist',
            'name': 'com.whitesmoke.uploader.plist',
            # Remainder omitted
        },
    }

    # Should *not* trigger the alert, above.
    good_input = {
        'name': 'other-query',
        'action': 'added',
        'timestamp': now,
        'columns': {
            'a_column': 'the_value',
        },
    }

    alerts = network.process(good_input, node)
    assert len(alerts) == 0

    alerts = network.process(bad_input, node)
    assert sorted(alerts, key=lambda v: v[1]) == [('debug', 1), ('debug', 2),
                                                  ('debug', 3)]

    # Re-process the good input to assert that we don't continue to alert
    # on good input after a bad one...
    alerts = network.process(good_input, node)
    assert len(alerts) == 0

    # ... and that we *do* continue to alert on bad input.
    alerts = network.process(bad_input, node)
    assert sorted(alerts, key=lambda v: v[1]) == [('debug', 1), ('debug', 2),
                                                  ('debug', 3)]
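# The 'debug' alerter exercised above must satisfy the plugin contract that
# load_alerters() enforces: subclass AbstractAlerterPlugin and implement
# handle_alert(), which the managers invoke as handle_alert(node, match, None).
# A minimal sketch -- the logging body is an assumption, not taken from this
# file:
#
#   from polylogyx.plugins import AbstractAlerterPlugin
#
#   class DebugAlerter(AbstractAlerterPlugin):
#       def __init__(self, config):
#           self.config = config
#
#       def handle_alert(self, node, match, alert):
#           # Log the match; useful in tests and development.
#           print('ALERT rule={0} node={1}'.format(match.rule.id, node['id']))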