def cli_runner(options):
    """Main Stream Alert CLI handler

    Args:
        options (dict): command line arguments passed from the argparser.
            Contains the following keys for terraform commands:
                (command, subcommand, target)
            Contains the following keys for lambda commands:
                (command, subcommand, env, func, source)
    """
    cli_load_message = ('Issues? Report here: '
                        'https://github.com/airbnb/streamalert/issues')
    LOGGER_CLI.info(cli_load_message)

    if options.debug:
        LOGGER_CLI.setLevel('DEBUG')

    # Dispatch table for commands whose handler takes only the options
    # namespace; 'live-test' is handled separately because it also
    # needs the loaded CONFIG.
    handlers = {
        'output': configure_output,
        'lambda': lambda_handler,
        'terraform': terraform_handler,
        'configure': configure_handler,
        'athena': athena_handler,
    }

    handler = handlers.get(options.command)
    if handler is not None:
        handler(options)
    elif options.command == 'live-test':
        stream_alert_test(options, CONFIG)
def cli_runner(options):
    """Main Stream Alert CLI handler

    Args:
        options (dict): command line arguments passed from the argparser.
            Contains the following keys for terraform commands:
                (command, subcommand, target)
            Contains the following keys for lambda commands:
                (command, subcommand, env, func, source)
    """
    cli_load_message = 'Issues? Report here: https://github.com/airbnb/streamalert/issues'
    LOGGER_CLI.info(cli_load_message)

    if options.debug:
        LOGGER_CLI.setLevel('DEBUG')

    # Handlers that are invoked with the options namespace only
    options_only = {
        'output': configure_output,
        'configure': configure_handler,
        'metrics': _toggle_metrics,
        'create-alarm': _create_alarm,
        'app': _app_integration_handler,
    }

    # Handlers that additionally receive the loaded CONFIG
    with_config = {
        'lambda': lambda_handler,
        'live-test': stream_alert_test,
        'validate-schemas': stream_alert_test,
        'terraform': terraform_handler,
        'athena': athena_handler,
        'kinesis': kinesis_handler,
        'threat_intel': _threat_intel_handler,
        'threat_intel_downloader': threat_intel_downloader_handler,
        'rule-table': rule_table_handler,
    }

    # Unknown commands fall through as a no-op, matching the original
    # elif chain (the argparser restricts the accepted values upstream).
    if options.command in options_only:
        options_only[options.command](options)
    elif options.command in with_config:
        with_config[options.command](options, CONFIG)
def run_tests(options, context):
    """Actual protected function for running tests

    Args:
        options (namedtuple): CLI options (debug, processor, etc)
        context (namedtuple): A constructed aws context object

    Exits:
        Calls sys.exit(1) if any rule processor or alert processor
        test fails.
    """
    # Instantiate two status items - one for the rule processor
    # and one for the alert processor
    rp_status, ap_status = True, True

    if options.debug:
        LOGGER_SA.setLevel(logging.DEBUG)
        LOGGER_SO.setLevel(logging.DEBUG)
        LOGGER_CLI.setLevel(logging.DEBUG)
    else:
        # Add a filter to suppress a few noisy log messages
        LOGGER_SA.addFilter(TestingSuppressFilter())

    # BUG FIX: the original referenced an undefined name `run_options`
    # (and called `.get()` on it, although `options` is documented as a
    # namedtuple), which raised NameError before any test ran. Use
    # attribute access with a default, since not every command defines
    # a `processor` attribute.
    processor = getattr(options, 'processor', None)
    command = getattr(options, 'command', None)

    # Check if the rule processor should be run for these tests
    test_rules = processor in {'rule', 'all'} or command == 'live-test'

    # Check if the alert processor should be run for these tests
    test_alerts = processor in {'alert', 'all'} or command == 'live-test'

    rule_proc_tester = RuleProcessorTester(test_rules)
    # Run the rule processor for all rules or designated rule set
    for status, alerts in rule_proc_tester.test_processor(options.rules):
        # If the alert processor should be tested, pass any alerts to it
        # and store the status over time
        if test_alerts:
            # Update the overall alert processor status with the ongoing status
            ap_status = AlertProcessorTester(context).test_processor(
                alerts) and ap_status

        # Update the overall rule processor status with the ongoing status
        rp_status = status and rp_status

    # Report summary information for the alert processor if it was ran
    if test_alerts:
        AlertProcessorTester.report_output_summary()

    if not (rp_status and ap_status):
        sys.exit(1)
def run_tests(options, context):
    """Actual protected function for running tests

    Args:
        options (namedtuple): CLI options (debug, processor, etc)
        context (namedtuple): A constructed aws context object

    Exits:
        Calls sys.exit(1) if any rule/alert processor test fails or
        any invalid log messages were accumulated during the run.
    """
    if options.debug:
        LOGGER_SA.setLevel(logging.DEBUG)
        LOGGER_SO.setLevel(logging.DEBUG)
        LOGGER_CLI.setLevel(logging.DEBUG)
    else:
        # Add a filter to suppress a few noisy log messages
        LOGGER_SA.addFilter(TestingSuppressFilter())

    # BUG FIX: the original referenced the undefined name `run_options`
    # (NameError), and `set(run_options.get('processor'))` raised
    # TypeError whenever `processor` was absent/None (the 'live-test'
    # path). Normalize to a set first; a missing processor yields the
    # empty set, which - like the original issubset() construct - makes
    # both checks truthy only via the issubset/command conditions.
    # NOTE(review): the issubset() usage implies `processor` is a LIST
    # of processor names here (a plain string would be split into
    # characters by set()) - confirm against the argparser definition.
    processors = set(getattr(options, 'processor', None) or [])
    command = getattr(options, 'command', None)

    # Check if the rule processor should be run for these tests
    test_rules = processors.issubset({'rule', 'all'}) or command == 'live-test'

    # Check if the alert processor should be run for these tests
    test_alerts = processors.issubset({'alert', 'all'}) or command == 'live-test'

    rule_proc_tester = RuleProcessorTester(context, test_rules)
    alert_proc_tester = AlertProcessorTester(context)
    # Run the rule processor for all rules or designated rule set
    for alerts in rule_proc_tester.test_processor(options.rules):
        # If the alert processor should be tested, process any alerts
        if test_alerts:
            alert_proc_tester.test_processor(alerts)

    # Report summary information for the alert processor if it was ran
    if test_alerts:
        AlertProcessorTester.report_output_summary()

    # Print any invalid log messages that we accumulated over this run
    for message in rule_proc_tester.invalid_log_messages:
        LOGGER_CLI.error('%s%s%s', COLOR_RED, message, COLOR_RESET)

    if not (rule_proc_tester.all_tests_passed
            and alert_proc_tester.all_tests_passed
            and (not rule_proc_tester.invalid_log_messages)):
        sys.exit(1)