def run_tests(options, context): """Actual protected function for running tests Args: options [namedtuple]: CLI options (debug, processor, etc) context [namedtuple]: A constructed aws context object """ # Instantiate two status items - one for the rule processor # and one for the alert processor rp_status, ap_status = True, True if options.debug: LOGGER_SA.setLevel(logging.DEBUG) LOGGER_SO.setLevel(logging.DEBUG) LOGGER_CLI.setLevel(logging.DEBUG) else: # Add a filter to suppress a few noisy log messages LOGGER_SA.addFilter(TestingSuppressFilter()) # Check if the rule processor should be run for these tests test_rules = (run_options.get('processor') in {'rule', 'all'} or run_options.get('command') == 'live-test') # Check if the alert processor should be run for these tests test_alerts = (run_options.get('processor') in {'alert', 'all'} or run_options.get('command') == 'live-test') rule_proc_tester = RuleProcessorTester(test_rules) # Run the rule processor for all rules or designated rule set for status, alerts in rule_proc_tester.test_processor(options.rules): # If the alert processor should be tested, pass any alerts to it # and store the status over time if test_alerts: # Update the overall alert processor status with the ongoing status ap_status = AlertProcessorTester(context).test_processor( alerts) and ap_status # Update the overall rule processor status with the ongoing status rp_status = status and rp_status # Report summary information for the alert processor if it was ran if test_alerts: AlertProcessorTester.report_output_summary() if not (rp_status and ap_status): sys.exit(1)
def run_tests(options, context): """Actual protected function for running tests Args: options [namedtuple]: CLI options (debug, processor, etc) context [namedtuple]: A constructed aws context object """ if options.debug: LOGGER_SA.setLevel(logging.DEBUG) LOGGER_SO.setLevel(logging.DEBUG) LOGGER_CLI.setLevel(logging.DEBUG) else: # Add a filter to suppress a few noisy log messages LOGGER_SA.addFilter(TestingSuppressFilter()) # Check if the rule processor should be run for these tests test_rules = (set(run_options.get('processor')).issubset({'rule', 'all'}) or run_options.get('command') == 'live-test') # Check if the alert processor should be run for these tests test_alerts = (set(run_options.get('processor')).issubset({'alert', 'all'}) or run_options.get('command') == 'live-test') rule_proc_tester = RuleProcessorTester(context, test_rules) alert_proc_tester = AlertProcessorTester(context) # Run the rule processor for all rules or designated rule set for alerts in rule_proc_tester.test_processor(options.rules): # If the alert processor should be tested, process any alerts if test_alerts: alert_proc_tester.test_processor(alerts) # Report summary information for the alert processor if it was ran if test_alerts: AlertProcessorTester.report_output_summary() # Print any invalid log messages that we accumulated over this run for message in rule_proc_tester.invalid_log_messages: LOGGER_CLI.error('%s%s%s', COLOR_RED, message, COLOR_RESET) if not (rule_proc_tester.all_tests_passed and alert_proc_tester.all_tests_passed and (not rule_proc_tester.invalid_log_messages)): sys.exit(1)
def stream_alert_test(options): """Integration testing handler Args: options: dict of CLI options: (func, env, source) """ if options.debug: LOGGER_SA.setLevel(logging.DEBUG) else: LOGGER_SA.setLevel(logging.INFO) if options.processor == 'rule': passed = test_alert_rules() elif options.processor == 'alert': # TODO(jack) test output raise NotImplementedError if not passed: os._exit(1)
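# A minimal sketch (not part of the project source) of how a handler such as
# run_tests might be driven from an argparse-based CLI. The flag names, the
# TestOptions namedtuple, and the hard-coded 'test' command below are
# illustrative assumptions, not the project's actual interface.
import argparse
from collections import namedtuple

TestOptions = namedtuple('TestOptions', ['debug', 'processor', 'rules', 'command'])


def _parse_test_args(args=None):
    """Parse hypothetical test flags into the options namedtuple consumed above"""
    parser = argparse.ArgumentParser(description='Run integration tests')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--processor', choices=['rule', 'alert', 'all'], default='all')
    parser.add_argument('--rules', nargs='*', default=[])
    namespace = parser.parse_args(args)
    # The 'command' field is fixed to 'test' here; 'live-test' would enable both processors
    return TestOptions(namespace.debug, namespace.processor, namespace.rules, 'test')


# Example usage (context construction omitted):
#   run_tests(_parse_test_args(['--processor', 'rule']), context)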