Example #1
    def run_tests(options, context):
        """Actual protected function for running tests

        Args:
            options (namedtuple): CLI options (debug, processor, etc)
            context (namedtuple): A constructed aws context object
        """
        # Instantiate two status items - one for the rule processor
        # and one for the alert processor
        rp_status, ap_status = True, True

        if options.debug:
            LOGGER_SA.setLevel(logging.DEBUG)
            LOGGER_SO.setLevel(logging.DEBUG)
            LOGGER_CLI.setLevel(logging.DEBUG)
        else:
            # Add a filter to suppress a few noisy log messages
            LOGGER_SA.addFilter(TestingSuppressFilter())

        # Check if the rule processor should be run for these tests
        test_rules = (run_options.get('processor') in {'rule', 'all'}
                      or run_options.get('command') == 'live-test')

        # Check if the alert processor should be run for these tests
        test_alerts = (run_options.get('processor') in {'alert', 'all'}
                       or run_options.get('command') == 'live-test')

        rule_proc_tester = RuleProcessorTester(test_rules)
        # Run the rule processor for all rules or designated rule set
        for status, alerts in rule_proc_tester.test_processor(options.rules):
            # If the alert processor should be tested, pass any alerts to it
            # and store the status over time
            if test_alerts:
                # Update the overall alert processor status with the ongoing status
                ap_status = (AlertProcessorTester(context).test_processor(alerts)
                             and ap_status)

            # Update the overall rule processor status with the ongoing status
            rp_status = status and rp_status

        # Report summary information for the alert processor if it was run
        if test_alerts:
            AlertProcessorTester.report_output_summary()

        if not (rp_status and ap_status):
            sys.exit(1)
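
These excerpts are nested functions: `run_options` (a dict built from the parsed CLI arguments, e.g. via `vars(options)` on an `argparse.Namespace`) and `config` come from an enclosing wrapper that is not shown here. `TestingSuppressFilter` is likewise defined elsewhere in the codebase; below is a minimal sketch of what such a `logging.Filter` subclass could look like, with a hypothetical list of suppressed messages:

    import logging

    class TestingSuppressFilter(logging.Filter):
        """Drop a few noisy log records during test runs.

        The suppressed message list below is hypothetical; the real filter
        lives elsewhere in the StreamAlert codebase and is not shown in
        these excerpts.
        """
        SUPPRESSED_SUBSTRINGS = ('Starting download from S3', 'Completed download')

        def filter(self, record):
            # Returning False tells the logging framework to drop the record
            return not any(substring in record.getMessage()
                           for substring in self.SUPPRESSED_SUBSTRINGS)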
Example #2
    def run_tests(options, context):
        """Actual protected function for running tests

        Args:
            options (namedtuple): CLI options (debug, processor, etc)
            context (namedtuple): A constructed aws context object
        """
        if options.debug:
            LOGGER_SA.setLevel(logging.DEBUG)
            LOGGER_SO.setLevel(logging.DEBUG)
            LOGGER_CLI.setLevel(logging.DEBUG)
        else:
            # Add a filter to suppress a few noisy log messages
            LOGGER_SA.addFilter(TestingSuppressFilter())

        # Check if the rule processor should be run for these tests
        test_rules = (set(run_options.get('processor')).issubset({'rule', 'all'}) or
                      run_options.get('command') == 'live-test')

        # Check if the alert processor should be run for these tests
        test_alerts = (set(run_options.get('processor')).issubset({'alert', 'all'}) or
                       run_options.get('command') == 'live-test')

        rule_proc_tester = RuleProcessorTester(context, test_rules)
        alert_proc_tester = AlertProcessorTester(context)
        # Run the rule processor for all rules or designated rule set
        for alerts in rule_proc_tester.test_processor(options.rules):
            # If the alert processor should be tested, process any alerts
            if test_alerts:
                alert_proc_tester.test_processor(alerts)

        # Report summary information for the alert processor if it was run
        if test_alerts:
            AlertProcessorTester.report_output_summary()

        # Print any invalid log messages that we accumulated over this run
        for message in rule_proc_tester.invalid_log_messages:
            LOGGER_CLI.error('%s%s%s', COLOR_RED, message, COLOR_RESET)

        if not (rule_proc_tester.all_tests_passed and
                alert_proc_tester.all_tests_passed and
                (not rule_proc_tester.invalid_log_messages)):
            sys.exit(1)
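
Between revisions, the processor check changes from membership (`in`, Example #1) to `set.issubset` (here) and later to `not set.isdisjoint` (Examples #4 and #6). These are not equivalent once `processor` can carry multiple values; a quick comparison in plain Python:

    selected = {'rule', 'other'}

    # issubset: True only if EVERY selected value is in the allowed set
    print(selected.issubset({'rule', 'all'}))        # False

    # not isdisjoint: True if ANY selected value is in the allowed set
    print(not selected.isdisjoint({'rule', 'all'}))  # True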
Example #3
def stream_alert_test(options):
    """Integration testing handler

    Args:
        options (namedtuple): CLI options (func, env, source)
    """
    if options.debug:
        LOGGER_SA.setLevel(logging.DEBUG)
    else:
        LOGGER_SA.setLevel(logging.INFO)

    # Initialize to False so an unrecognized processor value exits with an
    # error below instead of raising a NameError on 'passed'
    passed = False
    if options.processor == 'rule':
        passed = test_alert_rules()

    elif options.processor == 'alert':
        # TODO(jack) test output
        raise NotImplementedError

    if not passed:
        os._exit(1)
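
This early version exits with `os._exit(1)`, which terminates the process immediately and skips `atexit` handlers, `finally` blocks, and stream flushing; the later revisions use `sys.exit(1)`, which raises `SystemExit` and unwinds normally. A small illustration of the difference:

    import atexit
    import sys

    atexit.register(lambda: print('cleanup ran'))

    sys.exit(1)     # raises SystemExit; 'cleanup ran' is printed on the way out
    # os._exit(1)   # would terminate here with no cleanup and no output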
Example #4
    def run_tests(options, context):
        """Actual protected function for running tests

        Args:
            options (namedtuple): CLI options (debug, processor, etc)
            context (namedtuple): A constructed aws context object
        """
        # The Rule Processor and Alert Processor need environment variables for many things
        prefix = config['global']['account']['prefix']
        alerts_table = '{}_streamalert_alerts'.format(prefix)
        os.environ['ALERT_PROCESSOR'] = '{}_streamalert_alert_processor'.format(prefix)
        os.environ['ALERTS_TABLE'] = alerts_table
        os.environ['AWS_DEFAULT_REGION'] = config['global']['account']['region']
        os.environ['CLUSTER'] = run_options.get('cluster') or ''

        if not options.debug:
            # Add a filter to suppress a few noisy log messages
            LOGGER_SA.addFilter(SuppressNoise())

        # Create an in-memory logging buffer used to cache all error messages
        log_mem_handler = get_log_memory_handler()

        # Check if the rule processor should be run for these tests
        # Using NOT set.isdisjoint will check to see if there are commonalities between
        # the options in 'processor' and {'rule', 'all'}
        test_rules = (not run_options.get('processor').isdisjoint({'rule', 'all'})
                      if run_options.get('processor') else
                      run_options.get('command') == 'live-test' or
                      run_options.get('command') == 'validate-schemas')

        # Check if the alert processor should be run for these tests
        # Using NOT set.isdisjoint will check to see if there are commonalities between
        # the options in 'processor' and {'alert', 'all'}
        test_alerts = (not run_options.get('processor').isdisjoint({'alert', 'all'})
                       if run_options.get('processor') else
                       run_options.get('command') == 'live-test')

        validate_schemas = options.command == 'validate-schemas'

        rules_filter = run_options.get('rules', {})
        files_filter = run_options.get('files', {})

        # Run the rule processor for all rules or designated rule set
        if context.mocked:
            helpers.setup_mock_alerts_table(alerts_table)
            # Mock S3 bucket for lookup tables testing
            helpers.mock_s3_bucket(config)

        rule_proc_tester = RuleProcessorTester(context, config, test_rules)
        alert_proc_tester = AlertProcessorTester(config, context)

        for _ in range(run_options.get('repeat', 1)):
            for alerts in rule_proc_tester.test_processor(
                    rules_filter, files_filter, validate_schemas):
                # If the alert processor should be tested, process any alerts
                if test_alerts:
                    alert_proc_tester.test_processor(alerts)

            # Report summary information for the alert processor if it was run
            if test_alerts:
                AlertProcessorTester.report_output_summary()

        all_test_rules = None
        if rules_filter:
            all_test_rules = helpers.get_rules_from_test_events(TEST_EVENTS_DIR)
            check_invalid_rules_filters(rules_filter, all_test_rules)

        # If this is not just a validation run and rule/file filters are not in
        # place, warn the user if there are test files without corresponding rules.
        # Also check all of the rule files to make sure they have tests configured
        if not (validate_schemas or rules_filter or files_filter):
            all_test_rules = all_test_rules or helpers.get_rules_from_test_events(TEST_EVENTS_DIR)
            check_untested_files(all_test_rules)
            check_untested_rules(all_test_rules)

        if not (rule_proc_tester.all_tests_passed
                and alert_proc_tester.all_tests_passed):
            return 1  # will exit with error

        # If there are any log records in the memory buffer, then errors occurred somewhere
        if log_mem_handler.buffer:
            # Release the MemoryHandler so we can do some other logging now
            logging.getLogger().removeHandler(log_mem_handler)
            LOGGER_CLI.error(
                '%sSee %d miscellaneous error(s) below '
                'that were encountered and may need to be addressed%s',
                COLOR_RED, len(log_mem_handler.buffer), COLOR_RESET)

            log_mem_handler.setTarget(LOGGER_CLI)
            log_mem_handler.flush()

            return 1  # will exit with error

        return 0  # will exit without error
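
`get_log_memory_handler` is not included in these excerpts, but the way it is used above (a `.buffer` attribute, `setTarget`, and `flush`) matches the standard library's `logging.handlers.MemoryHandler`. A plausible sketch; the capacity and level values are assumptions:

    import logging
    from logging.handlers import MemoryHandler

    def get_log_memory_handler():
        """Buffer ERROR records in memory so they can be replayed at the end.

        With target=None the records accumulate until setTarget()/flush()
        are called, which is exactly how the caller above uses the handler.
        """
        handler = MemoryHandler(capacity=1024, flushLevel=logging.CRITICAL, target=None)
        handler.setLevel(logging.ERROR)
        logging.getLogger().addHandler(handler)
        return handler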
Example #5
    def run_tests(options, context):
        """Actual protected function for running tests

        Args:
            options (namedtuple): CLI options (debug, processor, etc)
            context (namedtuple): A constructed aws context object
        """
        if options.debug:
            # TODO(jack): Currently there is no (clean) way to set
            #             the logger formatter to provide more verbose
            #             output in debug mode.  Running basicConfig twice
            #             does not actually change the formatter on the logger object.
            #             This functionality can be added during the logging refactor
            # Example Steps:
            #   call .shutdown() on the existing logger
            #   debug_formatter = logging.Formatter(
            #       '%(name)s [%(levelname)s]: [%(module)s.%(funcName)s] %(message)s')
            #   set the new logger to the formatter above
            for streamalert_logger in (LOGGER_SA, LOGGER_SH, LOGGER_SO,
                                       LOGGER_CLI):
                streamalert_logger.setLevel(logging.DEBUG)
        else:
            # Add a filter to suppress a few noisy log messages
            LOGGER_SA.addFilter(SuppressNoise())

        # Create an in-memory logging buffer used to cache all error messages
        log_mem_handler = get_log_memory_handler()

        # Check if the rule processor should be run for these tests
        test_rules = (set(run_options.get('processor')).issubset({'rule', 'all'})
                      if run_options.get('processor') else
                      run_options.get('command') == 'live-test' or
                      run_options.get('command') == 'validate-schemas')

        # Check if the alert processor should be run for these tests
        test_alerts = (set(run_options.get('processor')).issubset({'alert', 'all'})
                       if run_options.get('processor') else
                       run_options.get('command') == 'live-test')

        rule_proc_tester = RuleProcessorTester(context, test_rules)
        alert_proc_tester = AlertProcessorTester(context)

        validate_schemas = options.command == 'validate-schemas'

        filters = options.test_files if validate_schemas else options.rules

        # Run the rule processor for all rules or designated rule set
        for alerts in rule_proc_tester.test_processor(filters,
                                                      validate_schemas):
            # If the alert processor should be tested, process any alerts
            if test_alerts:
                alert_proc_tester.test_processor(alerts)

        # Report summary information for the alert processor if it was run
        if test_alerts:
            AlertProcessorTester.report_output_summary()

        # Check all of the rule files to make sure they have tests configured
        check_untested_rules()

        # If this is not just a validation run, then warn the user
        # if there are test files without corresponding rules
        if not validate_schemas:
            check_untested_files()

        if not (rule_proc_tester.all_tests_passed
                and alert_proc_tester.all_tests_passed):
            sys.exit(1)

        # If there are any log records in the memory buffer, then errors occurred somewhere
        if log_mem_handler.buffer:
            # Release the MemoryHandler so we can do some other logging now
            logging.getLogger().removeHandler(log_mem_handler)
            LOGGER_CLI.error(
                '%sSee %d miscellaneous error(s) below '
                'that were encountered and may need to be addressed%s',
                COLOR_RED, len(log_mem_handler.buffer), COLOR_RESET)

            log_mem_handler.setTarget(LOGGER_CLI)
            log_mem_handler.flush()

            sys.exit(1)
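
The TODO at the top of this version points out that re-running `logging.basicConfig` does not change the formatter on an already-configured logger. One way to do what its "Example Steps" describe is to replace the formatter on the logger's existing handlers directly; a hedged sketch using the format string from that comment:

    import logging

    def enable_debug_format(logger):
        """Swap in a more verbose formatter on an already-configured logger."""
        debug_formatter = logging.Formatter(
            '%(name)s [%(levelname)s]: [%(module)s.%(funcName)s] %(message)s')
        logger.setLevel(logging.DEBUG)
        for handler in logger.handlers:
            handler.setFormatter(debug_formatter)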
Example #6
    def run_tests(options, context):
        """Actual protected function for running tests

        Args:
            options (namedtuple): CLI options (debug, processor, etc)
            context (namedtuple): A constructed aws context object
        """
        # The Rule Processor and Alert Processor need environment variables for many things
        prefix = config['global']['account']['prefix']
        alerts_table = '{}_streamalert_alerts'.format(prefix)
        os.environ['ALERT_PROCESSOR'] = '{}_streamalert_alert_processor'.format(prefix)
        os.environ['ALERTS_TABLE'] = alerts_table
        os.environ['AWS_DEFAULT_REGION'] = config['global']['account']['region']
        os.environ['CLUSTER'] = run_options.get('cluster') or ''

        if options.debug:
            # TODO(jack): Currently there is no (clean) way to set
            #             the logger formatter to provide more verbose
            #             output in debug mode.  Running basicConfig twice
            #             does not actually change the formatter on the logger object.
            #             This functionality can be added during the logging refactor
            # Example Steps:
            #   call .shutdown() on the existing logger
            #   debug_formatter = logging.Formatter(
            #       '%(name)s [%(levelname)s]: [%(module)s.%(funcName)s] %(message)s')
            #   set the new logger to the formatter above
            for streamalert_logger in (LOGGER_SA, LOGGER_SH, LOGGER_SO, LOGGER_CLI):
                streamalert_logger.setLevel(logging.DEBUG)
        else:
            # Add a filter to suppress a few noisy log messages
            LOGGER_SA.addFilter(SuppressNoise())

        # Create an in-memory logging buffer used to cache all error messages
        log_mem_handler = get_log_memory_handler()

        # Check if the rule processor should be run for these tests
        # Using NOT set.isdisjoint will check to see if there are commonalities between
        # the options in 'processor' and {'rule', 'all'}
        test_rules = (not run_options.get('processor').isdisjoint({'rule', 'all'})
                      if run_options.get('processor') else
                      run_options.get('command') == 'live-test' or
                      run_options.get('command') == 'validate-schemas')

        # Check if the alert processor should be run for these tests
        # Using NOT set.isdisjoint will check to see if there are commonalities between
        # the options in 'processor' and {'alert', 'all'}
        test_alerts = (not run_options.get('processor').isdisjoint({'alert', 'all'})
                       if run_options.get('processor') else
                       run_options.get('command') == 'live-test')

        rule_proc_tester = RuleProcessorTester(context, config, test_rules)
        alert_proc_tester = AlertProcessorTester(config, context)

        validate_schemas = options.command == 'validate-schemas'

        rules_filter = run_options.get('rules', {})
        files_filter = run_options.get('files', {})

        # Run the rule processor for all rules or designated rule set
        if context.mocked:
            helpers.setup_mock_alerts_table(alerts_table)
        for alerts in rule_proc_tester.test_processor(rules_filter,
                                                      files_filter,
                                                      validate_schemas):
            # If the alert processor should be tested, process any alerts
            if test_alerts:
                alert_proc_tester.test_processor(alerts)

        # Report summary information for the alert processor if it was run
        if test_alerts:
            AlertProcessorTester.report_output_summary()

        all_test_rules = None
        if rules_filter:
            all_test_rules = helpers.get_rules_from_test_events(TEST_EVENTS_DIR)
            check_invalid_rules_filters(rules_filter, all_test_rules)

        # If this is not just a validation run and rule/file filters are not in
        # place, warn the user if there are test files without corresponding rules.
        # Also check all of the rule files to make sure they have tests configured
        if not (validate_schemas or rules_filter or files_filter):
            all_test_rules = all_test_rules or helpers.get_rules_from_test_events(TEST_EVENTS_DIR)
            check_untested_files(all_test_rules)
            check_untested_rules(all_test_rules)

        if not (rule_proc_tester.all_tests_passed and
                alert_proc_tester.all_tests_passed):
            sys.exit(1)

        # If there are any log records in the memory buffer, then errors occurred somewhere
        if log_mem_handler.buffer:
            # Release the MemoryHandler so we can do some other logging now
            logging.getLogger().removeHandler(log_mem_handler)
            LOGGER_CLI.error('%sSee %d miscellaneous error(s) below '
                             'that were encountered and may need to be addressed%s',
                             COLOR_RED, len(log_mem_handler.buffer), COLOR_RESET)

            log_mem_handler.setTarget(LOGGER_CLI)
            log_mem_handler.flush()

            sys.exit(1)
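
The helpers `check_invalid_rules_filters`, `check_untested_rules`, and `check_untested_files` are defined elsewhere in the codebase. Judging only from the call sites above, the filter check amounts to a set difference between the requested rule names and the rules found in the test events; a hypothetical reconstruction, reusing the module-level logger and color constants from the examples:

    def check_invalid_rules_filters(rules_filter, all_test_rules):
        """Warn about --rules values that match no rule in the test events.

        Hypothetical sketch based on the call sites above, not the actual
        StreamAlert helper.
        """
        invalid = set(rules_filter) - set(all_test_rules)
        if invalid:
            LOGGER_CLI.error('%sNo test events found for rule(s): %s%s',
                             COLOR_RED, ', '.join(sorted(invalid)), COLOR_RESET)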