Example #1
0
    def test_processor(self, filter_rules, validate_only=False):
        """Run integration tests for the 'rule' Lambda function.

        Args:
            filter_rules (list|None): Specific rule names (or None) to restrict
                testing to. Passed in from the CLI via the --rules option.
            validate_only (bool): When True, only validation of test records is
                performed; the rules engine is not applied to any events.

        Yields:
            tuple (bool, list) or None: When testing rules, yields a tuple of
                the test status boolean and the list of alerts to run through
                the alert processor. When validating records only, nothing is
                yielded.
        """
        test_files = self._get_rule_test_files(filter_rules, validate_only)
        for rule_name, contents in test_files:
            # Exercise every record in the file against the applicable rule
            for idx, record in enumerate(contents.get('records')):
                self.total_tests += 1

                if not self.check_keys(rule_name, record):
                    self.all_tests_passed = False
                    continue

                self.apply_helpers(record)

                # Only the first record of a file prints the section header
                header_needed = (idx == 0)

                formatted = helpers.format_lambda_test_record(record)

                if validate_only:
                    self._validate_test_records(
                        rule_name, record, formatted, header_needed)
                    continue

                yield self._run_rule_tests(
                    rule_name, record, formatted, header_needed)

        # Report on the final test results
        self.report_output_summary()
Example #2
0
    def test_processor(self, rules):
        """Perform integration tests for the 'rule' Lambda function

        Args:
            rules [list or None]: Specific rule names (or None) to restrict
                testing to. This is passed in from the CLI using the --rules option.

        Returns:
            [generator] yields a tuple containig a boolean of test status and
                a list of alerts to run through the alert processor
        """
        all_tests_passed = True

        for rule_file, rule_name in get_rule_test_files(rules):
            with open(os.path.join(DIR_RULES, rule_file),
                      'r') as rule_file_handle:
                try:
                    contents = json.load(rule_file_handle)
                except (ValueError, TypeError) as err:
                    all_tests_passed = False
                    message = 'Improperly formatted file - {}: {}'.format(
                        type(err).__name__, err)
                    self.rules_fail_pass_warn[2].append((rule_name, message))
                    continue

            test_records = contents.get('records')
            if not test_records:
                all_tests_passed = False
                self.rules_fail_pass_warn[2].append(
                    (rule_name, 'No records to test in file'))
                continue

            print_header = True
            # Go over the records and test the applicable rule
            for test_record in test_records:
                if not self.check_keys(rule_name, test_record):
                    all_tests_passed = False
                    continue

                self.apply_helpers(test_record)

                # Run tests on the formatted record
                alerts, expected_alerts = self.test_rule(
                    rule_name, test_record,
                    helpers.format_lambda_test_record(test_record))

                current_test_passed = len(alerts) == expected_alerts

                # Print rule name for section header, but only if we get
                # to a point where there is a record to actually be tested.
                # This avoids potentialy blank sections
                if print_header:
                    if alerts or self.print_output:
                        print '\n{}'.format(rule_name)
                        print_header = not print_header

                if self.print_output:
                    report_output([
                        current_test_passed,
                        '[trigger={}]'.format(expected_alerts), 'rule',
                        test_record['service'], test_record['description']
                    ])

                all_tests_passed = current_test_passed and all_tests_passed

                # yield the result and alerts back to caller
                yield all_tests_passed, alerts

                # Add the name of the rule to the applicable pass or fail list
                self.rules_fail_pass_warn[current_test_passed].append(
                    (rule_name,
                     'Rule failure: {}'.format(test_record['description'])))

        # Report on the final test results
        self.report_output_summary()
Example #3
0
    def test_processor(self, rules_filter, files_filter, validate_only):
        """Run integration tests for the 'rule' Lambda function.

        Args:
            rules_filter (set): A collection of rules to filter on, passed in
                by the user via the CLI using the --test-rules option.
            files_filter (set): A collection of files to filter on, passed in
                by the user via the CLI using the --test-files option.
            validate_only (bool): When True, only validation of test records is
                performed; the rules engine is not applied to any events.

        Yields:
            tuple (bool, list) or None: When testing rules, yields a tuple of
                the test status boolean and the list of alerts to run through
                the alert processor. When validating records only, nothing is
                yielded.
        """
        test_file_info = self._filter_files(
            helpers.get_rule_test_files(TEST_EVENTS_DIR), files_filter)

        for name in sorted(test_file_info):
            events, error = helpers.load_test_file(test_file_info[name])
            if error is not None:
                self.all_tests_passed = False
                self.status_messages.append(
                    StatusMessage(StatusMessage.WARNING, error))
                continue

            print_header = True
            for test_event in events:
                self.total_tests += 1

                if self._detect_old_test_event(test_event):
                    self.all_tests_passed = False
                    message = (
                        'Detected old format for test event in file \'{}.json\'. '
                        'Please visit https://streamalert.io/rule-testing.html '
                        'for information on the new format and update your '
                        'test events accordingly.'.format(name))
                    self.status_messages.append(
                        StatusMessage(StatusMessage.FAILURE, message))
                    continue

                if not self.check_keys(test_event):
                    self.all_tests_passed = False
                    continue

                # Honor any rule filters in place: skip (and un-count) events
                # whose trigger rules do not intersect the requested set
                if rules_filter and set(
                        test_event['trigger_rules']).isdisjoint(rules_filter):
                    self.total_tests -= 1
                    continue

                self.apply_helpers(test_event)

                if 'override_record' in test_event:
                    self.apply_template(test_event)

                formatted_record = helpers.format_lambda_test_record(
                    test_event)

                # Schema-only validation (global flag or per-event flag) does
                # not yield rule test results back to the caller
                if validate_only or test_event.get('validate_schema_only'):
                    result = self._validate_test_record(
                        name, test_event, formatted_record, print_header)
                    if result is False:
                        self.all_tests_passed = False
                else:
                    yield self._run_rule_tests(name, test_event,
                                               formatted_record, print_header)

                print_header = False

        # Report on the final test results
        self.report_output_summary()