Example 1
    def _test_target(self, target):
        try:
            remote_dir = common.send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in common.iterate_over_rules():
                if not self._rule_should_be_tested(rule.id, target):
                    continue
                self._matching_rule_found = True
                if not xml_operations.find_rule_in_benchmark(
                        self.datastream, self.benchmark_id, rule.id):
                    logging.error(
                        "Rule '{0}' isn't present in benchmark '{1}' in '{2}'".
                        format(rule.id, self.benchmark_id, self.datastream))
                    continue
                remediation_available = self._is_remediation_available(rule)

                self._check_rule(rule, remote_dir, state,
                                 remediation_available)

        if not self._matching_rule_found:
            logging.error("No matching rule ID found for '{0}'".format(target))
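
The selection decision above is delegated to _rule_should_be_tested, which is not shown in this example. A minimal sketch of such a predicate, assuming target holds glob-style rule-ID patterns (an assumption about its shape, not the suite's actual logic):

import fnmatch

def rule_should_be_tested(rule_id, target):
    # Hypothetical predicate: test the rule if any pattern in `target`
    # glob-matches its ID. Only the (rule_id, target) -> bool contract
    # is taken from the example above; the matching logic itself is an
    # assumption.
    return any(fnmatch.fnmatch(rule_id, pattern) for pattern in target)

# rule_should_be_tested("sshd_set_idle_timeout", ["sshd_*"])  # -> True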
Example 2
    def _test_target(self, target):
        try:
            remote_dir = common.send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in common.iterate_over_rules():
                if rule.short_id not in target:
                    continue
                # In combined mode, substring matching is not expected:
                # every entry in the target is expected to be unique.
                # Remove matched targets so we can track rules that were
                # not tested.
                target.remove(rule.short_id)
                remediation_available = self._is_remediation_available(rule)
                self._check_rule(rule, remote_dir, state,
                                 remediation_available)

        if target:
            target.sort()
            logging.info("The following rule(s) were not tested:")
            for rule in target:
                logging.info("{0}".format(rule))
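
The removal in this variant doubles as bookkeeping: whatever is left in target after the loop is exactly the set of requested rules that never ran. A standalone toy run, with made-up short IDs, illustrates the idea:

# Made-up short IDs, purely for illustration.
available = ["accounts_tmout", "sshd_set_idle_timeout"]
target = ["accounts_tmout", "no_such_rule"]

for short_id in available:
    if short_id in target:
        target.remove(short_id)  # matched, so mark it as tested

print(sorted(target))  # ['no_such_rule'] -- requested but never tested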
Example 3
    def _get_rules_to_test(self, target):
        rules_to_test = []
        for rule in common.iterate_over_rules():
            if not self._rule_should_be_tested(rule, target):
                continue
            if not xml_operations.find_rule_in_benchmark(
                    self.datastream, self.benchmark_id, rule.id):
                logging.error(
                    "Rule '{0}' isn't present in benchmark '{1}' in '{2}'"
                    .format(rule.id, self.benchmark_id, self.datastream))
                continue
            rules_to_test.append(rule)
        return rules_to_test
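
One plausible consumer of this helper is a leaner _test_target built on top of it. The following rewrite of Example 1, reusing the same names (common, test_env, logging, and the other private methods), is a sketch of that refactor, not code taken from the project:

    def _test_target(self, target):
        try:
            remote_dir = common.send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            raise RuntimeError(
                "Unable to upload test scripts: {0}".format(exc))

        # Selection and benchmark filtering now live in the helper,
        # leaving only upload, state handling and the per-rule check here.
        rules_to_test = self._get_rules_to_test(target)
        if not rules_to_test:
            logging.error("No matching rule ID found for '{0}'".format(target))
            return

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in rules_to_test:
                remediation_available = self._is_remediation_available(rule)
                self._check_rule(rule, remote_dir, state,
                                 remediation_available)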