Example #1
    def _test_target(self, target):
        try:
            remote_dir = send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in data.iterate_over_rules():
                matched, target_matched = self._matches_target(
                    rule.directory, target)
                if not matched:
                    continue
                # In combined mode there are no expectations of matching substrings;
                # every entry in the target is expected to be unique.
                # Remove matched targets so that we can track rules that were not tested.
                target.remove(target_matched)
                self._check_rule(rule, remote_dir, state)

        if len(target) != 0:
            target.sort()
            logging.info("The following rule(s) were not tested:")
            for rule in target:
                logging.info("{0}".format(rule))
Example #2
    def _test_target(self, target):
        try:
            remote_dir = common.send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in data.iterate_over_rules():
                if not self._matches_target(rule.directory, target):
                    continue
                self._matching_rule_found = True
                if not xml_operations.find_rule_in_benchmark(
                        self.datastream, self.benchmark_id, rule.id):
                    logging.error(
                        "Rule '{0}' isn't present in benchmark '{1}' in '{2}'".
                        format(rule.id, self.benchmark_id, self.datastream))
                    return
                self._check_rule(rule, remote_dir, state)

        if not self._matching_rule_found:
            logging.error("No matching rule ID found for '{0}'".format(target))
Example #3
File: rule.py  Project: ncolyer/content
    def _prepare_environment(self, scenarios_by_rule):
        domain_ip = self.test_env.domain_ip
        try:
            self.remote_dir = common.send_scripts(self.test_env)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
            raise RuntimeError(msg)

        self._ensure_package_present_for_all_scenarios(scenarios_by_rule)
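
All three examples wrap the same call in the same error handling: common.send_scripts(...) (plain send_scripts in Example #1) is attempted, and on failure the RuntimeError is re-raised with a message that says what was being attempted. A minimal sketch of that shared pattern in isolation; the helper name upload_test_scripts is made up, and the callable is passed in as a parameter because the examples' import layout is not shown:

def upload_test_scripts(send_scripts, destination):
    # Shared pattern from the examples above: try the upload and, on failure,
    # re-raise RuntimeError with a message naming the operation that failed.
    # `send_scripts` stands for whatever callable the project provides
    # (common.send_scripts in Examples #2 and #3); `destination` is the
    # domain IP or test environment object passed to it.
    try:
        return send_scripts(destination)
    except RuntimeError as exc:
        msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
        # Explicit chaining keeps the original failure visible in the traceback;
        # the examples rely on Python 3's implicit chaining, which has the same effect.
        raise RuntimeError(msg) from exc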