Example #1
    def _test_target(self, target):
        try:
            remote_dir = common.send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in data.iterate_over_rules():
                if not self._matches_target(rule.directory, target):
                    continue
                self._matching_rule_found = True
                if not xml_operations.find_rule_in_benchmark(
                        self.datastream, self.benchmark_id, rule.id):
                    logging.error(
                        "Rule '{0}' isn't present in benchmark '{1}' in '{2}'".
                        format(rule.id, self.benchmark_id, self.datastream))
                    return
                self._check_rule(rule, remote_dir, state)

        if not self._matching_rule_found:
            logging.error("No matching rule ID found for '{0}'".format(target))
Example #2

    def _test_target(self, target):
        try:
            remote_dir = send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(
                more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(
                self.test_env, "tests_uploaded") as state:
            for rule in data.iterate_over_rules():
                matched, target_matched = self._matches_target(
                    rule.directory, target)
                if not matched:
                    continue
                # In combined mode there is no expectation of matching substrings;
                # every entry in the target is expected to be unique.
                # Remove matched targets, so we can track rules that were not tested.
                target.remove(target_matched)
                self._check_rule(rule, remote_dir, state)

        if len(target) != 0:
            target.sort()
            logging.info("The following rule(s) were not tested:")
            for rule in target:
                logging.info("{0}".format(rule))
Example #3

    def _test_target(self, target):
        remote_dir = _send_scripts(self.test_env.domain_ip)
        if not remote_dir:
            msg = "Unable to upload test scripts"
            raise RuntimeError(msg)

        self._matching_rule_found = False

        for rule in data.iterate_over_rules():
            self._check_rule(rule, remote_dir, target)

        if not self._matching_rule_found:
            logging.error("No matching rule ID found for '{0}'".format(target))
Example #4

    def _test_target(self, target):
        try:
            remote_dir = _send_scripts(self.test_env.domain_ip)
        except RuntimeError as exc:
            msg = "Unable to upload test scripts: {more_info}".format(more_info=str(exc))
            raise RuntimeError(msg)

        self._matching_rule_found = False

        with test_env.SavedState.create_from_environment(self.test_env, "tests_uploaded") as state:
            for rule in data.iterate_over_rules():
                if not _matches_target(rule.directory, target):
                    continue
                self._matching_rule_found = True
                self._check_rule(rule, remote_dir, state)

        if not self._matching_rule_found:
            logging.error("No matching rule ID found for '{0}'".format(target))
Example #6
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test every
    scenario. The expected result is encoded in the scenario file name. When a
    `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is not defined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]

        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)

            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script,
                                                    profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script,
                                                              profile),
                                      log_target='fail')
                has_worked = True
                if oscap.run_rule(domain_ip=domain_ip,
                                  profile=profile,
                                  stage="initial",
                                  datastream=options.datastream,
                                  benchmark_id=options.benchmark_id,
                                  rule_id=rule,
                                  context=script_context,
                                  script_name=script,
                                  remediation=False,
                                  dont_clean=options.dont_clean):
                    if script_context in ['fail', 'error']:
                        oscap.run_rule(domain_ip=domain_ip,
                                       profile=profile,
                                       stage="remediation",
                                       datastream=options.datastream,
                                       benchmark_id=options.benchmark_id,
                                       rule_id=rule,
                                       context='fixed',
                                       script_name=script,
                                       remediation=True,
                                       dont_clean=options.dont_clean)
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
Example #7
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test every
    scenario. The expected result is encoded in the scenario file name. When a
    `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is not defined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]

        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)

            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                snapshot_stack.revert()
                continue
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script,
                                                    profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script,
                                                              profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip, profile, options.datastream,
                    options.benchmark_id, rule, script_context,
                    script, script_params, options.remediate_using,
                    options.dont_clean,
                )
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
Example #8
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test every
    scenario. The expected result is encoded in the scenario file name. When a
    `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is not defined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    # create origin
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False

    remote_dir = _send_scripts(domain_ip)
    if not remote_dir:
        return

    for rule_dir, rule, scripts in data.iterate_over_rules():
        remote_rule_dir = os.path.join(remote_dir, rule_dir)
        local_rule_dir = os.path.join(data.DATA_DIR, rule_dir)
        if not _matches_target(rule_dir, options.target):
            continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))

        for script, script_context, script_params in _get_scenarios(
                local_rule_dir, scripts):
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # create origin <- script
            snapshot_stack.create('script')
            has_worked = False

            if not _apply_script(remote_rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                # maybe revert script
                snapshot_stack.revert()
                continue
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                # create origin <- script <- profile
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip,
                    profile,
                    options.datastream,
                    options.benchmark_id,
                    rule,
                    script_context,
                    script,
                    script_params,
                    options.remediate_using,
                    options.dont_clean,
                )
                # revert either profile (if created), or script. Don't delete
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            # Delete the reverted profile or script.
            snapshot_stack.delete()
            if len(profiles) > 1:
                # revert script (we have reverted profile before).
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))