class VMTestEnv(TestEnv):
    name = "libvirt-based"

    def __init__(self, mode, hypervisor, domain_name):
        super(VMTestEnv, self).__init__(mode)

        self.domain = None
        self.hypervisor = hypervisor
        self.domain_name = domain_name
        self.snapshot_stack = None

        self._origin = None

    def start(self):
        self.domain = ssg_test_suite.virt.connect_domain(
            self.hypervisor, self.domain_name)
        self.snapshot_stack = SnapshotStack(self.domain)

        ssg_test_suite.virt.start_domain(self.domain)
        self.domain_ip = ssg_test_suite.virt.determine_ip(self.domain)

        self._origin = self._save_state("origin")

    def finalize(self):
        self._delete_saved_state(self._origin)
        # self.domain.shutdown()
        # logging.debug('Shut the domain off')

    def reset_state_to(self, state_name, new_running_state_name):
        last_snapshot_name = self.snapshot_stack.snapshot_stack[-1].getName()
        assert last_snapshot_name == state_name, (
            "You can only revert to the last snapshot, which is {0}, not {1}"
            .format(last_snapshot_name, state_name))
        state = self.snapshot_stack.revert(delete=False)
        return state

    def discard_running_state(self, state_handle):
        pass

    def _save_state(self, state_name):
        super(VMTestEnv, self)._save_state(state_name)
        state = self.snapshot_stack.create(state_name)
        return state

    def _delete_saved_state(self, snapshot):
        self.snapshot_stack.revert()

    def _local_oscap_check_base_arguments(self):
        return ['oscap-vm', "domain", self.domain_name, 'xccdf', 'eval']

    def offline_scan(self, args, verbose_path):
        command_list = self._local_oscap_check_base_arguments() + args
        return common.run_cmd_local(command_list, verbose_path)
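# VMTestEnv leans on SnapshotStack, which is defined elsewhere in the test
# suite and wraps libvirt domain snapshots. Below is a minimal sketch of the
# interface assumed above (create, revert with an optional delete flag,
# delete, clear, plus a public snapshot_stack list whose entries expose
# getName()); the bodies are illustrative placeholders, not the real
# implementation.
class SnapshotStackSketch(object):
    def __init__(self, domain):
        self.domain = domain        # libvirt domain handle
        self.snapshot_stack = []    # most recent snapshot is last

    def create(self, snapshot_name):
        # The real class creates a libvirt snapshot and pushes its handle;
        # here we only push the name.
        self.snapshot_stack.append(snapshot_name)
        return snapshot_name

    def revert(self, delete=True):
        # Revert the domain to the most recent snapshot; optionally drop it
        # from the stack as well.
        snapshot = self.snapshot_stack[-1]
        if delete:
            self.snapshot_stack.pop()
        return snapshot

    def delete(self):
        # Discard the most recent snapshot without reverting to it.
        self.snapshot_stack.pop()

    def clear(self):
        # Discard all remaining snapshots; suitable for atexit registration,
        # as the check functions below do.
        del self.snapshot_stack[:]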
def perform_profile_check(options):
    """Perform a profile check.

    Iterate over the profiles in the datastream and scan the unaltered VM
    with every profile selected by the input. Also perform a remediation
    run for each profile.

    There is no return value; the results are the textual output and the
    generated reports.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    # create origin
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)

    has_worked = False
    profiles = get_viable_profiles(options.target,
                                   options.datastream,
                                   options.benchmark_id)
    if len(profiles) > 1:
        # create origin <- profile
        snapshot_stack.create('profile')
    for profile in profiles:
        logging.info("Evaluation of profile {0}.".format(profile))
        has_worked = True
        runner = options.remediate_using
        ssg_test_suite.oscap.run_profile(domain_ip, profile, 'initial',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip, profile, 'remediation',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip, profile, 'final',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        # revert either profile (if created) or origin; don't delete
        snapshot_stack.revert(delete=False)
    if not has_worked:
        logging.error("Nothing has been tested!")
    # Delete the reverted profile or origin snapshot.
    snapshot_stack.delete()
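# Hypothetical invocation of perform_profile_check(), assuming an
# argparse-style namespace. The attribute names come from the function body
# above; the concrete values (connection URI, domain and datastream names)
# are illustrative only.
import argparse

example_options = argparse.Namespace(
    hypervisor='qemu:///system',        # libvirt connection URI
    domain_name='ssg-test-suite-vm',    # name of the test VM
    target='all',                       # profile selection, as understood
                                        # by get_viable_profiles()
    datastream='ssg-rhel7-ds.xml',
    benchmark_id='',
    remediate_using='oscap',
)
# perform_profile_check(example_options)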
class VMTestEnv(TestEnv):
    name = "libvirt-based"

    def __init__(self, hypervisor, domain_name):
        super(VMTestEnv, self).__init__()

        self.hypervisor = hypervisor
        self.domain_name = domain_name
        self.snapshot_stack = None

    def start(self):
        dom = ssg_test_suite.virt.connect_domain(self.hypervisor,
                                                 self.domain_name)
        self.snapshot_stack = SnapshotStack(dom)

        ssg_test_suite.virt.start_domain(dom)
        self.domain_ip = ssg_test_suite.virt.determine_ip(dom)

    def _get_snapshot(self, snapshot_name):
        return self.snapshot_stack.create(snapshot_name)

    def _revert_snapshot(self, snapshot):
        self.snapshot_stack.revert()
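# Hypothetical lifecycle for the environment class above; the connection URI
# and domain name are illustrative, and the actual scenario step is elided.
env = VMTestEnv('qemu:///system', 'ssg-test-suite-vm')
env.start()
snapshot = env._get_snapshot('before-test')
# ... run a test scenario against env.domain_ip over SSH ...
env._revert_snapshot(snapshot)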
def perform_rule_check(options):
    """Perform a rule check.

    Iterate over the rule-testing scenarios and use `oscap-ssh` to test
    every scenario. The expected result is encoded in the scenario file
    name. If a `fail` or `error` result is expected, continue with
    remediation and re-evaluation. Revert the system to a clean state
    using snapshots.

    There is no return value; the results are the textual output and the
    generated reports.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False

    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))

        # Split the scripts into helper scripts (non-standard names)
        # and scenario scripts.
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers.append(script)
            else:
                scenarios.append(script)

        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            snapshot_stack.create('script')
            # Copy all helper scripts, so the scenario script can use them.
            script_path = os.path.join(rule_dir, script)
            helper_paths = [os.path.join(rule_dir, helper)
                            for helper in helpers]
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)

            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                snapshot_stack.revert()
                continue

            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                if oscap.run_rule(domain_ip=domain_ip,
                                  profile=profile,
                                  stage="initial",
                                  datastream=options.datastream,
                                  benchmark_id=options.benchmark_id,
                                  rule_id=rule,
                                  context=script_context,
                                  script_name=script,
                                  remediation=False,
                                  dont_clean=options.dont_clean):
                    if script_context in ['fail', 'error']:
                        oscap.run_rule(domain_ip=domain_ip,
                                       profile=profile,
                                       stage="remediation",
                                       datastream=options.datastream,
                                       benchmark_id=options.benchmark_id,
                                       rule_id=rule,
                                       context='fixed',
                                       script_name=script,
                                       remediation=True,
                                       dont_clean=options.dont_clean)
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
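# A later revision of this function (below) factors the inline
# oscap.run_rule flow above into a run_rule_checks() helper and threads a
# remediation runner through to it. That helper is not part of this excerpt;
# a plausible sketch, reconstructed from the calls above (hypothetical, the
# real helper may differ):
def run_rule_checks(domain_ip, profile, datastream, benchmark_id, rule_id,
                    script_context, script_name, script_params, runner,
                    dont_clean):
    # Initial scan: the expected result is encoded in script_context.
    success = oscap.run_rule(domain_ip=domain_ip,
                             profile=profile,
                             stage="initial",
                             datastream=datastream,
                             benchmark_id=benchmark_id,
                             rule_id=rule_id,
                             context=script_context,
                             script_name=script_name,
                             remediation=False,
                             dont_clean=dont_clean)
    # An expected fail/error should be fixable, so remediate and re-check.
    if success and script_context in ['fail', 'error']:
        oscap.run_rule(domain_ip=domain_ip,
                       profile=profile,
                       stage="remediation",
                       datastream=datastream,
                       benchmark_id=benchmark_id,
                       rule_id=rule_id,
                       context='fixed',
                       script_name=script_name,
                       remediation=True,
                       dont_clean=dont_clean)
    # script_params and runner are accepted to match the call site but are
    # unused in this sketch; the real helper presumably uses them to select
    # the remediation method.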
def perform_rule_check(options):
    """Perform a rule check.

    Iterate over the rule-testing scenarios and use `oscap-ssh` to test
    every scenario. The expected result is encoded in the scenario file
    name. If a `fail` or `error` result is expected, continue with
    remediation and re-evaluation. Revert the system to a clean state
    using snapshots.

    There is no return value; the results are the textual output and the
    generated reports.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    # create origin
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False

    remote_dir = _send_scripts(domain_ip)
    if not remote_dir:
        return

    for rule_dir, rule, scripts in data.iterate_over_rules():
        remote_rule_dir = os.path.join(remote_dir, rule_dir)
        local_rule_dir = os.path.join(data.DATA_DIR, rule_dir)
        if not _matches_target(rule_dir, options.target):
            continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))

        for script, script_context, script_params in _get_scenarios(
                local_rule_dir, scripts):
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # create origin <- script
            snapshot_stack.create('script')
            has_worked = False

            if not _apply_script(remote_rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                # revert the script snapshot before skipping
                snapshot_stack.revert()
                continue

            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                # create origin <- script <- profile
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip, profile, options.datastream,
                    options.benchmark_id, rule, script_context, script,
                    script_params, options.remediate_using,
                    options.dont_clean,
                )
                # revert either profile (if created) or script; don't delete
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            # Delete the reverted profile or script snapshot.
            snapshot_stack.delete()
            if len(profiles) > 1:
                # revert script (profile was already reverted above)
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
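# _matches_target() and _get_scenarios() are used above but not defined in
# this excerpt. Minimal sketches consistent with the earlier, unfactored
# version of this function (hypothetical reconstructions; the real helpers
# may differ):
def _matches_target(rule_dir, targets):
    if 'ALL' in targets:
        # Evaluate every rule directory.
        return True
    return any(target in rule_dir for target in targets)


def _get_scenarios(rule_dir, scripts):
    # Yield (script, context, params) for every scenario script; scripts
    # without a recognized context are helpers and are skipped.
    for script in scripts:
        script_context = _get_script_context(script)
        if script_context is None:
            logging.debug('Skipping helper script {0}'.format(script))
            continue
        script_params = _parse_parameters(os.path.join(rule_dir, script))
        yield script, script_context, script_params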