def _run_test(self, profile, test_data):
    """Run one rule scenario under *profile*; return True when it behaves as expected.

    Preloads the pass/fail log messages, runs the initial scan, and — when
    the scenario expects a 'fail'/'error' result and a remediation of the
    selected type exists — remediates and rescans.
    """
    scenario = test_data["scenario"]
    rule_id = test_data["rule_id"]
    # Preload one message per possible outcome; LogHelper later emits the
    # one matching the chosen log target ('pass' or 'fail').
    LogHelper.preload_log(
        logging.INFO,
        "Script {0} using profile {1} OK".format(scenario.script, profile),
        log_target='pass')
    LogHelper.preload_log(
        logging.ERROR,
        "Script {0} using profile {1} found issue:".format(scenario.script, profile),
        log_target='fail')
    # Pick the runner class matching the requested remediation mechanism.
    runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
    runner = runner_cls(
        self.test_env, profile, self.datastream, self.benchmark_id,
        rule_id, scenario.script, self.dont_clean, self.manual_debug)
    if not self._initial_scan_went_ok(runner, rule_id, scenario.context):
        return False
    supported_and_available_remediations = self._get_available_remediations(scenario)
    # Nothing to remediate when the scenario does not expect a failure, or
    # when no remediation of the selected type exists for this rule.
    if (scenario.context not in ['fail', 'error']
            or not supported_and_available_remediations):
        return True
    if not self._remediation_went_ok(runner, rule_id):
        return False
    return self._final_scan_went_ok(runner, rule_id)
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """Execute the generated bash remediation script on the remote machine.

    Returns False on error, or True in case of successful bash scripts run.
    """
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    # Fetch the generated remediation script for the logs; bail out if
    # the transfer fails.
    fetched = get_file_remote(
        test_env, verbose_path, LogHelper.LOG_DIR,
        '/' + formatting['output_file'])
    if not fetched:
        return False
    remediation_cmd = '/bin/bash -x /{output_file}'.format(**formatting)
    error_msg_template = (
        'Bash remediation for {rule_id} '.format(**formatting)
        + 'has exited with these errors: {stderr}')
    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(
                remediation_cmd, log_file,
                error_msg_template=error_msg_template)
        except Exception as exc:
            LogHelper.preload_log(logging.ERROR, str(exc), 'fail')
            return False
    return True
def _run_test(self, profile, test_data):
    """Run one rule scenario under *profile*; return True when it behaves as expected.

    Preloads the pass/fail log messages, runs the initial scan, and — when
    the scenario expects a 'fail'/'error' result and a remediation of the
    selected type exists — remediates and rescans.
    """
    scenario = test_data["scenario"]
    rule_id = test_data["rule_id"]
    # Preload one message per possible outcome; LogHelper later emits the
    # one matching the chosen log target ('pass' or 'fail').
    LogHelper.preload_log(logging.INFO,
                          "Script {0} using profile {1} OK".format(
                              scenario.script, profile),
                          log_target='pass')
    LogHelper.preload_log(
        logging.ERROR,
        "Script {0} using profile {1} found issue:".format(
            scenario.script, profile),
        log_target='fail')
    # Pick the runner class matching the requested remediation mechanism.
    runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
    runner = runner_cls(self.test_env, profile, self.datastream,
                        self.benchmark_id, rule_id, scenario.script,
                        self.dont_clean, self.manual_debug)
    if not self._initial_scan_went_ok(runner, rule_id, scenario.context):
        return False
    supported_and_available_remediations = self._get_available_remediations(
        scenario)
    # Nothing to remediate when the scenario does not expect a failure, or
    # when no remediation of the selected type exists for this rule.
    if (scenario.context not in ['fail', 'error']
            or not supported_and_available_remediations):
        return True
    if not self._remediation_went_ok(runner, rule_id):
        return False
    return self._final_scan_went_ok(runner, rule_id)
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """Run the generated Ansible playbook remediation against the remote machine.

    Generates the playbook remotely, fetches it for the logs, points it at
    the target host and executes ansible-playbook locally, appending its
    output to *verbose_path*.

    Returns False on error, or True in case of successful Ansible playbook
    run.  (Previous docstring wrongly described this as a bash scripts run.)
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    ansible_playbook_set_hosts(formatting['playbook'])
    # Fixed: '-u' 'root' was two adjacent string literals (a missing comma)
    # that concatenated into the single token '-uroot'.
    command = (
        'ansible-playbook',
        '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root',
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = run_cmd(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = ('Ansible playbook remediation run has '
               'exited with return code {} instead of expected 0'.format(
                   returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """Run the generated Ansible playbook remediation against the test environment.

    Generates the playbook remotely, fetches it for the logs, and executes
    ansible-playbook locally with the environment's extra SSH options,
    appending its output to *verbose_path*.

    Returns False on error, or True in case of successful Ansible playbook
    run.
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    # Fixed: '-u' 'root' was two adjacent string literals (a missing comma)
    # that concatenated into the single token '-uroot'.
    command = (
        'ansible-playbook', '-v',
        '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root',
        '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """Run the generated bash remediation script on the remote machine.

    Generates the script remotely, fetches it for the logs, and executes it
    over SSH, appending its output to *verbose_path*.

    Returns False on error, or True in case of successful bash scripts run.
    (Previous docstring wrongly described this as an Ansible playbook run.)
    """
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    command_string = '/bin/bash /{output_file}'.format(**formatting)
    returncode, output = run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    # Appends output of script execution to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = ('Bash script remediation run has exited with return code {} '
               'instead of expected 0'.format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
def _analyze_output_of_oscap_call(self): local_success = 1 # check expected result rule_result = self._find_rule_result_in_output() if rule_result == "notapplicable": msg = ( 'Rule {0} evaluation resulted in {1}' .format(self.rule_id, rule_result)) LogHelper.preload_log(logging.WARNING, msg, 'notapplicable') local_success = 2 return local_success if rule_result != self.context: local_success = 0 if rule_result == 'notselected': msg = ( 'Rule {0} has not been evaluated! ' 'Wrong profile selected in test scenario?' .format(self.rule_id)) else: msg = ( 'Rule evaluation resulted in {0}, ' 'instead of expected {1} during {2} stage ' .format(rule_result, self.context, self.stage) ) LogHelper.preload_log(logging.ERROR, msg, 'fail') return local_success
def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """Run the generated bash remediation script on the remote machine.

    Generates the script remotely, fetches it for the logs, and executes it
    over SSH, appending its output to *verbose_path*.

    Returns False on error, or True in case of successful bash scripts run.
    (Previous docstring wrongly described this as an Ansible playbook run.)
    """
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    command_string = '/bin/bash /{output_file}'.format(**formatting)
    returncode, output = run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    # Appends output of script execution to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Bash script remediation run has exited with return code {} '
            'instead of expected 0'.format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """Run the generated Ansible playbook remediation against the remote machine.

    Generates the playbook remotely, fetches it for the logs, points it at
    the target host and executes ansible-playbook locally, appending its
    output to *verbose_path*.

    Returns False on error, or True in case of successful Ansible playbook
    run.  (Previous docstring wrongly described this as a bash scripts run.)
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    ansible_playbook_set_hosts(formatting['playbook'])
    # Fixed: '-u' 'root' was two adjacent string literals (a missing comma)
    # that concatenated into the single token '-uroot'.
    command = (
        'ansible-playbook',
        '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root',
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = run_cmd(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
def make_oscap_call(self):
    """Scan the single rule via the test environment and check the exit code.

    Returns True when the scan exits with the return code expected for the
    scenario context, False otherwise.
    """
    self.prepare_online_scanning_arguments()
    self._generate_report_file()
    self.command_options.extend(['--rule', self.rule_id])
    full_command = self.command_options + self.command_operands
    returncode, self._oscap_output = self.environment.scan(
        full_command, self.verbose_path)
    expected_return_code = _CONTEXT_RETURN_CODES[self.context]
    if returncode == expected_return_code:
        return True
    msg = ('Scan has exited with return code {0}, '
           'instead of expected {1} during stage {2}'.format(
               returncode, expected_return_code, self.stage))
    LogHelper.preload_log(logging.ERROR, msg, 'fail')
    return False
def _analyze_output_of_oscap_call(self):
    """Check that the rule result reported by oscap matches the expected context."""
    # check expected result
    pattern = '{0}:(.*)$'.format(self.rule_id)
    actual_results = re.findall(pattern, self._oscap_output, re.MULTILINE)
    if not actual_results:
        msg = ('Rule {0} has not been evaluated! Wrong profile selected?'.
               format(self.rule_id))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    if self.context in actual_results:
        return True
    LogHelper.preload_log(
        logging.ERROR,
        ('Rule result should have been '
         '"{0}", but is "{1}"!').format(
            self.context, ', '.join(actual_results)),
        'fail')
    return False
def make_oscap_call(self):
    """Scan the single rule over oscap-ssh and check the exit code.

    Returns True when the scan exits with the return code expected for the
    scenario context, False otherwise.
    """
    self.prepare_oscap_ssh_arguments()
    self._generate_report_file()
    self.command_options.extend(['--rule', self.rule_id])
    returncode, self._oscap_output = run_cmd(
        self.get_command, self.verbose_path)
    expected = _CONTEXT_RETURN_CODES[self.context]
    if returncode == expected:
        return True
    msg = (
        'Scan has exited with return code {0}, '
        'instead of expected {1} during stage {2}'
        .format(returncode, expected, self.stage)
    )
    LogHelper.preload_log(logging.ERROR, msg, 'fail')
    return False
def _analyze_output_of_oscap_call(self):
    """Verify that the rule result found in oscap output equals the expected context."""
    # check expected result
    rule_pattern = '{0}:(.*)$'.format(self.rule_id)
    found_results = re.findall(
        rule_pattern, self._oscap_output, re.MULTILINE)
    if not found_results:
        LogHelper.preload_log(
            logging.ERROR,
            'Rule {0} has not been evaluated! Wrong profile selected?'
            .format(self.rule_id),
            'fail')
        return False
    if self.context not in found_results:
        LogHelper.preload_log(
            logging.ERROR,
            ('Rule result should have been '
             '"{0}", but is "{1}"!'
             ).format(self.context, ', '.join(found_results)),
            'fail')
        return False
    return True
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """Execute the generated bash remediation script over SSH.

    Returns False on error, or True in case of successful bash scripts run.
    """
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    # Fetch the generated remediation script for the logs; bail out if
    # the transfer fails.
    remediation_fetched = get_file_remote(
        test_env, verbose_path, LogHelper.LOG_DIR,
        '/' + formatting['output_file'])
    if not remediation_fetched:
        return False
    bash_command = '/bin/bash -x /{output_file}'.format(**formatting)
    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(bash_command, log_file)
        except Exception as exc:
            # NOTE(review): assumes the raised exception carries a
            # 'returncode' attribute — confirm against execute_ssh_command.
            msg = (
                'Bash script remediation run has exited with return code {} '
                'instead of expected 0'.format(exc.returncode))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
    return True
def _run_test(self, profile, test_data):
    """Run one rule scenario under *profile*; return True when it behaves as expected.

    Handles three initial-scan outcomes (ok / failed / notapplicable) and
    only attempts remediation when one is available for the rule.
    """
    scenario = test_data["scenario"]
    rule_id = test_data["rule_id"]
    remediation_available = test_data["remediation_available"]
    # Preload one message per possible outcome; LogHelper later emits the
    # one matching the chosen log target.
    LogHelper.preload_log(logging.INFO,
                          "Script {0} using profile {1} OK".format(
                              scenario.script, profile),
                          log_target='pass')
    LogHelper.preload_log(
        logging.WARNING,
        "Script {0} using profile {1} notapplicable".format(
            scenario.script, profile),
        log_target='notapplicable')
    LogHelper.preload_log(
        logging.ERROR,
        "Script {0} using profile {1} found issue:".format(
            scenario.script, profile),
        log_target='fail')
    # Pick the runner class matching the requested remediation mechanism;
    # the runner is a context manager, so setup/teardown happen around the
    # scans.
    runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
    runner_instance = runner_cls(self.test_env,
                                 oscap.process_profile_id(profile),
                                 self.datastream, self.benchmark_id,
                                 rule_id, scenario.script,
                                 self.dont_clean, self.no_reports,
                                 self.manual_debug)
    with runner_instance as runner:
        # _initial_scan_went_ok returns a falsy value on failure, 2 for
        # 'notapplicable', and another truthy value for a plain pass.
        initial_scan_res = self._initial_scan_went_ok(
            runner, rule_id, scenario.context)
        if not initial_scan_res:
            return False
        if initial_scan_res == 2:
            # notapplicable
            return True
        supported_and_available_remediations = self._get_available_remediations(
            scenario)
        # Nothing to remediate when the scenario does not expect a failure,
        # or when no remediation of the selected type exists for this rule.
        if (scenario.context not in ['fail', 'error']
                or not supported_and_available_remediations):
            return True
        if remediation_available:
            if not self._remediation_went_ok(runner, rule_id):
                return False
            return self._final_scan_went_ok(runner, rule_id)
        else:
            msg = ("No remediation is available for rule '{}'.".format(
                rule_id))
            logging.warning(msg)
            return False
def run_rule(domain_ip, profile, stage, datastream, benchmark_id, rule_id, context, script_name, remediation=False, dont_clean=False): """Run `oscap-ssh` command with provided parameters to check given rule, utilizing --rule option. Log output to LogHelper.LOG_DIR directory. Return True if result is as expected by context parameter. Check both exit code and output message. """ formatting = { 'domain_ip': domain_ip, 'profile': profile, 'datastream': datastream, 'benchmark_id': benchmark_id, 'rule_id': rule_id } formatting['rem'] = "--remediate" if remediation else "" report_path = os.path.join( LogHelper.LOG_DIR, '{0}-{1}-{2}'.format(rule_id, script_name, stage)) verbose_path = os.path.join( LogHelper.LOG_DIR, '{0}-{1}-{2}'.format(rule_id, script_name, stage)) formatting['report'] = LogHelper.find_name(report_path, '.html') verbose_path = LogHelper.find_name(verbose_path, '.verbose.log') command = shlex.split(('oscap-ssh root@{domain_ip} 22 xccdf eval ' '--benchmark-id {benchmark_id} ' '--profile {profile} ' '--progress --oval-results ' '--rule {rule_id} ' '--report {report} ' '--verbose DEVEL ' '{rem} ' '{datastream}').format(**formatting)) logging.debug('Running ' + ' '.join(command)) success = True # check expected return code expected_return_code = _CONTEXT_RETURN_CODES[context] try: with open(verbose_path, 'w') as verbose_file: output = subprocess.check_output(command, stderr=verbose_file) except subprocess.CalledProcessError, e: if e.returncode != expected_return_code: LogHelper.preload_log(logging.ERROR, ('Scan has exited with return code {0}, ' 'instead of expected {1} ' 'during stage {2}').format( e.returncode, expected_return_code, stage), 'fail') success = False output = e.output
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and utilize `oscap-ssh` to test
    every scenario. Expected result is encoded in scenario file name. In
    case of `fail` or `error` results expected, continue with remediation
    and reevaluation. Revert system to clean state using snapshots.

    Return value not defined, textual output and generated reports is the
    result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    # Make sure any leftover snapshots are removed even on abnormal exit.
    atexit.register(snapshot_stack.clear)
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            # Only test rules whose directory matches one of the requested
            # target substrings.
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]
        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # Snapshot before the scenario script mutates the system.
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)
            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                snapshot_stack.revert()
                continue
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            # Only stack an extra snapshot when several profiles will be
            # tested against the same prepared system.
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip, profile, options.datastream,
                    options.benchmark_id, rule, script_context, script,
                    script_params, options.remediate_using,
                    options.dont_clean,
                )
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and utilize `oscap-ssh` to test
    every scenario. Expected result is encoded in scenario file name. In
    case of `fail` or `error` results expected, continue with remediation
    and reevaluation. Revert system to clean state using snapshots.

    Return value not defined, textual output and generated reports is the
    result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    # Make sure any leftover snapshots are removed even on abnormal exit.
    atexit.register(snapshot_stack.clear)
    # create origin
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    # Test data is uploaded once; every rule refers to its subdirectory
    # of the remote copy.
    remote_dir = _send_scripts(domain_ip)
    if not remote_dir:
        return
    for rule_dir, rule, scripts in data.iterate_over_rules():
        remote_rule_dir = os.path.join(remote_dir, rule_dir)
        local_rule_dir = os.path.join(data.DATA_DIR, rule_dir)
        if not _matches_target(rule_dir, options.target):
            continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        for script, script_context, script_params in _get_scenarios(
                local_rule_dir, scripts):
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # create origin <- script
            snapshot_stack.create('script')
            has_worked = False
            if not _apply_script(remote_rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                # maybe revert script
                snapshot_stack.revert()
                continue
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                # create origin <- script <- profile
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip, profile, options.datastream,
                    options.benchmark_id, rule, script_context, script,
                    script_params, options.remediate_using,
                    options.dont_clean,
                )
                # revert either profile (if created), or script. Don't delete
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            # Delete the reverted profile or script.
            snapshot_stack.delete()
            if len(profiles) > 1:
                # revert script (we have reverted profile before).
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and utilize `oscap-ssh` to test
    every scenario. Expected result is encoded in scenario file name. In
    case of `fail` or `error` results expected, continue with remediation
    and reevaluation. Revert system to clean state using snapshots.

    Return value not defined, textual output and generated reports is the
    result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    # Make sure any leftover snapshots are removed even on abnormal exit.
    atexit.register(snapshot_stack.clear)
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            # Only test rules whose directory matches one of the requested
            # target substrings.
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]
        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # Snapshot before the scenario script mutates the system.
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)
            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                # Fixed: revert the 'script' snapshot and skip this scenario
                # instead of falling through and testing on a half-prepared
                # system (matches the behavior of the sibling versions of
                # this function).
                snapshot_stack.revert()
                continue
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            # Only stack an extra snapshot when several profiles will be
            # tested against the same prepared system.
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                # Initial scan; on the expected fail/error outcome, run the
                # remediation stage and expect 'fixed'.
                if oscap.run_rule(domain_ip=domain_ip,
                                  profile=profile,
                                  stage="initial",
                                  datastream=options.datastream,
                                  benchmark_id=options.benchmark_id,
                                  rule_id=rule,
                                  context=script_context,
                                  script_name=script,
                                  remediation=False,
                                  dont_clean=options.dont_clean):
                    if script_context in ['fail', 'error']:
                        oscap.run_rule(domain_ip=domain_ip,
                                       profile=profile,
                                       stage="remediation",
                                       datastream=options.datastream,
                                       benchmark_id=options.benchmark_id,
                                       rule_id=rule,
                                       context='fixed',
                                       script_name=script,
                                       remediation=True,
                                       dont_clean=options.dont_clean)
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
except subprocess.CalledProcessError, e: if e.returncode != expected_return_code: LogHelper.preload_log(logging.ERROR, ('Scan has exited with return code {0}, ' 'instead of expected {1} ' 'during stage {2}').format( e.returncode, expected_return_code, stage), 'fail') success = False output = e.output else: # success branch - command exited with return code 0 if expected_return_code != 0: LogHelper.preload_log(logging.ERROR, ('Scan has exited with return code 0, ' 'instead of expected {0} ' 'during stage {1}').format( expected_return_code, stage), 'fail') success = False # check expected result try: actual_results = re.findall('{0}:(.*)$'.format(rule_id), output, re.MULTILINE) except IndexError: LogHelper.preload_log(logging.ERROR, ('Rule {0} has not been ' 'evaluated! Wrong profile ' 'selected?').format(rule_id), 'fail') success = False else: