Example No. 1
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    ansible_playbook_set_hosts(formatting['playbook'])
    command = ('ansible-playbook', '-i',
               '{0},'.format(formatting['domain_ip']),
               '-u', 'root', formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = run_cmd(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = ('Ansible playbook remediation run has '
               'exited with return code {} instead of expected 0'.format(
                   returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
Example No. 2
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    ansible_playbook_set_hosts(formatting['playbook'])
    command = (
        'ansible-playbook', '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root', formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = run_cmd(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
Example No. 3
def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful Ansible playbook
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash /{output_file}'.format(**formatting)
    returncode, output = run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    # Appends output of script execution to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Bash script remediation run has exited with return code {} '
            'instead of expected 0'.format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
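
The `run_cmd_remote` helper is not shown on this page. A minimal sketch of what it might look like, assuming key-based root SSH access and Python 3's `subprocess.run` (the project's actual implementation may differ):

import subprocess

def run_cmd_remote(command_string, domain_ip, verbose_path):
    # Run the command on the remote machine over SSH; stderr goes to the
    # verbose log file, stdout is captured and returned to the caller.
    machine = 'root@{0}'.format(domain_ip)
    with open(verbose_path, 'a') as verbose_file:
        result = subprocess.run(['ssh', machine, command_string],
                                stdout=subprocess.PIPE,
                                stderr=verbose_file)
    return result.returncode, result.stdout.decode('utf-8')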
Example No. 4
def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful Ansible playbook
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash /{output_file}'.format(**formatting)
    returncode, output = run_cmd_remote(command_string,
                                        formatting['domain_ip'], verbose_path)
    # Appends output of script execution to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = ('Bash script remediation run has exited with return code {} '
               'instead of expected 0'.format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
Example No. 5
def main():
    options = parse_args()

    log = logging.getLogger()
    # this is general logger level - needs to be
    # debug otherwise it cuts silently everything
    log.setLevel(logging.DEBUG)

    LogHelper.add_console_logger(log, options.loglevel)

    try:
        normalize_passed_arguments(options)
    except RuntimeError as exc:
        msg = "Error occurred during options normalization: {}".format(
            str(exc))
        logging.error(msg)
        sys.exit(1)
    # logging dir needs to be created based on other options
    # thus we have to postprocess it

    logging_dir = get_logging_dir(options)

    LogHelper.add_logging_dir(log, logging_dir)

    options.func(options)
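
The closing `options.func(options)` call follows the standard argparse subcommand pattern: each subparser registers its handler via `set_defaults(func=...)`. A minimal sketch of how `parse_args` might wire this up, with `perform_rule_check` as shown in later examples (the argument names here are illustrative, not the project's actual code):

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='subparser_name')

    rule_parser = subparsers.add_parser('rule')
    rule_parser.add_argument('target', nargs='+')
    # main() stays generic: it calls whatever handler the chosen
    # subcommand registered here.
    rule_parser.set_defaults(func=perform_rule_check)

    return parser.parse_args()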
Example No. 6
    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = []
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            for fname in self._filenames_to_clean_afterwards:
                os.remove(fname)

        if result:
            LogHelper.log_preloaded('pass')
        else:
            LogHelper.log_preloaded('fail')
        return result
Example No. 7
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(**formatting)

    with open(verbose_path, "a") as log_file:
        error_msg_template = (
            'Bash remediation for {rule_id} '.format(**formatting) +
            'has exited with these errors: {stderr}'
        )
        try:
            test_env.execute_ssh_command(
                command_string, log_file, error_msg_template=error_msg_template)
        except Exception as exc:
            LogHelper.preload_log(logging.ERROR, str(exc), 'fail')
            return False
    return True
Example No. 8
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful Ansible playbook
       run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i',
        '{0},'.format(formatting['domain_ip']), '-u', 'root',
        '--ssh-common-args={0}'.format(
            ' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
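
Note the trailing comma in `'{0},'.format(formatting['domain_ip'])`: it makes `ansible-playbook` treat the `-i` argument as an inline host list rather than an inventory file path. Joined together, the tuple produces a command line like the following (the IP, SSH option, and playbook name are placeholders):

command = (
    'ansible-playbook', '-v', '-i', '192.0.2.10,',
    '-u', 'root', '--ssh-common-args=-o StrictHostKeyChecking=no',
    'remediation-playbook.yml')
print(' '.join(command))
# ansible-playbook -v -i 192.0.2.10, -u root \
#     --ssh-common-args=-o StrictHostKeyChecking=no remediation-playbook.yml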
Example No. 9
    def _analyze_output_of_oscap_call(self):
        local_success = 1
        # check expected result
        rule_result = self._find_rule_result_in_output()

        if rule_result == "notapplicable":
            msg = (
                'Rule {0} evaluation resulted in {1}'
                .format(self.rule_id, rule_result))
            LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
            local_success = 2
            return local_success
        if rule_result != self.context:
            local_success = 0
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0}, '
                    'instead of expected {1} during {2} stage '
                    .format(rule_result, self.context, self.stage)
                )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success
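
Unlike the boolean-returning variants above, this method encodes a tri-state result as an integer; the `run_stage` variant in Example No. 24 interprets it when choosing a log target. Restated as a mapping (for clarity only, not project code):

# How run_stage (Example No. 24) maps the returned value to a log target:
RESULT_TO_LOG_TARGET = {
    1: 'pass',           # rule result matched the expected context
    2: 'notapplicable',  # rule evaluated to "notapplicable"
    0: 'fail',           # mismatch, including "notselected"
}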
Example No. 10
    def _run_test(self, profile, test_data):
        scenario = test_data["scenario"]
        rule_id = test_data["rule_id"]

        LogHelper.preload_log(logging.INFO,
                              "Script {0} using profile {1} OK".format(
                                  scenario.script, profile),
                              log_target='pass')
        LogHelper.preload_log(
            logging.ERROR,
            "Script {0} using profile {1} found issue:".format(
                scenario.script, profile),
            log_target='fail')

        runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
        runner = runner_cls(self.test_env, profile, self.datastream,
                            self.benchmark_id, rule_id, scenario.script,
                            self.dont_clean, self.manual_debug)
        if not self._initial_scan_went_ok(runner, rule_id, scenario.context):
            return False

        supported_and_available_remediations = self._get_available_remediations(
            scenario)
        if (scenario.context not in ['fail', 'error']
                or not supported_and_available_remediations):
            return True

        if not self._remediation_went_ok(runner, rule_id):
            return False

        return self._final_scan_went_ok(runner, rule_id)
Example No. 11
    def _run_test(self, profile, test_data):
        scenario = test_data["scenario"]
        rule_id = test_data["rule_id"]

        LogHelper.preload_log(
            logging.INFO, "Script {0} using profile {1} OK".format(scenario.script, profile),
            log_target='pass')
        LogHelper.preload_log(
            logging.ERROR,
            "Script {0} using profile {1} found issue:".format(scenario.script, profile),
            log_target='fail')

        runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
        runner = runner_cls(
            self.test_env, profile, self.datastream, self.benchmark_id,
            rule_id, scenario.script, self.dont_clean, self.manual_debug)
        if not self._initial_scan_went_ok(runner, rule_id, scenario.context):
            return False

        supported_and_available_remediations = self._get_available_remediations(scenario)
        if (scenario.context not in ['fail', 'error']
                or not supported_and_available_remediations):
            return True

        if not self._remediation_went_ok(runner, rule_id):
            return False

        return self._final_scan_went_ok(runner, rule_id)
Example No. 12
    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

        if result:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                files_to_remove = [self.verbose_path]
                if stage in ['initial', 'final']:
                    files_to_remove.append(self.results_path)

                for fname in tuple(files_to_remove):
                    try:
                        if os.path.exists(fname):
                            os.remove(fname)
                    except OSError:
                        logging.error(
                            "Failed to cleanup file '{0}'"
                            .format(fname))
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result
Example No. 13
    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        self.command_options.extend(['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        expected_return_code = _CONTEXT_RETURN_CODES[self.context]

        if returncode != expected_return_code:
            msg = ('Scan has exited with return code {0}, '
                   'instead of expected {1} during stage {2}'.format(
                       returncode, expected_return_code, self.stage))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
        return True
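
`_CONTEXT_RETURN_CODES` maps a scenario context to the exit code that `oscap xccdf eval` documents: 0 when all evaluated rules pass, 2 when at least one rule fails, and 1 on a processing error. A plausible sketch of the mapping (the project's actual dictionary may contain more entries):

_CONTEXT_RETURN_CODES = {
    'pass': 0,   # all evaluated rules passed
    'fixed': 0,  # the post-remediation re-scan passes
    'fail': 2,   # at least one rule failed
    'error': 1,  # evaluation error
}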
Example No. 14
def run_profile(domain_ip,
                profile,
                stage,
                datastream,
                benchmark_id,
                remediation=False):
    """Run `oscap-ssh` command with provided parameters to check given profile.
    Log output into LogHelper.LOG_DIR.

    Return True if the command exits with return code 0 or 2.
    """

    formatting = {
        'domain_ip': domain_ip,
        'profile': profile,
        'datastream': datastream,
        'benchmark_id': benchmark_id
    }

    formatting['rem'] = "--remediate" if remediation else ""

    report_path = os.path.join(LogHelper.LOG_DIR,
                               '{0}-{1}'.format(profile, stage))
    verbose_path = os.path.join(LogHelper.LOG_DIR,
                                '{0}-{1}'.format(profile, stage))
    formatting['report'] = LogHelper.find_name(report_path, '.html')
    verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    command = shlex.split(('oscap-ssh root@{domain_ip} 22 xccdf eval '
                           '--benchmark-id {benchmark_id} '
                           '--profile {profile} '
                           '--progress --oval-results '
                           '--report {report} '
                           '--verbose DEVEL '
                           '{rem} '
                           '{datastream}').format(**formatting))
    logging.debug('Running ' + ' '.join(command))
    success = True
    try:
        with open(verbose_path, 'w') as verbose_file:
            output = subprocess.check_output(command, stderr=verbose_file)
    except subprocess.CalledProcessError as e:
        # non-zero exit code
        if e.returncode != 2:
            success = False
            logging.error(('Profile run should end with return code 0 or 2 '
                           'not "{0}" as it did!').format(e.returncode))
    return success
Example No. 15
def get_logging_dir(options):
    body = 'custom'
    if 'ALL' in options.target:
        body = 'ALL'

    generic_logdir_stem = "{0}-{1}".format(options.subparser_name, body)

    if options.logdir is None:

        date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
        logging_dir = os.path.join(
            os.getcwd(), 'logs', '{0}-{1}'.format(
                generic_logdir_stem, date_string))
        logging_dir = LogHelper.find_name(logging_dir)
    else:
        logging_dir = LogHelper.find_name(options.logdir)

    return logging_dir
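
`LogHelper.find_name` recurs throughout these examples as a way to pick a path that does not collide with an existing file. A minimal sketch of such a helper, assuming it appends an increasing numeric suffix until the name is free (the project's real logic may differ):

import os

def find_name(stem, suffix=''):
    # Try stem+suffix first, then stem-1+suffix, stem-2+suffix, ...
    candidate = stem + suffix
    index = 0
    while os.path.exists(candidate):
        index += 1
        candidate = '{0}-{1}{2}'.format(stem, index, suffix)
    return candidate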
Example No. 16
    def make_oscap_call(self):
        self.prepare_oscap_ssh_arguments()
        self._generate_report_file()
        self.command_options.extend(
            ['--rule', self.rule_id])

        returncode, self._oscap_output = run_cmd(self.get_command, self.verbose_path)

        expected_return_code = _CONTEXT_RETURN_CODES[self.context]

        if returncode != expected_return_code:
            msg = (
                'Scan has exited with return code {0}, '
                'instead of expected {1} during stage {2}'
                .format(returncode, expected_return_code, self.stage)
            )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
        return True
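
`self.get_command` is not defined on this page, but the `command_base`, `command_options`, and `command_operands` lists set up in the `run_stage` examples suggest it simply concatenates the three. A hypothetical sketch of the property, inside the same class (names assumed from the surrounding examples):

    @property
    def get_command(self):
        # full command line: program + options + operands
        return self.command_base + self.command_options + self.command_operands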
Example No. 17
    def _analyze_output_of_oscap_call(self):
        local_success = True
        # check expected result
        actual_results = re.findall('{0}:(.*)$'.format(self.rule_id),
                                    self._oscap_output, re.MULTILINE)
        if actual_results:
            if self.context not in actual_results:
                LogHelper.preload_log(logging.ERROR,
                                      ('Rule result should have been '
                                       '"{0}", but is "{1}"!').format(
                                          self.context,
                                          ', '.join(actual_results)), 'fail')
                local_success = False
        else:
            msg = ('Rule {0} has not been evaluated! Wrong profile selected?'
                   .format(self.rule_id))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            local_success = False
        return local_success
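
The regular expression parses `oscap`'s `--progress` output, which prints one `rule_id:result` line per evaluated rule. A small self-contained illustration of the matching (the sample output is invented for the demo):

import re

oscap_output = (
    'xccdf_org.ssgproject.content_rule_sshd_disable_root_login:fail\n'
    'xccdf_org.ssgproject.content_rule_package_telnet_removed:pass\n')
rule_id = 'xccdf_org.ssgproject.content_rule_package_telnet_removed'

# Same pattern as above: capture everything after "rule_id:" on each line.
actual_results = re.findall('{0}:(.*)$'.format(rule_id),
                            oscap_output, re.MULTILINE)
print(actual_results)  # ['pass']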
Example No. 18
def main():
    options = parse_args()

    log = logging.getLogger()
    # this is general logger level - needs to be
    # debug otherwise it cuts silently everything
    log.setLevel(logging.DEBUG)

    LogHelper.add_console_logger(log, options.loglevel)

    try:
        normalize_passed_arguments(options)
    except RuntimeError as exc:
        msg = "Error occurred during options normalization: {}".format(
            str(exc))
        logging.error(msg)
        sys.exit(1)
    # logging dir needs to be created based on other options
    # thus we have to postprocess it

    logging_dir = get_logging_dir(options)

    LogHelper.add_logging_dir(log, logging_dir)

    with datastream_in_stash(options.datastream) as stashed_datastream:
        options.datastream = stashed_datastream

        with xml_operations.datastream_root(stashed_datastream,
                                            stashed_datastream) as root:
            if options.remove_machine_only:
                xml_operations.remove_machine_platform(root)
                xml_operations.remove_machine_remediation_condition(root)
            if options.remove_ocp4_only:
                xml_operations.remove_ocp4_platforms(root)
            if options.add_platform:
                xml_operations.add_platform_to_benchmark(
                    root, options.add_platform)
            if options.add_product_to_fips_certified:
                xml_operations.add_product_to_fips_certified(
                    root, options.add_product_to_fips_certified)

        options.func(options)
Example No. 19
    def _analyze_output_of_oscap_call(self):
        local_success = True
        # check expected result
        actual_results = re.findall('{0}:(.*)$'.format(self.rule_id),
                                    self._oscap_output,
                                    re.MULTILINE)
        if actual_results:
            if self.context not in actual_results:
                LogHelper.preload_log(logging.ERROR,
                                      ('Rule result should have been '
                                       '"{0}", but is "{1}"!'
                                       ).format(self.context,
                                                ', '.join(actual_results)),
                                      'fail')
                local_success = False
        else:
            msg = (
                'Rule {0} has not been evaluated! Wrong profile selected?'
                .format(self.rule_id))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            local_success = False
        return local_success
Example No. 20
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(**formatting)

    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(command_string, log_file)
        except Exception as exc:
            # a generic Exception may not carry a returncode attribute
            msg = (
                'Bash script remediation run has exited with return code {} '
                'instead of expected 0'
                .format(getattr(exc, 'returncode', '?')))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
    return True
Example No. 21
def main():
    options = parse_args()

    log = logging.getLogger()
    # this is general logger level - needs to be
    # debug otherwise it cuts silently everything
    log.setLevel(logging.DEBUG)

    LogHelper.add_console_logger(log, options.loglevel)

    try:
        normalize_passed_arguments(options)
    except RuntimeError as exc:
        msg = "Error occurred during options normalization: {}".format(str(exc))
        logging.error(msg)
        sys.exit(1)
    # logging dir needs to be created based on other options
    # thus we have to postprocess it

    logging_dir = get_logging_dir(options)

    LogHelper.add_logging_dir(log, logging_dir)

    options.func(options)
Example No. 22
    def _run_test(self, profile, test_data):
        scenario = test_data["scenario"]
        rule_id = test_data["rule_id"]
        remediation_available = test_data["remediation_available"]

        LogHelper.preload_log(logging.INFO,
                              "Script {0} using profile {1} OK".format(
                                  scenario.script, profile),
                              log_target='pass')
        LogHelper.preload_log(
            logging.WARNING,
            "Script {0} using profile {1} notapplicable".format(
                scenario.script, profile),
            log_target='notapplicable')
        LogHelper.preload_log(
            logging.ERROR,
            "Script {0} using profile {1} found issue:".format(
                scenario.script, profile),
            log_target='fail')

        runner_cls = oscap.REMEDIATION_RULE_RUNNERS[self.remediate_using]
        runner_instance = runner_cls(self.test_env,
                                     oscap.process_profile_id(profile),
                                     self.datastream, self.benchmark_id,
                                     rule_id, scenario.script, self.dont_clean,
                                     self.no_reports, self.manual_debug)

        with runner_instance as runner:
            initial_scan_res = self._initial_scan_went_ok(
                runner, rule_id, scenario.context)
            if not initial_scan_res:
                return False
            if initial_scan_res == 2:
                # notapplicable
                return True

            supported_and_available_remediations = self._get_available_remediations(
                scenario)
            if (scenario.context not in ['fail', 'error']
                    or not supported_and_available_remediations):
                return True

            if remediation_available:
                if not self._remediation_went_ok(runner, rule_id):
                    return False

                return self._final_scan_went_ok(runner, rule_id)
            else:
                msg = ("No remediation is available for rule '{}'.".format(
                    rule_id))
                logging.warning(msg)
                return False
Example No. 23
def main():
    options = parse_args()

    log = logging.getLogger()
    # this is general logger level - needs to be
    # debug otherwise it cuts silently everything
    log.setLevel(logging.DEBUG)

    try:
        bench_id = xml_operations.infer_benchmark_id_from_component_ref_id(
            options.datastream, options.xccdf_id)
        options.benchmark_id = bench_id
    except RuntimeError as exc:
        msg = "Error inferring benchmark ID: {}".format(str(exc))
        logging.error(msg)
        sys.exit(1)


    LogHelper.add_console_logger(log, options.loglevel)
    # logging dir needs to be created based on other options
    # thus we have to postprocess it
    if 'ALL' in options.target:
        options.target = ['ALL']
    if options.logdir is None:
        # default!
        prefix = options.subparser_name
        body = ""
        if 'ALL' in options.target:
            body = 'ALL'
        else:
            body = 'custom'

        date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
        logging_dir = os.path.join(os.getcwd(),
                                   'logs',
                                   '{0}-{1}-{2}'.format(prefix,
                                                        body,
                                                        date_string))
        logging_dir = LogHelper.find_name(logging_dir)
    else:
        logging_dir = LogHelper.find_name(options.logdir)
    LogHelper.add_logging_dir(log, logging_dir)

    options.func(options)
Example No. 24
    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        self._remove_files_to_clean()

        if result == 1:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                self._filenames_to_clean_afterwards.add(self.verbose_path)
                if stage in ['initial', 'remediation', 'final']:
                    # We need the initial ARF so we can generate the remediation out of it later
                    self._filenames_to_clean_afterwards.add(self.arf_path)

        elif result == 2:
            LogHelper.log_preloaded('notapplicable')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result
Example No. 25
def run_rule(domain_ip,
             profile,
             stage,
             datastream,
             benchmark_id,
             rule_id,
             context,
             script_name,
             remediation=False,
             dont_clean=False):
    """Run `oscap-ssh` command with provided parameters to check given rule,
    utilizing --rule option. Log output to LogHelper.LOG_DIR directory.

    Return True if the result matches the expected `context`. Both the exit
    code and the output message are checked.
    """

    formatting = {
        'domain_ip': domain_ip,
        'profile': profile,
        'datastream': datastream,
        'benchmark_id': benchmark_id,
        'rule_id': rule_id
    }

    formatting['rem'] = "--remediate" if remediation else ""

    report_path = os.path.join(
        LogHelper.LOG_DIR, '{0}-{1}-{2}'.format(rule_id, script_name, stage))
    verbose_path = os.path.join(
        LogHelper.LOG_DIR, '{0}-{1}-{2}'.format(rule_id, script_name, stage))
    formatting['report'] = LogHelper.find_name(report_path, '.html')
    verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    command = shlex.split(('oscap-ssh root@{domain_ip} 22 xccdf eval '
                           '--benchmark-id {benchmark_id} '
                           '--profile {profile} '
                           '--progress --oval-results '
                           '--rule {rule_id} '
                           '--report {report} '
                           '--verbose DEVEL '
                           '{rem} '
                           '{datastream}').format(**formatting))
    logging.debug('Running ' + ' '.join(command))

    success = True
    # check expected return code
    expected_return_code = _CONTEXT_RETURN_CODES[context]
    try:
        with open(verbose_path, 'w') as verbose_file:
            output = subprocess.check_output(command, stderr=verbose_file)

    except subprocess.CalledProcessError as e:
        if e.returncode != expected_return_code:
            LogHelper.preload_log(logging.ERROR,
                                  ('Scan has exited with return code {0}, '
                                   'instead of expected {1} '
                                   'during stage {2}').format(
                                       e.returncode, expected_return_code,
                                       stage), 'fail')
            success = False
        output = e.output
Example No. 26
    def _make_results_path(self):
        results_basename = self._get_results_basename()
        results_path = os.path.join(LogHelper.LOG_DIR, results_basename)
        self.results_path = LogHelper.find_name(results_path, '.xml')
Example No. 27
    def _make_verbose_path(self):
        verbose_basename = self._get_verbose_basename()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_basename)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')
Example No. 28
log = logging.getLogger()
# this is general logger level - needs to be
# debug otherwise it cuts silently everything
log.setLevel(logging.DEBUG)

try:
    bench_id = xml_operations.infer_benchmark_id_from_component_ref_id(
        options.datastream, options.xccdf_id)
    options.benchmark_id = bench_id
except RuntimeError as exc:
    msg = "Error inferring benchmark ID: {}".format(str(exc))
    logging.error(msg)
    sys.exit(1)

LogHelper.add_console_logger(log, options.loglevel)
# logging dir needs to be created based on other options
# thus we have to postprocess it
if 'ALL' in options.target:
    options.target = ['ALL']
if options.logdir is None:
    # default!
    prefix = options.subparser_name
    body = ""
    if 'ALL' in options.target:
        body = 'ALL'
    else:
        body = 'custom'

    date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
    logging_dir = os.path.join(os.getcwd(), 'logs',
                               '{0}-{1}-{2}'.format(prefix, body, date_string))
    logging_dir = LogHelper.find_name(logging_dir)
Example No. 29
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test each
    scenario. The expected result is encoded in the scenario file name. When
    a `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is undefined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    # create origin
    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False

    remote_dir = _send_scripts(domain_ip)
    if not remote_dir:
        return

    for rule_dir, rule, scripts in data.iterate_over_rules():
        remote_rule_dir = os.path.join(remote_dir, rule_dir)
        local_rule_dir = os.path.join(data.DATA_DIR, rule_dir)
        if not _matches_target(rule_dir, options.target):
            continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))

        for script, script_context, script_params in _get_scenarios(
                local_rule_dir, scripts):
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            # create origin <- script
            snapshot_stack.create('script')
            has_worked = False

            if not _apply_script(remote_rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                # maybe revert script
                snapshot_stack.revert()
                continue
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                # create origin <- script <- profile
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script, profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script, profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip,
                    profile,
                    options.datastream,
                    options.benchmark_id,
                    rule,
                    script_context,
                    script,
                    script_params,
                    options.remediate_using,
                    options.dont_clean,
                )
                # revert either profile (if created), or script. Don't delete
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            # Delete the reverted profile or script.
            snapshot_stack.delete()
            if len(profiles) > 1:
                # revert script (we have reverted profile before).
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
Example No. 30
    def _make_results_path(self):
        results_file = self._get_results_file()
        results_path = os.path.join(LogHelper.LOG_DIR, results_file)
        self.results_path = LogHelper.find_name(results_path, '.xml')
Example No. 31
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test each
    scenario. The expected result is encoded in the scenario file name. When
    a `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is undefined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]

        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)

            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                snapshot_stack.revert()
                continue
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script,
                                                    profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script,
                                                              profile),
                                      log_target='fail')
                has_worked = True
                run_rule_checks(
                    domain_ip, profile, options.datastream,
                    options.benchmark_id, rule, script_context,
                    script, script_params, options.remediate_using,
                    options.dont_clean,
                )
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
Example No. 32
    def _make_verbose_path(self):
        verbose_file = self._get_verbose_file()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')
Example No. 33
def perform_rule_check(options):
    """Perform rule check.

    Iterate over rule-testing scenarios and use `oscap-ssh` to test each
    scenario. The expected result is encoded in the scenario file name. When
    a `fail` or `error` result is expected, continue with remediation and
    re-evaluation. Revert the system to a clean state using snapshots.

    The return value is undefined; textual output and the generated reports
    are the result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)
    scanned_something = False
    for rule_dir, rule, scripts in iterate_over_rules():
        if 'ALL' in options.target:
            # we want to have them all
            pass
        else:
            perform = False
            for target in options.target:
                if target in rule_dir:
                    perform = True
                    break
            if not perform:
                continue
        logging.info(rule)
        scanned_something = True
        logging.debug("Testing rule directory {0}".format(rule_dir))
        # get list of helper scripts (non-standard name)
        # and scenario scripts
        helpers = []
        scenarios = []
        for script in scripts:
            script_context = _get_script_context(script)
            if script_context is None:
                logging.debug('Registering helper script {0}'.format(script))
                helpers += [script]
            else:
                scenarios += [script]

        for script in scenarios:
            script_context = _get_script_context(script)
            logging.debug(('Using test script {0} '
                           'with context {1}').format(script, script_context))
            snapshot_stack.create('script')
            # copy all helper scripts, so scenario script can use them
            script_path = os.path.join(rule_dir, script)
            helper_paths = map(lambda x: os.path.join(rule_dir, x), helpers)
            _send_scripts(rule_dir, domain_ip, script_path, *helper_paths)

            if not _apply_script(rule_dir, domain_ip, script):
                logging.error("Environment failed to prepare, skipping test")
                snapshot_stack.revert()
                continue
            script_params = _parse_parameters(script_path)
            has_worked = False
            profiles = get_viable_profiles(script_params['profiles'],
                                           options.datastream,
                                           options.benchmark_id)
            if len(profiles) > 1:
                snapshot_stack.create('profile')
            for profile in profiles:
                LogHelper.preload_log(logging.INFO,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "OK").format(script,
                                                    profile),
                                      log_target='pass')
                LogHelper.preload_log(logging.ERROR,
                                      ("Script {0} "
                                       "using profile {1} "
                                       "found issue:").format(script,
                                                              profile),
                                      log_target='fail')
                has_worked = True
                if oscap.run_rule(domain_ip=domain_ip,
                                  profile=profile,
                                  stage="initial",
                                  datastream=options.datastream,
                                  benchmark_id=options.benchmark_id,
                                  rule_id=rule,
                                  context=script_context,
                                  script_name=script,
                                  remediation=False,
                                  dont_clean=options.dont_clean):
                    if script_context in ['fail', 'error']:
                        oscap.run_rule(domain_ip=domain_ip,
                                       profile=profile,
                                       stage="remediation",
                                       datastream=options.datastream,
                                       benchmark_id=options.benchmark_id,
                                       rule_id=rule,
                                       context='fixed',
                                       script_name=script,
                                       remediation=True,
                                       dont_clean=options.dont_clean)
                snapshot_stack.revert(delete=False)
            if not has_worked:
                logging.error("Nothing has been tested!")
            snapshot_stack.delete()
            if len(profiles) > 1:
                snapshot_stack.revert()
    if not scanned_something:
        logging.error("Rule {0} has not been found".format(options.target))
Example No. 34
    except subprocess.CalledProcessError as e:
        if e.returncode != expected_return_code:
            LogHelper.preload_log(logging.ERROR,
                                  ('Scan has exited with return code {0}, '
                                   'instead of expected {1} '
                                   'during stage {2}').format(
                                       e.returncode, expected_return_code,
                                       stage), 'fail')
            success = False
        output = e.output
    else:
        # success branch - command exited with return code 0
        if expected_return_code != 0:
            LogHelper.preload_log(logging.ERROR,
                                  ('Scan has exited with return code 0, '
                                   'instead of expected {0} '
                                   'during stage {1}').format(
                                       expected_return_code, stage), 'fail')
            success = False

    # check expected result
    try:
        actual_results = re.findall('{0}:(.*)$'.format(rule_id), output,
                                    re.MULTILINE)
    except IndexError:
        LogHelper.preload_log(logging.ERROR, ('Rule {0} has not been '
                                              'evaluated! Wrong profile '
                                              'selected?').format(rule_id),
                              'fail')
        success = False
    else:
Example No. 35
    def _make_report_path(self):
        report_file = self._get_report_file()
        report_path = os.path.join(LogHelper.LOG_DIR, report_file)
        self.report_path = LogHelper.find_name(report_path, '.html')
Example No. 36
log = logging.getLogger()
# this is general logger level - needs to be
# debug otherwise it cuts silently everything
log.setLevel(logging.DEBUG)

try:
    bench_id = xml_operations.infer_benchmark_id_from_component_ref_id(
        options.datastream, options.xccdf_id)
    options.benchmark_id = bench_id
except RuntimeError as exc:
    msg = "Error inferring benchmark ID: {}".format(str(exc))
    logging.error(msg)
    sys.exit(1)


LogHelper.add_console_logger(log, options.loglevel)
# logging dir needs to be created based on other options
# thus we have to postprocess it
if 'ALL' in options.target:
    options.target = ['ALL']
if options.logdir is None:
    # default!
    prefix = options.subparser_name
    body = ""
    if 'ALL' in options.target:
        body = 'ALL'
    else:
        body = 'custom'

    date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
    logging_dir = os.path.join(os.getcwd(),