def generate_report_for_result(spec, results_dir, result_id, config):
    """This function assumes that the ARF was generated using evaluate
    in this same package. That's why we can avoid --datastream-id, ...

    The behavior is undefined for generic ARFs!
    """

    if not spec.is_valid():
        raise RuntimeError("Can't generate report for any result of an "
                           "invalid EvaluationSpec.")

    results_path = os.path.join(results_dir, str(result_id), "results.xml")

    if not os.path.exists(results_path):
        raise RuntimeError("Can't generate report for result '%s'. Expected "
                           "results XML at '%s' but the file doesn't exist." %
                           (result_id, results_path))

    args = get_generate_report_args_for_results(spec, results_path, config)

    logging.debug(
        "Generating report for result %s of EvaluationSpec with command '%s'.",
        result_id, " ".join(args))

    ret = subprocess_check_output(args, shell=False).decode("utf-8")

    logging.info("Generated report for result %s of EvaluationSpec.",
                 result_id)

    return ret
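
# A hypothetical caller, not part of the original code: it illustrates the
# directory layout generate_report_for_result expects
# (results_dir/<result_id>/results.xml) and saves the returned HTML next to
# the results. The function name and the "report.html" filename are
# assumptions for illustration only.
def example_save_report(spec, results_dir, result_id, config):
    html = generate_report_for_result(spec, results_dir, result_id, config)
    report_path = os.path.join(results_dir, str(result_id), "report.html")
    with open(report_path, "w") as report_file:
        report_file.write(html)
    return report_path
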
def generate_fix(spec, config, fix_type):
    if spec.mode not in [
            EvaluationMode.SOURCE_DATASTREAM, EvaluationMode.STANDARD_SCAN
    ]:
        raise RuntimeError(
            "Can't generate fix for an EvaluationSpec with mode '%s'. "
            "Generating a fix script only works for 'sds' and 'standard_scan' "
            "modes." % (EvaluationMode.to_string(spec.mode)))

    if not spec.is_valid():
        raise RuntimeError("Can't generate fix for an invalid EvaluationSpec.")

    template = _fix_type_to_template(fix_type)
    args = [
        config.oscap_path, "xccdf", "generate", "fix", "--profile",
        spec.profile_id, "--template", template, spec.input_.file_path
    ]

    logging.debug(
        "Generating fix script for evaluation spec with command '%s'.",
        " ".join(args))

    ret = subprocess_check_output(args).decode("utf-8")

    logging.info("Generated fix script for evaluation spec.")

    return ret
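
# A minimal sketch of the _fix_type_to_template helper called above. The
# mapping is an assumption based on the template URNs that oscap's
# `xccdf generate fix` understands; the real helper may support a different
# set of fix types.
def _fix_type_to_template(fix_type):
    known_templates = {
        "bash": "urn:xccdf:fix:script:sh",
        "ansible": "urn:xccdf:fix:script:ansible",
        "puppet": "urn:xccdf:fix:script:puppet",
    }
    if fix_type not in known_templates:
        raise RuntimeError("Unknown fix type '%s'." % fix_type)
    return known_templates[fix_type]
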
def generate_guide(spec, config):
    if spec.mode not in [EvaluationMode.SOURCE_DATASTREAM,
                         EvaluationMode.STANDARD_SCAN]:
        raise RuntimeError(
            "Can't generate guide for an EvaluationSpec with mode '%s'. "
            "Generating an HTML guide only works for 'sds' and 'standard_scan' "
            "modes."
            % (EvaluationMode.to_string(spec.mode))
        )

    if not spec.is_valid():
        raise RuntimeError(
            "Can't generate guide for an invalid EvaluationSpec."
        )

    args = get_generate_guide_args(spec, config)

    logging.debug(
        "Generating guide for evaluation spec with command '%s'.",
        " ".join(args)
    )

    ret = subprocess_check_output(
        args,
        shell=False
    ).decode("utf-8")

    logging.info("Generated guide for evaluation spec.")

    return ret
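
# A hedged sketch of get_generate_guide_args, inferred from the calls above
# and from the way generate_fix builds its command line; the handling of a
# missing profile is an assumption, not the original implementation.
def get_generate_guide_args(spec, config):
    args = [config.oscap_path, "xccdf", "generate", "guide"]
    # An unset profile falls back to oscap's default profile.
    if spec.profile_id is not None:
        args.extend(["--profile", spec.profile_id])
    args.append(spec.input_.file_path)
    return args
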
def generate_html_report_for_result(config, results_path):
    if not os.path.exists(results_path):
        raise RuntimeError("Can't generate report for scan result. Expected "
                           "results XML at '%s' but the file doesn't exist."
                           % results_path)
    result_id = _get_result_id(results_path)
    args = [config.oscap_path, "xccdf", "generate", "report",
            "--result-id", result_id, results_path]
    report_text = subprocess_check_output(args).decode("utf-8")
    return report_text
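
# A sketch of the _get_result_id helper, assuming the results XML contains
# an XCCDF TestResult element whose "id" attribute is what
# `oscap xccdf generate report --result-id` expects. Matching the tag by
# suffix keeps the sketch independent of the exact XCCDF namespace version.
import xml.etree.ElementTree as ElementTree

def _get_result_id(results_path):
    tree = ElementTree.parse(results_path)
    for elem in tree.iter():
        # Namespaced tags look like "{namespace-uri}TestResult".
        if elem.tag == "TestResult" or elem.tag.endswith("}TestResult"):
            return elem.get("id")
    raise RuntimeError("No TestResult element found in '%s'." % results_path)
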
def generate_fix_for_result(config, results_path, fix_type):
    if not os.path.exists(results_path):
        raise RuntimeError("Can't generate fix for scan result. Expected "
                           "results XML at '%s' but the file doesn't exist." %
                           results_path)
    result_id = _get_result_id(results_path)
    template = _fix_type_to_template(fix_type)
    args = [
        config.oscap_path, "xccdf", "generate", "fix", "--result-id",
        result_id, "--template", template, results_path
    ]
    fix_text = subprocess_check_output(args).decode("utf-8")
    return fix_text
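
# Illustrative only: a hypothetical wrapper showing how the function above
# might be used; the "fix.sh" output path and the "bash" fix type are
# assumptions.
def example_write_fix_script(config, results_path):
    fix_text = generate_fix_for_result(config, results_path, "bash")
    with open("fix.sh", "w") as out:
        out.write(fix_text)
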
# A simpler variant of generate_guide without the evaluation-mode check.
def generate_guide(spec, config):
    if not spec.is_valid():
        raise RuntimeError(
            "Can't generate guide for an invalid EvaluationSpec.")

    args = get_generate_guide_args(spec, config)

    logging.debug("Generating guide for evaluation spec with command '%s'.",
                  " ".join(args))

    ret = subprocess_check_output(args, shell=False).decode("utf-8")

    logging.info("Generated guide for evaluation spec.")

    return ret
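
# The subprocess_check_output helper used throughout is not defined in this
# snippet. A plausible minimal stand-in is a thin alias over the standard
# library; the real helper may differ (e.g. a backport for old Pythons).
import subprocess

def subprocess_check_output(*args, **kwargs):
    # Mirrors subprocess.check_output: raises CalledProcessError on a
    # non-zero exit status and returns the captured stdout as bytes.
    return subprocess.check_output(*args, **kwargs)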