Example #1
import json

# config, aggregate and LOG are assumed to be module-level helpers from the
# surrounding project (configuration loader, report aggregator and logger).


def summary(sarif_files, aggregate_file=None, override_rules=None):
    """Generate an overall scan summary based on the generated
    SARIF files

    :param sarif_files: List of generated SARIF report files
    :param aggregate_file: Filename to store aggregate data
    :param override_rules: Build break rules to override for testing
    :returns: Tuple of summary dict (keyed by tool name) and build status
    """
    # Avoid a mutable default argument; fall back to an empty rule set
    if override_rules is None:
        override_rules = {}
    report_summary = {}
    build_status = "pass"
    # This is the list of all runs which will get stored as an aggregate
    run_data_list = []
    for sf in sarif_files:
        with open(sf, mode="r") as report_file:
            report_data = json.load(report_file)
            # skip this file if the data is empty
            if not report_data or not report_data.get("runs"):
                LOG.warning("Report file {} is invalid. Skipping ...".format(sf))
                continue
            # Iterate through all the runs
            for run in report_data["runs"]:
                # Add it to the run data list for aggregation
                run_data_list.append(run)
                tool_desc = run["tool"]["driver"]["name"]
                tool_name = tool_desc
                # Initialise the summary entry for this tool
                report_summary[tool_name] = {
                    "tool": tool_desc,
                    "critical": 0,
                    "high": 0,
                    "medium": 0,
                    "low": 0,
                    "status": "✅",
                }
                results = run.get("results", [])
                metrics = run.get("properties", {}).get("metrics", None)
                # If the run includes precomputed metrics, use them;
                # otherwise tally severities from the individual results
                if metrics:
                    report_summary[tool_name].update(metrics)
                    # Only per-severity counts belong in the summary
                    report_summary[tool_name].pop("total", None)
                else:
                    for aresult in results:
                        sev = aresult["properties"]["issue_severity"].lower()
                        report_summary[tool_name][sev] += 1
                # Compare against the build break rules to determine status.
                # Merge precedence: override rules > per-tool rules > defaults
                default_rules = config.get("build_break_rules").get("default")
                tool_rules = config.get("build_break_rules").get(tool_name, {})
                build_break_rules = {
                    **default_rules,
                    **tool_rules,
                    **override_rules,
                }
                for rsev in ("critical", "high", "medium", "low"):
                    max_allowed = build_break_rules.get("max_" + rsev)
                    if (max_allowed is not None
                            and report_summary[tool_name][rsev] > max_allowed):
                        report_summary[tool_name]["status"] = "❌"
                        build_status = "fail"
    # Store the aggregate data if requested
    if aggregate_file:
        # agg_sarif_file = aggregate_file.replace(".json", ".sarif")
        # aggregate.sarif_aggregate(run_data_list, agg_sarif_file)
        aggregate.jsonl_aggregate(run_data_list, aggregate_file)
        LOG.debug("Aggregate report written to {}\n".format(aggregate_file))
    return report_summary, build_status
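
A minimal usage sketch for the variant above; the report paths and the
override value are hypothetical, and it assumes config.get("build_break_rules")
already returns the rule mapping the function expects:

report_summary, build_status = summary(
    ["reports/bandit-report.sarif", "reports/gosec-report.sarif"],
    aggregate_file="reports/scan-full-report.json",
    override_rules={"max_critical": 0},  # hypothetical testing override
)
for tool, stats in report_summary.items():
    print(tool, stats["critical"], stats["high"], stats["status"])
if build_status == "fail":
    raise SystemExit(1)  # break the build when a rule is violated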
Example #2
import json
import os

# config, aggregate, LOG, get_depscan_data and calculate_depscan_metrics are
# assumed to be helpers from the surrounding project module.


def summary(sarif_files,
            depscan_files=None,
            aggregate_file=None,
            override_rules=None):
    """Generate an overall scan summary based on the generated
    SARIF files

    :param sarif_files: List of generated SARIF report files
    :param depscan_files: Optional list of depscan report files
    :param aggregate_file: Filename to store aggregate data
    :param override_rules: Build break rules to override for testing
    :returns: Tuple of summary dict (keyed by tool name) and build status
    """
    # Avoid a mutable default argument; fall back to an empty rule set
    if override_rules is None:
        override_rules = {}
    report_summary = {}
    build_status = "pass"
    # This is the list of all runs which will get stored as an aggregate
    run_data_list = []
    default_rules = config.get("build_break_rules").get("default")
    depscan_default_rules = config.get("build_break_rules").get("depscan")
    # Collect stats from depscan files if available
    if depscan_files:
        for df in depscan_files:
            with open(df, mode="r") as drep_file:
                dep_data = get_depscan_data(drep_file)
                if not dep_data:
                    continue
                # Derive the tool key, e.g. depscan-java or depscan-nodejs,
                # from the report filename
                dep_type = os.path.basename(df).replace(".json", "").replace(
                    "-report", "")
                metrics, required_pkgs_found = calculate_depscan_metrics(
                    dep_data)
                report_summary[dep_type] = {
                    "tool": f'Dependency Scan ({dep_type.replace("depscan-", "")})',
                    "critical": metrics["critical"],
                    "high": metrics["high"],
                    "medium": metrics["medium"],
                    "low": metrics["low"],
                    "status": ":white_heavy_check_mark:",
                }
                report_summary[dep_type].pop("total", None)
                # Compare against the build break rules to determine status.
                # Merge precedence: depscan overrides > per-type rules >
                # depscan defaults
                dep_tool_rules = config.get("build_break_rules").get(
                    dep_type, {})
                build_break_rules = {**depscan_default_rules, **dep_tool_rules}
                if override_rules and override_rules.get("depscan"):
                    build_break_rules = {
                        **build_break_rules,
                        **override_rules.get("depscan"),
                    }
                # Default severity categories for build status
                build_status_categories = (
                    "critical",
                    "required_critical",
                    "optional_critical",
                    "high",
                    "required_high",
                    "optional_high",
                    "medium",
                    "required_medium",
                    "optional_medium",
                    "low",
                    "required_low",
                    "optional_low",
                )
                # Issue 233 - Consider only required packages if available
                if required_pkgs_found:
                    build_status_categories = (
                        "required_critical",
                        "required_high",
                        "required_medium",
                        "required_low",
                    )
                for rsev in build_status_categories:
                    max_allowed = build_break_rules.get("max_" + rsev)
                    # Treat a missing count as 0 to avoid comparing None
                    if (max_allowed is not None
                            and metrics.get(rsev, 0) > max_allowed):
                        report_summary[dep_type]["status"] = ":cross_mark:"
                        build_status = "fail"

    for sf in sarif_files:
        with open(sf, mode="r") as report_file:
            report_data = json.load(report_file)
            # skip this file if the data is empty
            if not report_data or not report_data.get("runs"):
                LOG.warning("Report file {} is invalid. Skipping ...".format(sf))
                continue
            # Iterate through all the runs
            for run in report_data["runs"]:
                # Add it to the run data list for aggregation
                run_data_list.append(run)
                tool_desc = run["tool"]["driver"]["name"]
                tool_name = tool_desc
                # Initialise the summary entry for this tool
                report_summary[tool_name] = {
                    "tool": tool_desc,
                    "critical": 0,
                    "high": 0,
                    "medium": 0,
                    "low": 0,
                    "status": ":white_heavy_check_mark:",
                }
                results = run.get("results", [])
                metrics = run.get("properties", {}).get("metrics", None)
                # If the run includes precomputed metrics, use them;
                # otherwise tally severities from the individual results
                if metrics:
                    report_summary[tool_name].update(metrics)
                    # Only per-severity counts belong in the summary
                    report_summary[tool_name].pop("total", None)
                else:
                    for aresult in results:
                        sev = aresult["properties"]["issue_severity"].lower()
                        report_summary[tool_name][sev] += 1
                # Compare against the build break rules to determine status.
                # Merge precedence: override rules > per-tool rules > defaults
                tool_rules = config.get("build_break_rules").get(tool_name, {})
                build_break_rules = {
                    **default_rules,
                    **tool_rules,
                    **override_rules,
                }
                for rsev in ("critical", "high", "medium", "low"):
                    max_allowed = build_break_rules.get("max_" + rsev)
                    if (max_allowed is not None
                            and report_summary[tool_name][rsev] > max_allowed):
                        report_summary[tool_name]["status"] = ":cross_mark:"
                        build_status = "fail"

    # Store the aggregate data if requested
    if aggregate_file:
        # agg_sarif_file = aggregate_file.replace(".json", ".sarif")
        # aggregate.sarif_aggregate(run_data_list, agg_sarif_file)
        aggregate.jsonl_aggregate(run_data_list, aggregate_file)
        LOG.debug("Aggregate report written to {}\n".format(aggregate_file))
    return report_summary, build_status
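
A sketch of the rule structure this variant reads; the shape is inferred only
from the lookups above ("default", "depscan" and per-tool entries under
config.get("build_break_rules"), plus "max_<severity>" thresholds). The tool
names and numeric limits below are hypothetical:

build_break_rules = {
    "default": {"max_critical": 0, "max_high": 2, "max_medium": 5},
    "depscan": {"max_required_critical": 0, "max_required_high": 2},
    "bandit": {"max_high": 0},  # hypothetical per-tool entry
}
# Depscan overrides are nested under a "depscan" key, while overrides for
# SAST tools sit at the top level of override_rules
override_rules = {
    "max_critical": 1,
    "depscan": {"max_required_critical": 1},
}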