Example 1
def analyse_licenses(project_type, licenses_results, license_report_file=None):
    if not licenses_results:
        return
    table = Table(
        title=f"License Scan Summary ({project_type})",
        box=box.DOUBLE_EDGE,
        header_style="bold magenta",
    )
    headers = ["Package", "Version", "License Id", "License conditions"]
    for h in headers:
        table.add_column(header=h)
    report_data = []
    for pkg, ll in licenses_results.items():
        pkg_ver = pkg.split("@")
        for lic in ll:
            if not lic:
                data = [*pkg_ver, "Unknown license"]
                table.add_row(*data)
                report_data.append(dict(zip(headers, data)))
            elif lic["condition_flag"]:
                conditions_str = ", ".join(lic["conditions"])
                if "http" not in conditions_str:
                    conditions_str = (conditions_str.replace(
                        "--", " for ").replace("-", " ").title())
                data = [
                    *pkg_ver,
                    "{}{}".format(
                        "[cyan]" if "GPL" in lic["spdx-id"] or "CC-BY-"
                        in lic["spdx-id"] or "Facebook" in lic["spdx-id"]
                        or "WTFPL" in lic["spdx-id"] else "",
                        lic["spdx-id"],
                    ),
                    conditions_str,
                ]
                table.add_row(*data)
                report_data.append(dict(zip(headers, data)))
    if report_data:
        console.print(table)
        # Store the license scan findings in jsonl format
        if license_report_file:
            with open(license_report_file, "w") as outfile:
                for row in report_data:
                    json.dump(row, outfile)
                    outfile.write("\n")
    else:
        LOG.info("No license violation detected ✅")
Example 2
def main():
    args = build_args()
    if not args.no_banner:
        print(at_logo)
    src_dir = args.src_dir_image
    if not src_dir:
        src_dir = os.getcwd()
    reports_base_dir = src_dir
    # Detect the project types and perform the right type of scan
    if args.project_type:
        project_types_list = args.project_type.split(",")
    elif args.bom:
        project_types_list = ["bom"]
    else:
        project_types_list = utils.detect_project_type(src_dir)
    if ("docker" in project_types_list or "podman" in project_types_list
            or "container" in project_types_list
            or "binary" in project_types_list):
        reports_base_dir = os.getcwd()
    db = dbLib.get()
    run_cacher = args.cache
    areport_file = (args.report_file if args.report_file else os.path.join(
        reports_base_dir, "reports", "depscan.json"))
    reports_dir = os.path.dirname(areport_file)
    # Create reports directory
    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    if len(project_types_list) > 1:
        LOG.debug(
            "Multiple project types found: {}".format(project_types_list))
    # Enable license scanning
    if "license" in project_types_list:
        os.environ["FETCH_LICENSE"] = "true"
        project_types_list.remove("license")
        console.print(
            Panel(
                "License audit is enabled for this scan. This would increase the time by up to 10 minutes.",
                title="License Audit",
                expand=False,
            ))
    for project_type in project_types_list:
        sug_version_dict = {}
        pkg_aliases = {}
        results = []
        report_file = areport_file.replace(".json",
                                           "-{}.json".format(project_type))
        risk_report_file = areport_file.replace(
            ".json", "-risk.{}.json".format(project_type))
        LOG.info("=" * 80)
        creation_status = False
        if args.bom and os.path.exists(args.bom):
            bom_file = args.bom
            creation_status = True
        else:
            bom_file = os.path.join(reports_dir,
                                    "bom-" + project_type + ".json")
            creation_status = create_bom(project_type, bom_file, src_dir)
        if not creation_status:
            LOG.debug(
                "Bom file {} was not created successfully".format(bom_file))
            continue
        LOG.debug("Scanning using the bom file {}".format(bom_file))
        pkg_list = get_pkg_list(bom_file)
        if not pkg_list:
            LOG.debug("No packages found in the project!")
            continue
        scoped_pkgs = {}
        if project_type in ["python"]:
            all_imports = utils.get_all_imports(src_dir)
            LOG.debug(f"Identified {len(all_imports)} imports in your project")
            scoped_pkgs = utils.get_scope_from_imports(project_type, pkg_list,
                                                       all_imports)
        else:
            scoped_pkgs = utils.get_pkgs_by_scope(project_type, pkg_list)
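        # FETCH_LICENSE comes back from the environment as a string, so only
        # the "1"/"true" values can actually match this check.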
        if os.getenv("FETCH_LICENSE", "") in (True, "1", "true"):
            licenses_results = bulk_lookup(
                build_license_data(license_data_dir, spdx_license_list),
                pkg_list=pkg_list,
            )
            license_report_file = os.path.join(
                reports_dir, "license-" + project_type + ".json")
            analyse_licenses(project_type, licenses_results,
                             license_report_file)
        if project_type in risk_audit_map.keys():
            if args.risk_audit:
                console.print(
                    Panel(
                        f"Performing OSS Risk Audit for packages from {src_dir}\nNo of packages [bold]{len(pkg_list)}[/bold]. This will take a while ...",
                        title="OSS Risk Audit",
                        expand=False,
                    ))
                try:
                    risk_results = risk_audit(
                        project_type,
                        scoped_pkgs,
                        args.private_ns,
                        pkg_list,
                        risk_report_file,
                    )
                    analyse_pkg_risks(
                        project_type,
                        scoped_pkgs,
                        args.private_ns,
                        risk_results,
                        risk_report_file,
                    )
                except Exception as e:
                    LOG.error(e)
                    LOG.error("Risk audit was not successful")
            else:
                console.print(
                    Panel(
                        "Depscan supports OSS Risk audit for this project.\nTo enable set the environment variable [bold]ENABLE_OSS_RISK=true[/bold]",
                        title="New Feature",
                        expand=False,
                    ))
        if project_type in type_audit_map.keys():
            LOG.info("Performing remote audit for {} of type {}".format(
                src_dir, project_type))
            LOG.debug(f"No of packages {len(pkg_list)}")
            try:
                audit_results = audit(project_type, pkg_list, report_file)
                if audit_results:
                    LOG.debug(
                        f"Remote audit yielded {len(audit_results)} results")
                    results = results + audit_results
            except Exception as e:
                LOG.error("Remote audit was not successful")
                LOG.error(e)
                results = None
        # In case of docker, check if there are any npm packages that can be audited remotely
        if project_type in ("podman", "docker"):
            npm_pkg_list = get_pkg_by_type(pkg_list, "npm")
            if npm_pkg_list:
                LOG.debug(f"No of packages {len(npm_pkg_list)}")
                try:
                    audit_results = audit("nodejs", npm_pkg_list, report_file)
                    if audit_results:
                        LOG.debug(
                            f"Remote audit yielded {len(audit_results)} results"
                        )
                        results = results + audit_results
                except Exception as e:
                    LOG.error("Remote audit was not successful")
                    LOG.error(e)
        if not dbLib.index_count(db["index_file"]):
            run_cacher = True
        else:
            LOG.debug("Vulnerability database loaded from {}".format(
                config.vdb_bin_file))
        sources_list = [OSVSource(), NvdSource()]
        if os.environ.get("GITHUB_TOKEN"):
            sources_list.insert(0, GitHubSource())
        if run_cacher:
            for s in sources_list:
                LOG.debug("Refreshing {}".format(s.__class__.__name__))
                s.refresh()
                run_cacher = False
        elif args.sync:
            for s in sources_list:
                LOG.debug("Syncing {}".format(s.__class__.__name__))
                s.download_recent()
                run_cacher = False
        LOG.debug("Vulnerability database contains {} records".format(
            dbLib.index_count(db["index_file"])))
        LOG.info("Performing regular scan for {} using plugin {}".format(
            src_dir, project_type))
        vdb_results, pkg_aliases, sug_version_dict = scan(
            db, project_type, pkg_list, args.suggest)
        if vdb_results:
            results = results + vdb_results
        # Summarise and print results
        summary = summarise(
            project_type,
            results,
            pkg_aliases,
            sug_version_dict,
            scoped_pkgs,
            report_file,
            True,
        )
        if summary and not args.noerror and len(project_types_list) == 1:
            # Hard coded build break logic for now
            if summary.get("CRITICAL") > 0:
                sys.exit(1)
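The main() above only consumes the parsed arguments. Below is a hypothetical build_args() sketch covering just the attributes main() reads; the real CLI flag names and defaults in the project may differ.

import argparse


def build_args():
    # Hypothetical parser: only the attributes read by main() are defined here.
    parser = argparse.ArgumentParser(description="Dependency and license scanner")
    parser.add_argument("--no-banner", action="store_true", dest="no_banner")
    parser.add_argument("--src", dest="src_dir_image", help="Source directory or container image")
    parser.add_argument("-t", "--type", dest="project_type", help="Comma-separated project types")
    parser.add_argument("--bom", dest="bom", help="Use an existing CycloneDX BOM file")
    parser.add_argument("--report-file", dest="report_file", help="Path to the JSON report")
    parser.add_argument("--cache", action="store_true", dest="cache")
    parser.add_argument("--sync", action="store_true", dest="sync")
    parser.add_argument("--suggest", action="store_true", dest="suggest")
    parser.add_argument("--risk-audit", action="store_true", dest="risk_audit")
    parser.add_argument("--private-ns", dest="private_ns", help="Private package namespaces")
    parser.add_argument("--no-error", action="store_true", dest="noerror")
    return parser.parse_args()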
Example 3
def main():
    args = build_args()
    if not args.no_banner:
        print(at_logo)
    src_dir = args.src_dir
    if not args.src_dir:
        src_dir = os.getcwd()
    db = dbLib.get()
    run_cacher = args.cache
    areport_file = (
        args.report_file
        if args.report_file
        else os.path.join(src_dir, "reports", "depscan.json")
    )
    reports_dir = os.path.dirname(areport_file)
    # Create reports directory
    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    # Detect the project types and perform the right type of scan
    if args.project_type:
        project_types_list = args.project_type.split(",")
    else:
        project_types_list = utils.detect_project_type(src_dir)
    if len(project_types_list) > 1:
        LOG.debug("Multiple project types found: {}".format(project_types_list))
    for project_type in project_types_list:
        sug_version_dict = {}
        pkg_aliases = {}
        report_file = areport_file.replace(".json", "-{}.json".format(project_type))
        risk_report_file = areport_file.replace(
            ".json", "-risk.{}.json".format(project_type)
        )
        LOG.info("=" * 80)
        creation_status = False
        if args.bom and os.path.exists(args.bom):
            bom_file = args.bom
            creation_status = True
        else:
            bom_file = os.path.join(reports_dir, "bom-" + project_type + ".json")
            creation_status = create_bom(project_type, bom_file, src_dir)
        if not creation_status:
            LOG.debug("Bom file {} was not created successfully".format(bom_file))
            continue
        LOG.debug("Scanning using the bom file {}".format(bom_file))
        pkg_list = get_pkg_list(bom_file)
        if not pkg_list:
            LOG.debug("No packages found in the project!")
            continue
        scoped_pkgs = utils.get_pkgs_by_scope(pkg_list)
        if not args.no_license_scan:
            licenses_results = bulk_lookup(
                build_license_data(license_data_dir), pkg_list=pkg_list
            )
            license_report_file = os.path.join(
                reports_dir, "license-" + project_type + ".json"
            )
            analyse_licenses(project_type, licenses_results, license_report_file)
        if project_type in risk_audit_map.keys():
            if args.risk_audit:
                console.print(
                    Panel(
                        f"Performing OSS Risk Audit for packages from {src_dir}\nNo of packages [bold]{len(pkg_list)}[/bold]. This will take a while ...",
                        title="OSS Risk Audit",
                        expand=False,
                    )
                )
                try:
                    risk_results = risk_audit(
                        project_type, args.private_ns, pkg_list, risk_report_file
                    )
                    analyse_pkg_risks(
                        project_type, args.private_ns, risk_results, risk_report_file
                    )
                except Exception as e:
                    LOG.error(e)
                    LOG.error("Risk audit was not successful")
                    risk_results = None
            else:
                console.print(
                    Panel(
                        "Depscan supports OSS Risk audit for this project.\nTo enable set the environment variable [bold]ENABLE_OSS_RISK=true[/bold]",
                        title="New Feature",
                        expand=False,
                    )
                )
        if project_type in type_audit_map.keys():
            LOG.info(
                "Performing remote audit for {} of type {}".format(
                    src_dir, project_type
                )
            )
            LOG.debug(f"No of packages {len(pkg_list)}")
            try:
                results = audit(project_type, pkg_list, report_file)
            except Exception as e:
                LOG.error("Remote audit was not successful")
                LOG.error(e)
                results = None
        else:
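            # No remote audit for this project type; fall back to scanning
            # against the local vulnerability database.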
            if not dbLib.index_count(db["index_file"]):
                run_cacher = True
            else:
                LOG.debug(
                    "Vulnerability database loaded from {}".format(config.vdb_bin_file)
                )
            sources_list = [NvdSource()]
            if os.environ.get("GITHUB_TOKEN"):
                sources_list.insert(0, GitHubSource())
            else:
                LOG.info(
                    "To use GitHub advisory source please set the environment variable GITHUB_TOKEN!"
                )
            if run_cacher:
                for s in sources_list:
                    LOG.debug("Refreshing {}".format(s.__class__.__name__))
                    s.refresh()
            elif args.sync:
                for s in sources_list:
                    LOG.debug("Syncing {}".format(s.__class__.__name__))
                    s.download_recent()
            LOG.debug(
                "Vulnerability database contains {} records".format(
                    dbLib.index_count(db["index_file"])
                )
            )
            LOG.info(
                "Performing regular scan for {} using plugin {}".format(
                    src_dir, project_type
                )
            )
            results, pkg_aliases, sug_version_dict = scan(
                db, project_type, pkg_list, args.suggest
            )
        # Summarise and print results
        summary = summarise(
            project_type,
            results,
            pkg_aliases,
            sug_version_dict,
            scoped_pkgs,
            report_file,
            True,
        )
        if summary and not args.noerror and len(project_types_list) == 1:
            # Hard coded build break logic for now
            if summary.get("CRITICAL") > 0:
                sys.exit(1)
Example 4
def analyse_pkg_risks(project_type,
                      scoped_pkgs,
                      private_ns,
                      risk_results,
                      risk_report_file=None):
    if not risk_results:
        return
    table = Table(
        title=f"Risk Audit Summary ({project_type})",
        box=box.DOUBLE_EDGE,
        header_style="bold magenta",
    )
    report_data = []
    required_pkgs = scoped_pkgs.get("required", [])
    optional_pkgs = scoped_pkgs.get("optional", [])
    excluded_pkgs = scoped_pkgs.get("excluded", [])
    headers = ["Package", "Used?", "Risk Score", "Identified Risks"]
    for h in headers:
        justify = "left"
        if h == "Risk Score":
            justify = "right"
        table.add_column(header=h, justify=justify)
    for pkg, risk_obj in risk_results.items():
        if not risk_obj:
            continue
        risk_metrics = risk_obj.get("risk_metrics")
        scope = risk_obj.get("scope")
        project_type_pkg = "{}:{}".format(project_type, pkg).lower()
        if project_type_pkg in required_pkgs:
            scope = "required"
        elif project_type_pkg in optional_pkgs:
            scope = "optional"
        elif project_type_pkg in excluded_pkgs:
            scope = "excluded"
        package_usage = "N/A"
        package_usage_simple = "N/A"
        if scope == "required":
            package_usage = "[bright_green][bold]Yes"
            package_usage_simple = "Yes"
        if scope == "optional":
            package_usage = "[magenta]No"
            package_usage_simple = "No"
        if not risk_metrics:
            continue
        if risk_metrics.get("risk_score") and (
                risk_metrics.get("risk_score") > config.pkg_max_risk_score
                or risk_metrics.get("pkg_private_on_public_registry_risk")):
            risk_score = f"""{round(risk_metrics.get("risk_score"), 2)}"""
            data = [
                pkg,
                package_usage,
                risk_score,
            ]
            edata = [
                pkg,
                package_usage_simple,
                risk_score,
            ]
            risk_categories = []
            risk_categories_simple = []
            for rk, rv in risk_metrics.items():
                if rk.endswith("_risk") and rv is True:
                    rcat = rk.replace("_risk", "")
                    help_text = config.risk_help_text.get(rcat)
                    # Only add texts that are available.
                    if help_text:
                        if rcat in ("pkg_deprecated",
                                    "pkg_private_on_public_registry"):
                            risk_categories.append(f":cross_mark: {help_text}")
                        else:
                            risk_categories.append(f":warning: {help_text}")
                        risk_categories_simple.append(help_text)
            data.append("\n".join(risk_categories))
            edata.append(", ".join(risk_categories_simple))
            table.add_row(*data)
            report_data.append(dict(zip(headers, edata)))
    if report_data:
        console.print(table)
        # Store the risk audit findings in jsonl format
        if risk_report_file:
            with open(risk_report_file, "w") as outfile:
                for row in report_data:
                    json.dump(row, outfile)
                    outfile.write("\n")
    else:
        LOG.info("No package risks detected ✅")
Example 5
def print_results(project_type, results, pkg_aliases, sug_version_dict,
                  scoped_pkgs):
    """Pretty print report summary"""
    if not results:
        return
    table = Table(
        title=f"Dependency Scan Results ({project_type})",
        box=box.DOUBLE_EDGE,
        header_style="bold magenta",
    )
    ids_seen = {}
    required_pkgs = scoped_pkgs.get("required", [])
    optional_pkgs = scoped_pkgs.get("optional", [])
    pkg_attention_count = 0
    fix_version_count = 0
    for h in [
            "Id",
            "Package",
            "Insights",
            "Version",
            "Fix Version",
            "Severity",
            "Score",
    ]:
        justify = "left"
        if h == "Score":
            justify = "right"
        table.add_column(header=h, justify=justify, no_wrap=False)
    for res in results:
        vuln_occ_dict = res.to_dict()
        id = vuln_occ_dict.get("id")
        package_issue = res.package_issue
        full_pkg = package_issue.affected_location.package
        project_type_pkg = "{}:{}".format(
            project_type, package_issue.affected_location.package)
        if package_issue.affected_location.vendor:
            full_pkg = "{}:{}".format(
                package_issue.affected_location.vendor,
                package_issue.affected_location.package,
            )
        # De-alias package names
        full_pkg = pkg_aliases.get(full_pkg, full_pkg)
        if ids_seen.get(id + full_pkg):
            continue
        ids_seen[id + full_pkg] = True
        fixed_location = sug_version_dict.get(full_pkg,
                                              package_issue.fixed_location)
        package_usage = "N/A"
        insights = []
        package_name_style = ""
        id_style = ""
        pkg_severity = vuln_occ_dict.get("severity")
        is_required = False
        if full_pkg in required_pkgs or project_type_pkg in required_pkgs:
            is_required = True
        if is_required and pkg_severity in ("CRITICAL", "HIGH"):
            id_style = ":point_right: "
            pkg_attention_count = pkg_attention_count + 1
            if fixed_location:
                fix_version_count = fix_version_count + 1
        if is_required:
            package_usage = ":direct_hit: Direct usage"
            package_name_style = "[bold]"
        elif full_pkg in optional_pkgs or project_type_pkg in optional_pkgs:
            package_usage = (
                "[spring_green4]:information: Indirect dependency[/spring_green4]"
            )
            package_name_style = "[italic]"
        package = full_pkg.split(":")[-1]
        clinks = classify_links(
            id,
            full_pkg,
            vuln_occ_dict.get("type"),
            package_issue.affected_location.version,
            vuln_occ_dict.get("related_urls"),
        )
        if package_usage != "N/A":
            insights.append(package_usage)
        if clinks.get("poc") or clinks.get("Bug Bounty"):
            insights.append(
                "[yellow]:notebook_with_decorative_cover: Has PoC[/yellow]")
        if clinks.get("vendor"):
            insights.append(":receipt: Vendor Confirmed")
        if clinks.get("exploit"):
            insights.append(
                "[bright_red]:exclamation_mark: Known exploits[/bright_red]")
        table.add_row(
            "{}{}{}{}".format(
                id_style,
                package_name_style,
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                id,
            ),
            "{}{}".format(package_name_style, package),
            "\n".join(insights),
            package_issue.affected_location.version,
            fixed_location,
            "{}{}".format(
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                vuln_occ_dict.get("severity"),
            ),
            "{}{}".format(
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                vuln_occ_dict.get("cvss_score"),
            ),
        )
    console.print(table)
    if scoped_pkgs:
        if pkg_attention_count:
            rmessage = f":point_right: [magenta]{pkg_attention_count}[/magenta] out of {len(results)} vulnerabilities requires your attention."
            if fix_version_count:
                if fix_version_count == pkg_attention_count:
                    rmessage += "\n:white_heavy_check_mark: You can update [bright_green]all[/bright_green] the packages using the mentioned fix version to remediate."
                else:
                    rmessage += f"\nYou can remediate [bright_green]{fix_version_count}[/bright_green] {'vulnerability' if fix_version_count == 1 else 'vulnerabilities'} by updating the packages using the fix version :thumbsup:"
            console.print(
                Panel(
                    rmessage,
                    title="Recommendation",
                    expand=False,
                ))
        else:
            console.print(
                Panel(
                    ":white_check_mark: No package requires immediate attention since the major vulnerabilities are found only in dev packages and indirect dependencies.",
                    title="Recommendation",
                    expand=False,
                ))
Example 6
def print_results(project_type, results, pkg_aliases, sug_version_dict,
                  scoped_pkgs):
    """Pretty print report summary"""
    if not results:
        return
    table = Table(
        title=f"Dependency Scan Results ({project_type})",
        box=box.DOUBLE_EDGE,
        header_style="bold magenta",
    )
    required_pkgs = scoped_pkgs.get("required", [])
    optional_pkgs = scoped_pkgs.get("optional", [])
    pkg_attention_count = 0
    fix_version_count = 0
    for h in [
            "Id",
            "Package",
            "Used?",
            "Version",
            "Fix Version",
            "Severity",
            "Score",
    ]:
        justify = "left"
        if h == "Score":
            justify = "right"
        width = None
        if h == "Id":
            width = 20
        elif h == "Used?" or h == "Fix Version":
            width = 10
        elif h == "Description":
            width = 58
        table.add_column(header=h, justify=justify, width=width, no_wrap=False)
    for res in results:
        vuln_occ_dict = res.to_dict()
        id = vuln_occ_dict.get("id")
        package_issue = res.package_issue
        full_pkg = package_issue.affected_location.package
        if package_issue.affected_location.vendor:
            full_pkg = "{}:{}".format(
                package_issue.affected_location.vendor,
                package_issue.affected_location.package,
            )
        # De-alias package names
        full_pkg = pkg_aliases.get(full_pkg, full_pkg)
        fixed_location = sug_version_dict.get(full_pkg,
                                              package_issue.fixed_location)
        package_usage = "N/A"
        package_name_style = ""
        id_style = ""
        pkg_severity = vuln_occ_dict.get("severity")
        if full_pkg in required_pkgs and pkg_severity in ("CRITICAL", "HIGH"):
            id_style = ":point_right: "
            pkg_attention_count = pkg_attention_count + 1
            if fixed_location:
                fix_version_count = fix_version_count + 1
        if full_pkg in required_pkgs:
            package_usage = "[bright_green][bold]Yes"
            package_name_style = "[bold]"
        elif full_pkg in optional_pkgs:
            package_usage = "[magenta]No"
            package_name_style = "[italic]"
        package = full_pkg.split(":")[-1]
        table.add_row(
            "{}{}{}{}".format(
                id_style,
                package_name_style,
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                id,
            ),
            "{}{}".format(package_name_style, package),
            package_usage,
            package_issue.affected_location.version,
            fixed_location,
            "{}{}".format(
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                vuln_occ_dict.get("severity"),
            ),
            "{}{}".format(
                "[bright_red]" if pkg_severity == "CRITICAL" else "",
                vuln_occ_dict.get("cvss_score"),
            ),
        )
    console.print(table)
    if scoped_pkgs:
        if pkg_attention_count:
            rmessage = f":heavy_exclamation_mark: [magenta]{pkg_attention_count}[/magenta] out of {len(results)} vulnerabilities requires your attention."
            if fix_version_count:
                if fix_version_count == pkg_attention_count:
                    rmessage += "\n:white_heavy_check_mark: You can update [bright_green]all[/bright_green] the packages using the mentioned fix version to remediate."
                else:
                    rmessage += f"\nYou can remediate [bright_green]{fix_version_count}[/bright_green] {'vulnerability' if fix_version_count == 1 else 'vulnerabilities'} by updating the packages using the fix version :thumbsup:."
            console.print(
                Panel(
                    rmessage,
                    title="Recommendation",
                    expand=False,
                ))
        else:
            console.print(
                Panel(
                    ":white_check_mark: No package requires immediate attention since the major vulnerabilities are found only in dev packages and indirect dependencies.",
                    title="Recommendation",
                    expand=False,
                ))
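A minimal, hypothetical call sketch for this print_results variant. SimpleNamespace objects stand in for the scanner's vulnerability occurrence results; the attribute and key names mirror exactly what the function reads, and the values are illustrative only.

from types import SimpleNamespace

# Stand-in for a single vulnerability occurrence (illustrative values).
occ = SimpleNamespace(
    package_issue=SimpleNamespace(
        affected_location=SimpleNamespace(
            vendor="npm", package="sample-pkg", version="1.0.0"),
        fixed_location="1.0.1",
    ),
    to_dict=lambda: {"id": "CVE-0000-00000", "severity": "CRITICAL",
                     "cvss_score": 9.8},
)
print_results(
    "nodejs",
    [occ],
    pkg_aliases={},
    sug_version_dict={},
    scoped_pkgs={"required": ["npm:sample-pkg"], "optional": []},
)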