Example #1
def test_vuln_data():
    test_cve_data = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "data", "cve_data.json")
    with open(test_cve_data, "r") as fp:
        json_data = json.load(fp)
        nvdlatest = NvdSource()
        return nvdlatest.convert(json_data)
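
Example #2 below consumes a test_cve_json fixture. A minimal sketch of such a fixture, built on the same data file as Example #1 and assuming a pytest test module (the fixture wiring is an assumption, not shown in the listing):

import json
import os

import pytest


@pytest.fixture
def test_cve_json():
    # Same data file Example #1 reads; the fixture returns the raw JSON so
    # test_convert can drive NvdSource.convert itself.
    test_cve_data = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data", "cve_data.json"
    )
    with open(test_cve_data, "r") as fp:
        return json.load(fp)
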
Example #2
def test_convert(test_cve_json):
    nvdlatest = NvdSource()
    data = nvdlatest.convert(test_cve_json)
    assert len(data) == 385
    for v in data:
        details = v.details
        for detail in details:
            assert detail
            assert detail.severity
            assert detail.package
            assert detail.package_type
Example #3
def main():
    args = build_args()
    print(at_logo)
    if args.clean:
        if os.path.exists(config.data_dir):
            try:
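                # os.rmdir only removes an empty directory; a populated
                # data_dir survives, and the bare except hides the failure.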
                os.rmdir(config.data_dir)
            except Exception:
                pass
    else:
        LOG.info("Vulnerability database loaded from {}".format(
            config.vdb_bin_file))

    if args.cache:
        for s in [GitHubSource(), NvdSource()]:
            LOG.info("Refreshing {}".format(s.__class__.__name__))
            s.refresh()
    elif args.sync:
        for s in [GitHubSource(), NvdSource()]:
            LOG.info("Syncing {}".format(s.__class__.__name__))
            s.download_recent()
    if args.sync_npm:
        for s in [NpmSource()]:
            LOG.info("Syncing {}".format(s.__class__.__name__))
            s.download_recent()
    if args.sync_github:
        for s in [GitHubSource()]:
            LOG.info("Syncing {}".format(s.__class__.__name__))
            s.download_recent()
    if args.search_npm:
        source = NpmSource()
        results = source.bulk_search(config.npm_app_info, [args.search_npm])
        print_results(results)
    if args.list:
        db = dbLib.get()
        results = dbLib.list_all_occurrence(db)
        print_results(results)
    elif args.search:
        db = dbLib.get()
        search_list = re.split(r"[,|;]", args.search)
        for pkg_info in search_list:
            pstr = re.split(r"[:=@]", pkg_info)
            if pstr:
                if len(pstr) == 2 and dbLib.index_search(*pstr):
                    results = dbLib.pkg_search(db, *pstr)
                    print_results(results)
                elif len(pstr) == 3:
                    results = dbLib.vendor_pkg_search(db, *pstr)
                    print_results(results)
                else:
                    print("No vulnerability found!")
Example #4
def main():
    args = build_args()
    if not args.no_banner:
        print(at_logo)
    src_dir = args.src_dir_image
    if not src_dir:
        src_dir = os.getcwd()
    reports_base_dir = src_dir
    # Detect the project types and perform the right type of scan
    if args.project_type:
        project_types_list = args.project_type.split(",")
    elif args.bom:
        project_types_list = ["bom"]
    else:
        project_types_list = utils.detect_project_type(src_dir)
    if ("docker" in project_types_list or "podman" in project_types_list
            or "container" in project_types_list
            or "binary" in project_types_list):
        reports_base_dir = os.getcwd()
    db = dbLib.get()
    run_cacher = args.cache
    areport_file = (args.report_file if args.report_file else os.path.join(
        reports_base_dir, "reports", "depscan.json"))
    reports_dir = os.path.dirname(areport_file)
    # Create reports directory
    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    if len(project_types_list) > 1:
        LOG.debug(
            "Multiple project types found: {}".format(project_types_list))
    # Enable license scanning
    if "license" in project_types_list:
        os.environ["FETCH_LICENSE"] = "true"
        project_types_list.remove("license")
        console.print(
            Panel(
                "License audit is enabled for this scan. This would increase the time by up to 10 minutes.",
                title="License Audit",
                expand=False,
            ))
    for project_type in project_types_list:
        sug_version_dict = {}
        pkg_aliases = {}
        results = []
        report_file = areport_file.replace(".json",
                                           "-{}.json".format(project_type))
        risk_report_file = areport_file.replace(
            ".json", "-risk.{}.json".format(project_type))
        LOG.info("=" * 80)
        creation_status = False
        if args.bom and os.path.exists(args.bom):
            bom_file = args.bom
            creation_status = True
        else:
            bom_file = os.path.join(reports_dir,
                                    "bom-" + project_type + ".json")
            creation_status = create_bom(project_type, bom_file, src_dir)
        if not creation_status:
            LOG.debug(
                "Bom file {} was not created successfully".format(bom_file))
            continue
        LOG.debug("Scanning using the bom file {}".format(bom_file))
        pkg_list = get_pkg_list(bom_file)
        if not pkg_list:
            LOG.debug("No packages found in the project!")
            continue
        scoped_pkgs = {}
        if project_type in ["python"]:
            all_imports = utils.get_all_imports(src_dir)
            LOG.debug(f"Identified {len(all_imports)} imports in your project")
            scoped_pkgs = utils.get_scope_from_imports(project_type, pkg_list,
                                                       all_imports)
        else:
            scoped_pkgs = utils.get_pkgs_by_scope(project_type, pkg_list)
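        # Note: os.getenv() returns a string, so the True member of this
        # tuple can never match; only "1" and "true" enable the lookup.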
        if os.getenv("FETCH_LICENSE", "") in (True, "1", "true"):
            licenses_results = bulk_lookup(
                build_license_data(license_data_dir, spdx_license_list),
                pkg_list=pkg_list,
            )
            license_report_file = os.path.join(
                reports_dir, "license-" + project_type + ".json")
            analyse_licenses(project_type, licenses_results,
                             license_report_file)
        if project_type in risk_audit_map.keys():
            if args.risk_audit:
                console.print(
                    Panel(
                        f"Performing OSS Risk Audit for packages from {src_dir}\nNo of packages [bold]{len(pkg_list)}[/bold]. This will take a while ...",
                        title="OSS Risk Audit",
                        expand=False,
                    ))
                try:
                    risk_results = risk_audit(
                        project_type,
                        scoped_pkgs,
                        args.private_ns,
                        pkg_list,
                        risk_report_file,
                    )
                    analyse_pkg_risks(
                        project_type,
                        scoped_pkgs,
                        args.private_ns,
                        risk_results,
                        risk_report_file,
                    )
                except Exception as e:
                    LOG.error(e)
                    LOG.error("Risk audit was not successful")
            else:
                console.print(
                    Panel(
                        "Depscan supports OSS Risk audit for this project.\nTo enable set the environment variable [bold]ENABLE_OSS_RISK=true[/bold]",
                        title="New Feature",
                        expand=False,
                    ))
        if project_type in type_audit_map.keys():
            LOG.info("Performing remote audit for {} of type {}".format(
                src_dir, project_type))
            LOG.debug(f"No of packages {len(pkg_list)}")
            try:
                audit_results = audit(project_type, pkg_list, report_file)
                if audit_results:
                    LOG.debug(
                        f"Remote audit yielded {len(audit_results)} results")
                    results = results + audit_results
            except Exception as e:
                LOG.error("Remote audit was not successful")
                LOG.error(e)
                results = None
        # In case of docker, check if there are any npm packages that can be audited remotely
        if project_type in ("podman", "docker"):
            npm_pkg_list = get_pkg_by_type(pkg_list, "npm")
            if npm_pkg_list:
                LOG.debug(f"No of packages {len(npm_pkg_list)}")
                try:
                    audit_results = audit("nodejs", npm_pkg_list, report_file)
                    if audit_results:
                        LOG.debug(
                            f"Remote audit yielded {len(audit_results)} results"
                        )
                        results = results + audit_results
                except Exception as e:
                    LOG.error("Remote audit was not successful")
                    LOG.error(e)
        if not dbLib.index_count(db["index_file"]):
            run_cacher = True
        else:
            LOG.debug("Vulnerability database loaded from {}".format(
                config.vdb_bin_file))
        sources_list = [OSVSource(), NvdSource()]
        if os.environ.get("GITHUB_TOKEN"):
            sources_list.insert(0, GitHubSource())
        if run_cacher:
            for s in sources_list:
                LOG.debug("Refreshing {}".format(s.__class__.__name__))
                s.refresh()
                run_cacher = False
        elif args.sync:
            for s in sources_list:
                LOG.debug("Syncing {}".format(s.__class__.__name__))
                s.download_recent()
                run_cacher = False
        LOG.debug("Vulnerability database contains {} records".format(
            dbLib.index_count(db["index_file"])))
        LOG.info("Performing regular scan for {} using plugin {}".format(
            src_dir, project_type))
        vdb_results, pkg_aliases, sug_version_dict = scan(
            db, project_type, pkg_list, args.suggest)
        if vdb_results:
            results = results + vdb_results
        # Summarise and print results
        summary = summarise(
            project_type,
            results,
            pkg_aliases,
            sug_version_dict,
            scoped_pkgs,
            report_file,
            True,
        )
        if summary and not args.noerror and len(project_types_list) == 1:
            # Hard coded build break logic for now
            if summary.get("CRITICAL", 0) > 0:  # default avoids comparing None > 0
                sys.exit(1)
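
The closing block is the build-break contract: a CRITICAL finding makes the process exit non-zero so CI can fail the step. A hedged sketch of consuming that from a pipeline script; the flag spellings follow dep-scan's documented CLI, but verify them against the installed version:

import subprocess
import sys

# Hypothetical CI step: the depscan exit status is the only interface used.
result = subprocess.run(
    ["depscan", "--src", ".", "--report_file", "reports/depscan.json"]
)
if result.returncode != 0:
    sys.exit("Dependency scan reported CRITICAL vulnerabilities")
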
Example #5
def main():
    args = build_args()
    if not args.no_banner:
        print(at_logo, flush=True)
    # Set logging level
    if os.environ.get("SCAN_DEBUG_MODE") == "debug":
        LOG.setLevel(logging.DEBUG)
    src_dir = args.src_dir
    if not args.src_dir:
        src_dir = os.getcwd()
    db = dbLib.get()
    run_cacher = args.cache
    areport_file = (
        args.report_file
        if args.report_file
        else os.path.join(src_dir, "reports", "depscan.json")
    )
    reports_dir = os.path.dirname(areport_file)
    # Create reports directory
    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    # Detect the project types and perform the right type of scan
    if args.project_type:
        project_types_list = args.project_type.split(",")
    else:
        project_types_list = utils.detect_project_type(src_dir)
    if len(project_types_list) > 1:
        LOG.debug("Multiple project types found: {}".format(project_types_list))
    for project_type in project_types_list:
        sug_version_dict = {}
        pkg_aliases = {}
        report_file = areport_file.replace(".json", "-{}.json".format(project_type))
        LOG.info("=" * 80)
        bom_file = os.path.join(reports_dir, "bom-" + project_type + ".json")
        creation_status = create_bom(project_type, bom_file, src_dir)
        if not creation_status:
            LOG.debug("Bom file {} was not created successfully".format(bom_file))
            continue
        LOG.debug("Scanning using the bom file {}".format(bom_file))
        pkg_list = get_pkg_list(bom_file)
        if not pkg_list:
            LOG.debug("No packages found in the project!")
            continue
        if not args.no_license_scan:
            licenses_results = bulk_lookup(
                build_license_data(license_data_dir), pkg_list=pkg_list
            )
            license_report_file = os.path.join(
                reports_dir, "license-" + project_type + ".json"
            )
            analyse_licenses(
                project_type,
                licenses_results,
                license_report_file
            )
        if project_type in type_audit_map.keys():
            LOG.info(
                "Performing remote audit for {} of type {}".format(
                    src_dir, project_type
                )
            )
            LOG.debug(f"No of packages {len(pkg_list)}")
            results = audit(project_type, pkg_list, report_file)
        else:
            if not dbLib.index_count(db["index_file"]):
                run_cacher = True
            else:
                LOG.debug(
                    "Vulnerability database loaded from {}".format(config.vdb_bin_file)
                )
            sources_list = [NvdSource()]
            if os.environ.get("GITHUB_TOKEN"):
                sources_list.insert(0, GitHubSource())
            else:
                LOG.info(
                    "To use GitHub advisory source please set the environment variable GITHUB_TOKEN!"
                )
            if run_cacher:
                for s in sources_list:
                    LOG.debug("Refreshing {}".format(s.__class__.__name__))
                    s.refresh()
            elif args.sync:
                for s in sources_list:
                    LOG.debug("Syncing {}".format(s.__class__.__name__))
                    s.download_recent()
            LOG.debug(
                "Vulnerability database contains {} records".format(
                    dbLib.index_count(db["index_file"])
                )
            )
            LOG.info(
                "Performing regular scan for {} using plugin {}".format(
                    src_dir, project_type
                )
            )
            results, pkg_aliases, sug_version_dict = scan(db, pkg_list, args.suggest)
        # Summarise and print results
        summary = summarise(
            project_type, results, pkg_aliases, sug_version_dict, report_file, True
        )
        if summary and not args.noerror and len(project_types_list) == 1:
            # Hard coded build break logic for now
            if summary.get("CRITICAL", 0) > 0:  # default avoids comparing None > 0
                sys.exit(1)
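
This variant raises the log level when SCAN_DEBUG_MODE is set; the variable name and value are taken verbatim from the check at the top of main():

import os

# Enable the LOG.debug output before invoking main().
os.environ["SCAN_DEBUG_MODE"] = "debug"
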
Example #6
def test_download_all():
    nvdlatest = NvdSource()
    data = nvdlatest.download_all()
    assert len(data) > 128000
Example #7
def test_nvd_download():
    nvdlatest = NvdSource()
    data = nvdlatest.download_recent()
    assert len(data) > 300
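
Examples #6 and #7 download live NVD data, so they are slow and need network access. A common way to keep them out of offline runs is an opt-in skip marker; the environment variable and import path here are assumptions, not from the source:

import os

import pytest

from vdb.lib.nvd import NvdSource  # import path is an assumption

network = pytest.mark.skipif(
    not os.environ.get("RUN_NETWORK_TESTS"),
    reason="requires network access to the NVD feeds",
)


@network
def test_nvd_download():
    data = NvdSource().download_recent()
    assert len(data) > 300
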
Example #8
def main():
    args = build_args()
    if not args.no_banner:
        print(at_logo)
    src_dir = args.src_dir
    if not args.src_dir:
        src_dir = os.getcwd()
    db = dbLib.get()
    run_cacher = args.cache
    areport_file = (
        args.report_file
        if args.report_file
        else os.path.join(src_dir, "reports", "depscan.json")
    )
    reports_dir = os.path.dirname(areport_file)
    # Create reports directory
    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    # Detect the project types and perform the right type of scan
    if args.project_type:
        project_types_list = args.project_type.split(",")
    else:
        project_types_list = utils.detect_project_type(src_dir)
    if len(project_types_list) > 1:
        LOG.debug("Multiple project types found: {}".format(project_types_list))
    for project_type in project_types_list:
        sug_version_dict = {}
        pkg_aliases = {}
        report_file = areport_file.replace(".json", "-{}.json".format(project_type))
        risk_report_file = areport_file.replace(
            ".json", "-risk.{}.json".format(project_type)
        )
        LOG.info("=" * 80)
        creation_status = False
        if args.bom and os.path.exists(args.bom):
            bom_file = args.bom
            creation_status = True
        else:
            bom_file = os.path.join(reports_dir, "bom-" + project_type + ".json")
            creation_status = create_bom(project_type, bom_file, src_dir)
        if not creation_status:
            LOG.debug("Bom file {} was not created successfully".format(bom_file))
            continue
        LOG.debug("Scanning using the bom file {}".format(bom_file))
        pkg_list = get_pkg_list(bom_file)
        if not pkg_list:
            LOG.debug("No packages found in the project!")
            continue
        scoped_pkgs = utils.get_pkgs_by_scope(pkg_list)
        if not args.no_license_scan:
            licenses_results = bulk_lookup(
                build_license_data(license_data_dir), pkg_list=pkg_list
            )
            license_report_file = os.path.join(
                reports_dir, "license-" + project_type + ".json"
            )
            analyse_licenses(project_type, licenses_results, license_report_file)
        if project_type in risk_audit_map.keys():
            if args.risk_audit:
                console.print(
                    Panel(
                        f"Performing OSS Risk Audit for packages from {src_dir}\nNo of packages [bold]{len(pkg_list)}[/bold]. This will take a while ...",
                        title="OSS Risk Audit",
                        expand=False,
                    )
                )
                try:
                    risk_results = risk_audit(
                        project_type, args.private_ns, pkg_list, risk_report_file
                    )
                    analyse_pkg_risks(
                        project_type, args.private_ns, risk_results, risk_report_file
                    )
                except Exception as e:
                    LOG.error(e)
                    LOG.error("Risk audit was not successful")
                    risk_results = None
            else:
                console.print(
                    Panel(
                        "Depscan supports OSS Risk audit for this project.\nTo enable set the environment variable [bold]ENABLE_OSS_RISK=true[/bold]",
                        title="New Feature",
                        expand=False,
                    )
                )
        if project_type in type_audit_map.keys():
            LOG.info(
                "Performing remote audit for {} of type {}".format(
                    src_dir, project_type
                )
            )
            LOG.debug(f"No of packages {len(pkg_list)}")
            try:
                results = audit(project_type, pkg_list, report_file)
            except Exception as e:
                LOG.error("Remote audit was not successful")
                LOG.error(e)
                results = None
        else:
            if not dbLib.index_count(db["index_file"]):
                run_cacher = True
            else:
                LOG.debug(
                    "Vulnerability database loaded from {}".format(config.vdb_bin_file)
                )
            sources_list = [NvdSource()]
            if os.environ.get("GITHUB_TOKEN"):
                sources_list.insert(0, GitHubSource())
            else:
                LOG.info(
                    "To use GitHub advisory source please set the environment variable GITHUB_TOKEN!"
                )
            if run_cacher:
                for s in sources_list:
                    LOG.debug("Refreshing {}".format(s.__class__.__name__))
                    s.refresh()
            elif args.sync:
                for s in sources_list:
                    LOG.debug("Syncing {}".format(s.__class__.__name__))
                    s.download_recent()
            LOG.debug(
                "Vulnerability database contains {} records".format(
                    dbLib.index_count(db["index_file"])
                )
            )
            LOG.info(
                "Performing regular scan for {} using plugin {}".format(
                    src_dir, project_type
                )
            )
            results, pkg_aliases, sug_version_dict = scan(
                db, project_type, pkg_list, args.suggest
            )
        # Summarise and print results
        summary = summarise(
            project_type,
            results,
            pkg_aliases,
            sug_version_dict,
            scoped_pkgs,
            report_file,
            True,
        )
        if summary and not args.noerror and len(project_types_list) == 1:
            # Hard coded build break logic for now
            if summary.get("CRITICAL", 0) > 0:  # default avoids comparing None > 0
                sys.exit(1)
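
As in Examples #4 and #5, the GitHub advisory source is only consulted when GITHUB_TOKEN is present; a minimal sketch of supplying it (the placeholder value is hypothetical):

import os

# Read by the code above via os.environ.get("GITHUB_TOKEN"); any token that
# can query GitHub's securityAdvisories GraphQL API would do.
os.environ["GITHUB_TOKEN"] = "<personal-access-token>"
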
Example #9
    def convert(self, cve_data):
        """Convert the GitHub advisory data into Vulnerability objects
        """
        ret_data = []
        if cve_data.get("errors"):
            return ret_data, None
        if cve_data.get("message") and cve_data.get("message") == "Bad credentials":
            LOG.warning("GITHUB_TOKEN environment variable is invalid!")
            return ret_data, None
        page_info = cve_data["data"]["securityAdvisories"].get("pageInfo")
        for cve in cve_data["data"]["securityAdvisories"]["nodes"]:
            # Skip this CVE if it has been withdrawn
            if cve.get("withdrawnAt"):
                continue
            cve_id = None
            assigner = "*****@*****.**"
            references = []
            for r in cve["references"]:
                references.append({"url": r["url"], "name": r["url"]})
            for id in cve["identifiers"]:
                if id["type"] == "CVE":
                    cve_id = id["value"]
            if not cve_id:
                cve_id = cve["ghsaId"]
                assigner = "@github"
            for p in cve["vulnerabilities"]["nodes"]:
                vendor = p["package"]["ecosystem"]
                product = p["package"]["name"]
                if ":" in product or "/" in product:
                    tmpA = re.split(r"[/|:]", product)
                    # This extracts the correct vendor based on the namespace
                    # Eg: org.springframework:spring-webflux would result in
                    # vendor: org.springframework
                    # product: spring-webflux
                    vendor = tmpA[0]
                    product = tmpA[-1]
                version = p["vulnerableVersionRange"]
                (
                    version_start_including,
                    version_end_including,
                    version_start_excluding,
                    version_end_excluding,
                ) = self.get_version_range(version)
                top_fix_version = p.get("firstPatchedVersion")
                if not top_fix_version or not top_fix_version.get("identifier"):
                    top_fix_version = {"identifier": ""}
                fix_version = top_fix_version.get("identifier", {})
                (
                    fix_version_start_including,
                    fix_version_end_including,
                    fix_version_start_excluding,
                    fix_version_end_excluding,
                ) = self.get_version_range(fix_version)

                severity = p["severity"]
                score, severity, vectorString, attackComplexity = get_default_cve_data(
                    severity
                )
                exploitabilityScore = score
                description = """# {}
{}
            """.format(
                    cve.get("summary"), cve.get("description")
                )
                tdata = config.CVE_TPL % dict(
                    cve_id=cve_id,
                    cwe_id="UNKNOWN",
                    assigner=assigner,
                    references=orjson.dumps(references).decode("utf-8"),
                    description="",
                    vectorString=vectorString,
                    vendor=vendor.lower(),
                    product=product.lower(),
                    version="*",
                    version_start_including=version_start_including,
                    version_end_including=version_end_including,
                    version_start_excluding=version_start_excluding,
                    version_end_excluding=version_end_excluding,
                    fix_version_start_including=fix_version_start_including,
                    fix_version_end_including=fix_version_end_including,
                    fix_version_start_excluding=fix_version_start_excluding,
                    fix_version_end_excluding=fix_version_end_excluding,
                    severity=severity,
                    attackComplexity=attackComplexity,
                    score=score,
                    exploitabilityScore=exploitabilityScore,
                    publishedDate=cve["publishedAt"],
                    lastModifiedDate=cve["updatedAt"],
                )
                try:
                    tdata_json = orjson.loads(tdata)
                    vuln = NvdSource.convert_vuln(tdata_json)
                    vuln.description = description
                    ret_data.append(vuln)
                except Exception as e:
                    LOG.debug(e)
        return ret_data, page_info
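
GitHub expresses vulnerableVersionRange as comparator strings such as ">= 1.0.0, < 1.2.3", and get_version_range has to map them onto the four NVD-style boundary fields used in the template. A toy re-implementation for illustration only, not the project's actual code:

def get_version_range(version_str):
    # Map a GitHub comparator string onto
    # (start_including, end_including, start_excluding, end_excluding).
    start_inc = end_inc = start_exc = end_exc = ""
    for part in str(version_str or "").split(","):
        part = part.strip()
        if part.startswith(">="):
            start_inc = part[2:].strip()
        elif part.startswith("<="):
            end_inc = part[2:].strip()
        elif part.startswith(">"):
            start_exc = part[1:].strip()
        elif part.startswith("<"):
            end_exc = part[1:].strip()
        elif part.startswith("="):
            # "= 1.0.0" pins an exact release
            start_inc = end_inc = part[1:].strip()
        elif part:
            start_inc = end_inc = part
    return start_inc, end_inc, start_exc, end_exc
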
Example #10
    def to_vuln(self, v, ret_data):
        assigner = "@npm"
        # Iterate the cve list if available
        cves = v.get("cves")
        if not cves:
            cves = ["{}-{}".format("NPM", v.get("id"))]
        for cve_id in cves:
            publishedDate = v["created"]
            lastModifiedDate = v["updated"]
            title = v.get("title", "")
            overview = v.get("overview", "")
            recommendation = v.get("recommendation", "")
            description = """# {}
{}
{}
            """.format(title, overview, recommendation)
            references = ([{
                "name": "npm advisory",
                "url": v.get("url")
            }] if v.get("url") else [])
            if v.get("references"):
                references = convert_md_references(v.get("references"))
            severity = v.get("severity")
            vendor = "npm"
            product = v["module_name"]
            score, severity, vectorString, attackComplexity = get_default_cve_data(
                severity)
            exploitabilityScore = score
            if v.get("metadata", {}).get("exploitability"):
                exploitabilityScore = v.get("metadata").get("exploitability")
            cwe_id = v.get("cwe")
            version = v["vulnerable_versions"]
            fix_version = v.get("patched_versions")
            version_ranges = self.get_version_ranges(version)
            fix_version_ranges = self.get_version_ranges(fix_version)
            vr = 0
            for ver in version_ranges:
                version_start_including = ver["version_start_including"]
                version_end_including = ver["version_end_including"]
                version_start_excluding = ver["version_start_excluding"]
                version_end_excluding = ver["version_end_excluding"]
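                # If fix_version_ranges is empty, either branch of this
                # conditional raises IndexError; the advisory data presumably
                # always pairs vulnerable ranges with at least one patch range.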
                top_fix_version = (fix_version_ranges[vr]
                                   if len(fix_version_ranges) > vr else
                                   fix_version_ranges[0])
                fix_version_start_including = top_fix_version.get(
                    "version_start_including", "")
                fix_version_end_including = top_fix_version.get(
                    "version_end_including", "")
                fix_version_start_excluding = top_fix_version.get(
                    "version_start_excluding", "")
                fix_version_end_excluding = top_fix_version.get(
                    "version_end_excluding", "")
                description = fix_text(description)
                tdata = config.CVE_TPL % dict(
                    cve_id=cve_id,
                    cwe_id=cwe_id,
                    assigner=assigner,
                    references=orjson.dumps(references).decode("utf-8"),
                    description="",
                    vectorString=vectorString,
                    vendor=vendor,
                    product=product,
                    version="*",
                    version_start_including=version_start_including,
                    version_end_including=version_end_including,
                    version_start_excluding=version_start_excluding,
                    version_end_excluding=version_end_excluding,
                    fix_version_start_including=fix_version_start_including,
                    fix_version_end_including=fix_version_end_including,
                    fix_version_start_excluding=fix_version_start_excluding,
                    fix_version_end_excluding=fix_version_end_excluding,
                    severity=severity,
                    attackComplexity=attackComplexity,
                    score=score,
                    exploitabilityScore=exploitabilityScore,
                    publishedDate=publishedDate,
                    lastModifiedDate=lastModifiedDate,
                )
                try:
                    vuln = NvdSource.convert_vuln(orjson.loads(tdata))
                    vuln.description = description
                    ret_data.append(vuln)
                except Exception:
                    pass
                vr = vr + 1
        return ret_data
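
A hedged sketch of driving to_vuln() with a hand-written advisory record; the keys mirror exactly what the method reads above, the values are illustrative, and the vdb.lib.npm import path is an assumption:

from vdb.lib.npm import NpmSource  # import path is an assumption

# Hypothetical advisory carrying the fields to_vuln() reads; not real data.
advisory = {
    "id": 1234,
    "created": "2021-01-01T00:00:00.000Z",
    "updated": "2021-01-02T00:00:00.000Z",
    "title": "Prototype Pollution",
    "overview": "The merge helper mutates Object.prototype.",
    "recommendation": "Upgrade to a patched release.",
    "severity": "high",
    "module_name": "some-package",
    "cwe": "CWE-1321",
    "vulnerable_versions": "<1.2.3",
    "patched_versions": ">=1.2.3",
}
vulns = NpmSource().to_vuln(advisory, [])  # appends converted entries to the list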