Example #1
    def run_python_package_checkers(self, filename, lines):
        """
        This generator runs only for python packages.
        There are no actual checkers.
        The ProductInfo is computed without the help of any checkers from PKG-INFO or METADATA.
        """
        try:
            product = search(compile(r"^Name: (.+)$", MULTILINE),
                             lines).group(1)
            version = search(compile(r"^Version: (.+)$", MULTILINE),
                             lines).group(1)

            cve_db = CVEDB()
            vendor_package_pair = cve_db.get_vendor_product_pairs(product)

            if vendor_package_pair != []:
                vendor = vendor_package_pair[0]["vendor"]
                file_path = "".join(self.file_stack)

                self.logger.info(f"{file_path} is {product} {version}")

                yield ProductInfo(vendor, product, version), file_path

        # Some packages ship a METADATA file whose contents differ from what the tool expects
        except AttributeError:
            self.logger.debug(f"{filename} is an invalid METADATA/PKG-INFO")

        self.logger.debug(f"Done scanning file: {filename}")
Example #2
    def parse_data(self, fields: Set[str], data: Iterable) -> None:
        required_fields = {"vendor", "product", "version"}
        missing_fields = required_fields - fields
        if missing_fields:
            with ErrorHandler(mode=self.error_mode):
                raise MissingFieldsError(
                    f"{missing_fields} are required fields")

        for row in data:
            product_info = ProductInfo(row["vendor"].strip(),
                                       row["product"].strip(),
                                       row["version"].strip())
            self.parsed_data[product_info][
                row.get("cve_number", "").strip() or "default"
            ] = {
                "remarks": Remarks(str(row.get("remarks", "")).strip()),
                "comments": row.get("comments", "").strip(),
                "severity": row.get("severity", "").strip(),
            }
            self.parsed_data[product_info]["paths"] = set(
                map(lambda x: x.strip(),
                    row.get("paths", "").split(",")))
Example #3
    def parse_data(self):
        for row in self.package_names_with_vendor:
            product_info = ProductInfo(row["vendor"], row["name"].lower(),
                                       row["version"])
            self.parsed_data_with_vendor[product_info][
                row.get("cve_number", "").strip() or "default"] = {
                    "remarks": Remarks.NewFound,
                    "comments": row.get("comments", "").strip(),
                    "severity": row.get("severity", "").strip(),
                }
            self.parsed_data_with_vendor[product_info]["paths"] = {""}
Example #4
    def scan_file(self) -> Dict[ProductInfo, TriageData]:
        LOGGER.info(f"Processing SBOM {self.filename} of type {self.type.upper()}")
        try:
            if self.type == "spdx":
                spdx = SPDXParser()
                modules = spdx.parse(self.filename)
            elif self.type == "cyclonedx":
                cyclone = CycloneParser()
                modules = cyclone.parse(self.filename)
            elif self.type == "swid":
                swid = SWIDParser()
                modules = swid.parse(self.filename)
            else:
                modules = []
        except (KeyError, FileNotFoundError, ET.ParseError) as e:
            LOGGER.debug(e, exc_info=True)
            modules = []

        LOGGER.debug(
            f"The number of modules identified in SBOM - {len(modules)}\n{modules}"
        )

        # Now process the list of modules to create [vendor, product, version] tuples
        parsed_data: List[ProductInfo] = []
        for m in modules:
            product, version = m[0], m[1]
            if version != "":
                # Add the vendor to complete the product record
                vendor = self.get_vendor(product)
                if vendor is not None:
                    parsed_data.append(ProductInfo(vendor, product, version))

        for row in parsed_data:
            self.sbom_data[row]["default"] = {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": "",
            }
            self.sbom_data[row]["paths"] = set(map(lambda x: x.strip(), "".split(",")))

        LOGGER.debug(f"SBOM Data {self.sbom_data}")
        return self.sbom_data
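
Usage note: the if/elif chain above could equally be table-driven. A sketch of that alternative, with a stub standing in for the real SPDX/CycloneDX/SWID parsers:

# Sketch of table-driven dispatch equivalent to the if/elif chain above.
# _StubParser stands in for SPDXParser/CycloneParser/SWIDParser.
class _StubParser:
    def parse(self, filename):
        return [("glibc", "2.11.1")]  # (product, version) pairs, as used above

PARSERS = {"spdx": _StubParser, "cyclonedx": _StubParser, "swid": _StubParser}

def parse_sbom(sbom_type, filename):
    parser_cls = PARSERS.get(sbom_type)
    # Unknown types fall back to an empty module list, like the else branch above
    return parser_cls().parse(filename) if parser_cls else []

print(parse_sbom("spdx", "example.spdx"))  # [('glibc', '2.11.1')]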
Example #5
class TestCVEScanner:
    @pytest.mark.parametrize(
        "version, expected_result",
        (
            ("1.1.0f", "1.1.0.5"),
            ("1.1.0", "1.1.0"),
        ),
    )
    def test_openssl_convert(self, version: str, expected_result: str):
        scanner = CVEScanner()
        assert scanner.openssl_convert(version) == expected_result

    @pytest.mark.parametrize(
        "product, expected_result, between_result",
        (
            (ProductInfo(vendor="", product="glibc",
                         version="2.11.1"), "2.11.1", ""),
            (
                ProductInfo(vendor="", product="glibc", version="2.11.1_pre1"),
                "2.11.1",
                "",
            ),
            (
                ProductInfo(vendor="", product="openssl", version="1.1.0h"),
                "1.1.0h",
                "1.1.0.7",
            ),
            (
                ProductInfo(
                    vendor="", product="openssl", version="1.1.0h_kali2"),
                "1.1.0h",
                "1.1.0.7",
            ),
            (ProductInfo(vendor="", product="openssl", version=""), "", ""),
            (ProductInfo(vendor="", product="php",
                         version="2:7.4"), "7.4", ""),
            (ProductInfo(vendor="", product="php",
                         version="2:7.4_deb0"), "7.4", ""),
        ),
    )
    def test_canonical_convert(self, product: ProductInfo,
                               expected_result: str, between_result: str):
        scanner = CVEScanner()
        res1, res2 = scanner.canonical_convert(product)
        assert str(res1) == expected_result
        assert str(res2) == between_result
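
Usage note: the parametrized cases pin down the letter handling: "1.1.0f" maps to "1.1.0.5" and "1.1.0h" to "1.1.0.7", i.e. the trailing letter becomes its zero-based position in the alphabet. A sketch consistent with those cases (not necessarily the tool's exact implementation):

# Sketch of a conversion consistent with the test cases above:
# "1.1.0f" -> "1.1.0.5", "1.1.0h" -> "1.1.0.7" (letter -> zero-based index).
def openssl_convert(version):
    if version and version[-1].isalpha():
        return f"{version[:-1]}.{ord(version[-1].lower()) - ord('a')}"
    return version

assert openssl_convert("1.1.0f") == "1.1.0.5"
assert openssl_convert("1.1.0") == "1.1.0"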
Example #6
    def find_java_vendor(self, product, version):
        """Find vendor for Java product"""
        vendor_package_pair = self.cve_db.get_vendor_product_pairs(product)
        # If no match, try an alternative product name:
        # Apache product names are stored as A_B in the NVD database but often called A-B,
        # and some packages have -parent appended to the product, which is not in the NVD database
        if vendor_package_pair == [] and "-" in product:
            self.logger.debug(f"Try alternative product {product}")
            # Remove parent appendage
            if "-parent" in product:
                product = product.replace("-parent", "")
            product = product.replace("-", "_")
            vendor_package_pair = self.cve_db.get_vendor_product_pairs(product)
        if vendor_package_pair != []:
            vendor = vendor_package_pair[0]["vendor"]
            file_path = "".join(self.file_stack)
            self.logger.debug(f"{file_path} {product} {version} by {vendor}")
            return ProductInfo(vendor, product, version), file_path
        return None, None
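
Usage note: the alternative-name handling can be isolated: Apache products stored as A_B in the NVD are often called A-B, and a trailing -parent is dropped. A standalone sketch of just that normalization:

# Standalone sketch of the product-name normalization tried above when the
# first NVD lookup comes back empty (A-B -> A_B, "-parent" dropped).
def normalize_java_product(product):
    if "-parent" in product:
        product = product.replace("-parent", "")
    return product.replace("-", "_")

assert normalize_java_product("commons-io") == "commons_io"
assert normalize_java_product("log4j-parent") == "log4j"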
Example #7
    def run_checkers(self, filename, lines):
        # Instantiate and run every registered checker against the file contents
        for (dummy_checker_name, checker) in self.checkers.items():
            checker = checker()
            result = checker.get_version(lines, filename)
            # Normalize to a list so we can iterate over all results,
            # even when a checker returns just a single hit
            if "is_or_contains" in result:
                results = [result]
            else:
                results = result

            for result in results:
                if "is_or_contains" in result:
                    version = "UNKNOWN"
                    if "version" in result and result["version"] != "UNKNOWN":
                        version = result["version"]
                    elif result["version"] == "UNKNOWN":
                        file_path = "".join(self.file_stack)
                        self.logger.warning(
                            f"{dummy_checker_name} was detected with version UNKNOWN in file {file_path}"
                        )
                    else:
                        self.logger.error(
                            f"No version info for {dummy_checker_name}")

                    if version != "UNKNOWN":
                        file_path = "".join(self.file_stack)
                        self.logger.info(
                            f'{file_path} {result["is_or_contains"]} {dummy_checker_name} {version}'
                        )
                        for vendor, product in checker.VENDOR_PRODUCT:
                            yield ProductInfo(vendor, product,
                                              version), file_path

        self.logger.debug(f"Done scanning file: {filename}")
Example #8
class TestOutputEngine(unittest.TestCase):
    """ Test the OutputEngine class functions """

    MOCK_OUTPUT = {
        ProductInfo("vendor0", "product0", "1.0"):
        CVEData(
            cves=[
                CVE("CVE-1234-1234", "MEDIUM", score=4.2, cvss_version=2),
                CVE("CVE-1234-1234", "LOW", score=1.2, cvss_version=2),
            ],
            paths={""},
        ),
        ProductInfo("vendor0", "product0", "2.8.6"):
        CVEData(cves=[CVE("CVE-1234-1234", "LOW", score=2.5, cvss_version=3)],
                paths={""}),
        ProductInfo("vendor1", "product1", "3.2.1.0"):
        CVEData(cves=[CVE("CVE-1234-1234", "HIGH", score=7.5, cvss_version=2)],
                paths={""}),
    }

    FORMATTED_OUTPUT = [
        {
            "vendor": "vendor0",
            "product": "product0",
            "version": "1.0",
            "cve_number": "CVE-1234-1234",
            "severity": "MEDIUM",
            "score": "4.2",
            "cvss_version": "2",
            "paths": "",
            "remarks": "NewFound",
            "comments": "",
        },
        {
            "vendor": "vendor0",
            "product": "product0",
            "version": "1.0",
            "cve_number": "CVE-1234-1234",
            "severity": "LOW",
            "score": "1.2",
            "cvss_version": "2",
            "paths": "",
            "remarks": "NewFound",
            "comments": "",
        },
        {
            "vendor": "vendor0",
            "product": "product0",
            "version": "2.8.6",
            "cve_number": "CVE-1234-1234",
            "severity": "LOW",
            "score": "2.5",
            "cvss_version": "3",
            "paths": "",
            "remarks": "NewFound",
            "comments": "",
        },
        {
            "vendor": "vendor1",
            "product": "product1",
            "version": "3.2.1.0",
            "cve_number": "CVE-1234-1234",
            "severity": "HIGH",
            "score": "7.5",
            "cvss_version": "2",
            "paths": "",
            "remarks": "NewFound",
            "comments": "",
        },
    ]

    def setUp(self) -> None:
        self.output_engine = OutputEngine(all_cve_data=self.MOCK_OUTPUT,
                                          scanned_dir="",
                                          filename="",
                                          themes_dir="")
        self.mock_file = tempfile.NamedTemporaryFile("w+", encoding="utf-8")

    def tearDown(self) -> None:
        self.mock_file.close()

    def test_formatted_output(self):
        """ Test reformatting products """
        self.assertEqual(format_output(self.MOCK_OUTPUT),
                         self.FORMATTED_OUTPUT)

    def test_output_json(self):
        """ Test formatting output as JSON """
        output_json(self.MOCK_OUTPUT, self.mock_file)
        self.mock_file.seek(0)  # reset file position
        self.assertEqual(json.load(self.mock_file), self.FORMATTED_OUTPUT)

    def test_output_csv(self):
        """ Test formatting output as CSV """
        output_csv(self.MOCK_OUTPUT, self.mock_file)
        self.mock_file.seek(0)  # reset file position
        reader = csv.DictReader(self.mock_file)
        expected_value = [dict(x) for x in reader]
        self.assertEqual(expected_value, self.FORMATTED_OUTPUT)

    def test_output_console(self):
        """Test Formatting Output as console"""

        console = Console(file=self.mock_file)
        output_console(self.MOCK_OUTPUT, console=console)

        expected_output = "│ vendor0 │ product0 │ 1.0     │ CVE-1234-1234 │ MEDIUM   │ 4.2 (v2)             │\n│ vendor0 │ product0 │ 1.0     │ CVE-1234-1234 │ LOW      │ 1.2 (v2)             │\n│ vendor0 │ product0 │ 2.8.6   │ CVE-1234-1234 │ LOW      │ 2.5 (v3)             │\n│ vendor1 │ product1 │ 3.2.1.0 │ CVE-1234-1234 │ HIGH     │ 7.5 (v2)             │\n└─────────┴──────────┴─────────┴───────────────┴──────────┴──────────────────────┘\n"
        self.mock_file.seek(0)  # reset file position
        result = self.mock_file.read()
        self.assertIn(expected_output, result)

    def test_output_file(self):
        """Test file generation logic in output_file"""
        logger = logging.getLogger()

        with self.assertLogs(logger, logging.INFO) as cm:
            self.output_engine.output_file(output_type="json")

        filename = self.output_engine.filename

        contains_filename = os.path.isfile(filename)
        contains_msg = "Output stored at" in cm.output[0]

        # reset everything back
        os.remove(filename)
        self.output_engine.filename = ""

        self.assertTrue(contains_filename)
        self.assertTrue(contains_msg)

    def test_output_file_filename_already_exists(self):
        """Tests output_file when filename already exist"""

        # update the filename in output_engine
        self.output_engine.filename = "testfile.csv"

        # create a file with the same name as output_engine.filename
        with open("testfile.csv", "w") as f:
            f.write("testing")

        logger = logging.getLogger()

        # setup the context manager
        with self.assertLogs(logger, logging.INFO) as cm:
            self.output_engine.output_file(output_type="csv")

        # logs to check in cm
        msg_generate_filename = (
            "Generating a new filename with Default Naming Convention")
        msg_failed_to_write = "Failed to write at 'testfile.csv'. File already exists"

        # flags for logs
        contains_fail2write = False
        contains_gen_file = False

        # check if the logger contains msg
        for log in cm.output:
            if msg_generate_filename in log:
                contains_gen_file = True
            elif msg_failed_to_write in log:
                contains_fail2write = True

        # remove the generated files and reset updated variables
        os.remove("testfile.csv")
        os.remove(self.output_engine.filename)
        self.output_engine.filename = ""

        # assert
        self.assertTrue(contains_gen_file)
        self.assertTrue(contains_fail2write)

    def test_output_file_incorrect_filename(self):
        """Tests filenames that are incorrect or are not accessible"""

        # update the filename in output_engine
        self.output_engine.filename = "/not/a/good_filename"

        logger = logging.getLogger()

        # setup the context manager
        with self.assertLogs(logger, logging.INFO) as cm:
            self.output_engine.output_file(output_type="csv")

        # log to check
        msg_switch_back = "Switching Back to Default Naming Convention"

        # flags
        contains_sb = False

        for log in cm.output:
            if msg_switch_back in log:
                contains_sb = True

        # remove the generated files and reset updated variables
        os.remove(self.output_engine.filename)
        self.output_engine.filename = ""

        # assert
        self.assertTrue(contains_sb)
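
Usage note: the relationship between MOCK_OUTPUT and FORMATTED_OUTPUT is a flattening, one row per CVE per product. A hedged sketch of that step, assuming the CVEData.cves list and the CVE fields shown in the mock data (the real format_output also fills paths, remarks, and comments, elided here):

# Sketch of the flattening implied by MOCK_OUTPUT -> FORMATTED_OUTPUT above:
# one output row per CVE per product (paths/remarks/comments elided).
def flatten(all_cve_data):
    rows = []
    for product_info, cve_data in all_cve_data.items():
        for cve in cve_data.cves:
            rows.append({
                "vendor": product_info.vendor,
                "product": product_info.product,
                "version": product_info.version,
                "cve_number": cve.cve_number,
                "severity": cve.severity,
                "score": str(cve.score),
                "cvss_version": str(cve.cvss_version),
            })
    return rows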
Example #9
class TestMergeReports:

    INTERMEDIATE_PATH = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "json")
    MERGED_TRIAGE_PATH = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "json")

    MERGED_TRIAGE_DATA = {
        ProductInfo(vendor="libjpeg-turbo",
                    product="libjpeg-turbo",
                    version="2.0.1"): {
            "CVE-2018-19664": {
                "remarks": Remarks.Confirmed,
                "comments": "High priority need to resolve fast",
                "severity": "CRITICAL",
            },
            "CVE-2018-20330": {
                "remarks": Remarks.Unexplored,
                "comments": "Need to mitigate cves of this product",
                "severity": "HIGH",
            },
            "CVE-2020-17541": {
                "remarks": Remarks.Unexplored,
                "comments": "Need to mitigate cves of this product",
                "severity": "HIGH",
            },
            "paths": {""},
        }
    }

    MISSING_FIELD_REGEX = re.compile(r"({.+}) are required fields")

    @pytest.mark.parametrize(
        "filepaths, exception",
        (([os.path.join(INTERMEDIATE_PATH, "bad.json")], InvalidJsonError), ),
    )
    def test_invalid_file(self, filepaths, exception):
        merged_cves = MergeReports(merge_files=filepaths,
                                   error_mode=ErrorMode.FullTrace)
        with pytest.raises(exception):
            merged_cves.merge_intermediate()

    @pytest.mark.parametrize(
        "filepaths,missing_fields",
        (
            (
                [os.path.join(INTERMEDIATE_PATH, "bad_intermediate.json")],
                {"metadata", "report"},
            ),
            (
                [os.path.join(INTERMEDIATE_PATH, "bad_metadata.json")],
                REQUIRED_INTERMEDIATE_METADATA,
            ),
        ),
    )
    def test_missing_fields(self, filepaths, missing_fields):
        merged_cves = MergeReports(merge_files=filepaths,
                                   error_mode=ErrorMode.FullTrace)
        with pytest.raises(MissingFieldsError) as exc:
            merged_cves.merge_intermediate()
        match = self.MISSING_FIELD_REGEX.search(exc.value.args[0])
        raised_fields = match.group(1)

        assert missing_fields - eval(raised_fields) == set()

    @pytest.mark.parametrize(
        "filepaths, merged_data",
        ((
            [os.path.join(INTERMEDIATE_PATH, "test_intermediate.json")],
            MERGED_TRIAGE_DATA,
        ), ),
    )
    def test_valid_merge(self, filepaths, merged_data):

        merged_cves = MergeReports(merge_files=filepaths,
                                   error_mode=ErrorMode.FullTrace,
                                   score=0)
        merge_cve_scanner = merged_cves.merge_intermediate()
        with CVEScanner(score=0) as cve_scanner:
            for product_info, triage_data in merged_data.items():
                cve_scanner.get_cves(product_info, triage_data)

            assert merge_cve_scanner.all_cve_data == cve_scanner.all_cve_data

    @pytest.mark.parametrize(
        "filepaths",
        (([os.path.join(INTERMEDIATE_PATH, "test_intermediate.json")]), ),
    )
    def test_valid_cve_scanner_instance(self, filepaths):

        merged_cves = MergeReports(
            merge_files=filepaths,
            error_mode=ErrorMode.FullTrace,
        )
        merge_cve_scanner = merged_cves.merge_intermediate()

        assert isinstance(merge_cve_scanner, CVEScanner)
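
Usage note: a quick illustration of MISSING_FIELD_REGEX. The error message interpolates a Python set literal (see the raise in Example #2), and the test recovers it with eval so sets, not strings, are compared:

# Demonstration of MISSING_FIELD_REGEX on the message format raised in
# Example #2: the captured set literal is eval'd back into a set.
import re

MISSING_FIELD_REGEX = re.compile(r"({.+}) are required fields")
missing = {"metadata", "report"}
msg = f"{missing} are required fields"       # message format from Example #2
match = MISSING_FIELD_REGEX.search(msg)
print(eval(match.group(1)) == missing)       # True, regardless of set ordering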
Example #10
class TestPackageListParser:
    TXT_PATH = join(dirname(__file__), "txt")

    REQ_PARSED_TRIAGE_DATA = {
        ProductInfo(vendor="httplib2_project*",
                    product="httplib2",
                    version="0.18.1"): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
        ProductInfo(vendor="python*", product="requests", version="2.25.1"): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
        ProductInfo(vendor="html5lib*", product="html5lib", version="0.99"): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
    }

    # Find the installed versions of the Ubuntu packages under test
    UBUNTU_PACKAGE_VERSIONS = (
        subprocess.run(
            [
                "dpkg-query",
                "--show",
                "--showformat=${Version}\n",
                "bash",
                "binutils",
                "wget",
            ],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").splitlines()
        if "ubuntu" in distro.id()
        else ["dummy", "array", "for windows"]
    )

    UBUNTU_PARSED_TRIAGE_DATA = {
        ProductInfo(vendor="gnu*",
                    product="bash",
                    version=UBUNTU_PACKAGE_VERSIONS[0]): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
        ProductInfo(vendor="gnu*",
                    product="binutils",
                    version=UBUNTU_PACKAGE_VERSIONS[1]): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
        ProductInfo(vendor="gnu*",
                    product="wget",
                    version=UBUNTU_PACKAGE_VERSIONS[2]): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        },
    }

    @pytest.mark.parametrize("filepath", [join(TXT_PATH, "nonexistent.txt")])
    def test_nonexistent_txt(self, filepath):
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        with pytest.raises(FileNotFoundError):
            package_list.parse_list()

    @pytest.mark.parametrize("filepath, exception",
                             [(join(TXT_PATH, "empty.txt"), EmptyTxtError)])
    def test_empty_txt(self, filepath, exception):
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        with pytest.raises(exception):
            package_list.parse_list()

    @pytest.mark.parametrize(
        "filepath, exception",
        [(join(TXT_PATH, "not_txt.csv"), InvalidListError)])
    def test_not_txt(self, filepath, exception):
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        with pytest.raises(exception):
            package_list.parse_list()

    @pytest.mark.parametrize(
        "filepath, parsed_data",
        [(join(TXT_PATH, "test_requirements.txt"), REQ_PARSED_TRIAGE_DATA)],
    )
    def test_valid_requirements(self, filepath, parsed_data):
        # Packages are installed from test_requirements with pinned versions so the test passes
        subprocess.run(["pip", "install", "-r", filepath])
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        assert package_list.parse_list() == parsed_data
        # Update the packages back to latest
        subprocess.run(
            ["pip", "install", "httplib2", "requests", "html5lib", "-U"])

    @pytest.mark.skipif(
        distro.id() not in SUPPORTED_DISTROS,
        reason=f"Test for {','.join(SUPPORTED_DISTROS)} systems",
    )
    @pytest.mark.parametrize(
        "filepath",
        [(join(TXT_PATH, "test_broken_linux_list.txt"))],
    )
    def test_invalid_linux_list(self, filepath, caplog):
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        package_list.check_file()
        expected_output = ["Invalid Package found: br0s"]

        assert expected_output == [rec.message for rec in caplog.records]

    @pytest.mark.skipif(
        "ubuntu" not in distro.id(),
        reason="Test for Ubuntu systems",
    )
    @pytest.mark.parametrize(
        "filepath, parsed_data",
        [(join(TXT_PATH, "test_ubuntu_list.txt"), UBUNTU_PARSED_TRIAGE_DATA)],
    )
    def test_valid_ubuntu_list(self, filepath, parsed_data):
        package_list = PackageListParser(filepath,
                                         error_mode=ErrorMode.FullTrace)
        assert package_list.parse_list() == parsed_data
Example #11
class TestInputEngine:
    CSV_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "csv")
    JSON_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "json")
    PARSED_TRIAGE_DATA = {
        ProductInfo("haxx", "curl", "7.59.0"): {
            "default": {"comments": "", "remarks": Remarks.NewFound, "severity": ""},
            "paths": {""},
        },
        ProductInfo("haxx", "libcurl", "7.59.0"): {
            "default": {"comments": "", "remarks": Remarks.Unexplored, "severity": ""},
            "paths": {""},
        },
        ProductInfo("libjpeg-turbo", "libjpeg-turbo", "2.0.1"): {
            "CVE-2018-19664": {
                "comments": "High priority need to resolve fast",
                "remarks": Remarks.Confirmed,
                "severity": "CRITICAL",
            },
            "default": {
                "comments": "Need to mitigate cves of this product",
                "remarks": Remarks.Unexplored,
                "severity": "HIGH",
            },
            "paths": {""},
        },
        ProductInfo("mit", "kerberos", "1.15.1"): {
            "default": {"comments": "", "remarks": Remarks.Unexplored, "severity": ""},
            "paths": {""},
        },
        ProductInfo("mit", "kerberos_5", "5-1.15.1"): {
            "default": {"comments": "", "remarks": Remarks.Confirmed, "severity": ""},
            "paths": {""},
        },
        ProductInfo("ssh", "ssh2", "2.0"): {
            "default": {"comments": "", "remarks": Remarks.Mitigated, "severity": ""},
            "paths": {""},
        },
        ProductInfo("sun", "sunos", "5.4"): {
            "default": {"comments": "", "remarks": Remarks.Mitigated, "severity": ""},
            "paths": {""},
        },
    }
    MISSING_FIELD_REGEX = re.compile(
        r"({[' ,](([a-z])+[' ,]{1,4})+}) are required fields"
    )

    @pytest.mark.parametrize(
        "filepath",
        (
            os.path.join(CSV_PATH, "nonexistent.csv"),
            os.path.join(JSON_PATH, "nonexistent.json"),
        ),
    )
    def test_nonexistent_file(self, filepath):
        input_engine = InputEngine(filepath, error_mode=ErrorMode.FullTrace)
        with pytest.raises(FileNotFoundError):
            input_engine.parse_input()

    @pytest.mark.parametrize(
        "filepath, exception",
        (
            (os.path.join(CSV_PATH, "bad.csv"), InvalidCsvError),
            (os.path.join(JSON_PATH, "bad.json"), InvalidJsonError),
        ),
    )
    def test_invalid_file(self, filepath, exception):
        input_engine = InputEngine(filepath, error_mode=ErrorMode.FullTrace)
        with pytest.raises(exception):
            input_engine.parse_input()

    @pytest.mark.parametrize(
        "filepath, missing_fields",
        (
            (os.path.join(CSV_PATH, "bad_product.csv"), {"product"}),
            (
                os.path.join(CSV_PATH, "bad_heading.csv"),
                {"vendor", "product", "version"},
            ),
            (
                os.path.join(JSON_PATH, "bad_heading.json"),
                {"vendor", "product", "version"},
            ),
        ),
    )
    def test_missing_fields(self, filepath, missing_fields):
        input_engine = InputEngine(filepath, error_mode=ErrorMode.FullTrace)
        with pytest.raises(MissingFieldsError) as exc:
            input_engine.parse_input()

        match = self.MISSING_FIELD_REGEX.search(exc.value.args[0])
        raised_fields = match.group(1)

        assert missing_fields - eval(raised_fields) == set()

    @pytest.mark.parametrize(
        "filepath, parsed_data",
        (
            (os.path.join(CSV_PATH, "test_triage.csv"), PARSED_TRIAGE_DATA),
            (os.path.join(JSON_PATH, "test_triage.json"), PARSED_TRIAGE_DATA),
        ),
    )
    def test_valid_file(self, filepath, parsed_data):
        input_engine = InputEngine(filepath, error_mode=ErrorMode.FullTrace)
        assert dict(input_engine.parse_input()) == parsed_data
Example #12
class TestAvailableFixReport:
    @pytest.fixture(autouse=True)
    def arrange_data(self):
        check_json()

    @pytest.mark.skipif(LONG_TESTS() != 1,
                        reason="Skipping tests to reduce network calls")
    def test_long_debian_backport_fix_output(
            self, caplog: pytest.LogCaptureFixture) -> None:
        """Test Backported fix for Debian distros output on console with external API"""

        fixes = AvailableFixReport(self.MOCK_PSPP_CVE_DATA, "debian-bullseye",
                                   True)
        fixes.check_available_fix()
        expected_output = [
            "pspp: CVE-2018-20230 has backported fix in v1.2.0-3 release.",
            "pspp: CVE-2019-9211 has backported fix in v1.2.0-4 release.",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    def test_debian_backport_fix_output(
            self, mocker: MockerFixture,
            caplog: pytest.LogCaptureFixture) -> None:
        """Test Backported fix for Debian distros output on console"""

        fixes = AvailableFixReport(self.MOCK_PSPP_CVE_DATA, "debian-bullseye",
                                   True)
        mocker.patch(
            "cve_bin_tool.available_fix.debian_cve_tracker.DebianCVETracker.get_data",
            return_value=self.MOCK_DEBIAN_API,
        )
        fixes.check_available_fix()
        expected_output = [
            "pspp: CVE-2018-20230 has backported fix in v1.2.0-3 release.",
            "pspp: CVE-2019-9211 has backported fix in v1.2.0-4 release.",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    @pytest.mark.skipif(LONG_TESTS() != 1,
                        reason="Skipping tests to reduce network calls")
    def test_long_debian_available_fix_output(
            self, caplog: pytest.LogCaptureFixture) -> None:
        """Test Available fix for Debian distros output on console with external API"""

        fixes = AvailableFixReport(self.MOCK_AVAHI_CVE_DATA, "debian-bullseye",
                                   False)
        fixes.check_available_fix()
        expected_output = [
            "avahi: CVE-2010-2244 has available fix in v0.6.26-1 release.",
            "avahi: CVE-2011-1002 has available fix in v0.6.28-4 release.",
            "avahi: CVE-2017-6519 has available fix in v0.7-5 release.",
            "avahi: CVE-2021-26720 has available fix in v0.8-4 release.",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    def test_debian_available_fix_output(
            self, mocker: MockerFixture,
            caplog: pytest.LogCaptureFixture) -> None:
        """Test Available fix for Debian distros output on console"""

        fixes = AvailableFixReport(self.MOCK_AVAHI_CVE_DATA, "debian-bullseye",
                                   False)
        mocker.patch(
            "cve_bin_tool.available_fix.debian_cve_tracker.DebianCVETracker.get_data",
            return_value=self.MOCK_DEBIAN_API,
        )
        fixes.check_available_fix()
        expected_output = [
            "avahi: CVE-2010-2244 has available fix in v0.6.26-1 release.",
            "avahi: CVE-2011-1002 has available fix in v0.6.28-4 release.",
            "avahi: CVE-2017-6519 has available fix in v0.7-5 release.",
            "avahi: CVE-2021-26720 has available fix in v0.8-4 release.",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    @pytest.mark.skipif(LONG_TESTS() != 1,
                        reason="Skipping tests to reduce network calls")
    def test_long_redhat_available_fix_output(
            self, caplog: pytest.LogCaptureFixture) -> None:
        """Test Available fix for Redhat distros output on console with external API"""

        fixes = AvailableFixReport(self.MOCK_NODEJS_CVE_DATA, "rhel-8", False)
        fixes.check_available_fix()
        expected_output = [
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: libuv v1.41",
            "node.js: CVE-2021-22918 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22931 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22931 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22931 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22939 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22939 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22939 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22940 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22940 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22940 - Status: Not affected - Related package: nodejs v16",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    def test_redhat_available_fix_output(
            self, mocker: MockerFixture,
            caplog: pytest.LogCaptureFixture) -> None:
        """Test Available fix for Redhat distros output on console"""

        fixes = AvailableFixReport(self.MOCK_NODEJS_CVE_DATA, "rhel-8", False)
        mocker.patch(
            "cve_bin_tool.available_fix.redhat_cve_tracker.RedhatCVETracker.get_data",
            return_value=self.MOCK_RH_API,
        )
        fixes.check_available_fix()
        expected_output = [
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22918 - Status: Fixed - Fixed package: libuv v1.41",
            "node.js: CVE-2021-22918 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22931 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22931 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22931 - Status: Fixed - Fixed package: libuv v1.41",
            "node.js: CVE-2021-22931 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22939 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22939 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22939 - Status: Fixed - Fixed package: libuv v1.41",
            "node.js: CVE-2021-22939 - Status: Not affected - Related package: nodejs v16",
            "node.js: CVE-2021-22940 - Status: Fixed - Fixed package: nodejs v12",
            "node.js: CVE-2021-22940 - Status: Fixed - Fixed package: nodejs v14",
            "node.js: CVE-2021-22940 - Status: Fixed - Fixed package: libuv v1.41",
            "node.js: CVE-2021-22940 - Status: Not affected - Related package: nodejs v16",
        ]

        assert expected_output == [rec.message for rec in caplog.records]

    MOCK_PSPP_CVE_DATA = {
        ProductInfo(vendor="gnu", product="pspp", version="1.2.0"):
        CVEData(
            None,
            {
                "cves": [
                    CVE(
                        cve_number="CVE-2018-20230",
                        severity="HIGH",
                    ),
                    CVE(
                        cve_number="CVE-2019-9211",
                        severity="MEDIUM",
                    ),
                ],
            },
        )
    }

    MOCK_AVAHI_CVE_DATA = {
        ProductInfo(vendor="avahi", product="avahi", version="0.6.25"):
        CVEData(
            None,
            {
                "cves": [
                    CVE(
                        cve_number="CVE-2010-2244",
                        severity="MEDIUM",
                    ),
                    CVE(
                        cve_number="CVE-2011-1002",
                        severity="MEDIUM",
                    ),
                    CVE(
                        cve_number="CVE-2017-6519",
                        severity="CRITICAL",
                    ),
                    CVE(
                        cve_number="CVE-2021-26720",
                        severity="HIGH",
                    ),
                    CVE(
                        cve_number="CVE-2021-3468",
                        severity="MEDIUM",
                    ),
                ],
            },
        )
    }

    MOCK_NODEJS_CVE_DATA = {
        ProductInfo(vendor="nodejs", product="node.js", version="14.16.0"):
        CVEData(
            None,
            {
                "cves": [
                    CVE(
                        cve_number="CVE-2021-22918",
                        severity="MEDIUM",
                    ),
                    CVE(
                        cve_number="CVE-2021-22931",
                        severity="CRITICAL",
                    ),
                    CVE(
                        cve_number="CVE-2021-22939",
                        severity="MEDIUM",
                    ),
                    CVE(
                        cve_number="CVE-2021-22940",
                        severity="HIGH",
                    ),
                ],
            },
        )
    }

    MOCK_RH_API = {
        "affected_release": [
            {
                "product_name": "Red Hat Enterprise Linux 8",
                "package": "nodejs:12-8040020210708131418.522a0ee4",
            },
            {
                "product_name": "Red Hat Enterprise Linux 8",
                "package": "nodejs:14-8040020210708154809.522a0ee4",
            },
            {
                "product_name": "Red Hat Enterprise Linux 8",
                "package": "libuv-1:1.41.1-1.el8_4",
            },
        ],
        "package_state": [{
            "product_name": "Red Hat Enterprise Linux 8",
            "fix_state": "Not affected",
            "package_name": "nodejs:16/nodejs",
        }],
    }

    MOCK_DEBIAN_API = {
        "pspp": {
            "CVE-2018-20230": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "1.2.0-3",
                    },
                },
            },
            "CVE-2019-9211": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "1.2.0-4",
                    },
                },
            },
        },
        "avahi": {
            "CVE-2010-2244": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "0.6.26-1",
                    },
                },
            },
            "CVE-2011-1002": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "0.6.28-4",
                    },
                },
            },
            "CVE-2017-6519": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "0.7-5",
                    },
                },
            },
            "CVE-2021-26720": {
                "releases": {
                    "bullseye": {
                        "status": "resolved",
                        "fixed_version": "0.8-4",
                    },
                },
            },
            "CVE-2021-3468": {
                "releases": {
                    "bullseye": {
                        "status": "open",
                    },
                }
            },
        },
    }
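
Usage note: the offline tests above all follow the same pytest-mock pattern: patch the tracker's network-facing get_data with a canned payload. A self-contained illustration with a local stand-in class instead of cve_bin_tool's DebianCVETracker (run under pytest with pytest-mock installed):

# Standalone illustration of the mocker.patch pattern used above, with a
# local stand-in class instead of cve_bin_tool's DebianCVETracker.
class Tracker:
    def get_data(self):
        raise RuntimeError("would hit the network")

def test_patched_tracker(mocker):
    # Patch by dotted path, as the real tests do; here the path is this module
    mocker.patch(f"{__name__}.Tracker.get_data", return_value={"pspp": {}})
    assert Tracker().get_data() == {"pspp": {}}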
Example #13
class TestSBOM:
    SBOM_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             "sbom")
    PARSED_SBOM_DATA = {
        ProductInfo(vendor="gnu", product="glibc", version="2.11.1"): {
            "default": {
                "remarks": Remarks.NewFound,
                "comments": "",
                "severity": ""
            },
            "paths": {""},
        }
    }

    @pytest.mark.parametrize(
        "filepath",
        (os.path.join(SBOM_PATH, "nonexistent.spdx.json"), ),
    )
    def test_nonexistent_file(self, filepath: str):
        sbom_engine = SBOMManager(filepath)
        assert sbom_engine.scan_file() == {}

    @pytest.mark.parametrize(
        "filename, sbom_type",
        (
            ((os.path.join(SBOM_PATH, "bad.csv")), "spdx"),
            ((os.path.join(SBOM_PATH, "bad.csv")), "cyclonedx"),
            ((os.path.join(SBOM_PATH, "bad.csv")), "swid"),
        ),
    )
    def test_invalid_file(self, filename: str, sbom_type: str):
        sbom_engine = SBOMManager(filename, sbom_type)
        assert sbom_engine.scan_file() == {}

    @pytest.mark.parametrize(
        "filename, sbom_type",
        (
            ((os.path.join(SBOM_PATH, "bad.csv")), "sbom"),
            ((os.path.join(SBOM_PATH, "bad.csv")), "SPDX"),
        ),
    )
    def test_invalid_type(self, filename: str, sbom_type: str):
        sbom_engine = SBOMManager(filename, sbom_type)
        assert sbom_engine.scan_file() == {}

    @pytest.mark.parametrize(
        "filename, spdx_parsed_data",
        (
            (os.path.join(SBOM_PATH, "spdx_test.spdx"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "spdx_test.spdx.rdf"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "spdx_test.spdx.json"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "spdx_test.spdx.xml"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "spdx_test.spdx.yml"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "spdx_test.spdx.yaml"), PARSED_SBOM_DATA),
        ),
    )
    def test_valid_spdx_file(self, filename: str,
                             spdx_parsed_data: Dict[ProductInfo, TriageData]):
        sbom_engine = SBOMManager(filename, sbom_type="spdx")
        assert sbom_engine.scan_file() == spdx_parsed_data

    @pytest.mark.parametrize(
        "filename, cyclonedx_parsed_data",
        (
            (os.path.join(SBOM_PATH, "cyclonedx_test.xml"), PARSED_SBOM_DATA),
            (os.path.join(SBOM_PATH, "cyclonedx_test.json"), PARSED_SBOM_DATA),
        ),
    )
    def test_valid_cyclonedx_file(self, filename: str,
                                  cyclonedx_parsed_data: Dict[ProductInfo,
                                                              TriageData]):
        sbom_engine = SBOMManager(filename, sbom_type="cyclonedx")
        assert sbom_engine.scan_file() == cyclonedx_parsed_data

    @pytest.mark.parametrize(
        "filename, swid_parsed_data",
        ((os.path.join(SBOM_PATH, "swid_test.xml"), PARSED_SBOM_DATA), ),
    )
    def test_valid_swid_file(self, filename: str,
                             swid_parsed_data: Dict[ProductInfo, TriageData]):
        sbom_engine = SBOMManager(filename, sbom_type="swid")
        assert sbom_engine.scan_file() == swid_parsed_data
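
Usage note: for context, a sketch of the interface these tests exercise, following the constructor and scan_file calls above (the import path is an assumption based on the tool's layout):

# Usage sketch of the interface exercised above; the import path is assumed.
from cve_bin_tool.sbom_manager import SBOMManager  # assumption

sbom_engine = SBOMManager("spdx_test.spdx", sbom_type="spdx")
for product_info, triage_data in sbom_engine.scan_file().items():
    print(product_info.vendor, product_info.product, product_info.version)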