def __init__(self, filename, lastmodified):
    """Stream-parse a Red Hat cvemap XML file into ``self.cves``.

    Uses ``eT.iterparse`` so the whole document is never held in memory:
    the root element is captured on its "start" event and its children are
    cleared after each processed ``Vulnerability`` element.

    :param filename: path to the downloaded cvemap XML file
    :param lastmodified: Last-Modified value of the download, stored as-is
    """
    self.lastmodified = lastmodified
    # name -> dict of CVE attributes parsed from the <Vulnerability> element
    self.cves = {}
    root = None
    updated = None
    for event, elem in eT.iterparse(filename, events=("start", "end")):
        if elem.tag == "cvemap" and event == "start":
            # Root element: remember it for clearing, and take the global
            # 'updated' timestamp used as modified_date for every CVE.
            root = elem
            updated = parse_datetime(elem.get('updated'))
        elif elem.tag == "Vulnerability" and event == "end":
            name = elem.get('name')
            self.cves[name] = {
                'impact': text_strip(elem.find('ThreatSeverity')),
                'published_date': parse_datetime(text_strip(elem.find('PublicDate'))),
                # cvemap has no per-CVE modification date; use the map's.
                'modified_date': updated,
                'cvss3_score': text_strip(elem.find('CVSS3/CVSS3BaseScore')),
                'cvss3_metrics': text_strip(elem.find('CVSS3/CVSS3ScoringVector')),
                'cwe_list': self._cwe_list(text_strip(elem.find('CWE'))),
                # NS is interpolated as the namespace URI of the lang
                # attribute on <Details> — presumably a module constant;
                # TODO confirm it matches the feed's namespace.
                'description': self._cve_description(
                    elem.findall('Details[@{%s}lang="en:us"]' % NS))
            }
            # Clear the XML tree continuously
            root.clear()
def check_repo(self, dbdump):
    """Check repo data in dump."""
    assert "repo_detail:801" in dbdump
    detail = dbdump["repo_detail:801"]
    # Positional fields of the repo_detail tuple, in storage order.
    expected_fields = ("content set 1", "content-set-name-1",
                       "https://www.repourl.com/repo1", "noarch", "1",
                       "product1", 501)
    for position, expected in enumerate(expected_fields):
        assert detail[position] == expected
    # Compare as datetimes so timezone representation does not matter.
    assert parse_datetime(detail[7]) == parse_datetime("2019-08-01T01:00:00-05:00")

    assert "repolabel2ids:content set 1" in dbdump
    assert dbdump["repolabel2ids:content set 1"] == [801]

    for key in ("pkgid2repoids:301", "pkgid2repoids:302", "pkgid2repoids:303"):
        assert key in dbdump
    assert "pkgid2repoids:304" not in dbdump
    for key in ("pkgid2repoids:305", "pkgid2repoids:306"):
        assert key in dbdump
    # Package 306 is expected in both repos.
    repo_list = dbdump["pkgid2repoids:306"]
    assert 801 in repo_list
    assert 802 in repo_list
    assert "pkgid2repoids:307" in dbdump
def _get_first_published_from_erratas(self, erratas):  # pylint: disable=R0201
    """Return the 'issued' date of the oldest errata, formatted as a string."""
    oldest = None
    for errata in erratas:
        candidate = parse_datetime(errata['issued'])
        if oldest is None or candidate < oldest:
            oldest = candidate
    # format_datetime receives None when the errata list is empty.
    return format_datetime(oldest)
def _read_head(self, failed):
    """Reads downloaded meta files and checks for updates.

    Sets ``self.updated`` when the remote cvemap is newer than (or not
    comparable with) what is stored locally; logs a warning when the
    download failed.

    :param failed: mapping of file path -> HTTP status for failed downloads
    """
    # BUGFIX: header_path was previously assigned only inside the
    # success branch, so the failure branch crashed with
    # UnboundLocalError on failed[header_path]. Compute it up front.
    header_path = self._tmp_head()
    if not failed:
        header = CvemapHead(header_path)
        # already synced before?
        db_lastmodified = parse_datetime(self.cvemap_store.lastmodified())
        self.lastmodified = parse_datetime(header.get_lastmodified())
        # synced for the first time or has newer revision
        if (db_lastmodified is None
                or self.lastmodified is None
                or self.lastmodified > db_lastmodified):
            self.updated = True
        else:
            self.logger.info("Cve map has not been updated (since %s).",
                             str(db_lastmodified))
    else:
        self.logger.warning("Download failed: %s (HTTP CODE %d)",
                            URL, failed[header_path])
def _read_meta(self, failed):
    """Reads downloaded meta files and checks for updates."""
    for repo in self.repos:
        meta_path = repo.meta_tmp()
        # Guard clause: report failed downloads and move on.
        if meta_path in failed:
            FAILED_NIST.inc()
            self.logger.warning("Download failed: %s (HTTP CODE %d)",
                                repo.meta_url(), failed[meta_path])
            continue
        meta = CveMeta(meta_path)
        # already synced before?
        previous = parse_datetime(self.db_lastmodified.get(repo.label, None))
        current = parse_datetime(meta.get_lastmodified())
        # synced for the first time or has newer revision
        if previous is None or current is None or current > previous:
            repo.meta = meta
        else:
            self.logger.info(
                "Cve list '%s' has not been updated (since %s).",
                repo.label, str(previous))
def __init__(self, filename):
    """Stream-parse a CPE dictionary XML file into ``self.cpes``.

    Collects the generator timestamp into ``self.lastmodified`` and a
    cpe-item name -> title mapping into ``self.cpes``, clearing processed
    elements to keep memory usage flat.
    """
    self.lastmodified = None
    self.cpes = {}
    root = None
    # Pre-build the namespace-qualified tag names compared in the loop.
    cpe_ns = NS["cpe"]
    list_tag = "{%s}cpe-list" % cpe_ns
    generator_tag = "{%s}generator" % cpe_ns
    item_tag = "{%s}cpe-item" % cpe_ns
    for event, elem in eT.iterparse(filename, events=("start", "end")):
        if event == "start" and elem.tag == list_tag:
            root = elem
        elif event == "end" and elem.tag == generator_tag:
            self.lastmodified = parse_datetime(
                text_strip(elem.find('cpe:timestamp', NS)))
        elif event == "end" and elem.tag == item_tag:
            self.cpes[elem.get('name')] = text_strip(elem.find('cpe:title', NS))
            # Clear the XML tree continuously
            root.clear()
def _populate_cves(self, repo):
    """Import/update CVEs from a NIST repo batch into the ``cve`` table.

    Builds a per-name dict of CVE attributes from ``repo.list_cves()``,
    then matches existing rows by name, skips rows owned by a different
    source, inserts the new names and updates the known ones, and finally
    populates CWE links.

    :param repo: object exposing ``list_cves()`` with NVD-format records
    :return: dict of cve name -> attribute dict (with DB "id" filled in)
    """
    cve_impact_map = self._populate_cve_impacts()
    nist_source_id = self._get_source_id('NIST')
    cur = self.conn.cursor()
    cve_data = {}
    for cve in repo.list_cves():
        cve_name = _dget(cve, "cve", "CVE_data_meta", "ID")
        cve_desc_list = _dget(cve, "cve", "description", "description_data")
        # Prefer CVSSv3 severity; fall back to CVSSv2 when absent.
        impact = _dget(cve, "impact", "baseMetricV3", "cvssV3", "baseSeverity")
        if impact is None:
            impact = _dget(cve, "impact", "baseMetricV2", "severity")
        url_list = _dget(cve, "cve", "references", "reference_data")
        modified_date = parse_datetime(_dget(cve, "lastModifiedDate"))
        published_date = parse_datetime(_dget(cve, "publishedDate"))
        cwe_data = _dget(cve, "cve", "problemtype", "problemtype_data")
        cwe_list = _process_cwe_list(cwe_data)
        redhat_url, secondary_url = self._process_url_list(
            cve_name, url_list)
        cve_data[cve_name] = {
            "description": _desc(cve_desc_list, "lang", "en", "value"),
            # Severity strings are normalized (e.g. "HIGH" -> "High") to
            # match the keys of cve_impact_map; missing maps to 'NotSet'.
            "impact_id": cve_impact_map[impact.capitalize()]
                         if impact is not None else cve_impact_map['NotSet'],
            "cvss3_score": _dget(cve, "impact", "baseMetricV3", "cvssV3", "baseScore"),
            "cvss3_metrics": _dget(cve, "impact", "baseMetricV3", "cvssV3", "vectorString"),
            "redhat_url": redhat_url,
            "cwe_list": cwe_list,
            "secondary_url": secondary_url,
            "published_date": published_date,
            "modified_date": modified_date,
            "iava": None,
            "source_id": nist_source_id,
        }
    if cve_data:
        # Look up existing rows for all names in one round-trip.
        names = [(key, ) for key in cve_data]
        execute_values(cur,
                       """select id, name, source_id from cve
                          inner join (values %s) t(name) using (name)
                       """, names, page_size=len(names))
        for row in cur.fetchall():
            if row[2] is not None and row[2] != nist_source_id:
                # different source, do not touch!
                del cve_data[row[1]]
                continue
            # Known row owned by NIST (or unowned): mark for update.
            cve_data[row[1]]["id"] = row[0]
        # Rows without an "id" were not found in the DB -> insert.
        to_import = [(name, values["description"], values["impact_id"],
                      values["published_date"], values["modified_date"],
                      values["cvss3_score"], values["cvss3_metrics"],
                      values["iava"], values["redhat_url"],
                      values["secondary_url"], values["source_id"])
                     for name, values in cve_data.items() if "id" not in values]
        self.logger.debug("CVEs to import: %d", len(to_import))
        to_update = [(values["id"], name, values["description"], values["impact_id"],
                      values["published_date"], values["modified_date"],
                      values["cvss3_score"], values["cvss3_metrics"],
                      values["iava"], values["redhat_url"],
                      values["secondary_url"], values["source_id"])
                     for name, values in cve_data.items() if "id" in values]
        self.logger.debug("CVEs to update: %d", len(to_update))
        if to_import:
            execute_values(cur,
                           """insert into cve (name, description, impact_id,
                              published_date, modified_date, cvss3_score,
                              cvss3_metrics, iava, redhat_url, secondary_url,
                              source_id) values %s returning id, name""",
                           list(to_import), page_size=len(to_import))
            # Record the ids assigned to freshly inserted rows.
            for row in cur.fetchall():
                cve_data[row[1]]["id"] = row[0]
        if to_update:
            execute_values(cur,
                           """update cve set name = v.name,
                              description = v.description,
                              impact_id = v.impact_id,
                              published_date = v.published_date,
                              modified_date = v.modified_date,
                              redhat_url = v.redhat_url,
                              secondary_url = v.secondary_url,
                              cvss3_score = v.cvss3_score,
                              cvss3_metrics = v.cvss3_metrics,
                              iava = v.iava,
                              source_id = v.source_id
                              from (values %s)
                              as v(id, name, description, impact_id,
                                   published_date, modified_date, cvss3_score,
                                   cvss3_metrics, iava, redhat_url,
                                   secondary_url, source_id)
                              where cve.id = v.id
                           """, list(to_update), page_size=len(to_update),
                           # Explicit casts so postgres resolves the VALUES
                           # column types (int ids, numeric score).
                           template=b"(%s, %s, %s, %s::int, %s, %s, %s::numeric, %s, %s, %s, %s, %s::int)")
    # NOTE(review): indentation of the tail was lost in this copy; placed at
    # function level so the cursor is closed and the transaction committed
    # even when no CVE data was parsed — confirm against upstream.
    self._populate_cwes(cur, cve_data)
    cur.close()
    self.conn.commit()
    return cve_data
def test_parse_string(self):
    """Test parsing datetime from string."""
    parsed = dateutil.parse_datetime("2018-10-24 15:27:40.058353")
    assert isinstance(parsed, datetime)
def test_parse_none(self):
    """Test parsing date = None."""
    result = dateutil.parse_datetime(None)
    assert result is None