Example #1
def setup_module():
    # create test account and collections, with appropriate permissions
    admindb = db.ExistDB(server_url=EXISTDB_SERVER_URL,
                         username=EXISTDB_SERVER_ADMIN_USER,
                         password=EXISTDB_SERVER_ADMIN_PASSWORD)

    # create non-admin test account
    admindb.create_group(EXISTDB_TEST_GROUP)
    admindb.create_account(EXISTDB_SERVER_USER, EXISTDB_SERVER_PASSWORD,
                           EXISTDB_TEST_GROUP)

    admindb.createCollection('/db' + EXISTDB_TEST_BASECOLLECTION, True)
    # test index config
    test_cfg_collection = '/db/system/config/db/' + EXISTDB_TEST_BASECOLLECTION
    admindb.createCollection(test_cfg_collection, True)

    # make both collections owned by test group and group writable
    admindb.query('sm:chgrp(xs:anyURI("/db%s"), "%s")' % \
        (EXISTDB_TEST_BASECOLLECTION, EXISTDB_TEST_GROUP))
    admindb.query('sm:chgrp(xs:anyURI("%s"), "%s")' % \
        (test_cfg_collection, EXISTDB_TEST_GROUP))

    admindb.query('sm:chmod(xs:anyURI("/db%s"), "rwxrwxrwx")' % \
        (EXISTDB_TEST_BASECOLLECTION))
    admindb.query('sm:chmod(xs:anyURI("%s"), "rwxrwxrwx")' % \
        (test_cfg_collection))
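A quick way to confirm this setup is to reconnect with the restricted credentials and write into the group-writable collection. The following is a minimal sketch, not part of the original module; check_test_account is a hypothetical helper that reuses only the settings constants and ExistDB methods shown in these examples:

def check_test_account():
    # connect as the non-admin test account created in setup_module
    userdb = db.ExistDB(server_url=EXISTDB_SERVER_URL,
                        username=EXISTDB_SERVER_USER,
                        password=EXISTDB_SERVER_PASSWORD)
    # the rwxrwxrwx, group-owned collection should accept a document load
    path = '/db' + EXISTDB_TEST_BASECOLLECTION + '/smoke-test.xml'
    if userdb.load('<ok/>', path, True):
        print('non-admin account can write to the test collection')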
Example #2
def import_disa_iavm_cve():
	flist=[]
	exdb = db.ExistDB()	 
	validateCollection(exdb,db_iavm_cve_disa_collection)
	validateDataPath(iavm_cve_data_dir)
		
	urls=[]
	urls.append(("http://iasecontent.disa.mil/stigs/xml/iavm-to-cve%28u%29.xml","u_iavm-to-cve.xml"))
	
	# -----------------------------------------------------------------------------
	# download files even if they exist; NIST is constantly updating them
	# -----------------------------------------------------------------------------
	for url in urls:
		u = url[0]
		uname = url[1]
		# no existence check; just verify the data directory is writable
		if (os.access(iavm_cve_data_dir, os.W_OK)):
			try:
				urllib.urlretrieve (u, iavm_cve_data_dir+uname)
				try:
					fo = open(iavm_cve_data_dir+uname, 'rb')
					try:
						if exdb.load(fo, db_iavm_cve_disa_collection+'/'+uname, True):
							flist.append(uname+": data import successful")
					except:
						flist.append(uname+": data import failed")
					fo.close()
				except:
					flist.append(uname+": file read failed")
			except:
				flist.append(uname+": file download failed")
		else:
			flist.append(uname+": file write failed")
	
	return flist
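Every importer below calls validateCollection (and often validateDataPath) before loading anything, but neither helper appears in this listing. A plausible minimal implementation is sketched here; it assumes eulexistdb exposes hasCollection alongside the createCollection call used in Example #1, and is not the project's actual code:

import os

def validateCollection(exdb, collection):
    # create the target eXist collection if it is missing
    if not exdb.hasCollection(collection):
        exdb.createCollection(collection, True)

def validateDataPath(path):
    # make sure the local download directory exists
    if not os.path.exists(path):
        os.makedirs(path)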
Example #3
def iavm_to_cpe_doc(flag=False):
    exdb = db.ExistDB()
    validateCollection(exdb, iavm_to_cpe_coll)

    thisdoc = exdb.getDocument(iavm_to_cve_path)

    root = etree.fromstring(thisdoc)

    # get all IAVMs
    find = etree.XPath('//IAVM')
    iavms = find(root)

    for i in range(len(iavms)):
        print(iavms[i].find('./S').get('IAVM'))
        if ((flag and iavms[i].find('./S').get('IAVM').startswith('2015-'))
                or (not flag)):
            iav_title = iavms[i].find('./S').get('Title')
            cves = iavms[i].findall('./CVEs/CVENumber')
            for j in range(len(cves)):
                cpes = getCpeFromCve(cves[j].text)
                if len(cpes) > 0:
                    cves[j].append(etree.Element('CPEs'))
                    cpes_elem = cves[j].find('./CPEs')
                    for v in cpes:
                        # apply vendor/reference filter
                        if filter_iavm_references(
                                v[0], iavms[i].find("./References")):
                            cpes_elem.append(etree.Element('Vendor', id=v[0]))
                            v_elem = cpes_elem.findall('./Vendor')[-1]
                            for cpe in v[1]:
                                # apply cpe/title filter
                                if filter_iavm_title(cpe, iav_title):
                                    v_elem.append(etree.Element('CPE'))
                                    this = v_elem.findall('./CPE')[-1]
                                    this.text = cpe

    # rewrite the above loop to just query once for each cve in the IAVMs
    # create a dict for each cve with cpe values
    # get all IAVMs
    # this is 1800 CVEs at roughly 1.5 seconds = 45 minutes.
    '''
    find = etree.XPath('//CVENumber')
    cvenodes = find(root)
    cves=[]
    for j in range(len(cvenodes)):
        cves.append(cvenodes[j].text)
    
    cves = list(set(cves)) # create unique list
    cves.sort()
    cve2cpe = {} # dictionary
    for cve in cves:
        print cve
        cve2cpe[cve]=getCpeFromCve(cve)
    '''

    # print(etree.tostring(iavms[0],pretty_print=True))

    # write xml doc to database
    exdb.load(etree.tostring(root, pretty_print=True), iavm_to_cpe_path, True)
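The commented-out block above describes the planned optimization: dedupe the CVE list and call getCpeFromCve once per unique CVE instead of once per occurrence. A sketch of that cache, with build_cve_to_cpe_cache as a hypothetical name:

def build_cve_to_cpe_cache(root):
    # collect every CVENumber in the document, deduplicated and sorted
    cvenodes = etree.XPath('//CVENumber')(root)
    cves = sorted(set(node.text for node in cvenodes))
    # one getCpeFromCve lookup per unique CVE
    cve2cpe = {}
    for cve in cves:
        cve2cpe[cve] = getCpeFromCve(cve)
    return cve2cpe

The per-IAVM loop could then read cve2cpe[cves[j].text] instead of calling getCpeFromCve directly.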
Example #4
def importAdobeHtml():
    exdb = db.ExistDB()
    validateCollection(exdb, db_cvrf_adobe_collection)
    flist = []
    produrls = [
        "https://helpx.adobe.com/security/products/reader.html",
        "https://helpx.adobe.com/security/products/flash-player.html",
        "https://helpx.adobe.com/security/products/shockwave.html",
    ]

    apslinks = []
    for pu in produrls:
        content = urllib.urlopen(pu).read()
        doc = fromstring(content)
        doc.make_links_absolute("https://helpx.adobe.com")
        for a in doc.xpath('//a[contains(@href,"/aps")]'):
            apslinks.append(a.get('href'))

    apslinks = list(set(apslinks))
    apslinks.sort()

    # -----------------------------------------------------------------------------
    # download files if they don't exist
    # TODO: check for revisions and download those as well (check hashes?)
    # -----------------------------------------------------------------------------
    for u in apslinks:
        uname = u.split('/')[-1]
        # skip bulletins already downloaded to the data directory
        if (not os.path.isfile(adobe_data_dir + uname)
                and os.access(adobe_data_dir, os.W_OK)):
            print("downloading " + uname)
            headers = {'User-Agent': 'Mozilla/5.0'}
            req = urllib2.Request(u, None, headers)
            #cvrfhtml = urllib2.urlopen(req).read()
            urllib.urlretrieve(u, adobe_data_dir + uname)
            #f = open(uname,'w')
            #f.write(cvrfhtml)
            #f.close()
            try:
                # bulletin names embed a two-digit year (e.g. apsb14-01); only translate 2014 and later
                if int(uname[4:6]) > 13:
                    try:
                        translateAdobeHtmlToCvrf(adobe_data_dir + uname)
                        try:
                            cvrfname = uname.replace(".html", "-cvrf.xml")
                            fo = open(adobe_data_dir + cvrfname, 'rb')
                            if exdb.load(
                                    fo,
                                    db_cvrf_adobe_collection + '/' + cvrfname,
                                    True):
                                flist.append(cvrfname +
                                             ": data import successful")
                            else:
                                flist.append(cvrfname + ": data import failed")
                            fo.close()
                        except:
                            flist.append(uname + ": file read failed")
                    except:
                        print("failed to translate: " + uname)
            except:
                pass
Example #5
def import_cisco_cvrf():
    flist = []
    exdb = db.ExistDB()
    validateCollection(exdb, db_cvrf_cisco_collection)

    # -----------------------------------------------------------------------------
    # get list of cvrf urls
    # -----------------------------------------------------------------------------
    nurl = "http://tools.cisco.com/security/center/cvrfListing.x"
    request = urllib2.Request(nurl)
    rawPage = urllib2.urlopen(request)
    read = rawPage.read()
    #print read
    tree = etree.HTML(read)
    tpath = "//a[contains(@href,'cvrf.xml')]"
    findall = etree.ETXPath(tpath)
    arefs = findall(tree)

    urls = []
    for a in arefs:
        urls.append(a.get('href').replace('\t', '').replace('\n', ''))

    # known-bad files; tracked here until Cisco fixes them or we apply a workaround
    # (these could be ignored were it not for the POODLE advisory)
    badfiles = [
        "/cisco-sa-20040420-tcp-nonios_cvrf.xml",
        "cisco-sa-20120328-msdp_cvrf.xml",
        "cisco-sa-20141015-poodle_cvrf.xml",
    ]

    # -----------------------------------------------------------------------------
    # download files if they don't exist
    # -----------------------------------------------------------------------------
    for u in urls:
        uname = u.split('/')[-1]
        # no existence check; re-download every run if the working directory is writable
        #if (not os.path.isfile(cisco_data_dir+uname) and os.access(".", os.W_OK)):
        if (os.access(".", os.W_OK)):
            try:
                print("downloading " + uname)
                urllib.urlretrieve(u, cisco_data_dir + uname)
                try:
                    fo = open(cisco_data_dir + uname, 'rb')
                    if exdb.load(fo, db_cvrf_cisco_collection + '/' + uname,
                                 True):
                        flist.append(uname + ": data import successful")
                    else:
                        flist.append(uname + ": data import failed")
                    fo.close()
                except:
                    flist.append(uname + ": file read failed")
            except:
                flist.append(uname + ": file download failed")
        else:
            flist.append(uname + ": file write failed")

    return flist
Example #6
def parse_disa_iavm_zip(fn):
	if not disa_pki_flag:
		return []
	flist=[]
	filen=open(fn,"rb")
	exdb = db.ExistDB()
	validateCollection(exdb,db_iavm_disa_collection)
	#logger.debug(': '.join(['parse_zip',filen.name]))

	#create zipfile object from passed in zip file object
	z_file = zipfile.ZipFile(filen)

	#create temporary directory
	f_name = filen.name.split('/')[-1]
	dir_name = f_name.replace('.zip', '')

	tmp_dir = root_src_dir + dir_name + '/'

	#logger.info(tmp_dir)

	if not os.path.exists(tmp_dir):
		os.makedirs(tmp_dir)

	#extract files to tmp dir
	z_file.extractall(tmp_dir)

	# walk files in dir and add to database
	# ValueError: too many values to unpack
	# for root, dirs, files in os.walk(tmp_dir):
	for src in glob.glob(tmp_dir+'/*/*.xml'):
		print(src)
		f = src.split(os.sep)[-1].split()[0]+'.xml'
		#move tmp files to permanent location
		#TODO: use static definition
		dst = iavm_data_dir+f
		try:
			if os.path.exists(dst):
				os.remove(dst)
			shutil.move(src, dst)
			#logger.debug(': '.join(['move_iavm',src, dst]))
			#parse_xml(root+'/'+f) this is where I database boogie!
			fo = open(dst, 'rb')
			try:
				if exdb.load(fo, db_iavm_disa_collection+'/'+f, True):
					flist.append(f+": data import successful")
			except:
				flist.append(f+": data import failed")
			fo.close()
		except:
			#logger.debug(': '.join(['move_iavm', 'FAILED',src, dst]))
			flist.append(f+": file upload failed")
			pass


	flist.reverse()
	return flist
Example #7
def parse_msrc_cvrf_zip(fn):
    flist = []
    filen = open(fn, "rb")
    exdb = db.ExistDB()
    validateCollection(exdb, db_cvrf_microsoft_collection)
    #logger.debug(': '.join(['parse_zip',filen.name]))

    #create zipfile object from passed in zip file object
    z_file = zipfile.ZipFile(filen)

    #create temporary directory
    f_name = filen.name.split('/')[-1]
    dir_name = f_name.replace('.zip', '')

    tmp_dir = root_src_dir + dir_name + '/'

    #logger.info(tmp_dir)

    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    #extract files to tmp dir
    z_file.extractall(tmp_dir)

    #walk files in dir and add to database
    #20140709rb: this is an awkward construction
    for root, dirs, files in os.walk(tmp_dir):
        for f in files:
            #move tmp files to permanent location
            #TODO: use static definition
            src = root + '/' + f
            dst = ms_data_dir + f
            try:
                if f[-4:].lower() == '.xml':
                    if os.path.exists(dst):
                        os.remove(dst)
                    shutil.move(src, dst)
                    #logger.debug(': '.join(['move_iavm',src, dst]))
                    #parse_xml(root+'/'+f) this is where I database boogie!
                    fo = open(dst, 'rb')
                    if exdb.load(fo, db_cvrf_microsoft_collection + '/' + f,
                                 True):
                        flist.append(f + ": data import successful")
                    else:
                        flist.append(f + ": data import failed")
                    fo.close()
            except:
                #logger.debug(': '.join(['move_iavm', 'FAILED',src, dst]))
                flist.append(f + ": file upload failed")
                pass

    flist.reverse()
    return flist
Example #8
def teardown_module():
    # remove test account & collections

    admindb = db.ExistDB(server_url=EXISTDB_SERVER_URL,
                         username=EXISTDB_SERVER_ADMIN_USER,
                         password=EXISTDB_SERVER_ADMIN_PASSWORD)

    test_cfg_collection = '/db/system/config/db/' + EXISTDB_TEST_BASECOLLECTION
    admindb.removeCollection(test_cfg_collection)
    admindb.removeCollection(EXISTDB_TEST_BASECOLLECTION)
    admindb.query('sm:remove-group("%s")' % EXISTDB_TEST_GROUP)
    admindb.query('sm:remove-group("%s")' % EXISTDB_SERVER_USER)
    admindb.query('sm:remove-account("%s")' % EXISTDB_SERVER_USER)
Example #9
def import_redhat_cvrf():
    flist = []
    exdb = db.ExistDB()
    validateCollection(exdb, db_cvrf_redhat_collection)
    # -----------------------------------------------------------------------------
    # get list of cvrf urls
    # -----------------------------------------------------------------------------
    # need User-Agent or Red Hat blocks request
    nurl = "http://www.redhat.com/security/data/cvrf/index.txt"
    headers = {'User-Agent': 'Mozilla/5.0'}
    req = urllib2.Request(nurl, None, headers)
    index = urllib2.urlopen(req).readlines()
    urls = [
        'http://www.redhat.com/security/data/cvrf/' + i.replace('\n', '')
        for i in index
    ]

    # -----------------------------------------------------------------------------
    # download files if they don't exist
    # TODO: check for revisions and download those as well (check hashes?)
    # -----------------------------------------------------------------------------
    for u in urls:
        uname = u.split('/')[-1]
        # no existence check; re-download every run if the data directory is writable
        #if (not os.path.isfile(redhat_data_dir+uname) and os.access(redhat_data_dir, os.W_OK)):
        if (os.access(redhat_data_dir, os.W_OK)):
            try:
                headers = {'User-Agent': 'Mozilla/5.0'}
                req = urllib2.Request(u, None, headers)
                cvrfxml = urllib2.urlopen(req).read()
                #urllib.urlretrieve (u, redhat_data_dir+uname)
                f = open(redhat_data_dir + uname, 'w')
                f.write(cvrfxml)
                f.close()
                try:
                    fo = open(redhat_data_dir + uname, 'rb')
                    if exdb.load(fo, db_cvrf_redhat_collection + '/' + uname,
                                 True):
                        flist.append(uname + ": data import successful")
                    else:
                        flist.append(uname + ": data import failed")
                    fo.close()
                except:
                    flist.append(uname + ": file read failed")
            except:
                flist.append(uname + ": file download failed")
        else:
            flist.append(uname + ": file write failed")

    return flist
Example #10
def import_oracle_cvrf():
    flist = []
    exdb = db.ExistDB()
    validateCollection(exdb, db_cvrf_oracle_collection)
    # -----------------------------------------------------------------------------
    # get list of cvrf urls
    # -----------------------------------------------------------------------------
    nurl = "http://www.oracle.com/ocom/groups/public/@otn/documents/webcontent/1932662.xml"
    request = urllib2.Request(nurl)
    rawPage = urllib2.urlopen(request)
    read = rawPage.read()
    #print read
    root = etree.fromstring(read)
    arefs = root.xpath("//link/text()")

    urls = []
    for a in arefs:
        if "@otn" in a:
            urls.append(a.replace('\t', '').replace('\n', '').replace(' ', ''))

    # -----------------------------------------------------------------------------
    # download files if they don't exist
    # -----------------------------------------------------------------------------
    for u in urls:
        uname = u.split('/')[-1]
        # no existence check; re-download every run if the data directory is writable
        #if (not os.path.isfile(oracle_data_dir+uname) and os.access(oracle_data_dir, os.W_OK)):
        if (os.access(oracle_data_dir, os.W_OK)):
            try:
                urllib.urlretrieve(u, oracle_data_dir + uname)
                try:
                    fo = open(oracle_data_dir + uname, 'rb')
                    if exdb.load(fo, db_cvrf_oracle_collection + '/' + uname,
                                 True):
                        flist.append(uname + ": data import successful")
                    else:
                        flist.append(uname + ": data import failed")
                    fo.close()
                except:
                    flist.append(uname + ": file read failed : " +
                                 oracle_data_dir + uname)
            except:
                flist.append(uname + ": file download failed : " + u)
        else:
            flist.append(uname + ": file write failed : " + oracle_data_dir +
                         uname)

    return flist
Example #11
def import_redhat_oval():
    flist = []
    exdb = db.ExistDB()
    # download
    download_redhat_oval()
    # get list of file names
    for fname in glob.glob(redhat_oval_dir + "com.redhat.rhsa-*.xml"):
        uname = fname.split(os.sep)[-1]
        try:
            fo = open(redhat_oval_dir + uname, 'rb')
            if exdb.load(fo, db_oval_redhat_collection + '/' + uname, True):
                flist.append(uname + ": data import successful")
            else:
                flist.append(uname + ": data import failed")
            fo.close()
        except:
            flist.append(uname + ": file read failed")
    return flist
Example #12
def import_redhat_cpe():
    flist = []
    exdb = db.ExistDB()
    validateCollection(exdb, db_cpe_redhat_collection)
    validateDataPath(cpe_redhat_data_dir)

    urls = []
    urls.append(
        "https://www.redhat.com/security/data/metrics/cpe-dictionary.xml")

    # -----------------------------------------------------------------------------
    # download files even if they exist; NIST is constantly updating them
    # -----------------------------------------------------------------------------
    for u in urls:
        uname = u.split('/')[-1]
        # no existence check; just verify the data directory is writable
        if (os.access(cpe_redhat_data_dir, os.W_OK)):
            try:
                # need User-Agent or Red Hat blocks the request
                headers = {'User-Agent': 'Mozilla/5.0'}
                req = urllib2.Request(u, None, headers)
                cpexml = urllib2.urlopen(req).read()
                #urllib.urlretrieve (u, cpe_redhat_data_dir+uname)
                f = open(cpe_redhat_data_dir + uname, 'w')
                f.write(cpexml)
                f.close()
                try:
                    fo = open(cpe_redhat_data_dir + uname, 'rb')
                    try:
                        if exdb.load(fo,
                                     db_cpe_redhat_collection + '/' + uname,
                                     True):
                            flist.append(uname + ": data import successful")
                    except:
                        flist.append(uname + ": data import failed")
                    fo.close()
                except:
                    flist.append(uname + ": file read failed")
            except:
                flist.append(uname + ": file download failed")
        else:
            flist.append(uname + ": file write failed")

    return flist
Example #13
def import_nist_cce():
	flist=[]
	exdb = db.ExistDB()	 
	validateCollection(exdb,db_cce_nist_collection)
	validateDataPath(cce_data_dir)
		
	urls=[]
	urls.append("http://static.nvd.nist.gov/feeds/xml/cce/nvdcce-0.1-feed.xml.zip")

	# -----------------------------------------------------------------------------
	# download files even if they exist; NIST is constantly updating them
	# -----------------------------------------------------------------------------
	for u in urls:
		uname = u.split('/')[-1]
		# no existence check; just verify the data directory is writable
		if (os.access(cce_data_dir, os.W_OK)):
			try:
				urllib.urlretrieve (u, cce_data_dir+uname)
				# unzip in place
				zf = ZipFile(cce_data_dir+uname)
				zf.extractall(cce_data_dir)
				zf.close()
				# remove zip file
				os.remove(cce_data_dir+uname)
				try:
					xname = uname.replace('.zip','')
					fo = open(cce_data_dir+xname, 'rb')
					try:
						if exdb.load(fo, db_cce_nist_collection+'/'+xname, True):
							flist.append(xname+": data import successful")
					except:
						flist.append(xname+": data import failed")
					fo.close()
				except:
					flist.append(uname+": file read failed")
			except:
				flist.append(uname+": file download failed")
		else:
			flist.append(uname+": file write failed")
	
	return flist
Example #14
def import_nist_cpe():
    flist = []
    exdb = db.ExistDB()
    validateCollection(exdb, db_cpe_nist_collection)
    validateDataPath(cpe_nist_data_dir)

    urls = []
    urls.append(
        "http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml"
    )
    urls.append(
        "http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.2.xml"
    )

    # -----------------------------------------------------------------------------
    # download files even if they exist; NIST is constantly updating them
    # -----------------------------------------------------------------------------
    for u in urls:
        uname = u.split('/')[-1]
        # no existence check; just verify the data directory is writable
        if (os.access(cpe_nist_data_dir, os.W_OK)):
            try:
                urllib.urlretrieve(u, cpe_nist_data_dir + uname)
                try:
                    fo = open(cpe_nist_data_dir + uname, 'rb')
                    if exdb.load(fo, db_cpe_nist_collection + '/' + uname,
                                 True):
                        flist.append(uname + ": data import successful")
                    else:
                        flist.append(uname + ": data import failed")
                    fo.close()
                except:
                    flist.append(uname + ": file read failed")
            except:
                flist.append(uname + ": file download failed")
        else:
            flist.append(uname + ": file write failed")

    return flist
Example #15
def download_redhat_oval():
    exdb = db.ExistDB()
    validateCollection(exdb, db_oval_redhat_collection)

    try:
        dlurl = "https://www.redhat.com/security/data/oval/rhsa.tar.bz2"
        headers = {'User-Agent': 'Mozilla/5.0'}
        req = urllib2.Request(dlurl, None, headers)
        dlreq = urllib2.urlopen(req)
        CHUNK = 16 * 1024
        with open(redhat_oval_dir + 'rhsa.tar.bz2', 'wb') as fp:
            while True:
                chunk = dlreq.read(CHUNK)
                if not chunk: break
                fp.write(chunk)
        try:
            tar = tarfile.open(redhat_oval_dir + 'rhsa.tar.bz2', 'r:bz2')
            tar.extractall(redhat_oval_dir)
        except:
            print("import_redhat_oval: failed to uncompress tar.bz2 file")
    except:
        print("import_redhat_oval: failed to download file")
    return
Example #16
def importJuniperHtml():
    exdb = db.ExistDB()  
    validateCollection(exdb,db_cvrf_juniper_collection)
    flist=[]
    produrls = [
        "http://kb.juniper.net/InfoCenter/index?page=content&channel=SECURITY_ADVISORIES&cat=SIRT_ADVISORY&&actp=&sort=datemodified&dir=descending&max=1000&batch=1000&rss=true&itData.offset=0",
        ]
    
    apslinks=[]
    for pu in produrls:
        content = urllib.urlopen(pu).read()
        doc = fromstring(content)
        doc.make_links_absolute("http://kb.juniper.net/InfoCenter/")
        for a in doc.xpath('//a[contains(@href,"id=JSA") and contains(@href,"showDraft")]'):
            apslinks.append(a.get('href'))
    
    apslinks = list(set(apslinks))
    apslinks.sort()
    
    # -----------------------------------------------------------------------------
    # download files if they don't exist
    # TODO: check for revisions and download those as well (check hashes?)
    # -----------------------------------------------------------------------------
    for u in apslinks:
        uname = u.split('id=')[1].split('&')[0]+'.html'
        # skip advisories already downloaded to the data directory
        if (not os.path.isfile(juniper_data_dir+uname) and os.access(juniper_data_dir, os.W_OK)):
            print ("downloading "+uname)
            headers = { 'User-Agent' : 'Mozilla/5.0' }
            req = urllib2.Request(u, None, headers)
            #cvrfhtml = urllib2.urlopen(req).read()
            urllib.urlretrieve (u, juniper_data_dir+uname)
            #f = open(uname,'w')
            #f.write(cvrfhtml)
            #f.close()
Example #17
def export_iavm_to_cpe_doc(fn):
    exdb = db.ExistDB()
    thisdoc = exdb.getDocument(iavm_to_cpe_path)
    #f=open(fn,'w')
    # fn is expected to be an open, writable file object, not a path
    fn.write(thisdoc)
    fn.close()
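Since fn is used as an already-open file object (the open() call is commented out), the caller supplies one; a minimal usage sketch with a hypothetical output path:

out = open('/tmp/iavm-to-cpe.xml', 'w')  # hypothetical path
export_iavm_to_cpe_doc(out)              # writes the document and closes the file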
Example #18
def __init__(self):
    self.db = db.ExistDB()
Example #19
def __init__(self):
    self.db = db.ExistDB(server_url=EXISTDB_SERVER_URL)
Example #20
def __init__(self):
    self.db = db.ExistDB(server_url="http://localhost:8080/exist")
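Examples #18-20 show the three connection styles side by side: no arguments (the library presumably falls back to its own configured defaults), a server_url taken from a settings constant, and a hard-coded URL. A small wrapper covering all three is sketched below; ExistClient is a hypothetical name and the fallback behavior is an assumption, not something these examples confirm:

class ExistClient(object):
    def __init__(self, server_url=None):
        # use an explicit URL when given, otherwise the library defaults
        if server_url:
            self.db = db.ExistDB(server_url=server_url)
        else:
            self.db = db.ExistDB()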