Esempio n. 1
0
    def __init__(self):
        """Load exploits from the exploit-db CSV feed and Vulners JSON feed.

        Populates ``self.exploits`` (keyed by exploit id) from the CSV, and
        ``self.cves`` (CVE -> list of exploit dicts) from the Vulners entries.
        """
        self.name = SOURCE_NAME
        self.cves = defaultdict(list)
        self.exploits = {}

        # Source 1 (Exploit db github): CSV rows keyed by exploit id.
        # (removed unused local `exploits = {}`; rows go on self.exploits)
        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE1)
        exploitcsv = csv.DictReader(StringIO(_file.decode('utf-8')),
                                    delimiter=',')
        for row in exploitcsv:
            self.exploits[row['id']] = row

        # Source 2 (Vulners): JSON entries grouped per CVE.
        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE2)
        data = json.loads(str(_file.read(), 'utf-8'))
        for entry in data:
            edb = {}
            source = entry['_source']
            add_if(edb, source, 'published')
            add_if(edb, source, 'modified')
            add_if(edb, source, 'lastseen', 'last seen')
            add_if(edb, source, 'id')
            add_if(edb, source, 'title')
            add_if(edb, source, 'description')
            add_if(edb, source, 'references')
            add_if(edb, source, 'reporter')
            add_if(edb, source, 'sourceHref', 'source')

            # Normalise date fields in place.
            for date in ['published', 'modified', 'last seen']:
                clean_date(edb, date)
            if edb:
                for CVE in source['cvelist']:
                    self.cves[CVE].append(edb)
Esempio n. 2
0
    def __init__(self):
        """Collect Metasploit module data from the Vulners feed, per CVE."""
        self.name = SOURCE_NAME
        self.cves = defaultdict(list)

        # The feed URL embeds the (possibly empty) Vulners API key.
        source_file = SOURCE_FILE.format(
            conf.readSetting("vulners", "api_key", ""))

        _file, r = conf.getFeedData(SOURCE_NAME, source_file)
        data = json.loads(str(_file.read(), 'utf-8'))
        for entry in data:
            item = {}
            source = entry['_source']
            add_if(item, source, 'published')
            add_if(item, source, 'modified')
            add_if(item, source, 'lastseen', 'last seen')
            add_if(item, source, 'metasploitReliability', 'reliability')
            add_if(item, source, 'id')
            add_if(item, source, 'title')
            add_if(item, source, 'description')
            add_if(item, source, 'references')
            add_if(item, source, 'reporter')
            add_if(item, source, 'sourceHref', 'source')

            # Normalise date fields in place.
            for date_field in ['published', 'modified', 'last seen']:
                clean_date(item, date_field)
            if item:
                for cve_id in source['cvelist']:
                    self.cves[cve_id].append(item)
Esempio n. 3
0
  def __init__(self):
    """Read the IAVM spreadsheet and index its bulletins per CVE."""
    self.name = SOURCE_NAME
    _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
    workbook  = xlrd.open_workbook(file_contents = _file)
    sheet     = workbook.sheet_by_index(0)
    bulletins = defaultdict(lambda: {'references': []})
    self.cves = defaultdict(list)

    # Rows start at 1 because row 0 is the header.
    for rownum in range(1, sheet.nrows):
      row = sheet.row_values(rownum)
      entry = bulletins[row[wf['iavm']]]
      entry['id']           = row[wf['iavm']]
      entry['vms']          = row[wf['vms']]
      entry['severity']     = row[wf['severity']]
      entry['release_date'] = row[wf['release_date']]
      entry['title']        = row[wf['title']]
      entry['date']         = row[wf['date']]
      if row[wf['cve']]:
        entry['cve'] = row[wf['cve']]
      if row[wf['url']] and row[wf['reference']]:
        entry['references'].append({'name': row[wf['reference']],
                                    'url':  row[wf['url']]})

    # Re-key bulletins that carry a CVE by that CVE.
    for _id, data in bulletins.items():
      if data.get('cve'):
        self.cves[data.pop('cve')] = data
Esempio n. 4
0
    def __init__(self):
        """Collect security-bulletin entries from the feed, grouped per CVE."""
        self.name = SOURCE_NAME
        self.cves = defaultdict(list)

        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
        data = json.loads(str(_file.read(), 'utf-8'))
        for entry in data:
            bulletin = {}
            source = entry['_source']
            add_if(bulletin, source, 'published')
            add_if(bulletin, source, 'modified')
            add_if(bulletin, source, 'lastseen', 'last seen')
            add_if(bulletin, source, 'id')
            add_if(bulletin, source, 'title')
            add_if(bulletin, source, 'bulletinFamily')
            add_if(bulletin, source, 'description')
            add_if(bulletin, source, 'references')
            add_if(bulletin, source, 'reporter')
            add_if(bulletin, source, 'sourceHref', 'source')

            # Normalise date fields in place.
            for date_field in ['published', 'modified', 'last seen']:
                clean_date(bulletin, date_field)
            if bulletin:
                for cve_id in source['cvelist']:
                    self.cves[cve_id].append(bulletin)
Esempio n. 5
0
  def __init__(self):
    """Parse the Microsoft bulletin XLSX into per-CVE bulletin records."""
    self.name = SOURCE_NAME
    _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE, unpack=False) # Don't unpack, 'cuz XLSX's are zips
    workbook  = xlrd.open_workbook(file_contents = _file)
    worksheet = workbook.sheet_by_index(0)
    mskb      = defaultdict(dict)
    self.cves = defaultdict(list)

    for rownum in range(worksheet.nrows-1): # -1 because we skip the header
      row = worksheet.row_values(rownum+1)  # +1 because we skip the header
      # Convert date to date object
      date = minimalist_xldate_as_datetime(row[wf['date']], 0).isoformat()

      mskb[row[wf['bulletin_id']]]['date']             = date
      mskb[row[wf['bulletin_id']]]['bulletin_id']      = row[wf['bulletin_id']]
      mskb[row[wf['bulletin_id']]]['knowledgebase_id'] = row[wf['knowledgebase_id']]
      mskb[row[wf['bulletin_id']]]['severity']         = row[wf['severity']]
      mskb[row[wf['bulletin_id']]]['impact']           = row[wf['impact']]
      mskb[row[wf['bulletin_id']]]['title']            = row[wf['title']]
      mskb[row[wf['bulletin_id']]]['cves']             = row[wf['cves']].split(",")

      # NOTE(review): cell values come from row `rownum+1` but hyperlinks are
      # looked up at `rownum` — possible off-by-one; confirm against the
      # sheet's hyperlink_map layout.
      bulletin_url      = worksheet.hyperlink_map.get((rownum, wf['bulletin_id']))
      knowledgebase_url = worksheet.hyperlink_map.get((rownum, wf['knowledgebase_id']))
      mskb[row[wf['bulletin_id']]]['bulletin_url']      = bulletin_url
      mskb[row[wf['bulletin_id']]]['knowledgebase_url'] = knowledgebase_url

    # Store one copy of each bulletin per CVE, minus its own 'cves' list.
    for _id, data in mskb.items():
      to_store = copy.copy(data)
      to_store.pop("cves")
      for cve in data['cves']:
        if cve: self.cves[cve].append(to_store)
Esempio n. 6
0
    def __init__(self):
        """Collect Nessus plugin entries from the feed, grouped per CVE."""
        self.name = SOURCE_NAME
        self.cves = defaultdict(list)

        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
        data = json.loads(str(_file.read(), 'utf-8'))
        for entry in data:
            plugin = {}
            source = entry['_source']
            add_if(plugin, source, 'published')
            add_if(plugin, source, 'modified')
            add_if(plugin, source, 'lastseen', 'last seen')
            add_if(plugin, source, 'pluginID', 'plugin id')
            add_if(plugin, source, 'title')
            add_if(plugin, source, 'description')
            add_if(plugin, source, 'naslFamily', 'NASL family')
            add_if(plugin, source, 'id', 'NASL id')
            add_if(plugin, source, 'href', 'source')
            add_if(plugin, source, 'reporter')

            # Normalise date fields in place.
            for date_field in ['published', 'modified', 'last seen']:
                clean_date(plugin, date_field)
            if plugin:
                for cve_id in source['cvelist']:
                    self.cves[cve_id].append(plugin)
Esempio n. 7
0
    def __init__(self):
        """Collect Talos advisory entries from the Vulners feed, per CVE."""
        self.name = SOURCE_NAME
        self.cves = defaultdict(list)

        # The feed URL embeds the (possibly empty) Vulners API key.
        source_file = SOURCE_FILE.format(
            conf.readSetting("vulners", "api_key", ""))

        _file, r = conf.getFeedData(SOURCE_NAME, source_file)
        data = json.loads(str(_file.read(), 'utf-8'))
        for entry in data:
            advisory = {}
            source = entry['_source']
            add_if(advisory, source, 'published')
            add_if(advisory, source, 'lastseen', 'last seen')
            add_if(advisory, source, 'id')
            add_if(advisory, source, 'title')
            add_if(advisory, source, 'references')
            add_if(advisory, source, 'reporter')
            add_if(advisory, source, 'href', 'source')

            # Normalise date fields in place.
            for date_field in ['published', 'last seen']:
                clean_date(advisory, date_field)
            if advisory:
                for cve_id in source['cvelist']:
                    self.cves[cve_id].append(advisory)
Esempio n. 8
0
 def __init__(self):
     """Parse the vendor-statements XML feed into ``self.cves``."""
     self.name = SOURCE_NAME
     _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
     content_handler = VendorStatementsHandler()
     xml_parser = make_parser()
     xml_parser.setContentHandler(content_handler)
     xml_parser.parse(_file)
     self.cves = content_handler.statements
Esempio n. 9
0
 def __init__(self):
   """Parse the OVAL XML feed; definitions land on ``self.oval``."""
   self.name = SOURCE_NAME
   _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
   oval_handler = OVALHandler()
   xml_parser = make_parser()
   xml_parser.setContentHandler(oval_handler)
   xml_parser.parse(_file)
   self.cves = {}
   self.oval = oval_handler.ovals
Esempio n. 10
0
  def __init__(self):
    """Load the exploit-db CSV feed, indexed by exploit id."""
    self.name = SOURCE_NAME
    self.cves = {}
    self.exploits = {}

    _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
    # (removed unused local `exploits={}`; rows are stored on self.exploits)
    exploitcsv = csv.DictReader(StringIO(_file.decode('utf-8')), delimiter=',')
    for row in exploitcsv:
      self.exploits[row['id']] = row
Esempio n. 11
0
def getFile(source, unpack=True):
    """Fetch a feed, returning (file, response) only when it has changed.

    Returns (None, None) when the feed is unmodified since the stored
    last-modified value, and None (implicitly) when the download fails —
    kept that way for backward compatibility with existing callers.
    Side effect: sets the module-level ``Modified`` flag on new data.
    """
    global Modified
    try:
        (f, r) = Configuration.getFeedData(source, unpack)
        # A missing last-modified header is treated as "modified".
        if (r.headers['last-modified'] is None or
                r.headers['last-modified'] != db.getLastModified(source)):
            Modified = True
            return (f, r)
        else:
            return (None, None)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        print("Cannot open url %s. Bad URL or not connected to the internet?" % (Configuration.getFeedURL(source)))
Esempio n. 12
0
    def __init__(self):
        """Load the exploit-db CSV feed when available, indexed by exploit id."""
        self.name = "exploit-db"
        self.cves = {}
        self.exploits = {}

        _file, r = conf.getFeedData('exploitdb')
        # (removed unused local `exploits = {}`; rows go on self.exploits)
        if _file:
            exploitcsv = csv.DictReader(StringIO(_file.read().decode('utf-8')),
                                        delimiter=',')
            for row in exploitcsv:
                self.exploits[row['id']] = row
Esempio n. 13
0
def getFile(source, unpack=True):
    """Fetch a feed, returning (file, response) only when it has changed.

    Returns (None, None) when the feed is unmodified, and None (implicitly)
    when the download fails — kept for backward compatibility with callers.
    Side effect: sets the module-level ``Modified`` flag on new data.
    """
    global Modified
    try:
        (f, r) = Configuration.getFeedData(source, unpack)
        # A missing last-modified header is treated as "modified".
        if (r.headers['last-modified'] is None
                or r.headers['last-modified'] != db.getLastModified(source)):
            Modified = True
            return (f, r)
        else:
            return (None, None)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        print("Cannot open url %s. Bad URL or not connected to the internet?" %
              (Configuration.getFeedURL(source)))
Esempio n. 14
0
    def __init__(self):
        """Parse the VMware advisory spreadsheet into per-CVE advisory lists."""
        self.name = SOURCE_NAME
        # Don't unpack: XLSX files are zip archives themselves.
        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE, unpack=False)
        workbook = xlrd.open_workbook(file_contents=_file)
        worksheet = workbook.sheet_by_index(0)
        vmware = defaultdict(lambda: defaultdict(dict))
        self.cves = defaultdict(list)

        # Rows start at 1 because row 0 is the header.
        for rownum in range(1, worksheet.nrows):
            row = worksheet.row_values(rownum)
            # Convert Excel serial dates to ISO strings.
            published = minimalist_xldate_as_datetime(row[wf['published']],
                                                      0).isoformat()
            last_update = minimalist_xldate_as_datetime(
                row[wf['last_updated']], 0).isoformat()

            advisory_id = row[wf['advisory_id']]
            for cve in row[wf['cve']].split(";"):
                cve = cve.strip()
                # Skip advisories already recorded for this CVE.
                if advisory_id in vmware[cve]:
                    continue

                record = vmware[cve][advisory_id]
                record['id'] = advisory_id
                record['title'] = row[wf['title']]
                record['description'] = row[wf['description']]
                record['published'] = published
                record['last_updated'] = last_update
                if row[wf['workaround']] not in ["NA", "N/A", ""]:
                    record['workaround'] = row[wf['workaround']]

                finder = {}
                if row[wf['finder_company']] not in ["NA", "N/A", ""]:
                    finder['company'] = row[wf['finder_company']]
                if row[wf['finder_name']] not in ["NA", "N/A", ""]:
                    finder['name'] = row[wf['finder_name']]
                # Fixed: was `len(finder.keys()) is not 0`, which relies on
                # CPython int identity; plain truthiness is correct.
                if finder:
                    record['finder'] = finder

        for cve, data in vmware.items():
            for _id, _data in data.items():
                self.cves[cve].append(_data)
Esempio n. 15
0
 def __init__(self):
     """Parse the SAINT exploit XML and index exploits by CVE/BID/OSVDB."""
     self.name = SOURCE_NAME
     _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
     saint_handler = SaintHandler()
     xml_parser = make_parser()
     xml_parser.setContentHandler(saint_handler)
     xml_parser.parse(BytesIO(_file))
     self.cves = defaultdict(list)
     self.bids = defaultdict(list)
     self.osvdbs = defaultdict(list)
     for exploit in saint_handler.exploits:
         # Store a copy with the 'cve' key stripped out.
         entry = copy.copy(exploit)
         if entry.get('cve'):
             entry.pop('cve')
         for key, index in (('cve', self.cves), ('bid', self.bids),
                            ('osvdb', self.osvdbs)):
             if exploit.get(key):
                 index[exploit[key]].append(entry)
Esempio n. 16
0
 def __init__(self):
     """Parse the d2sec exploit XML and bucket exploits by reference type."""
     self.name = SOURCE_NAME
     _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE)
     d2sec_handler = D2secHandler()
     xml_parser = make_parser()
     xml_parser.setContentHandler(d2sec_handler)
     xml_parser.parse(BytesIO(_file))
     self.cves = defaultdict(list)
     self.exploits = defaultdict(dict)
     for exploit in d2sec_handler.exploits:
         cleaned = copy.copy(exploit)  # exploit minus its reference list
         cleaned.pop('refs')
         for ref in exploit.get('refs', []):
             if ref['type'] == 'cve':
                 self.cves[ref['key']].append(cleaned)
             else:
                 bucket = self.exploits[ref['type']].setdefault(ref['key'], [])
                 bucket.append(cleaned)
Esempio n. 17
0
    def __init__(self):
        """Aggregate Red Hat RPM and RHSA feeds into per-CVE records."""
        self.name = "redhat"
        handlers = [{
            'handler': RPMHandler(),
            'source': 'rpm'
        }, {
            'handler': RHSAHandler(),
            'source': 'rhsa'
        }]
        parser = make_parser()
        self.cves = defaultdict(dict)

        for handler in handlers:
            _file, r = conf.getFeedData(handler['source'])
            if _file:  # skip sources that could not be fetched
                parser.setContentHandler(handler['handler'])
                parser.parse(_file)
                for cve, data in handler['handler'].CVEs.items():
                    # NOTE(review): this seeds an empty dict under the
                    # "redhat" key but then merges `data` into the CVE record
                    # directly, leaving the placeholder empty — confirm whether
                    # the data was meant to nest under self.name instead.
                    if self.name not in self.cves[cve]:
                        self.cves[cve][self.name] = {}
                    self.cves[cve].update(data)
Esempio n. 18
0
    def __init__(self):
        """Aggregate Red Hat RPM and RHSA feeds into per-CVE records."""
        self.name = "redhat"
        handlers = [
            {'handler': RPMHandler(), 'source': 'rpm'},
            {'handler': RHSAHandler(), 'source': 'rhsa'},
        ]
        parser = make_parser()
        self.cves = defaultdict(dict)

        for entry in handlers:
            _file, r = conf.getFeedData(entry['source'],
                                        SOURCES[entry['source']])
            parser.setContentHandler(entry['handler'])
            # The feed may arrive as raw bytes; SAX wants a file-like object.
            if type(_file) is bytes:
                _file = BytesIO(_file)
            parser.parse(_file)
            for cve, data in entry['handler'].CVEs.items():
                self.cves[cve].update(data)
Esempio n. 19
0
    def __init__(self):
        """Map vendor advisory IDs to CVEs from a zip of HTML cross-reference pages."""
        self.name = SOURCE_NAME
        _file, r = conf.getFeedData(SOURCE_NAME, SOURCE_FILE, unpack=False)
        zipobj = zipfile.ZipFile(BytesIO(_file))
        self.cves = defaultdict(dict)

        for filename in zipobj.namelist():
            with zipobj.open(filename) as infile:
                page = fromstring(infile.read().decode("utf-8"))
                # Vendor name sits in the second cell of the first table row.
                vendor = page.xpath("//table[1]//tr[1]//td[2]")
                # NOTE(review): if the xpath matches nothing, `vendor` stays
                # an empty list and `vendor.upper()` below would raise —
                # confirm every page in the feed carries this cell.
                if vendor: vendor = vendor[0].text.lower()
                rows = page.xpath("//table[2]//tr//td")
                # CVE - Source ID
                IDs = [[
                    rows[i].text, [x.text for x in rows[i + 1].iterchildren()]
                ] for i in range(0, len(rows), 2)]
                for e in IDs:
                    # Strip a leading "VENDOR:" prefix from the vendor ID.
                    vendorID = e[0] if not e[0].startswith(
                        vendor.upper() + ':') else e[0][len(vendor) + 1:]
                    for cve in e[1]:
                        if vendor not in self.cves[cve]:
                            self.cves[cve][vendor] = []
                        if vendorID not in self.cves[cve][vendor]:
                            self.cves[cve][vendor].append(vendorID)
Esempio n. 20
0
            self.statement += ch

    def endElement(self, name):
        """Close a <statement> element and attach the accumulated text."""
        if name == 'statement':
            self.statementtag = False
            # Fixed: the previous code appended the rstripped text to itself
            # (`self.statement + self.statement.rstrip()`), duplicating the
            # statement body in the stored record.
            self.statement = self.statement.rstrip()
            self.vendor[-1]['statement'] = self.statement


# make parser
parser = make_parser()
ch = VendorHandler()
parser.setContentHandler(ch)
# check modification date
# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('vendor')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL('vendor')))
# Skip the (expensive) parse when the feed is unchanged since the last run.
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('vendor')
if i is not None:
    if last_modified == i:
        print("Not modified")
        sys.exit(0)
# parse xml and store in database
parser.parse(f)
statements = []
for statement in progressbar(ch.vendor):
    if args.v:
        print(statement)
Esempio n. 21
0
tmppath = Configuration.getTmpdir()

argparser = argparse.ArgumentParser(
    description='Populate/update the NIST ref database')
argparser.add_argument('-v',
                       action='store_true',
                       help='verbose output',
                       default=False)
args = argparser.parse_args()

if args.v:
    verbose = True

# check modification date
# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('ref')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("ref")))
# Skip the download entirely when the feed is unchanged since the last run.
i = db.getLastModified('ref')
if i is not None:
    if r.headers['last-modified'] == i:
        print("Not modified")
        sys.exit(0)

# Create temp file and download and unpack database
if not os.path.exists(tmppath):
    os.mkdir(tmppath)

with open(tmppath + '/allrefmaps.zip', 'wb') as fp:
    shutil.copyfileobj(f, fp)
Esempio n. 22
0
            self.description_summary_tag = False
            self.cwe[-1][
                'description_summary'] = self.description_summary.replace(
                    "\n", "")
        elif name == 'Weakness':
            self.weakness_tag = False


# make parser
parser = make_parser()
ch = CWEHandler()
parser.setContentHandler(ch)
db = DatabaseLayer()
# check modification date
try:
    (f, r) = Configuration.getFeedData('cwe')
except Exception as e:
    print(e)
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("cwe")))
# Skip the parse when the feed is unchanged since the last recorded update.
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.CWE.updated()
if i is not None:
    if lastmodified == i:
        print("Not modified")
        sys.exit(0)

# parse xml and store in database
parser.parse(f)
cweList = []
for cwe in progressbar(ch.cwe):
Esempio n. 23
0
from lib.Config import Configuration
import lib.DatabaseLayer as db

import csv
import argparse

# dictionary
# dictionary
tmppath = Configuration.getTmpdir()

argparser = argparse.ArgumentParser(description='Populate/update the exploitdb ref database')
argparser.add_argument('-v', action='store_true', help='verbose output', default=False)
args = argparser.parse_args()


# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('exploitdb')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("exploitdb")))

# Skip the download when the feed is unchanged since the last run.
i = db.getLastModified('exploitdb')
if i is not None:
    if r.headers['last-modified'] == i:
        print("Not modified")
        sys.exit(0)

if not os.path.exists(tmppath):
    os.mkdir(tmppath)

# Copy the downloaded CSV into the temp directory for later processing.
csvfile = tmppath+'/exploitdb.csv'
with open(csvfile, 'wb') as fp:
    shutil.copyfileobj(f, fp)
Esempio n. 24
0
                    'url': self.url,
                    'id': refl
                })
            self.exploittag = False
            self.refl = []
        if name == 'elliot':
            self.elliottag = False


# make parser
parser = make_parser()
ch = ExploitHandler()
parser.setContentHandler(ch)
# check modification date
# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('d2sec')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("d2sec")))
# Skip the parse when the feed is unchanged since the last run.
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified("d2sec")
if i is not None:
    if last_modified == i:
        print("Not modified")
        sys.exit(0)
# parse xml and store in database
parser.parse(f)
exploitList = []
for exploit in progressbar(ch.d2sec):
    # NOTE(review): unconditional debug print — likely leftover; the verbose
    # flag check below suggests printing was meant to be opt-in.
    print(exploit)
    if args.v:
Esempio n. 25
0
import lib.DatabaseLayer as dbLayer

bulletinurl = "https://technet.microsoft.com/library/security/"

def minimalist_xldate_as_datetime(xldate, datemode):
    """Convert an Excel serial date number to a ``datetime``.

    datemode: 0 for 1900-based workbooks, 1 for 1904-based ones
    (the 1904 epoch lies 1462 days after the 1900 epoch, 1899-12-30).
    """
    epoch = datetime.datetime(1899, 12, 30)
    offset = datetime.timedelta(days=xldate + 1462 * datemode)
    return epoch + offset

# dictionary
tmppath = Configuration.getTmpdir()

try:
    (f, r) = Configuration.getFeedData('msbulletin')
except Exception:  # narrowed from bare except: don't swallow Ctrl-C
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("msbulletin")))

# check modification date; skip the download when the feed is unchanged
i = dbLayer.getInfo("ms")
if i is not None:
    if r.headers['last-modified'] == i['last-modified']:
        print("Not modified")
        sys.exit(0)

if not os.path.exists(tmppath):
    os.mkdir(tmppath)
# The `with` block closes the file on exit; the trailing `fp.close()` that
# followed it was redundant and has been removed.
with open(tmppath+'/BulletinSearch.xlsx', 'wb') as fp:
    shutil.copyfileobj(f, fp)
Esempio n. 26
0
import lib.DatabaseLayer as dbLayer

bulletinurl = "https://technet.microsoft.com/library/security/"


def minimalist_xldate_as_datetime(xldate, datemode):
    """Translate an Excel serial date into a ``datetime`` object.

    ``datemode`` is 0 for 1900-based workbooks and 1 for 1904-based ones.
    """
    # The 1904 epoch lies 1462 days after the 1900 epoch (1899-12-30).
    days_since_epoch = xldate + 1462 * datemode
    return datetime.datetime(1899, 12, 30) + datetime.timedelta(days=days_since_epoch)


# dictionary
tmppath = Configuration.getTmpdir()

# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('msbulletin')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("msbulletin")))

# check modification date
i = dbLayer.getInfo("ms")
if i is not None:
    if r.headers['last-modified'] == i['last-modified']:
        print("Not modified")
        sys.exit(0)

if not os.path.exists(tmppath):
    os.mkdir(tmppath)
# Copy the downloaded XLSX into the temp directory for later processing.
with open(tmppath + '/BulletinSearch.xlsx', 'wb') as fp:
    shutil.copyfileobj(f, fp)
Esempio n. 27
0
            self.Solution_or_Mitigation = []
            self.Related_Weakness = []

            self.Attack_Pattern_tag = False
        if name == 'capec:Attack_Patterns':
            self.Attack_Patterns_tag = False
        if name == 'capec:Attack_Pattern_Catalog':
            self.Attack_Pattern_Catalog_tag = False

# make parser
parser = make_parser()
ch = CapecHandler()
parser.setContentHandler(ch)
# check modification date
# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('capec')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("capec")))
# Skip the parse when the feed is unchanged since the last run.
i = db.getLastModified('capec')
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
if i is not None:
    if last_modified == i:
        print("Not modified")
        sys.exit(0)
# parse xml and store in database
parser.parse(f)
attacks=[]
for attack in progressbar(ch.capec):
    attacks.append(attack)
db.bulkUpdate("capec", attacks)
Esempio n. 28
0
            self.Attack_Pattern_tag = False
        if name == 'Attack_Patterns':
            self.Attack_Patterns_tag = False
        if name == 'Attack_Pattern_Catalog':
            self.Attack_Pattern_Catalog_tag = False


# make parser
parser = make_parser()
ch = CapecHandler()
parser.setContentHandler(ch)
# NOTE(review): leftover debug print — ch.capec is freshly constructed here.
print(ch.capec)
# check modification date
try:
    (f, r) = Configuration.getFeedData('capec')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("capec")))
i = db.getLastModified('capec')
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
# NOTE(review): the not-modified early exit is commented out, so the feed is
# re-parsed on every run — confirm whether that is intentional.
#if i is not None:
#    if last_modified == i:
#        print("Not modified")
#        sys.exit(0)
# parse xml and store in database
parser.parse(f)
attacks = []
for attack in progressbar(ch.capec):
    attacks.append(attack)
db.bulkUpdate("capec", attacks)
Esempio n. 29
0
    def endElement(self, name):
        """Close a CWE element and store the collected description summary."""
        if name == 'Description_Summary' and self.weakness_tag:
            self.description_summary_tag = False
            # Fixed: the previous code concatenated the summary with itself
            # (`self.description_summary + self.description_summary`),
            # doubling the stored text; the stored value now matches the
            # collected summary with newlines stripped.
            self.cwe[-1]['description_summary'] = self.description_summary.replace("\n", "")
        elif name == 'Weakness':
            self.weakness_tag = False

# make parser
parser = make_parser()
ch = CWEHandler()
parser.setContentHandler(ch)
# check modification date
try:
    (f, r) = Configuration.getFeedData('cwe')
except Exception as e:
    print(e)
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("cwe")))
# Skip the parse when the feed is unchanged since the last run.
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('cwe')
if i is not None:
    if lastmodified == i:
        print("Not modified")
        sys.exit(0)


# parse xml and store in database
parser.parse(f)
cweList=[]
for cwe in progressbar(ch.cwe):
Esempio n. 30
0
import csv
import argparse

# dictionary
tmppath = Configuration.getTmpdir()

argparser = argparse.ArgumentParser(
    description='Populate/update the exploitdb ref database')
argparser.add_argument('-v',
                       action='store_true',
                       help='verbose output',
                       default=False)
args = argparser.parse_args()

# NOTE(review): the bare `except:` below also swallows KeyboardInterrupt;
# consider `except Exception`.
try:
    (f, r) = Configuration.getFeedData('exploitdb')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("exploitdb")))

# Skip the download when the feed is unchanged since the last run.
i = db.getLastModified('exploitdb')
if i is not None:
    if r.headers['last-modified'] == i:
        print("Not modified")
        sys.exit(0)

if not os.path.exists(tmppath):
    os.mkdir(tmppath)

csvfile = tmppath + '/exploitdb.csv'
with open(csvfile, 'wb') as fp:
Esempio n. 31
0
# To Do: Implement REDIS

# Verify the Redis reference server is reachable before doing any work.
try:
    redis = Configuration.getRedisRefConnection()
    try:
        redis.info()
    except:
        sys.exit("Redis server not running on %s:%s" %
                 (Configuration.getRedisHost(), Configuration.getRedisPort()))
except Exception as e:
    print(e)
    sys.exit(1)

try:
    (f, r) = Configuration.getFeedData('via4')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?" %
             (Configuration.getFeedURL("via4")))

# check modification date; skip the import when the feed is unchanged
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified("via4")
db.setColUpdateCurrentTime('via4')
if i is not None:
    if lastmodified == i:
        print("Not modified")
        sys.exit(0)

data = json.loads(f.read().decode('utf-8'))
cves = data['cves']
Esempio n. 32
0
import lib.DatabaseLayer as db

# To Do: Implement REDIS

# Verify the Redis reference server is reachable before doing any work.
try:
    redis = Configuration.getRedisRefConnection()
    try:
        redis.info()
    except:
        sys.exit("Redis server not running on %s:%s"%(Configuration.getRedisHost(),Configuration.getRedisPort()))
except Exception as e:
    print(e)
    sys.exit(1)

try:
    (f, r) = Configuration.getFeedData('via4')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("via4")))

# check modification date; skip the import when the feed is unchanged
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i=db.getLastModified("via4")
if i is not None:
    if lastmodified == i:
        print("Not modified")
        sys.exit(0)

# Flatten {cve_id: record} into a list of records carrying their own id.
data = json.loads(f.read().decode('utf-8'))
cves = data['cves']
bulk = [dict(val, id=key) for key, val in cves.items() if key]
db.bulkUpdate('via4', bulk)
Esempio n. 33
0
            self.reftag = False
        if name == 'exploit':
            for refl in self.refl:
                self.d2sec.append({'name': self.name, 'url': self.url, 'id': refl})
            self.exploittag = False
            self.refl = []
        if name == 'elliot':
            self.elliottag = False

# make parser
parser = make_parser()
ch = ExploitHandler()
parser.setContentHandler(ch)
# check modification date
try:
    (f, r) = Configuration.getFeedData('d2sec')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("d2sec")))
# Skip the parse when the feed is unchanged since the last run.
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified("d2sec")
if i is not None:
    if last_modified == i:
        print("Not modified")
        sys.exit(0)
# parse xml and store in database
parser.parse(f)
exploitList=[]
for exploit in progressbar(ch.d2sec):
    # NOTE(review): unconditional print duplicates the verbose print below —
    # likely leftover debug output.
    print (exploit)
    if args.v:
        print (exploit)
Esempio n. 34
0
        if name == "Attack_Patterns":
            self.Attack_Patterns_tag = False
        if name == "Attack_Pattern_Catalog":
            self.Attack_Pattern_Catalog_tag = False


if __name__ == "__main__":
    # Make a SAX2 XML parser
    parser = make_parser()
    ch = CapecHandler()
    parser.setContentHandler(ch)

    # Retrieve CAPECs from the configuration's capec url
    try:
        print("[+] Getting CAPEC XML file")
        (f, r) = Configuration.getFeedData("capec")
    except Exception as e:
        # NOTE(review): `e` is captured but not reported to the user.
        sys.exit(
            "Cannot open url %s. Bad URL or not connected to the internet?"
            % (Configuration.getFeedURL("capec"))
        )

    # Skip the parse when the feed is unchanged since the last run.
    db_last_modified = db.getLastModified("capec")
    last_modified = parse_datetime(r.headers["last-modified"], ignoretz=True)
    if db_last_modified is not None:
        if last_modified == db_last_modified:
            print("Not modified")
            sys.exit(0)

    # Parse XML and store in database
    parser.parse(f)
        if self.statementtag:
            self.statement += ch

    def endElement(self, name):
        """Close a <statement> element and attach the accumulated text."""
        if name == 'statement':
            self.statementtag = False
            # Fixed: the previous code appended the rstripped text to itself
            # (`self.statement + self.statement.rstrip()`), duplicating the
            # statement body in the stored record.
            self.statement = self.statement.rstrip()
            self.vendor[-1]['statement'] = self.statement

# make parser
parser = make_parser()
ch = VendorHandler()
parser.setContentHandler(ch)
# check modification date
try:
    (f, r) = Configuration.getFeedData('vendor')
except:
    sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL('vendor')))
# Skip the parse when the feed is unchanged since the last run.
last_modified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('vendor')
if i is not None:
    if last_modified == i:
        print("Not modified")
        sys.exit(0)
# parse xml and store in database
parser.parse(f)
statements=[]
for statement in progressbar(ch.vendor):
    if args.v:
        print (statement)
    statements.append(statement)