def build_db(filename1='base-nessus-min', filename2='base-qualys-min', verbose=False):
    db.drop(COLLECTIONS[0])
    db.drop(COLLECTIONS[1])

    df1 = Utils.open_csv(f'{DATA_DIR}/{filename1}.csv')
    if df1 is None:
        print(f'{filename1} not found')
        return False

    df2 = Utils.open_csv(f'{DATA_DIR}/{filename2}.csv')
    if df2 is None:
        print(f'{filename2} not found')
        return False

    if verbose:
        print("Starting to parse the first file...")
        df1.progress_apply(Parser.nessus, axis=1)
        print("Done!\n")
    else:
        df1.apply(Parser.nessus, axis=1)

    if verbose:
        print("Starting to parse the second file...")
        df2.progress_apply(Parser.qualys, axis=1)
        print("Done!\n")
    else:
        df2.apply(Parser.qualys, axis=1)

    plugins1 = Plugin.get_all(COLLECTIONS[0])
    plugins2 = Plugin.get_all(COLLECTIONS[1])
    if verbose:
        print("Done!\n")
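# The verbose branches above rely on tqdm's pandas integration: progress_apply
# only exists after it has been registered. A minimal sketch of that setup,
# assuming tqdm is installed (the registration call is not shown in this listing):
from tqdm import tqdm
tqdm.pandas()  # adds DataFrame.progress_apply / Series.progress_apply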
def run():
    db.initialize(DATABASE)
    collections = db.get_collections()
    if len(collections) == 0:
        Utils.build_db(verbose=True)
        Utils.build_test_db()
    metrics('nessus', 'qualys')
def get_pid_df(collection, pid):
    r = db.find_one(collection, {'dsk': pid})
    if not r:
        return False
    else:
        p = pd.DataFrame.from_dict(r, orient='index')
        return p.transpose()
def get_title_df(collection, title):
    r = db.find_one(collection, {'title': title})
    if not r:
        return False
    else:
        p = pd.DataFrame.from_dict(r, orient='index')
        return p.transpose()
def count_plugins(collection, search=None):
    # TODO: mount query with search
    if search:
        query = {}  # placeholder until search filtering is implemented
    else:
        query = {}
    return db.count(collection, query)
def get_all_filtered(cls, collection, order_by='0', order_direction=1, limit_number=0,
                     page=1, logical_expr='$or', search=None):
    if order_direction == 'asc':
        order_direction = 1
    elif order_direction == 'desc':
        order_direction = -1

    columns = {
        '0': '_id',
        '1': 'title',
        '2': 'description',
        '3': 'cvss',
        '4': 'category',
        '5': 'cve',
        '6': 'cwe',
        '7': 'similarity',
        '8': 'type',
        '9': 'refs',
        '10': 'dsk'
    }

    limit_number = int(limit_number)

    # TODO: mount query with search
    if search:
        query = {}
    else:
        query = {}

    # print('order by: {}'.format(order_by))
    # print('order_direction: {}'.format(order_direction))
    # print('limit number: {}'.format(limit_number))
    # print('page: {}'.format(page))

    try:
        r = [cls(**elem) for elem in db.find(collection, query,
                                             order_by=columns[order_by],
                                             order_direction=order_direction,
                                             limit_number=limit_number,
                                             page=int(page))]
    except TypeError:
        r = False
    print(r)
    return r
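# A sketch of how the search TODO above (and in count_plugins) could be mounted:
# a case-insensitive regex over the text columns, combined under the operator
# passed in logical_expr. This is an illustrative assumption, not the project's
# actual query builder; it presumes the db layer accepts Mongo-style operators,
# and the searchable field list is guessed from the columns map above.
def build_search_query(search, logical_expr='$or'):
    searchable = ['title', 'description', 'category', 'cve', 'cwe', 'refs', 'dsk']
    if not search:
        return {}
    return {logical_expr: [{field: {'$regex': search, '$options': 'i'}}
                           for field in searchable]}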
def get_by_scanner(cls, collection, scanner):
    try:
        return [cls(**elem) for elem in db.find_all(collection, {'type': scanner})]
    except TypeError:
        return False
def get_all_df(collection):
    r = db.find(collection, {})
    if not r:
        return False
    else:
        return pd.DataFrame(list(r))
def get_all(cls, collection):
    try:
        return [cls(**elem) for elem in db.find_all(collection, {})]
    except TypeError:
        return False
def get_by_pid(cls, collection, pid):
    try:
        # if no document matches, cls(**...) raises TypeError and False is returned
        return cls(**db.find_one(collection, {'dsk': pid}))
    except TypeError:
        return False
def get_by_id(cls, collection, _id):
    try:
        return cls(**db.find_one(collection, {'_id': _id}))
    except TypeError:
        return False
def get_by_title(cls, collection, title):
    try:
        return cls(**db.find_one(collection, {'title': title}))
    except TypeError:
        return False
def save(self, collection):
    return db.update(collection, {'_id': self._id}, self.jsonify())
def get_by_scanner_df(collection, scanner):
    r = db.find(collection, {'type': scanner})
    if not r:
        return False
    else:
        return pd.DataFrame(list(r))
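# Hypothetical usage of the DataFrame helpers above, assuming they are exposed on
# the Plugin class, that 'nessus'/'qualys' are the stored scanner types, and that
# COLLECTIONS[0]/COLLECTIONS[1] hold the respective plugins (all assumptions made
# for illustration only):
nessus_df = Plugin.get_by_scanner_df(COLLECTIONS[0], 'nessus')
qualys_df = Plugin.get_by_scanner_df(COLLECTIONS[1], 'qualys')
if nessus_df is not False and qualys_df is not False:
    print(f'nessus plugins: {len(nessus_df)}, qualys plugins: {len(qualys_df)}')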
def run_test():
    db.initialize(DATABASE)
    mapper.mapper()