def start_monitor_by_container_id(container_id):
    """Start the runtime monitoring of a docker container.

    Returns a ``(json_body, http_status)`` tuple: 400 on a bad or duplicate
    request, 404 when the container id is unknown, 202 when the monitoring
    history entry was created.
    """
    # -- Checks input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_from_container_id(container_id)
    except Exception:  # narrowed from a bare except; any lookup failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Checks if the container is already being monitored
    if InternalServer.get_mongodb_driver().is_there_a_started_monitoring(container_id):
        return json.dumps({'err': 400,
                           'msg': 'The monitoring for the requested container id is already started'},
                          sort_keys=True), 400
    now = datetime.datetime.now().timestamp()
    # -- Create image_history entry
    history = {}
    history['image_name'] = image_name
    history['timestamp'] = now
    history['status'] = 'Monitoring'
    history['runtime_analysis'] = {'container_id': container_id,
                                   'start_timestamp': now,
                                   'stop_timestamp': None,
                                   'anomalous_activities_detected': None}
    # '_id' avoids shadowing the builtin `id`
    _id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(history)
    # -- Return
    output = {}
    output['id'] = str(_id)
    output['image_name'] = image_name
    output['msg'] = 'Monitoring of docker container with id <' + container_id + '> started'
    return json.dumps(output, sort_keys=True), 202
def run(self):
    """Fork the EDN consumer and the sysdig/falco monitor, then serve the REST API.

    Process layout: child #1 consumes queued analysis requests, child #2 runs
    the runtime behaviour monitor, and the parent runs the Flask app.
    """
    edn_pid = os.fork()
    if edn_pid == 0:
        # Child #1: consume analysis requests queued on the dagda EDN.
        try:
            while True:
                item = InternalServer.get_dagda_edn().get()
                if item['msg'] == 'init_db':
                    self._init_or_update_db()
                elif item['msg'] == 'check_image':
                    self._check_docker_by_image_name(item)
                elif item['msg'] == 'check_container':
                    self._check_docker_by_container_id(item)
        except KeyboardInterrupt:
            # Pressed CTRL+C to quit, so nothing to do.
            # Fixed: the original evaluated the bare expression `None` here,
            # which is a no-op but not the idiomatic `pass` statement.
            pass
    else:
        sysdig_falco_monitor_pid = os.fork()
        if sysdig_falco_monitor_pid == 0:
            # Child #2: run the sysdig/falco runtime behaviour monitor.
            try:
                self.sysdig_falco_monitor.pre_check()
                self.sysdig_falco_monitor.run()
            except KeyboardInterrupt:
                # Pressed CTRL+C to quit: stop the falco container
                InternalServer.get_docker_driver().docker_stop(self.sysdig_falco_monitor.get_running_container_id())
        else:
            # Parent: serve the REST API.
            DagdaServer.app.run(debug=False, host=self.dagda_server_host, port=self.dagda_server_port)
def check_docker_by_container_id(container_id):
    """Queue an analysis of the image behind *container_id*.

    Returns a ``(json_body, http_status)`` tuple: 400 on bad input, 404 when
    the container id cannot be resolved to an image, 202 when queued.
    """
    # -- Check input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_by_container_id(container_id)
    except Exception as ex:
        message = "Unexpected exception of type {0} occurred while getting the docker image name: {1!r}" \
            .format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
        DagdaLogger.get_logger().error(message)
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Process request
    data = {}
    data['image_name'] = image_name
    data['timestamp'] = datetime.datetime.now().timestamp()
    data['status'] = 'Analyzing'
    # '_id' avoids shadowing the builtin `id` (matches the sibling variant of this handler)
    _id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(data)
    InternalServer.get_dagda_edn().put({'msg': 'check_container', 'container_id': container_id, '_id': str(_id)})
    # -- Return
    output = {}
    output['id'] = str(_id)
    output['msg'] = 'Accepted the analysis of <' + image_name + '> with id: ' + container_id
    return json.dumps(output, sort_keys=True), 202
def run(self):
    """Fork two children (EDN consumer, sysdig/falco monitor) and serve the REST API in the parent."""
    edn_pid = os.fork()
    if edn_pid == 0:
        # Child #1: consumes analysis requests queued on the dagda EDN
        try:
            while True:
                item = InternalServer.get_dagda_edn().get()
                if item['msg'] == 'init_db':
                    self._init_or_update_db()
                elif item['msg'] == 'check_image':
                    self._check_docker_by_image_name(item)
                elif item['msg'] == 'check_container':
                    self._check_docker_by_container_id(item)
        except KeyboardInterrupt:
            # Pressed CTRL+C to quit, so nothing to do
            pass
    else:
        sysdig_falco_monitor_pid = os.fork()
        if sysdig_falco_monitor_pid == 0:
            # Child #2: runs the sysdig/falco runtime behaviour monitor
            try:
                self.sysdig_falco_monitor.pre_check()
                self.sysdig_falco_monitor.run()
            except DagdaError as e:
                # Monitoring is optional: log the error and keep the rest of the server alive
                DagdaLogger.get_logger().error(e.get_message())
                DagdaLogger.get_logger().warning(
                    'Runtime behaviour monitor disabled.')
            except KeyboardInterrupt:
                # Pressed CTRL+C to quit: stop and remove the falco container
                InternalServer.get_docker_driver().docker_stop(
                    self.sysdig_falco_monitor.get_running_container_id())
                InternalServer.get_docker_driver().docker_remove_container(
                    self.sysdig_falco_monitor.get_running_container_id())
        else:
            # Parent: serves the Flask REST API
            DagdaServer.app.run(debug=False, host=self.dagda_server_host, port=self.dagda_server_port)
def __init__(self, dagda_server_host='127.0.0.1', dagda_server_port=5000, mongodb_host='127.0.0.1',
             mongodb_port=27017):
    """Init the Dagda server: bind address/port, MongoDB driver and falco monitor.

    NOTE(review): the MongoDB driver must be registered on InternalServer
    before SysdigFalcoMonitor is built, because the monitor reads it back.
    """
    super(DagdaServer, self).__init__()
    self.dagda_server_host = dagda_server_host
    self.dagda_server_port = dagda_server_port
    # Register the MongoDB driver globally before it is consumed below
    InternalServer.set_mongodb_driver(mongodb_host, mongodb_port)
    self.sysdig_falco_monitor = SysdigFalcoMonitor(InternalServer.get_docker_driver(),
                                                   InternalServer.get_mongodb_driver())
def _init_or_update_db():
    """(Re)build the vulnerability database, recording progress in MongoDB.

    Fixed: the original had no error handling, so any failure in
    DBComposer left the recorded status stuck at 'Initializing' forever.
    Failures are now recorded as the status (same message format as the
    other `_init_or_update_db` variants in this project).
    """
    InternalServer.get_mongodb_driver().insert_init_db_process_status(
        {'status': 'Initializing', 'timestamp': datetime.datetime.now().timestamp()})
    # Init db
    try:
        db_composer = DBComposer()
        db_composer.compose_vuln_db()
    except Exception as ex:
        # Record the failure so clients polling the status see what happened
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': "Unexpected exception of type {0} occurred: {1!r}".format(type(ex).__name__, ex.args),
             'timestamp': datetime.datetime.now().timestamp()})
        return
    InternalServer.get_mongodb_driver().insert_init_db_process_status(
        {'status': 'Updated', 'timestamp': datetime.datetime.now().timestamp()})
def _check_docker_by_container_id(item):
    """Analyze the container referenced by *item* and persist the report."""
    # Evaluate the running container (no image name, container id only)
    report = Analyzer().evaluate_image(None, item['container_id'])
    # Store the evaluation in the scan history
    InternalServer.get_mongodb_driver().update_docker_image_scan_result_to_history(item['_id'], report)
def __init__(self, dagda_server_url=None):
    """Create an analyzer; remote when a Dagda server URL is given, local otherwise."""
    super(Analyzer, self).__init__()
    self.is_remote = dagda_server_url is not None
    if self.is_remote:
        # Remote mode: talk to an external Dagda server
        self.dagda_server_url = dagda_server_url
    else:
        # Local mode: use the in-process drivers
        self.mongoDbDriver = InternalServer.get_mongodb_driver()
        self.dockerDriver = InternalServer.get_docker_driver()
def run(self):
    """Tail the falco JSON output file forever, persisting container events to MongoDB."""
    if not InternalServer.is_external_falco():
        # Start falco writing JSON events into the shared host output file
        self.running_container_id = self._start_container(
            'falco -pc -o json_output=true -o file_output.enabled=true ' +
            '-o file_output.filename=/host' + SysdigFalcoMonitor._falco_output_filename +
            self.falco_rules)
    # Wait 3 seconds for sysdig/falco start up and creates the output file
    time.sleep(3)
    # Check output file and running docker container
    if not os.path.isfile(SysdigFalcoMonitor._falco_output_filename) or \
            (not InternalServer.is_external_falco() and \
             len(self.docker_driver.get_docker_container_ids_by_image_name('falcosecurity/falco:0.18.0')) == 0):
        raise DagdaError('Falcosecurity/falco output file not found.')
    # Review sysdig/falco logs after rules parser
    if not InternalServer.is_external_falco():
        sysdig_falco_logs = self.docker_driver.docker_logs(
            self.running_container_id, True, True, False)
        if "Rule " in sysdig_falco_logs:
            SysdigFalcoMonitor._parse_log_and_show_dagda_warnings(
                sysdig_falco_logs)
    # Read file: seek to the last read position each iteration so only new lines are processed
    with open(SysdigFalcoMonitor._falco_output_filename, 'rb') as f:
        last_file_position = 0
        fbuf = io.BufferedReader(f)
        while True:
            fbuf.seek(last_file_position)
            content = fbuf.readlines()
            sysdig_falco_events = []
            for line in content:
                line = line.decode('utf-8').replace("\n", "")
                json_data = json.loads(line)
                container_id = json_data['output_fields']['container.id']
                # Skip host-level events; only container events are stored
                if container_id != 'host':
                    try:
                        json_data['container_id'] = container_id
                        json_data['image_name'] = json_data[
                            'output_fields']['container.image.repository']
                        if 'container.image.tag' in json_data[
                                'output_fields']:
                            json_data['image_name'] += ":" + json_data[
                                'output_fields']['container.image.tag']
                        sysdig_falco_events.append(json_data)
                    except IndexError:
                        # The /tmp/falco_output.json file had information about ancient events, so nothing to do
                        pass
                    except KeyError:
                        # The /tmp/falco_output.json file had information about ancient events, so nothing to do
                        pass
            last_file_position = fbuf.tell()
            if len(sysdig_falco_events) > 0:
                self.mongodb_driver.bulk_insert_sysdig_falco_events(
                    sysdig_falco_events)
            time.sleep(2)
def _threaded_malware(dockerDriver, temp_dir, malware_binaries):
    """Worker: collect the malware binaries found inside the docker image.

    Results are appended to the shared *malware_binaries* list.
    """
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Retrieving malware files from the docker image ...')
    found = malware_extractor.get_malware_included_in_docker_image(docker_driver=dockerDriver,
                                                                   temp_dir=temp_dir)
    malware_binaries.extend(found)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Malware files from the docker image retrieved')
def __init__(self, dagda_server_host='127.0.0.1', dagda_server_port=5000, mongodb_host='127.0.0.1',
             mongodb_port=27017, mongodb_ssl=False, mongodb_user=None, mongodb_pass=None,
             falco_rules_filename=None):
    """Init the Dagda server: bind address/port, MongoDB connection and falco monitor.

    NOTE(review): the MongoDB driver must be registered on InternalServer
    before SysdigFalcoMonitor is built, because the monitor reads it back.
    """
    super(DagdaServer, self).__init__()
    self.dagda_server_host = dagda_server_host
    self.dagda_server_port = dagda_server_port
    # Register the MongoDB driver globally before it is consumed below
    InternalServer.set_mongodb_driver(mongodb_host, mongodb_port, mongodb_ssl, mongodb_user, mongodb_pass)
    self.sysdig_falco_monitor = SysdigFalcoMonitor(InternalServer.get_docker_driver(),
                                                   InternalServer.get_mongodb_driver(),
                                                   falco_rules_filename)
def stop_monitor_by_container_id(container_id):
    """Stop the runtime monitoring of a docker container and close its history entry.

    Returns ``(json_body, http_status)``: 503 when runtime analysis is off,
    400 for bad input or when no monitoring is running, 404 for unknown
    containers; otherwise the completed history entry as JSON.
    """
    # -- Check runtime monitor status
    if not InternalServer.is_runtime_analysis_enabled():
        return json.dumps({'err': 503, 'msg': 'Behaviour analysis service unavailable'}, sort_keys=True), 503
    # -- Checks input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_by_container_id(container_id)
    except Exception:  # narrowed from a bare except; any lookup failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Checks that a monitoring is actually running for this container
    if not InternalServer.get_mongodb_driver().is_there_a_started_monitoring(container_id):
        return json.dumps({'err': 400, 'msg': 'There is not monitoring for the requested container id'},
                          sort_keys=True), 400
    now = datetime.datetime.now().timestamp()
    # -- Process request
    InternalServer.get_mongodb_driver().update_runtime_monitoring_analysis(container_id)
    monitoring_result = InternalServer.get_mongodb_driver().get_a_started_monitoring(container_id)
    monitoring_result['runtime_analysis']['stop_timestamp'] = now
    monitoring_result['status'] = 'Completed'
    # '_id' avoids shadowing the builtin `id`
    _id = str(monitoring_result['_id'])
    # -- Update history
    InternalServer.get_mongodb_driver().update_docker_image_scan_result_to_history(_id, monitoring_result)
    # -- Return
    return json.dumps(InternalServer.get_mongodb_driver().get_docker_image_history(image_name, _id)[0],
                      sort_keys=True)
def _threaded_dependencies(dockerDriver, image_name, temp_dir, dependencies):
    """Worker: collect the programming language dependencies of the docker image.

    Results are appended to the shared *dependencies* list.
    """
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Retrieving dependencies from the docker image ...')
    found = dep_info_extractor.get_dependencies_from_docker_image(docker_driver=dockerDriver,
                                                                 image_name=image_name,
                                                                 temp_dir=temp_dir)
    dependencies.extend(found)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Dependencies from the docker image retrieved')
def _check_docker_by_image_name(item):
    """Analyze a docker image by name, persist the report and clean up."""
    # Evaluate the docker image
    report = Analyzer().evaluate_image(item['image_name'], None)
    # Update the mongodb history entry with the evaluation
    InternalServer.get_mongodb_driver().update_docker_image_scan_result_to_history(item['_id'], report)
    # Remove the image when it was pulled only for this analysis
    if item['pulled']:
        InternalServer.get_docker_driver().docker_remove_image(item['image_name'])
def stop_monitor_by_container_id(container_id):
    """Stop the runtime monitoring of a docker container and close its history entry.

    Returns ``(json_body, http_status)``: 503 when runtime analysis is off,
    400 for bad input or when no monitoring is running, 404 for unknown
    containers; otherwise the completed history entry as JSON.
    """
    # -- Check runtime monitor status
    if not InternalServer.is_runtime_analysis_enabled():
        return json.dumps({'err': 503, 'msg': 'Behaviour analysis service unavailable'},
                          sort_keys=True), 503
    # -- Checks input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_by_container_id(container_id)
    except Exception:  # narrowed from a bare except; any lookup failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Checks that a monitoring is actually running for this container
    if not InternalServer.get_mongodb_driver().is_there_a_started_monitoring(container_id):
        return json.dumps({'err': 400, 'msg': 'There is not monitoring for the requested container id'},
                          sort_keys=True), 400
    now = datetime.datetime.now().timestamp()
    # -- Process request
    InternalServer.get_mongodb_driver().update_runtime_monitoring_analysis(container_id)
    monitoring_result = InternalServer.get_mongodb_driver().get_a_started_monitoring(container_id)
    monitoring_result['runtime_analysis']['stop_timestamp'] = now
    monitoring_result['status'] = 'Completed'
    # '_id' avoids shadowing the builtin `id`
    _id = str(monitoring_result['_id'])
    # -- Update history
    InternalServer.get_mongodb_driver().update_docker_image_scan_result_to_history(_id, monitoring_result)
    # -- Return
    return json.dumps(InternalServer.get_mongodb_driver().get_docker_image_history(image_name, _id)[0],
                      sort_keys=True)
def __init__(self, docker_driver, mongodb_driver, falco_rules_filename, external_falco_output_filename):
    """Init the sysdig/falco monitor.

    :param falco_rules_filename: optional custom falco rules file; it is copied
        to the well-known host path and handed to falco via '-o rules_file=...'
    :param external_falco_output_filename: when given, an already-running
        external falco instance is assumed and its output file is tailed
        instead of starting a falco container
    """
    super(SysdigFalcoMonitor, self).__init__()
    self.mongodb_driver = mongodb_driver
    self.docker_driver = docker_driver
    self.running_container_id = ''
    if falco_rules_filename is None:
        self.falco_rules = ''
    else:
        # Stage the custom rules where the falco container can reach them
        copyfile(falco_rules_filename, SysdigFalcoMonitor._falco_custom_rules_filename)
        self.falco_rules = ' -o rules_file=/host' + SysdigFalcoMonitor._falco_custom_rules_filename
    if external_falco_output_filename is not None:
        InternalServer.set_external_falco(True)
        # NOTE: mutates the class-level output filename shared by all instances
        SysdigFalcoMonitor._falco_output_filename = external_falco_output_filename
def _execute_bid_query(bid_id, details):
    """Query BugTraq data: affected products by default, full info when *details* is truthy."""
    mongo = InternalServer.get_mongodb_driver()
    result = mongo.get_bid_info_by_id(bid_id) if details else mongo.get_products_by_bid(bid_id)
    if not result:
        return json.dumps({
            'err': 404,
            'msg': 'BugTraq Id not found'
        }, sort_keys=True), 404
    return json.dumps(result, sort_keys=True)
def set_product_vulnerability_as_false_positive(image_name, product, version=None):
    """Flag a product vulnerability of *image_name* as a false positive.

    Returns 204 with an empty body on success, a 404 JSON error otherwise.
    """
    mongo = InternalServer.get_mongodb_driver()
    updated = mongo.update_product_vulnerability_as_fp(image_name=image_name,
                                                       product=product,
                                                       version=version)
    if updated:
        return '', 204
    return json.dumps({'err': 404, 'msg': 'Product vulnerability not found'}, sort_keys=True), 404
def run(self):
    """Tail the sysdig/falco JSON output file forever, persisting container events to MongoDB."""
    if not InternalServer.is_external_falco():
        # Start falco writing JSON events into the shared host output file
        self.running_container_id = self._start_container('falco -pc -o json_output=true -o file_output.enabled=true ' +
                                                          '-o file_output.filename=/host' +
                                                          SysdigFalcoMonitor._falco_output_filename + self.falco_rules)
    # Wait 3 seconds for sysdig/falco start up and creates the output file
    time.sleep(3)
    # Check output file and running docker container
    if not os.path.isfile(SysdigFalcoMonitor._falco_output_filename) or \
            (not InternalServer.is_external_falco() and \
             len(self.docker_driver.get_docker_container_ids_by_image_name('sysdig/falco')) == 0):
        raise DagdaError('Sysdig/falco output file not found.')
    # Review sysdig/falco logs after rules parser
    if not InternalServer.is_external_falco():
        sysdig_falco_logs = self.docker_driver.docker_logs(self.running_container_id, True, True, False)
        if "Rule " in sysdig_falco_logs:
            SysdigFalcoMonitor._parse_log_and_show_dagda_warnings(sysdig_falco_logs)
    # Read file: seek to the last read position each iteration so only new lines are processed
    with open(SysdigFalcoMonitor._falco_output_filename, 'rb') as f:
        last_file_position = 0
        fbuf = io.BufferedReader(f)
        while True:
            fbuf.seek(last_file_position)
            content = fbuf.readlines()
            sysdig_falco_events = []
            for line in content:
                line = line.decode('utf-8').replace("\n", "")
                json_data = json.loads(line)
                # The container id is embedded in the event's 'output' text as '(id=...)'
                container_id = json_data['output'].split(" (id=")[1].replace(")", "")
                # Skip host-level events; only container events are stored
                if container_id != 'host':
                    try:
                        image_name = self.docker_driver.get_docker_image_name_by_container_id(container_id)
                        json_data['container_id'] = container_id
                        json_data['image_name'] = image_name
                        sysdig_falco_events.append(json_data)
                    except IndexError:
                        # The /tmp/falco_output.json file had information about ancient events, so nothing to do
                        pass
            last_file_position = fbuf.tell()
            if len(sysdig_falco_events) > 0:
                self.mongodb_driver.bulk_insert_sysdig_falco_events(sysdig_falco_events)
            time.sleep(2)
def post_image_analysis_to_the_history(image_name):
    """Insert a client-provided scan result for *image_name* into the history collection."""
    payload = json.loads(request.data.decode('utf-8'))
    # 'inserted_id' avoids shadowing the builtin `id`
    inserted_id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(payload)
    # -- Return
    return json.dumps({'id': str(inserted_id), 'image_name': image_name}, sort_keys=True), 201
def get_init_or_update_db_status():
    """Return the vuln DB init/update status with a human-readable UTC timestamp."""
    status = InternalServer.get_mongodb_driver().get_init_db_process_status()
    ts = status['timestamp']
    # '-' when no timestamp was recorded yet
    status['timestamp'] = str(datetime.datetime.utcfromtimestamp(ts)) if ts else '-'
    return json.dumps(status, sort_keys=True)
def check_docker_by_image_name(image_name):
    """Queue an analysis of *image_name*, pulling it from the registry when missing.

    Returns ``(json_body, http_status)``: 400 on bad input, 404 when the
    image cannot be found or pulled, 202 when the analysis is queued.
    """
    # -- Check input
    if not image_name:
        return json.dumps({'err': 400, 'msg': 'Bad image name'}, sort_keys=True), 400
    # -- Docker pull from remote registry if it is necessary
    try:
        pulled = False
        if not InternalServer.get_docker_driver().is_docker_image(image_name):
            output = InternalServer.get_docker_driver().docker_pull(image_name)
            if 'errorDetail' in output:
                msg = 'Error: image library/' + image_name + ':latest not found'
                DagdaLogger.get_logger().error(msg)
                raise DagdaError(msg)
            pulled = True
    except Exception:  # narrowed from a bare except; any pull failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Image name not found'}, sort_keys=True), 404
    # -- Process request
    data = {}
    data['image_name'] = image_name
    data['timestamp'] = datetime.datetime.now().timestamp()
    data['status'] = 'Analyzing'
    # '_id' avoids shadowing the builtin `id`
    _id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(data)
    InternalServer.get_dagda_edn().put({'msg': 'check_image', 'image_name': image_name,
                                        '_id': str(_id), 'pulled': pulled})
    # -- Return
    output = {}
    output['id'] = str(_id)
    output['msg'] = 'Accepted the analysis of <' + image_name + '>'
    return json.dumps(output, sort_keys=True), 202
def _init_or_update_db():
    """(Re)build the vulnerability database, recording progress/failures in MongoDB."""
    try:
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': 'Initializing', 'timestamp': datetime.datetime.now().timestamp()})
        # Compose the vulnerability database
        DBComposer().compose_vuln_db()
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': 'Updated', 'timestamp': datetime.datetime.now().timestamp()})
    except Exception as err:
        # Record the failure as the status so clients can see what happened
        message = "Unexpected exception of type {0} occurred: {1!r}".format(type(err).__name__, err.args)
        DagdaLogger.get_logger().error(message)
        if InternalServer.is_debug_logging_enabled():
            traceback.print_exc()
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': message, 'timestamp': datetime.datetime.now().timestamp()})
def post_image_analysis_to_the_history(image_name):
    """Insert a client-provided scan result for *image_name* into the history collection."""
    payload = json.loads(request.data.decode('utf-8'))
    # 'inserted_id' avoids shadowing the builtin `id`
    inserted_id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(payload)
    # -- Return
    return json.dumps({'id': str(inserted_id), 'image_name': image_name}, sort_keys=True), 201
def get_rhsa_and_rhba_bz2_archive_files():
    """Download the RHSA and RHBA .bz2 archives referenced by Red Hat's OVAL v2 feed."""
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug(
            "Collecting RHSA and RHBA .bz2 archive files...")
    feed = json.loads(get_http_resource_content(
        "https://www.redhat.com/security/data/oval/v2/feed.json"))
    archives = []
    for entry in feed["feed"]["entry"]:
        # Only archives larger than this threshold are of interest
        if entry["link"][0]["length"] <= 850:
            continue
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug("Collected " + entry["content"]["src"])
        archives.append(get_http_resource_content(entry["content"]["src"]))
    return archives
def get_products_by_bid(bid_id):
    """List the products affected by the given BugTraq id (404 when none)."""
    products = InternalServer.get_mongodb_driver().get_products_by_bid(bid_id)
    if not products:
        return json.dumps({'err': 404, 'msg': 'BugTraq Id not found'}, sort_keys=True), 404
    return json.dumps(products, sort_keys=True)
def get_history():
    """Return the whole docker image analysis history (404 when empty)."""
    history = InternalServer.get_mongodb_driver().get_docker_image_all_history()
    if not history:
        return json.dumps({'err': 404, 'msg': 'Analysis not found'}, sort_keys=True), 404
    return json.dumps(history, sort_keys=True)
def get_vulns_by_product_and_version(product, version=None):
    """Return the known vulnerabilities for *product* (optionally pinned to *version*)."""
    vulns = InternalServer.get_mongodb_driver().get_vulnerabilities(product, version)
    if not vulns:
        return json.dumps({'err': 404, 'msg': 'Vulnerabilities not found'}, sort_keys=True), 404
    return json.dumps(vulns, sort_keys=True)
def check_docker_by_container_id(container_id):
    """Queue an analysis of the image behind *container_id* (400/404 on errors, 202 on success)."""
    # -- Check input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_by_container_id(container_id)
    except Exception as ex:
        message = "Unexpected exception of type {0} occurred while getting the docker image name: {1!r}" \
            .format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
        DagdaLogger.get_logger().error(message)
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Process request: record an 'Analyzing' entry and queue the job
    scan = {'image_name': image_name,
            'timestamp': datetime.datetime.now().timestamp(),
            'status': 'Analyzing'}
    # 'scan_id' avoids shadowing the builtin `id`
    scan_id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(scan)
    InternalServer.get_dagda_edn().put({'msg': 'check_container',
                                        'container_id': container_id,
                                        '_id': str(scan_id)})
    # -- Return
    return json.dumps({'id': str(scan_id),
                       'msg': 'Accepted the analysis of <' + image_name + '> with id: ' + container_id},
                      sort_keys=True), 202
def get_history_by_image_name(image_name):
    """Return the analysis history of *image_name*, optionally filtered by the 'id' query arg."""
    report_id = request.args.get('id')
    history = InternalServer.get_mongodb_driver().get_docker_image_history(image_name, report_id)
    if not history:
        return json.dumps({'err': 404, 'msg': 'History not found'}, sort_keys=True), 404
    return json.dumps(history, sort_keys=True)
def _threaded_cve_gathering(mongoDbDriver, i):
    """Worker: download NVD JSON feed for year *i* and bulk-insert its CVEs."""
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('... Including CVEs - ' + str(i))
    feed_url = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-" + str(i) + ".json.gz"
    compressed = get_http_resource_content(feed_url)
    cves, cves_ext_info = get_cve_list_from_file(compressed, i)
    if cves:
        mongoDbDriver.bulk_insert_cves(cves)
    if cves_ext_info:
        mongoDbDriver.bulk_insert_cves_info(cves_ext_info)
def pre_check(self):
    """Verify host requirements and warm up the sysdig/falco container.

    Skipped entirely when an external falco instance is configured.
    Raises DagdaError when the environment cannot run sysdig/falco.
    """
    if not InternalServer.is_external_falco():
        # Init
        linux_distro = SysdigFalcoMonitor._get_linux_distro()
        uname_r = os.uname().release
        # Check requirements: the sysdig kernel module needs the host kernel headers
        if not os.path.isfile('/.dockerenv'):
            # I'm living in real world!
            if 'Red Hat' in linux_distro or 'CentOS' in linux_distro or 'Fedora' in linux_distro \
                    or 'openSUSE' in linux_distro:
                # Red Hat/CentOS/Fedora/openSUSE
                return_code = subprocess.call(["rpm", "-q", "kernel-devel-" + uname_r],
                                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            elif 'Debian' in linux_distro or 'Ubuntu' in linux_distro:
                # Debian/Ubuntu
                return_code = subprocess.call(["dpkg", "-l", "linux-headers-" + uname_r],
                                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            else:
                raise DagdaError('Linux distribution not supported yet.')
            if return_code != 0:
                raise DagdaError('The kernel headers are not installed in the host operating system.')
        else:
            # I'm running inside a docker container: can't inspect the host packages
            DagdaLogger.get_logger().warning("I'm running inside a docker container, so I can't check if the kernel "
                                             "headers are installed in the host operating system. Please, review it!!")
        # Check Docker driver
        if self.docker_driver.get_docker_client() is None:
            raise DagdaError('Error while fetching Docker server API version.')
        # Docker pull for ensuring the sysdig/falco image
        self.docker_driver.docker_pull('sysdig/falco')
        # Stops sysdig/falco containers if there are any
        container_ids = self.docker_driver.get_docker_container_ids_by_image_name('sysdig/falco')
        if len(container_ids) > 0:
            for container_id in container_ids:
                self.docker_driver.docker_stop(container_id)
                self.docker_driver.docker_remove_container(container_id)
        # Cleans mongodb falco_events collection
        self.mongodb_driver.delete_falco_events_collection()
        # Starts sysdig running container without custom entrypoint for avoiding:
        # --> Runtime error: error opening device /host/dev/sysdig0
        self.running_container_id = self._start_container()
        time.sleep(30)
        logs = self.docker_driver.docker_logs(self.running_container_id, True, True, False)
        if "Runtime error: error opening device /host/dev/sysdig0" not in logs:
            self.docker_driver.docker_stop(self.running_container_id)
        else:
            raise DagdaError('Runtime error opening device /host/dev/sysdig0.')
        # Clean up the warm-up container
        self.docker_driver.docker_remove_container(self.running_container_id)
def get_all_running_containers():
    """Summarize all running docker containers as a sorted JSON list."""
    output = []
    for container in InternalServer.get_docker_driver().get_docker_client().containers():
        output.append({
            'id': container['Id'][:12],
            'image': container['Image'],
            'created': str(datetime.datetime.utcfromtimestamp(container['Created'])),
            'status': container['State'],
            'name': container['Names'][0][1:],  # strip the leading '/'
        })
    return json.dumps(output, sort_keys=True)
def _execute_cve_query(cve_id, details): regex = r"(CVE-[0-9]{4}-[0-9]{4,5})" search_obj = re.search(regex, cve_id) if not search_obj or len(search_obj.group(0)) != len(cve_id): return json.dumps({ 'err': 400, 'msg': 'Bad cve format' }, sort_keys=True), 400 if not details: result = InternalServer.get_mongodb_driver().get_products_by_cve( cve_id) else: result = InternalServer.get_mongodb_driver().get_cve_info_by_cve_id( cve_id) if len(result) == 0: return json.dumps({ 'err': 404, 'msg': 'CVE not found' }, sort_keys=True), 404 return json.dumps(result, sort_keys=True)
def check_docker_by_container_id(container_id):
    """Queue an analysis of the image behind *container_id*.

    Returns ``(json_body, http_status)``: 400 on bad input, 404 when the
    container id cannot be resolved to an image, 202 when queued.
    """
    # -- Check input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_from_container_id(container_id)
    except Exception:  # narrowed from a bare except; any lookup failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Process request
    data = {}
    data['image_name'] = image_name
    data['timestamp'] = datetime.datetime.now().timestamp()
    data['status'] = 'Analyzing'
    _id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(data)
    InternalServer.get_dagda_edn().put({'msg': 'check_container',
                                        'container_id': container_id,
                                        '_id': str(_id)})
    # -- Return
    output = {}
    output['id'] = str(_id)
    output['msg'] = 'Accepted the analysis of <' + image_name + '> with id: ' + container_id
    return json.dumps(output, sort_keys=True), 202
def _execute_rhba_query(rhba_id, details): regex = r"(RHBA-[0-9]{4}:[0-9]+)" search_obj = re.search(regex, rhba_id) if not search_obj or len(search_obj.group(0)) != len(rhba_id): return json.dumps({ 'err': 400, 'msg': 'Bad rhba format' }, sort_keys=True), 400 if not details: result = InternalServer.get_mongodb_driver().get_products_by_rhba( rhba_id) else: result = InternalServer.get_mongodb_driver().get_rhba_info_by_id( rhba_id) if len(result) == 0: return json.dumps({ 'err': 404, 'msg': 'RHBA not found' }, sort_keys=True), 404 return json.dumps(result, sort_keys=True)
def start_monitor_by_container_id(container_id):
    """Start the runtime monitoring of a docker container.

    Returns ``(json_body, http_status)``: 503 when runtime analysis is off,
    400 on a bad or duplicate request, 404 when the container id is unknown,
    202 when the monitoring history entry was created.
    """
    # -- Check runtime monitor status
    if not InternalServer.is_runtime_analysis_enabled():
        return json.dumps({'err': 503, 'msg': 'Behaviour analysis service unavailable'}, sort_keys=True), 503
    # -- Checks input
    if not container_id:
        return json.dumps({'err': 400, 'msg': 'Bad container id'}, sort_keys=True), 400
    # -- Retrieves docker image name
    try:
        image_name = InternalServer.get_docker_driver().get_docker_image_name_by_container_id(container_id)
    except Exception:  # narrowed from a bare except; any lookup failure maps to 404
        return json.dumps({'err': 404, 'msg': 'Container Id not found'}, sort_keys=True), 404
    # -- Checks if the container is already being monitored
    if InternalServer.get_mongodb_driver().is_there_a_started_monitoring(container_id):
        return json.dumps({'err': 400,
                           'msg': 'The monitoring for the requested container id is already started'},
                          sort_keys=True), 400
    now = datetime.datetime.now().timestamp()
    # -- Create image_history entry
    history = {}
    history['image_name'] = image_name
    history['timestamp'] = now
    history['status'] = 'Monitoring'
    history['runtime_analysis'] = {'container_id': container_id,
                                   'start_timestamp': now,
                                   'stop_timestamp': None,
                                   'anomalous_activities_detected': None}
    # '_id' avoids shadowing the builtin `id`
    _id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(history)
    # -- Return
    output = {}
    output['id'] = str(_id)
    output['image_name'] = image_name
    output['msg'] = 'Monitoring of docker container with id <' + container_id + '> started'
    return json.dumps(output, sort_keys=True), 202
def get_all_docker_images():
    """Summarize all local docker images as a sorted JSON list."""
    output = []
    for image in InternalServer.get_docker_driver().get_docker_client().images():
        tags = image['RepoTags']
        output.append({
            # Untagged images are reported as 'None:None'; duplicates are removed otherwise
            'tags': list(['None:None']) if tags is None else list(set(tags)),
            'id': image['Id'][7:][:12],  # drop the 'sha256:' prefix, keep the short id
            'created': str(datetime.datetime.utcfromtimestamp(image['Created'])),
            'size': sizeof_fmt(image['VirtualSize']),
        })
    return json.dumps(output, sort_keys=True)
def __init__(self, dagda_server_host='127.0.0.1', dagda_server_port=5000, mongodb_host='127.0.0.1',
             mongodb_port=27017, mongodb_ssl=False, mongodb_user=None, mongodb_pass=None,
             falco_rules_filename=None, external_falco_output_filename=None, debug_logging=False):
    """Init the Dagda server: bind address/port, debug flag, MongoDB connection and falco monitor.

    NOTE(review): the MongoDB driver must be registered on InternalServer
    before SysdigFalcoMonitor is built, because the monitor reads it back.
    """
    super(DagdaServer, self).__init__()
    self.dagda_server_host = dagda_server_host
    self.dagda_server_port = dagda_server_port
    InternalServer.set_debug_logging_enabled(debug_logging)
    # Register the MongoDB driver globally before it is consumed below
    InternalServer.set_mongodb_driver(mongodb_host, mongodb_port, mongodb_ssl, mongodb_user, mongodb_pass)
    self.sysdig_falco_monitor = SysdigFalcoMonitor(InternalServer.get_docker_driver(),
                                                   InternalServer.get_mongodb_driver(),
                                                   falco_rules_filename,
                                                   external_falco_output_filename)
def _init_or_update_db():
    """(Re)build the vulnerability database, recording progress and failures in MongoDB."""
    try:
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': 'Initializing', 'timestamp': datetime.datetime.now().timestamp()})
        # Init db
        composer = DBComposer()
        composer.compose_vuln_db()
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': 'Updated', 'timestamp': datetime.datetime.now().timestamp()})
    except Exception as err:
        # Record the failure as the status so clients polling it see what happened
        err_msg = "Unexpected exception of type {0} occurred: {1!r}".format(type(err).__name__, err.args)
        DagdaLogger.get_logger().error(err_msg)
        if InternalServer.is_debug_logging_enabled():
            traceback.print_exc()
        InternalServer.get_mongodb_driver().insert_init_db_process_status(
            {'status': err_msg, 'timestamp': datetime.datetime.now().timestamp()})
def run(self):
    """Fork three children and serve the REST API in the parent.

    Process layout: child #1 consumes queued analysis requests, child #2
    monitors docker daemon events, child #3 runs the sysdig/falco runtime
    behaviour monitor, and the parent runs the Flask app.
    """
    edn_pid = os.fork()
    if edn_pid == 0:
        # Child #1: consumes analysis requests queued on the dagda EDN
        try:
            while True:
                item = InternalServer.get_dagda_edn().get()
                if item['msg'] == 'init_db':
                    self._init_or_update_db()
                elif item['msg'] == 'check_image':
                    self._check_docker_by_image_name(item)
                elif item['msg'] == 'check_container':
                    self._check_docker_by_container_id(item)
        except KeyboardInterrupt:
            # Pressed CTRL+C to quit, so nothing to do
            pass
    else:
        docker_events_monitor_pid = os.fork()
        if docker_events_monitor_pid == 0:
            # Child #2: persists docker daemon events into MongoDB
            try:
                docker_daemon_events_monitor = DockerDaemonEventsMonitor(InternalServer.get_docker_driver(),
                                                                         InternalServer.get_mongodb_driver())
                docker_daemon_events_monitor.run()
            except KeyboardInterrupt:
                # Pressed CTRL+C to quit, so nothing to do
                pass
        else:
            sysdig_falco_monitor_pid = os.fork()
            if sysdig_falco_monitor_pid == 0:
                # Child #3: runs the sysdig/falco runtime behaviour monitor
                try:
                    self.sysdig_falco_monitor.pre_check()
                    self.sysdig_falco_monitor.run()
                except DagdaError as e:
                    # Monitoring is optional: log and keep the rest of the server alive
                    DagdaLogger.get_logger().error(e.get_message())
                    DagdaLogger.get_logger().warning('Runtime behaviour monitor disabled.')
                except KeyboardInterrupt:
                    # Pressed CTRL+C to quit: only clean up when we own the falco container
                    if not InternalServer.is_external_falco():
                        InternalServer.get_docker_driver().docker_stop(self.sysdig_falco_monitor.get_running_container_id())
                        InternalServer.get_docker_driver().docker_remove_container(
                            self.sysdig_falco_monitor.get_running_container_id())
            else:
                # Parent: serves the Flask REST API
                DagdaServer.app.run(debug=False, host=self.dagda_server_host, port=self.dagda_server_port)
def get_docker_daemon_events():
    """Return stored docker daemon events, optionally filtered by from/type/action query args."""
    # Empty query args are normalized to None (no filter)
    event_from = request.args.get('event_from') or None
    event_type = request.args.get('event_type') or None
    event_action = request.args.get('event_action') or None
    # Run query
    events = InternalServer.get_mongodb_driver().get_docker_events_daemon(op_from=event_from,
                                                                          op_type=event_type,
                                                                          op_action=event_action)
    # Return
    if not events:
        return json.dumps({'err': 404, 'msg': 'Docker daemon events not found'}, sort_keys=True), 404
    return json.dumps(events, sort_keys=True)
def run(self):
    """Continuously read docker daemon events and persist them to MongoDB.

    Runs forever. Each event is decoded from JSON; attribute keys that
    contain a dot are stripped (MongoDB forbids dots in document keys)
    before the event is bulk-inserted.
    """
    # Read docker daemon events
    while True:
        try:
            for event in self.docker_driver.docker_events():
                e = json.loads(event.decode('UTF-8').replace("\n", ""))
                if 'Actor' in e and 'Attributes' in e['Actor']:
                    # Copy the keys first: the dict is mutated while filtering.
                    # (Renamed from 'iter', which shadowed the builtin.)
                    attribute_keys = list(e['Actor']['Attributes'])
                    for key in attribute_keys:
                        if '.' in key:
                            del e['Actor']['Attributes'][key]
                # Bulk insert
                self.mongodb_driver.bulk_insert_docker_daemon_events([e])
        except requests.packages.urllib3.exceptions.ReadTimeoutError:
            # Event stream idle timeout: nothing to do, reconnect on next loop
            pass
        except bson.errors.InvalidDocument as ex:
            # Only InvalidDocument is bound here, so the former
            # "type(ex).__name__ == 'DagdaError'" ternary was dead code.
            message = "Unexpected exception of type {0} occurred: {1!r}" \
                .format(type(ex).__name__, ex.args)
            DagdaLogger.get_logger().error(message)
            if InternalServer.is_debug_logging_enabled():
                traceback.print_exc()
def check_docker_by_image_name(image_name):
    """REST endpoint: queue a static analysis of a docker image.

    Pulls the image from the remote registry when it is not present
    locally, records an 'Analyzing' entry in the history collection and
    enqueues the scan request. Returns (json, status): 202 on success,
    400 on empty image name, 404 when the image cannot be pulled.
    """
    # -- Check input
    if not image_name:
        return json.dumps({'err': 400, 'msg': 'Bad image name'}, sort_keys=True), 400
    # -- Docker pull from remote registry if it is necessary
    try:
        pulled = False
        if not InternalServer.get_docker_driver().is_docker_image(image_name):
            if ':' in image_name:
                # Split once (was split twice); parts[0]=repository, parts[1]=tag
                parts = image_name.split(':')
                repository = parts[0]
                tag = parts[1]
                # Fixed message: previously the full image name was used here,
                # producing a duplicated tag like 'library/redis:4:4'.
                msg = 'Error: image library/' + repository + ':' + tag + ' not found'
                output = InternalServer.get_docker_driver().docker_pull(repository, tag=tag)
            else:
                msg = 'Error: image library/' + image_name + ':latest not found'
                output = InternalServer.get_docker_driver().docker_pull(image_name)
            if 'errorDetail' in output:
                DagdaLogger.get_logger().error(msg)
                raise DagdaError(msg)
            pulled = True
    except Exception as ex:
        message = "Unexpected exception of type {0} occurred while pulling the docker image: {1!r}" \
            .format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
        DagdaLogger.get_logger().error(message)
        return json.dumps({'err': 404, 'msg': 'Image name not found'}, sort_keys=True), 404
    # -- Process request: record the pending scan and enqueue it
    data = {}
    data['image_name'] = image_name
    data['timestamp'] = datetime.datetime.now().timestamp()
    data['status'] = 'Analyzing'
    # Renamed from 'id', which shadowed the builtin
    scan_id = InternalServer.get_mongodb_driver().insert_docker_image_scan_result_to_history(data)
    InternalServer.get_dagda_edn().put({'msg': 'check_image', 'image_name': image_name, '_id': str(scan_id),
                                        'pulled': pulled})
    # -- Return
    output = {}
    output['id'] = str(scan_id)
    output['msg'] = 'Accepted the analysis of <' + image_name + '>'
    return json.dumps(output, sort_keys=True), 202
def evaluate_image(self, image_name, container_id):
    """Run the static analysis of a docker image or a running container.

    When container_id is given, the image name is resolved from the
    container and the container filesystem is scanned; otherwise the
    image itself is scanned. OS packages are gathered first, then malware
    binaries and language dependencies are collected in two parallel
    threads. Returns a dict with 'image_name', 'timestamp', 'status' and
    the 'static_analysis' report; on failure 'status' carries the error
    message instead of 'Completed'.
    """
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('ENTRY to the method for analyzing a docker image')
    # Init
    data = {}
    # -- Static analysis
    # The container id, when present, takes precedence over the image name
    image_name = self.dockerDriver.get_docker_image_name_by_container_id(container_id) if container_id \
        else image_name
    os_packages = []
    malware_binaries = []
    dependencies = []
    temp_dir = None
    try:
        # Get OS packages
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug('Retrieving OS packages from the docker image ...')
        if container_id is None:
            # Scans the docker image
            os_packages = os_info_extractor.get_soft_from_docker_image(docker_driver=self.dockerDriver,
                                                                       image_name=image_name)
            # Filesystem bundle extracted to a temp dir for the threaded scans below
            temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
                                                 image_name=image_name)
        else:
            # Scans the docker container
            os_packages = os_info_extractor.get_soft_from_docker_container_id(docker_driver=self.dockerDriver,
                                                                              container_id=container_id)
            temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
                                                 container_id=container_id)
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug('OS packages from the docker image retrieved')
        # Get malware binaries in a parallel way
        # (results are appended into the shared malware_binaries list)
        malware_thread = Thread(target=Analyzer._threaded_malware, args=(self.dockerDriver, temp_dir,
                                                                         malware_binaries))
        malware_thread.start()
        # Get programming language dependencies in a parallel way
        # (results are appended into the shared dependencies list)
        dependencies_thread = Thread(target=Analyzer._threaded_dependencies, args=(self.dockerDriver, image_name,
                                                                                   temp_dir, dependencies))
        dependencies_thread.start()
        # Waiting for the threads
        malware_thread.join()
        dependencies_thread.join()
    except Exception as ex:
        # Any failure is recorded in the report status rather than propagated
        message = "Unexpected exception of type {0} occurred: {1!r}"\
            .format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
        DagdaLogger.get_logger().error(message)
        if InternalServer.is_debug_logging_enabled():
            traceback.print_exc()
        data['status'] = message
    # -- Cleanup: always remove the extracted filesystem bundle
    if temp_dir is not None:
        clean_up(temporary_dir=temp_dir)
    # -- Prepare output
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Preparing analysis output ...')
    if 'status' not in data or data['status'] is None:
        data['status'] = 'Completed'
    data['image_name'] = image_name
    data['timestamp'] = datetime.datetime.now().timestamp()
    data['static_analysis'] = self.generate_static_analysis(image_name, os_packages, dependencies, malware_binaries)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Analysis output completed')
    # -- Return
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('EXIT from the method for analyzing a docker image')
    return data
def get_history_by_image_name(image_name):
    """REST endpoint: return the scan history of a docker image.

    The optional 'id' query arg narrows the history to a single report.
    Returns a JSON list, or a 404 JSON error when nothing is found.
    """
    report_id = request.args.get('id')
    history = InternalServer.get_mongodb_driver().get_docker_image_history(image_name, report_id)
    if not history:
        return json.dumps({'err': 404, 'msg': 'History not found'}, sort_keys=True), 404
    return json.dumps(history, sort_keys=True)
def get_history():
    """REST endpoint: return the full analysis history for every image.

    Returns a JSON list, or a 404 JSON error when no analysis exists.
    """
    all_history = InternalServer.get_mongodb_driver().get_docker_image_all_history()
    if not all_history:
        return json.dumps({'err': 404, 'msg': 'Analysis not found'}, sort_keys=True), 404
    return json.dumps(all_history, sort_keys=True)
def compose_vuln_db(self):
    """Populate or refresh the vulnerability database in MongoDB.

    Sequentially updates four data sources: NVD CVE feeds (per year),
    Offensive Security's Exploit DB, BugTraq ids (offline dump plus an
    online incremental update), and Red Hat RHSA/RHBA advisories.
    Downloads happen over HTTP, so this method requires network access.
    """
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('ENTRY to the method for composing VulnDB')

    # -- CVE
    # Adding or updating CVEs
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Updating CVE collection ...')
    # remove_only_cve_for_update() clears stale years and returns the first
    # year that must be (re)downloaded. NOTE(review): 'next_year' is a
    # module-level name not visible in this chunk — presumably current year + 1.
    first_year = self.mongoDbDriver.remove_only_cve_for_update()
    for i in range(first_year, next_year):
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug('... Including CVEs - ' + str(i))
        compressed_content = get_http_resource_content(
            "https://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-" + str(i) + ".xml.gz")
        cve_list = get_cve_list_from_file(compressed_content, i)
        if len(cve_list) > 0:
            self.mongoDbDriver.bulk_insert_cves(cve_list)
        # Add CVE info collection with additional info like score
        compressed_content_info = get_http_resource_content("https://nvd.nist.gov/download/nvdcve-" + str(i) +
                                                            ".xml.zip")
        cve_info_list = get_cve_description_from_file(compressed_content_info)
        compressed_ext_content_info = \
            get_http_resource_content("https://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-" + str(i) + ".xml.zip")
        cve_ext_info_list = get_cve_cweid_from_file(compressed_ext_content_info, cve_info_list)
        if len(cve_ext_info_list) > 0:
            self.mongoDbDriver.bulk_insert_cves_info(cve_ext_info_list)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('CVE collection updated')

    # -- Exploit DB
    # Adding or updating Exploit_db and Exploit_db info
    # (full refresh: both collections are dropped and rebuilt from the CSV)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Updating Exploit DB collection ...')
    self.mongoDbDriver.delete_exploit_db_collection()
    self.mongoDbDriver.delete_exploit_db_info_collection()
    csv_content = get_http_resource_content(
        'https://raw.githubusercontent.com/offensive-security/exploit-database/master/files_exploits.csv')
    exploit_db_list, exploit_db_info_list = get_exploit_db_list_from_csv(csv_content.decode("utf-8"))
    self.mongoDbDriver.bulk_insert_exploit_db_ids(exploit_db_list)
    self.mongoDbDriver.bulk_insert_exploit_db_info(exploit_db_info_list)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Exploit DB collection updated')

    # -- BID
    # Adding BugTraqs from 20180328_sf_db.json.gz, where 103525 is the max bid in the gz file
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Updating BugTraqs Id collection ...')
    max_bid = self.mongoDbDriver.get_max_bid_inserted()
    if max_bid < 103525:
        # Clean
        if max_bid != 0:
            self.mongoDbDriver.delete_bid_collection()
            self.mongoDbDriver.delete_bid_info_collection()
        # Adding BIDs from the offline dump
        compressed_file = io.BytesIO(get_http_resource_content(
            "https://github.com/eliasgranderubio/bidDB_downloader/raw/master/bonus_track/20180328_sf_db.json.gz"))
        bid_items_array, bid_detail_array = get_bug_traqs_lists_from_file(compressed_file)
        # Insert BIDs (each chunk is cleared after insertion to free memory)
        for bid_items_list in bid_items_array:
            self.mongoDbDriver.bulk_insert_bids(bid_items_list)
            bid_items_list.clear()
        # Insert BID details
        self.mongoDbDriver.bulk_insert_bid_info(bid_detail_array)
        bid_detail_array.clear()
        # Set the new max bid
        max_bid = 103525
    # Updating BugTraqs from http://www.securityfocus.com/
    bid_items_array, bid_detail_array = get_bug_traqs_lists_from_online_mode(bid_downloader(first_bid=max_bid + 1,
                                                                                            last_bid=104000))
    # Insert BIDs
    if len(bid_items_array) > 0:
        for bid_items_list in bid_items_array:
            self.mongoDbDriver.bulk_insert_bids(bid_items_list)
            bid_items_list.clear()
    # Insert BID details
    if len(bid_detail_array) > 0:
        self.mongoDbDriver.bulk_insert_bid_info(bid_detail_array)
        bid_detail_array.clear()
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('BugTraqs Id collection updated')

    # -- RHSA (Red Hat Security Advisory) and RHBA (Red Hat Bug Advisory)
    # Adding or updating rhsa and rhba collections
    # (full refresh: all four collections are dropped and rebuilt)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('Updating RHSA & RHBA collections ...')
    self.mongoDbDriver.delete_rhba_collection()
    self.mongoDbDriver.delete_rhba_info_collection()
    self.mongoDbDriver.delete_rhsa_collection()
    self.mongoDbDriver.delete_rhsa_info_collection()
    bz2_file = get_http_resource_content('https://www.redhat.com/security/data/oval/rhsa.tar.bz2')
    rhsa_list, rhba_list, rhsa_info_list, rhba_info_list = get_rhsa_and_rhba_lists_from_file(bz2_file)
    self.mongoDbDriver.bulk_insert_rhsa(rhsa_list)
    self.mongoDbDriver.bulk_insert_rhba(rhba_list)
    self.mongoDbDriver.bulk_insert_rhsa_info(rhsa_info_list)
    self.mongoDbDriver.bulk_insert_rhba_info(rhba_info_list)
    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('RHSA & RHBA collections updated')

    if InternalServer.is_debug_logging_enabled():
        DagdaLogger.get_logger().debug('EXIT from the method for composing VulnDB')
def __init__(self):
    """Initialize the composer and bind the shared MongoDB driver."""
    super(DBComposer, self).__init__()
    # Keep a reference to the process-wide MongoDB driver instance
    mongodb_driver = InternalServer.get_mongodb_driver()
    self.mongoDbDriver = mongodb_driver