def test_get_parser(self):
    """Smoke-test that get_parser() returns a working parser for several scan types.

    Fix over the original: report files are opened with a context manager so the
    handle is closed even when the parser raises, and the four near-identical
    subTest bodies are driven from a single (scan_type, report path) table.
    """
    cases = [
        ("Acunetix Scan", "/scans/acunetix/one_finding.xml"),
        ("Anchore Engine Scan", "/scans/anchore/one_vuln.json"),
        ("Nessus Scan", "/scans/nessus/nessus_v_unknown.xml"),
        ("ZAP Scan", "/scans/zap/some_2.9.0.xml"),
    ]
    for scan_type, report_path in cases:
        with self.subTest(scan_type=scan_type):
            # context manager guarantees the file is closed on any exit path;
            # the original open()/close() pair leaked the handle on failure
            with open(get_unit_tests_path() + report_path) as testfile:
                parser = get_parser(scan_type)
                findings = parser.get_findings(testfile, Test())
def test_get_parser_test_active_in_db(self):
    """This test is designed to validate that the factory takes into account the flag 'active' in DB"""
    scan_type = "ZAP Scan"
    # deactivate the parser: get_parser must then refuse to serve it
    Test_Type.objects.update_or_create(
        name=scan_type,
        defaults={"active": False},
    )
    with self.assertRaises(ValueError):
        get_parser(scan_type)
    # re-activate the parser: get_parser must serve it again
    test_type, created = Test_Type.objects.update_or_create(
        name=scan_type,
        defaults={"active": True},
    )
    parser = get_parser(scan_type)
    self.assertIsNotNone(parser)
def test_parse_findings(self):
    """End-to-end check: parse an Acunetix report, import it, and verify that
    every imported finding carries a valid numerical severity (S0-S4).

    Fix over the original: the report is opened via get_unit_tests_path()
    (consistent with the other tests in this file, which no longer use the
    hard-coded "dojo/unittests" path) and inside a context manager so the
    file handle is not leaked.
    """
    scan_type = "Acunetix Scan"
    with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml") as scan:
        user, created = User.objects.get_or_create(username="******")
        product_type, created = Product_Type.objects.get_or_create(name="test")
        if created:
            product_type.save()
        product, created = Product.objects.get_or_create(
            name="TestDojoDefaultImporter",
            prod_type=product_type,
        )
        if created:
            product.save()
        engagement_name = "Test Create Engagement"
        engagement, created = Engagement.objects.get_or_create(
            name=engagement_name,
            product=product,
            target_start=timezone.now(),
            target_end=timezone.now(),
        )
        if created:
            engagement.save()
        lead = None
        environment = None

        # boot
        importer = Importer()

        # create the test
        # by default test_type == scan_type
        test = importer.create_test(scan_type, scan_type, engagement, lead, environment)

        # parse the findings
        parser = get_parser(scan_type)
        parsed_findings = parser.get_findings(scan, test)

        # process
        minimum_severity = "Info"
        active = True
        verified = True
        new_findings = importer.process_parsed_findings(
            test,
            parsed_findings,
            scan_type,
            user,
            active,
            verified,
            minimum_severity=minimum_severity,
        )
        for finding in new_findings:
            self.assertIn(finding.numerical_severity, ["S0", "S1", "S2", "S3", "S4"])
def reimport_scan(self, scan, scan_type, test, active=True, verified=True, tags=None,
                  minimum_severity=None, user=None, endpoints_to_add=None, scan_date=None,
                  version=None, branch_tag=None, build_id=None, commit_hash=None,
                  push_to_jira=None, close_old_findings=True, group_by=None,
                  api_scan_configuration=None, service=None):
    """Re-import a scan report into an existing Test.

    Parses `scan` with the parser registered for `scan_type`, matches the parsed
    findings against the Test's existing ones (new / reactivated / to-mitigate /
    untouched), optionally closes findings no longer present in the report,
    updates timestamps and import history, and sends notifications.

    Returns a tuple:
    (test, updated_count, len(new), len(closed), len(reactivated), len(untouched)).

    Raises ValidationError if `api_scan_configuration` belongs to a different
    product than the Test's engagement.
    """
    logger.debug(f'REIMPORT_SCAN: parameters: {locals()}')

    user = user or get_current_user()

    now = timezone.now()

    if api_scan_configuration:
        # the configuration must belong to the same product as the test
        if api_scan_configuration.product != test.engagement.product:
            raise ValidationError(
                'API Scan Configuration has to be from same product as the Test'
            )
        # persist the (possibly new) configuration on the test
        if test.api_scan_configuration != api_scan_configuration:
            test.api_scan_configuration = api_scan_configuration
            test.save()

    # check if the parser that handles the scan_type manages tests itself
    parser = get_parser(scan_type)
    if hasattr(parser, 'get_tests'):
        logger.debug('REIMPORT_SCAN parser v2: Create parse findings')
        tests = parser.get_tests(scan_type, scan)
        # for now we only consider the first test in the list and artificially aggregate all findings of all tests
        # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case
        # when there is more than 1 test
        parsed_findings = []
        for test_raw in tests:
            parsed_findings.extend(test_raw.findings)
    else:
        logger.debug('REIMPORT_SCAN: Parse findings')
        parsed_findings = parser.get_findings(scan, test)

    logger.debug('REIMPORT_SCAN: Processing findings')
    new_findings = []
    reactivated_findings = []
    findings_to_mitigate = []
    untouched_findings = []
    if settings.ASYNC_FINDING_IMPORT:
        # process findings in chunks via async workers, then collect the
        # serialized results back into real model objects
        chunk_list = importer_utils.chunk_list(parsed_findings)
        results_list = []
        # First kick off all the workers
        for findings_list in chunk_list:
            result = self.process_parsed_findings(
                test, findings_list, scan_type, user, active, verified,
                minimum_severity=minimum_severity,
                endpoints_to_add=endpoints_to_add,
                push_to_jira=push_to_jira, group_by=group_by, now=now,
                service=service, scan_date=scan_date, sync=False)
            # Since I dont want to wait until the task is done right now, save the id
            # So I can check on the task later
            results_list += [result]
        # After all tasks have been started, time to pull the results
        logger.debug('REIMPORT_SCAN: Collecting Findings')
        for results in results_list:
            serial_new_findings, serial_reactivated_findings, serial_findings_to_mitigate, serial_untouched_findings = results.get()
            # each worker returns JSON-serialized findings; deserialize back to objects
            new_findings += [
                next(serializers.deserialize("json", finding)).object
                for finding in serial_new_findings
            ]
            reactivated_findings += [
                next(serializers.deserialize("json", finding)).object
                for finding in serial_reactivated_findings
            ]
            findings_to_mitigate += [
                next(serializers.deserialize("json", finding)).object
                for finding in serial_findings_to_mitigate
            ]
            untouched_findings += [
                next(serializers.deserialize("json", finding)).object
                for finding in serial_untouched_findings
            ]
        logger.debug('REIMPORT_SCAN: All Findings Collected')
        # Indicate that the test is not complete yet as endpoints will still be rolling in.
        test.percent_complete = 50
        test.save()
        importer_utils.update_test_progress(test)
    else:
        # synchronous path: one call processes everything
        new_findings, reactivated_findings, findings_to_mitigate, untouched_findings = \
            self.process_parsed_findings(test, parsed_findings, scan_type, user, active,
                                         verified, minimum_severity=minimum_severity,
                                         endpoints_to_add=endpoints_to_add,
                                         push_to_jira=push_to_jira, group_by=group_by,
                                         now=now, service=service, scan_date=scan_date,
                                         sync=True)

    closed_findings = []
    if close_old_findings:
        logger.debug(
            'REIMPORT_SCAN: Closing findings no longer present in scan report'
        )
        closed_findings = self.close_old_findings(
            test, findings_to_mitigate, scan_date, user=user,
            push_to_jira=push_to_jira)

    logger.debug('REIMPORT_SCAN: Updating test/engagement timestamps')
    importer_utils.update_timestamps(test, version, branch_tag, build_id,
                                     commit_hash, now, scan_date)

    if settings.TRACK_IMPORT_HISTORY:
        logger.debug('REIMPORT_SCAN: Updating Import History')
        importer_utils.update_import_history(
            Test_Import.REIMPORT_TYPE, active, verified, tags, minimum_severity,
            endpoints_to_add, version, branch_tag, build_id, commit_hash,
            push_to_jira, close_old_findings, test, new_findings,
            closed_findings, reactivated_findings)

    logger.debug('REIMPORT_SCAN: Generating notifications')
    updated_count = len(closed_findings) + len(reactivated_findings) + len(
        new_findings)
    if updated_count > 0:
        notifications_helper.notify_scan_added(
            test, updated_count, new_findings=new_findings,
            findings_mitigated=closed_findings,
            findings_reactivated=reactivated_findings,
            findings_untouched=untouched_findings)

    logger.debug('REIMPORT_SCAN: Done')

    return test, updated_count, len(new_findings), len(
        closed_findings), len(reactivated_findings), len(
            untouched_findings)
def reimport_scan(self, scan, scan_type, test, active=True, verified=True, tags=None,
                  minimum_severity=None, user=None, endpoints_to_add=None, scan_date=None,
                  version=None, branch_tag=None, build_id=None, commit_hash=None,
                  push_to_jira=None, close_old_findings=True, group_by=None,
                  sonarqube_config=None, cobaltio_config=None):
    """Re-import a scan report into an existing Test (SonarQube/Cobalt.io-config variant).

    Parses `scan` with the parser registered for `scan_type`, processes the
    parsed findings synchronously, optionally closes findings no longer present
    in the report, updates timestamps and import history, and sends notifications.

    Returns a tuple:
    (test, updated_count, len(new), len(closed), len(reactivated), len(untouched)).

    Raises ValidationError when a provided tool config belongs to a different
    product than the Test's engagement.
    """
    logger.debug(f'REIMPORT_SCAN: parameters: {locals()}')

    user = user or get_current_user()

    now = timezone.now()

    # retain weird existing logic to use current time for provided scan date
    scan_date_time = datetime.datetime.combine(scan_date, timezone.now().time())
    if settings.USE_TZ:
        scan_date_time = timezone.make_aware(scan_date_time, timezone.get_default_timezone())

    if sonarqube_config:  # if there is no sonarqube_config, just use the original
        if sonarqube_config.product != test.engagement.product:
            raise ValidationError('"sonarqube_config" has to be from same product as "test"')

        if test.sonarqube_config != sonarqube_config:  # update of sonarqube_config
            test.sonarqube_config = sonarqube_config
            test.save()

    if cobaltio_config:  # if there is no cobaltio_config, just use the original
        if cobaltio_config.product != test.engagement.product:
            raise ValidationError('"cobaltio_config" has to be from same product as "test"')

        if test.cobaltio_config != cobaltio_config:  # update the cobaltio_config
            test.cobaltio_config = cobaltio_config
            test.save()

    # check if the parser that handles the scan_type manages tests itself
    parser = get_parser(scan_type)
    if hasattr(parser, 'get_tests'):
        logger.debug('REIMPORT_SCAN parser v2: Create parse findings')
        tests = parser.get_tests(scan_type, scan)
        # for now we only consider the first test in the list and artificially aggregate all findings of all tests
        # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case
        # when there is more than 1 test
        parsed_findings = []
        for test_raw in tests:
            parsed_findings.extend(test_raw.findings)
    else:
        logger.debug('REIMPORT_SCAN: Parse findings')
        parsed_findings = parser.get_findings(scan, test)

    logger.debug('REIMPORT_SCAN: Processing findings')
    new_findings, reactivated_findings, findings_to_mitigate, untouched_findings = \
        self.process_parsed_findings(test, parsed_findings, scan_type, user, active, verified,
                                     minimum_severity=minimum_severity,
                                     endpoints_to_add=endpoints_to_add,
                                     push_to_jira=push_to_jira, group_by=group_by, now=now)

    closed_findings = []
    if close_old_findings:
        logger.debug('REIMPORT_SCAN: Closing findings no longer present in scan report')
        closed_findings = self.close_old_findings(test, findings_to_mitigate, scan_date_time,
                                                  user=user, push_to_jira=push_to_jira)

    logger.debug('REIMPORT_SCAN: Updating test/engagement timestamps')
    importer_utils.update_timestamps(test, scan_date, version, branch_tag, build_id,
                                     commit_hash, now, scan_date_time)

    if settings.TRACK_IMPORT_HISTORY:
        logger.debug('REIMPORT_SCAN: Updating Import History')
        importer_utils.update_import_history(Test_Import.REIMPORT_TYPE, active, verified,
                                             tags, minimum_severity, endpoints_to_add,
                                             version, branch_tag, build_id, commit_hash,
                                             push_to_jira, close_old_findings, test,
                                             new_findings, closed_findings,
                                             reactivated_findings)

    logger.debug('REIMPORT_SCAN: Generating notifications')
    updated_count = len(closed_findings) + len(reactivated_findings) + len(new_findings)
    if updated_count > 0:
        notifications_helper.notify_scan_added(test, updated_count,
                                               new_findings=new_findings,
                                               findings_mitigated=closed_findings,
                                               findings_reactivated=reactivated_findings,
                                               findings_untouched=untouched_findings)

    logger.debug('REIMPORT_SCAN: Done')

    return test, updated_count, len(new_findings), len(closed_findings), len(reactivated_findings), len(untouched_findings)
def test_get_parser_error(self):
    """Requesting a parser for an unknown scan type must raise ValueError."""
    unknown_scan_type = "type_that_doesn't_exist"
    with self.assertRaises(ValueError):
        get_parser(unknown_scan_type)
def import_scan(self, scan, scan_type, engagement, lead, environment, active, verified,
                tags=None, minimum_severity=None, user=None, endpoints_to_add=None,
                scan_date=None, version=None, branch_tag=None, build_id=None,
                commit_hash=None, push_to_jira=None, close_old_findings=False,
                group_by=None, api_scan_configuration=None, service=None, title=None):
    """Import a scan report as a brand-new Test on `engagement`.

    Creates the Test (with a parser-derived Test_Type label for v2 parsers),
    parses `scan` with the parser registered for `scan_type`, processes the
    findings (async in chunks or synchronously depending on
    settings.ASYNC_FINDING_IMPORT), optionally closes old findings, updates
    timestamps/import history, and sends notifications.

    Returns (test, len(new_findings), len(closed_findings)).

    Raises ValidationError if `api_scan_configuration` belongs to a different
    product than `engagement`.
    """
    logger.debug(f'IMPORT_SCAN: parameters: {locals()}')

    user = user or get_current_user()

    now = timezone.now()

    if api_scan_configuration and api_scan_configuration.product != engagement.product:
        raise ValidationError(
            'API Scan Configuration has to be from same product as the Engagement'
        )

    # check if the parser that handles the scan_type manages tests itself
    # if yes, we parse the data first
    # after that we customize the Test_Type to reflect the data
    # This allows us to support some meta-formats like SARIF or the generic format
    parser = get_parser(scan_type)
    if hasattr(parser, 'get_tests'):
        logger.debug(
            'IMPORT_SCAN parser v2: Create Test and parse findings')
        tests = parser.get_tests(scan_type, scan)
        # for now we only consider the first test in the list and artificially aggregate all findings of all tests
        # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case
        # when there is more than 1 test
        #
        # we also aggregate the label of the Test_type to show the user the original scan_type
        # only if they are different. This is to support meta formats like SARIF
        # so a report that has the label 'CodeScanner' will be changed to 'CodeScanner Scan (SARIF)'
        test_type_name = scan_type
        if len(tests) > 0:
            if tests[0].type:
                test_type_name = tests[0].type + " Scan"
                if test_type_name != scan_type:
                    test_type_name = f"{test_type_name} ({scan_type})"

            test = self.create_test(
                scan_type, test_type_name, engagement, lead, environment,
                scan_date=scan_date, tags=tags, version=version,
                branch_tag=branch_tag, build_id=build_id,
                commit_hash=commit_hash, now=now,
                api_scan_configuration=api_scan_configuration, title=title)
            # This part changes the name of the Test
            # we get it from the data of the parser
            test_raw = tests[0]
            if test_raw.name:
                test.name = test_raw.name
            if test_raw.description:
                test.description = test_raw.description
            test.save()

            logger.debug(
                'IMPORT_SCAN parser v2: Parse findings (aggregate)')
            # currently we only support importing one Test
            # so for parsers that support multiple tests (like SARIF)
            # we aggregate all the findings into one uniq test
            parsed_findings = []
            for test_raw in tests:
                parsed_findings.extend(test_raw.findings)
        else:
            logger.info(f'No tests found in import for {scan_type}')
    else:
        logger.debug('IMPORT_SCAN: Create Test')
        # by default test_type == scan_type
        test = self.create_test(
            scan_type, scan_type, engagement, lead, environment,
            scan_date=scan_date, tags=tags, version=version,
            branch_tag=branch_tag, build_id=build_id,
            commit_hash=commit_hash, now=now,
            api_scan_configuration=api_scan_configuration, title=title)

        logger.debug('IMPORT_SCAN: Parse findings')
        parser = get_parser(scan_type)
        parsed_findings = parser.get_findings(scan, test)

    logger.debug('IMPORT_SCAN: Processing findings')
    new_findings = []
    if settings.ASYNC_FINDING_IMPORT:
        # process findings in chunks via async workers, then collect the
        # serialized results back into real model objects
        chunk_list = importer_utils.chunk_list(parsed_findings)
        results_list = []
        # First kick off all the workers
        for findings_list in chunk_list:
            result = self.process_parsed_findings(
                test, findings_list, scan_type, user, active, verified,
                minimum_severity=minimum_severity,
                endpoints_to_add=endpoints_to_add,
                push_to_jira=push_to_jira, group_by=group_by, now=now,
                service=service, scan_date=scan_date, sync=False)
            # Since I dont want to wait until the task is done right now, save the id
            # So I can check on the task later
            results_list += [result]
        # After all tasks have been started, time to pull the results
        logger.info('IMPORT_SCAN: Collecting Findings')
        for results in results_list:
            serial_new_findings = results.get()
            new_findings += [
                next(serializers.deserialize("json", finding)).object
                for finding in serial_new_findings
            ]
        logger.info('IMPORT_SCAN: All Findings Collected')
        # Indicate that the test is not complete yet as endpoints will still be rolling in.
        test.percent_complete = 50
        test.save()
        importer_utils.update_test_progress(test)
    else:
        # synchronous path: one call processes everything
        new_findings = self.process_parsed_findings(
            test, parsed_findings, scan_type, user, active, verified,
            minimum_severity=minimum_severity,
            endpoints_to_add=endpoints_to_add,
            push_to_jira=push_to_jira, group_by=group_by, now=now,
            service=service, scan_date=scan_date, sync=True)

    closed_findings = []
    if close_old_findings:
        logger.debug(
            'IMPORT_SCAN: Closing findings no longer present in scan report'
        )
        closed_findings = self.close_old_findings(
            test, scan_date, user=user, push_to_jira=push_to_jira,
            service=service)

    logger.debug('IMPORT_SCAN: Updating test/engagement timestamps')
    importer_utils.update_timestamps(test, version, branch_tag, build_id,
                                     commit_hash, now, scan_date)

    if settings.TRACK_IMPORT_HISTORY:
        logger.debug('IMPORT_SCAN: Updating Import History')
        importer_utils.update_import_history(
            Test_Import.IMPORT_TYPE, active, verified, tags, minimum_severity,
            endpoints_to_add, version, branch_tag, build_id, commit_hash,
            push_to_jira, close_old_findings, test, new_findings,
            closed_findings)

    logger.debug('IMPORT_SCAN: Generating notifications')
    notifications_helper.notify_test_created(test)
    updated_count = len(new_findings) + len(closed_findings)
    if updated_count > 0:
        notifications_helper.notify_scan_added(
            test, updated_count, new_findings=new_findings,
            findings_mitigated=closed_findings)

    logger.debug('IMPORT_SCAN: Done')

    return test, len(new_findings), len(closed_findings)
def import_scan(self, scan, scan_type, engagement, lead, environment, active, verified,
                tags=None, minimum_severity=None, user=None, endpoints_to_add=None,
                scan_date=None, version=None, branch_tag=None, build_id=None,
                commit_hash=None, push_to_jira=None, close_old_findings=False,
                group_by=None, api_scan_configuration=None, service=None):
    """Import a scan report as a brand-new Test on `engagement` (scan_date_time variant).

    Creates the Test (with a parser-derived Test_Type label for v2 parsers),
    parses `scan` with the parser registered for `scan_type`, processes the
    findings synchronously, optionally closes old findings, updates
    timestamps/import history, and sends notifications.

    Returns (test, len(new_findings), len(closed_findings)).

    Raises ValidationError if `api_scan_configuration` belongs to a different
    product than `engagement`.
    """
    logger.debug(f'IMPORT_SCAN: parameters: {locals()}')

    user = user or get_current_user()

    now = timezone.now()

    # retain weird existing logic to use current time for provided scan date
    scan_date_time = datetime.datetime.combine(scan_date, timezone.now().time())
    if settings.USE_TZ:
        scan_date_time = timezone.make_aware(
            scan_date_time, timezone.get_default_timezone())

    if api_scan_configuration and api_scan_configuration.product != engagement.product:
        raise ValidationError(
            'API Scan Configuration has to be from same product as the Engagement'
        )

    # check if the parser that handles the scan_type manages tests itself
    # if yes, we parse the data first
    # after that we customize the Test_Type to reflect the data
    # This allows us to support some meta-formats like SARIF or the generic format
    parser = get_parser(scan_type)
    if hasattr(parser, 'get_tests'):
        logger.debug(
            'IMPORT_SCAN parser v2: Create Test and parse findings')
        tests = parser.get_tests(scan_type, scan)
        # for now we only consider the first test in the list and artificially aggregate all findings of all tests
        # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case
        # when there is more than 1 test
        #
        # we also aggregate the label of the Test_type to show the user the original scan_type
        # only if they are different. This is to support meta formats like SARIF
        # so a report that has the label 'CodeScanner' will be changed to 'CodeScanner Scan (SARIF)'
        test_type_name = scan_type
        if len(tests) > 0:
            if tests[0].type:
                test_type_name = tests[0].type + " Scan"
                if test_type_name != scan_type:
                    test_type_name = f"{test_type_name} ({scan_type})"

            test = self.create_test(
                scan_type, test_type_name, engagement, lead, environment,
                scan_date=scan_date, tags=tags, version=version,
                branch_tag=branch_tag, build_id=build_id,
                commit_hash=commit_hash, now=now,
                api_scan_configuration=api_scan_configuration)
            # This part changes the name of the Test
            # we get it from the data of the parser
            test_raw = tests[0]
            if test_raw.name:
                test.name = test_raw.name
            if test_raw.description:
                test.description = test_raw.description
            test.save()

            logger.debug(
                'IMPORT_SCAN parser v2: Parse findings (aggregate)')
            # currently we only support importing one Test
            # so for parsers that support multiple tests (like SARIF)
            # we aggregate all the findings into one uniq test
            parsed_findings = []
            for test_raw in tests:
                parsed_findings.extend(test_raw.findings)
        else:
            logger.info(f'No tests found in import for {scan_type}')
    else:
        logger.debug('IMPORT_SCAN: Create Test')
        # by default test_type == scan_type
        test = self.create_test(
            scan_type, scan_type, engagement, lead, environment,
            scan_date=scan_date, tags=tags, version=version,
            branch_tag=branch_tag, build_id=build_id,
            commit_hash=commit_hash, now=now,
            api_scan_configuration=api_scan_configuration)

        logger.debug('IMPORT_SCAN: Parse findings')
        parser = get_parser(scan_type)
        parsed_findings = parser.get_findings(scan, test)

    logger.debug('IMPORT_SCAN: Processing findings')
    new_findings = self.process_parsed_findings(
        test, parsed_findings, scan_type, user, active, verified,
        minimum_severity=minimum_severity,
        endpoints_to_add=endpoints_to_add, push_to_jira=push_to_jira,
        group_by=group_by, now=now, service=service)

    closed_findings = []
    if close_old_findings:
        logger.debug(
            'IMPORT_SCAN: Closing findings no longer present in scan report'
        )
        closed_findings = self.close_old_findings(
            test, scan_date_time, user=user, push_to_jira=push_to_jira)

    logger.debug('IMPORT_SCAN: Updating test/engagement timestamps')
    importer_utils.update_timestamps(test, scan_date, version, branch_tag,
                                     build_id, commit_hash, now,
                                     scan_date_time)

    if settings.TRACK_IMPORT_HISTORY:
        logger.debug('IMPORT_SCAN: Updating Import History')
        importer_utils.update_import_history(
            Test_Import.IMPORT_TYPE, active, verified, tags, minimum_severity,
            endpoints_to_add, version, branch_tag, build_id, commit_hash,
            push_to_jira, close_old_findings, test, new_findings,
            closed_findings)

    logger.debug('IMPORT_SCAN: Generating notifications')
    notifications_helper.notify_test_created(test)
    updated_count = len(new_findings) + len(closed_findings)
    if updated_count > 0:
        notifications_helper.notify_scan_added(
            test, updated_count, new_findings=new_findings,
            findings_mitigated=closed_findings)

    logger.debug('IMPORT_SCAN: Done')

    return test, len(new_findings), len(closed_findings)