def close_old_findings(self, test, scan_date_time, user, push_to_jira=None):
    """Close active findings from other tests of the same test type that are
    not present (by hash_code) in the current scan.

    Scope depends on the engagement's deduplication setting: same engagement
    when deduplication_on_engagement is set, otherwise the whole product.
    Each closed finding is deactivated, marked mitigated at scan_date_time,
    annotated with a note, its endpoint statuses mitigated, and tagged 'stale'.

    :param test: the Test whose current findings define what stays open
    :param scan_date_time: timestamp recorded as the mitigation time
    :param user: author of the auto-close note and endpoint-status mitigator
    :param push_to_jira: forwarded to Finding.save() for non-grouped findings;
                         grouped findings are pushed once per group afterwards
    :return: queryset of the findings that were closed
    """
    old_findings = []
    # Close old active findings that are not reported by this scan.
    new_hash_codes = test.finding_set.values('hash_code')
    # TODO I don't think these criteria are 100% correct, why are findings with the same hash_code excluded?
    # Would it make more sense to exclude duplicates? But the deduplication process can be unfinished because it's
    # run in a celery async task...
    if test.engagement.deduplication_on_engagement:
        # restrict closing to the same engagement to honour per-engagement dedupe
        old_findings = Finding.objects.exclude(test=test) \
            .exclude(hash_code__in=new_hash_codes) \
            .filter(test__engagement=test.engagement,
                    test__test_type=test.test_type,
                    active=True)
    else:
        # TODO BUG? this will violate the deduplication_on_engagement setting for other engagements
        old_findings = Finding.objects.exclude(test=test) \
            .exclude(hash_code__in=new_hash_codes) \
            .filter(test__engagement__product=test.engagement.product,
                    test__test_type=test.test_type,
                    active=True)

    for old_finding in old_findings:
        # NOTE(review): mitigated_by is not set here, unlike the reimport close
        # path which does set it — confirm whether that is intentional.
        old_finding.active = False
        old_finding.is_mitigated = True
        old_finding.mitigated = scan_date_time
        old_finding.notes.create(author=user,
                                 entry="This finding has been automatically closed"
                                 " as it is not present anymore in recent scans.")
        # mitigate every endpoint status attached to the finding as well
        endpoint_status = old_finding.endpoint_status.all()
        for status in endpoint_status:
            status.mitigated_by = user
            status.mitigated_time = timezone.now()
            status.mitigated = True
            status.last_modified = timezone.now()
            status.save()
        old_finding.tags.add('stale')

        # to avoid pushing a finding group multiple times, we push those outside of the loop
        if settings.FEATURE_FINDING_GROUPS and old_finding.finding_group:
            # don't try to dedupe findings that we are closing
            old_finding.save(dedupe_option=False)
        else:
            old_finding.save(dedupe_option=False, push_to_jira=push_to_jira)

    # push each affected finding group exactly once
    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in set([finding.finding_group for finding in old_findings if finding.finding_group is not None]):
            jira_helper.push_to_jira(finding_group)

    return old_findings
def post_process_finding_save(finding, dedupe_option=True, false_history=False, rules_option=True, product_grading_option=True, issue_updater_option=True, push_to_jira=False, user=None, *args, **kwargs): system_settings = System_Settings.objects.get() # STEP 1 run all status changing tasks sequentially to avoid race conditions if dedupe_option: if finding.hash_code is not None: if system_settings.enable_deduplication: from dojo.utils import do_dedupe_finding do_dedupe_finding(finding, *args, **kwargs) else: deduplicationLogger.debug( "skipping dedupe because it's disabled in system settings") else: deduplicationLogger.warning( "skipping dedupe because hash_code is None") if false_history: if system_settings.false_positive_history: from dojo.utils import do_false_positive_history do_false_positive_history(finding, *args, **kwargs) else: deduplicationLogger.debug( "skipping false positive history because it's disabled in system settings" ) # STEP 2 run all non-status changing tasks as celery tasks in the background if issue_updater_option: from dojo.tools import tool_issue_updater tool_issue_updater.async_tool_issue_update(finding) if product_grading_option: if system_settings.enable_product_grade: from dojo.utils import calculate_grade calculate_grade(finding.test.engagement.product) else: deduplicationLogger.debug( "skipping product grading because it's disabled in system settings" ) # Adding a snippet here for push to JIRA so that it's in one place if push_to_jira: logger.debug('pushing finding %s to jira from finding.save()', finding.pk) import dojo.jira_link.helper as jira_helper jira_helper.push_to_jira(finding)
def close_old_findings(self, test, to_mitigate, scan_date_time, user, push_to_jira=None):
    """Mitigate the given findings that are no longer present in the
    re-uploaded scan report.

    Skips findings that are already mitigated. For each remaining finding:
    marks it mitigated at scan_date_time by *user*, deactivates it, mitigates
    its endpoint statuses, saves it, and attaches a "Mitigated by ... re-upload"
    note.

    :param test: the Test being re-imported (used for the note's test_type)
    :param to_mitigate: iterable of findings absent from the new report
    :param scan_date_time: timestamp recorded as the mitigation time
    :param user: recorded as mitigated_by and as the note author
    :param push_to_jira: forwarded to Finding.save() for non-grouped findings;
                         grouped findings are pushed once per group afterwards
    :return: list of findings that were actually mitigated by this call
    """
    logger.debug('IMPORT_SCAN: Closing findings no longer present in scan report')
    mitigated_findings = []
    for finding in to_mitigate:
        # only touch findings that are not already mitigated
        if not finding.mitigated or not finding.is_mitigated:
            logger.debug('mitigating finding: %i:%s', finding.id, finding)
            finding.mitigated = scan_date_time
            finding.is_mitigated = True
            finding.mitigated_by = user
            finding.active = False

            # mitigate every endpoint status attached to the finding as well
            endpoint_status = finding.endpoint_status.all()
            for status in endpoint_status:
                status.mitigated_by = user
                status.mitigated_time = timezone.now()
                status.mitigated = True
                status.last_modified = timezone.now()
                status.save()

            # to avoid pushing a finding group multiple times, we push those outside of the loop
            if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                # don't try to dedupe findings that we are closing
                finding.save(dedupe_option=False)
            else:
                finding.save(push_to_jira=push_to_jira, dedupe_option=False)

            note = Notes(entry="Mitigated by %s re-upload." % test.test_type,
                         author=user)
            note.save()
            finding.notes.add(note)
            mitigated_findings.append(finding)

    # push each affected finding group exactly once
    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in set([finding.finding_group for finding in to_mitigate if finding.finding_group is not None]):
            jira_helper.push_to_jira(finding_group)

    return mitigated_findings
def push_to_jira(request, fgid):
    """View: create or update the JIRA issue linked to a finding group.

    :param request: the HTTP request (used only for the messages framework)
    :param fgid: primary key of the Finding_Group to push
    :return: JsonResponse {'result': 'OK'} when the push call completed
             (success or reported failure), HttpResponse 500 on exception

    Fix: the failure branch previously added its message with level
    messages.SUCCESS while styling it 'alert-danger'; it now uses
    messages.ERROR so the level matches the content.
    """
    logger.debug('/finding_group/%s/jira/push', fgid)
    group = get_object_or_404(Finding_Group, id=fgid)
    try:
        logger.info('trying to push %d:%s to JIRA to create or update JIRA issue', group.id, group.name)
        logger.debug('pushing to jira from group.push_to-jira()')

        # it may look like success here, but the push_to_jira helpers swallow exceptions;
        # can't change too much without a test suite, so leave as is for now with the
        # added warning message to check alerts for background errors.
        if jira_helper.push_to_jira(group, sync=True):
            messages.add_message(
                request,
                messages.SUCCESS,
                message='Action queued to create or update linked JIRA issue, check alerts for background errors.',
                extra_tags='alert-success')
        else:
            # failure: level must be ERROR to match the alert-danger styling
            messages.add_message(
                request,
                messages.ERROR,
                'Push to JIRA failed, check alerts on the top right for errors',
                extra_tags='alert-danger')

        return JsonResponse({'result': 'OK'})
    except Exception as e:
        logger.exception(e)
        logger.error('Error pushing to JIRA: ', exc_info=True)
        messages.add_message(
            request,
            messages.ERROR,
            'Error pushing to JIRA',
            extra_tags='alert-danger')
        return HttpResponse(status=500)
def post_process_finding_save(finding, dedupe_option=True, false_history=False, rules_option=True, product_grading_option=True,
                              issue_updater_option=True, push_to_jira=False, user=None, *args, **kwargs):
    """Run the post-save pipeline for a finding: dedupe, false-positive
    history, tool issue update, product grading and optional JIRA push.

    This variant is finding-group aware: a finding that belongs to a group
    (and has no JIRA issue of its own) is pushed to JIRA as its group.
    Each step is gated both by its keyword flag and by the corresponding
    System_Settings toggle; imports are function-local to avoid circular
    imports with dojo.utils.

    :param finding: the just-saved Finding to post-process
    :param dedupe_option: run deduplication (requires a non-None hash_code)
    :param false_history: run false-positive history matching
    :param rules_option: accepted for interface compatibility — not used in
                         this body (no step reads it)
    :param product_grading_option: recompute the product grade
    :param issue_updater_option: queue the async tool issue updater
    :param push_to_jira: push this finding (or its group) to JIRA
    :param user: accepted for interface compatibility — not read in this body
    """
    system_settings = System_Settings.objects.get()

    # STEP 1 run all status changing tasks sequentially to avoid race conditions
    if dedupe_option:
        if finding.hash_code is not None:
            if system_settings.enable_deduplication:
                from dojo.utils import do_dedupe_finding
                do_dedupe_finding(finding, *args, **kwargs)
            else:
                deduplicationLogger.debug("skipping dedupe because it's disabled in system settings")
        else:
            deduplicationLogger.warning("skipping dedupe because hash_code is None")

    if false_history:
        if system_settings.false_positive_history:
            from dojo.utils import do_false_positive_history
            do_false_positive_history(finding, *args, **kwargs)
        else:
            deduplicationLogger.debug("skipping false positive history because it's disabled in system settings")

    # STEP 2 run all non-status changing tasks as celery tasks in the background
    if issue_updater_option:
        from dojo.tools import tool_issue_updater
        tool_issue_updater.async_tool_issue_update(finding)

    if product_grading_option:
        if system_settings.enable_product_grade:
            from dojo.utils import calculate_grade
            calculate_grade(finding.test.engagement.product)
        else:
            deduplicationLogger.debug("skipping product grading because it's disabled in system settings")

    # Adding a snippet here for push to JIRA so that it's in one place
    if push_to_jira:
        logger.debug('pushing finding %s to jira from finding.save()', finding.pk)
        import dojo.jira_link.helper as jira_helper

        # current approach is that whenever a finding is in a group, the group will be pushed to JIRA
        # based on feedback we could introduce another push_group_to_jira boolean everywhere
        # but what about the push_all boolean? Let's see how this works for now and get some feedback.
        if finding.has_jira_issue or not finding.finding_group:
            jira_helper.push_to_jira(finding)
        elif finding.finding_group:
            jira_helper.push_to_jira(finding.finding_group)
def add_temp_finding(request, tid, fid):
    """View: create a new finding on test *tid* from finding template *fid*.

    GET renders the add-finding form pre-filled from the template.
    POST validates the form (enforcing mandatory notes before allowing an
    inactive/false-positive finding), saves the new finding and its
    endpoints, optionally pushes it to JIRA, and records template usage
    (last_used).

    :param request: the HTTP request
    :param tid: Test id the finding is added to
    :param fid: Finding_Template id used as the source of initial values
    :return: redirect to the test view on successful POST, otherwise the
             rendered add-finding page
    """
    jform = None
    test = get_object_or_404(Test, id=tid)
    finding = get_object_or_404(Finding_Template, id=fid)
    findings = Finding_Template.objects.all()
    push_all_jira_issues = jira_helper.is_push_all_issues(finding)

    if request.method == 'POST':
        form = AddFindingForm(request.POST, req_resp=None, product=test.engagement.product)
        if jira_helper.get_jira_project(test):
            jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform',
                                    jira_project=jira_helper.get_jira_project(test), finding_form=form)
            logger.debug('jform valid: %s', jform.is_valid())

        # a finding may only be created inactive or false-positive if no
        # mandatory note types are configured
        if (form['active'].value() is False or form['false_p'].value()) and form['duplicate'].value() is False:
            closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count()
            if closing_disabled != 0:
                error_inactive = ValidationError('Can not set a finding as inactive without adding all mandatory notes',
                                                 code='not_active_or_false_p_true')
                error_false_p = ValidationError('Can not set a finding as false positive without adding all mandatory notes',
                                                code='not_active_or_false_p_true')
                if form['active'].value() is False:
                    form.add_error('active', error_inactive)
                if form['false_p'].value():
                    form.add_error('false_p', error_false_p)
                messages.add_message(request, messages.ERROR,
                                     'Can not set a finding as inactive or false positive without adding all mandatory notes',
                                     extra_tags='alert-danger')
        if form.is_valid():
            # record that the template was used
            finding.last_used = timezone.now()
            finding.save()
            new_finding = form.save(commit=False)
            new_finding.test = test
            new_finding.reporter = request.user
            new_finding.numerical_severity = Finding.get_numerical_severity(new_finding.severity)
            new_finding.date = form.cleaned_data['date'] or datetime.today()

            finding_helper.update_finding_status(new_finding, request.user)

            # first save without dedupe/history so the finding gets a pk
            new_finding.save(dedupe_option=False, false_history=False)

            # Save and add new endpoints
            finding_helper.add_endpoints(new_finding, form)

            new_finding.save(false_history=True)
            if 'jiraform-push_to_jira' in request.POST:
                jform = JIRAFindingForm(request.POST, prefix='jiraform', instance=new_finding, push_all=push_all_jira_issues,
                                        jira_project=jira_helper.get_jira_project(test), finding_form=form)
                if jform.is_valid():
                    if jform.cleaned_data.get('push_to_jira'):
                        jira_helper.push_to_jira(new_finding)
                else:
                    add_error_message_to_response('jira form validation failed: %s' % jform.errors)

            messages.add_message(request, messages.SUCCESS,
                                 'Finding from template added successfully.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('view_test', args=(test.id, )))
        else:
            messages.add_message(request, messages.ERROR,
                                 'The form has errors, please correct them below.',
                                 extra_tags='alert-danger')
    else:
        # GET: prefill the form from the template; new findings start inactive
        form = AddFindingForm(req_resp=None, product=test.engagement.product, initial={
            'active': False,
            'date': timezone.now().date(),
            'verified': False,
            'false_p': False,
            'duplicate': False,
            'out_of_scope': False,
            'title': finding.title,
            'description': finding.description,
            'cwe': finding.cwe,
            'severity': finding.severity,
            'mitigation': finding.mitigation,
            'impact': finding.impact,
            'references': finding.references,
            'numerical_severity': finding.numerical_severity})
        if jira_helper.get_jira_project(test):
            jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform',
                                    jira_project=jira_helper.get_jira_project(test), finding_form=form)

    # logger.debug('form valid: %s', form.is_valid())
    # logger.debug('jform valid: %s', jform.is_valid())
    # logger.debug('form errors: %s', form.errors)
    # logger.debug('jform errors: %s', jform.errors)
    # logger.debug('jform errors: %s', vars(jform))

    product_tab = Product_Tab(test.engagement.product.id, title="Add Finding", tab="engagements")
    product_tab.setEngagement(test.engagement)
    return render(request, 'dojo/add_findings.html',
                  {'form': form,
                   'product_tab': product_tab,
                   'jform': jform,
                   'findings': findings,
                   'temp': True,
                   'fid': finding.id,
                   'tid': test.id,
                   'test': test,
                   })
def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified, minimum_severity=None,
                            endpoints_to_add=None, push_to_jira=None, group_by=None, now=None):
    """Persist the findings produced by a parser for an initial import.

    For each parsed item: normalizes severity, applies the minimum-severity
    filter, sets reporter/review metadata, saves it, attaches request/response
    pairs, endpoints and tags, and finally saves again with the JIRA push flag
    (grouped findings are pushed once per group at the end instead).

    :param test: the Test the findings belong to
    :param parsed_findings: iterable of unsaved Finding objects from a parser
    :param scan_type: accepted for interface compatibility — not read here
    :param user: reporter/reviewer; falls back to the current request user
    :param active: overrides item.active only when the parser left it truthy
    :param verified: overrides item.verified only when the parser left it truthy
    :param minimum_severity: drop items strictly below this severity
    :param endpoints_to_add: extra endpoints to attach to every finding
    :param push_to_jira: forwarded to Finding.save() for non-grouped findings
    :param group_by: finding-group key for auto-grouping new findings
    :param now: timestamp for created/updated/last_reviewed; defaults to the
                current time (fix: previously `now=timezone.now()` was
                evaluated once at import time, freezing the default)
    :return: list of the saved findings

    Fix: `user if user else get_current_user` assigned the function object
    itself to the reporter/last_reviewed_by foreign keys — it is now called.
    """
    if now is None:
        now = timezone.now()
    logger.debug('endpoints_to_add: %s', endpoints_to_add)
    new_findings = []
    items = parsed_findings
    logger.debug('starting import of %i items.', len(items) if items else 0)
    for item in items:
        # normalize the various "informational" spellings parsers emit
        sev = item.severity
        if sev == 'Information' or sev == 'Informational':
            sev = 'Info'
            item.severity = sev
        item.numerical_severity = Finding.get_numerical_severity(sev)

        if minimum_severity and (Finding.SEVERITIES[sev] > Finding.SEVERITIES[minimum_severity]):
            continue

        item.test = test
        # fix: call get_current_user() — previously the function object itself
        # was assigned to these foreign-key fields when user was None
        item.reporter = user if user else get_current_user()
        item.last_reviewed = now
        item.last_reviewed_by = user if user else get_current_user()

        # Only set active/verified flags if they were NOT set by default value(True)
        if item.active:
            item.active = active
        if item.verified:
            item.verified = verified

        item.created = now
        item.updated = now
        # don't dedupe before endpoints are added
        item.save(dedupe_option=False)

        if settings.FEATURE_FINDING_GROUPS and group_by:
            finding_helper.add_finding_to_auto_group(item, group_by)

        if (hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0):
            for req_resp in item.unsaved_req_resp:
                burp_rr = BurpRawRequestResponse(
                    finding=item,
                    burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
                    burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
                burp_rr.clean()
                burp_rr.save()

        if (item.unsaved_request is not None and item.unsaved_response is not None):
            burp_rr = BurpRawRequestResponse(
                finding=item,
                burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
                burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
            burp_rr.clean()
            burp_rr.save()

        for endpoint in item.unsaved_endpoints:
            try:
                endpoint.clean()
            except ValidationError as e:
                logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                               "{}".format(e))
            # NOTE(review): if MultipleObjectsReturned is raised below, ep/eps
            # keep their previous (or no) value and the add() calls operate on
            # a stale or unbound name — consider resolving via .first() instead
            try:
                ep, created = endpoint_get_or_create(
                    protocol=endpoint.protocol,
                    userinfo=endpoint.userinfo,
                    host=endpoint.host,
                    port=endpoint.port,
                    path=endpoint.path,
                    query=endpoint.query,
                    fragment=endpoint.fragment,
                    product=test.engagement.product)
            except (MultipleObjectsReturned):
                pass
            try:
                eps, created = Endpoint_Status.objects.get_or_create(
                    finding=item,
                    endpoint=ep)
            except (MultipleObjectsReturned):
                pass

            ep.endpoint_status.add(eps)
            item.endpoint_status.add(eps)
            item.endpoints.add(ep)

        if endpoints_to_add:
            for endpoint in endpoints_to_add:
                logger.debug('adding endpoint %s', endpoint)
                # TODO Not sure what happens here, we get an endpoint model and try to create it again?
                try:
                    endpoint.clean()
                except ValidationError as e:
                    logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                                   "{}".format(e))
                try:
                    ep, created = endpoint_get_or_create(
                        protocol=endpoint.protocol,
                        userinfo=endpoint.userinfo,
                        host=endpoint.host,
                        port=endpoint.port,
                        path=endpoint.path,
                        query=endpoint.query,
                        fragment=endpoint.fragment,
                        product=test.engagement.product)
                except (MultipleObjectsReturned):
                    pass
                try:
                    eps, created = Endpoint_Status.objects.get_or_create(
                        finding=item,
                        endpoint=ep)
                except (MultipleObjectsReturned):
                    pass

                ep.endpoint_status.add(eps)
                item.endpoints.add(ep)
                item.endpoint_status.add(eps)

        if item.unsaved_tags:
            item.tags = item.unsaved_tags

        new_findings.append(item)
        # to avoid pushing a finding group multiple times, we push those outside of the loop
        if settings.FEATURE_FINDING_GROUPS and item.finding_group:
            item.save()
        else:
            item.save(push_to_jira=push_to_jira)

    # push each affected finding group exactly once
    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in set([finding.finding_group for finding in new_findings if finding.finding_group is not None]):
            jira_helper.push_to_jira(finding_group)

    return new_findings
def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified, minimum_severity=None,
                            endpoints_to_add=None, push_to_jira=None, group_by=None, now=timezone.now(), service=None,
                            scan_date=None, **kwargs):
    """Re-import parsed findings into *test*, matching them to existing findings.

    Matched findings that were mitigated are reactivated (including their
    endpoint statuses, async in chunks when ASYNC_FINDING_IMPORT is set);
    matched active findings are left in place; unmatched items become new
    findings. Findings present before but absent from this report are
    returned in *to_mitigate* for the caller to close.

    :param test: the Test being re-imported
    :param parsed_findings: iterable of unsaved Finding objects from a parser
    :param scan_type: used for dedupe-algorithm lookup and reactivation notes
    :param user: reporter of new findings and author of reactivation handling
    :param active: active flag applied to newly created findings
    :param verified: verified flag applied to new and reactivated findings
    :param minimum_severity: drop items strictly below this severity
    :param endpoints_to_add: extra endpoints to attach to every finding
    :param push_to_jira: forwarded to Finding.save() for non-grouped findings
    :param group_by: finding-group key for auto-grouping new findings only
    :param now: accepted for interface compatibility — not read in this body
                (note: the default is evaluated once at import time)
    :param service: service value stamped on each item before hashing
    :param scan_date: overrides the parser-provided date on new findings
    :param kwargs: 'sync' (bool) — when falsy, results are JSON-serialized
    :return: (new, reactivated, to_mitigate, untouched) — findings when
             sync=True, JSON-serialized findings otherwise

    Fix: in the reactivation path, a single-chunk endpoint-status list was
    reactivated synchronously and then *also* dispatched to the async workers
    (double reactivation); the async branch is now the else of that check,
    which also prevents an IndexError on chunk_list[0] for an empty list.
    """
    items = parsed_findings
    original_items = list(test.finding_set.all())
    new_items = []
    finding_count = 0
    finding_added_count = 0
    reactivated_count = 0
    reactivated_items = []
    unchanged_count = 0
    unchanged_items = []

    logger.debug('starting reimport of %i items.', len(items) if items else 0)
    # local import to avoid circular imports
    from dojo.importers.reimporter.utils import (get_deduplication_algorithm_from_conf,
                                                 match_new_finding_to_existing_finding,
                                                 update_endpoint_status,
                                                 reactivate_endpoint_status)
    deduplication_algorithm = get_deduplication_algorithm_from_conf(scan_type)

    # NOTE(review): i is never incremented, so the per-item log prefix is always 0
    i = 0
    logger.debug('STEP 1: looping over findings from the reimported report and trying to match them to existing findings')
    deduplicationLogger.debug('Algorithm used for matching new findings to existing findings: %s', deduplication_algorithm)
    for item in items:
        # FIXME hack to remove when all parsers have unit tests for this attribute
        if item.severity.lower().startswith('info') and item.severity != 'Info':
            item.severity = 'Info'

        item.numerical_severity = Finding.get_numerical_severity(item.severity)

        if minimum_severity and (Finding.SEVERITIES[item.severity] > Finding.SEVERITIES[minimum_severity]):
            # finding's severity is below the configured threshold : ignoring the finding
            continue

        # existing findings may be from before we had component_name/version fields
        component_name = item.component_name if hasattr(item, 'component_name') else None
        component_version = item.component_version if hasattr(item, 'component_version') else None

        if not hasattr(item, 'test'):
            item.test = test

        item.service = service

        item.hash_code = item.compute_hash_code()
        deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

        findings = match_new_finding_to_existing_finding(item, test, deduplication_algorithm, scan_type)

        deduplicationLogger.debug('found %i findings matching with current new finding', len(findings))

        if findings:
            # existing finding found
            finding = findings[0]
            if finding.false_p or finding.out_of_scope or finding.risk_accepted:
                logger.debug('%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s',
                             i, finding.false_p, finding.out_of_scope, finding.risk_accepted, finding.id, finding, finding.component_name, finding.component_version)
            elif finding.mitigated or finding.is_mitigated:
                logger.debug('%i: reactivating: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                finding.mitigated = None
                finding.is_mitigated = False
                finding.mitigated_by = None
                finding.active = True
                finding.verified = verified

                # existing findings may be from before we had component_name/version fields
                finding.component_name = finding.component_name if finding.component_name else component_name
                finding.component_version = finding.component_version if finding.component_version else component_version

                # don't dedupe before endpoints are added
                finding.save(dedupe_option=False)
                note = Notes(entry="Re-activated by %s re-upload." % scan_type, author=user)
                note.save()
                endpoint_statuses = finding.endpoint_status.all()

                # Determine if this can be run async
                if settings.ASYNC_FINDING_IMPORT:
                    chunk_list = importer_utils.chunk_list(endpoint_statuses)
                    # If there is only one chunk, then do not bother with async
                    if len(chunk_list) < 2:
                        reactivate_endpoint_status(endpoint_statuses, sync=True)
                    else:
                        # fix: previously fell through and re-dispatched the same
                        # statuses to the async workers after the sync call above
                        logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0]))
                        # First kick off all the workers
                        for endpoint_status_list in chunk_list:
                            reactivate_endpoint_status(endpoint_status_list, sync=False)
                else:
                    reactivate_endpoint_status(endpoint_statuses, sync=True)

                finding.notes.add(note)
                reactivated_items.append(finding)
                reactivated_count += 1
            else:
                # existing findings may be from before we had component_name/version fields
                logger.debug('%i: updating existing finding: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                if not finding.component_name or not finding.component_version:
                    finding.component_name = finding.component_name if finding.component_name else component_name
                    finding.component_version = finding.component_version if finding.component_version else component_version
                    finding.save(dedupe_option=False)

                unchanged_items.append(finding)
                unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug("Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints")
                    update_endpoint_status(finding, item, user)
        else:
            # no existing finding found
            item.reporter = user
            item.last_reviewed = timezone.now()
            item.last_reviewed_by = user
            item.verified = verified
            item.active = active

            # if scan_date was provided, override value from parser
            if scan_date:
                item.date = scan_date

            # Save it. Don't dedupe before endpoints are added.
            item.save(dedupe_option=False)
            logger.debug('%i: reimport created new finding as no existing finding match: %i:%s:%s:%s',
                         i, item.id, item, item.component_name, item.component_version)

            # only new items get auto grouped to avoid confusion around already existing items that are already grouped
            if settings.FEATURE_FINDING_GROUPS and group_by:
                finding_helper.add_finding_to_auto_group(item, group_by)

            finding_added_count += 1
            new_items.append(item)
            finding = item

            if hasattr(item, 'unsaved_req_resp'):
                for req_resp in item.unsaved_req_resp:
                    burp_rr = BurpRawRequestResponse(
                        finding=finding,
                        burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
                        burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
                    burp_rr.clean()
                    burp_rr.save()

            if item.unsaved_request and item.unsaved_response:
                burp_rr = BurpRawRequestResponse(
                    finding=finding,
                    burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
                    burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
                burp_rr.clean()
                burp_rr.save()

        # for existing findings: make sure endpoints are present or created
        if finding:
            finding_count += 1
            if settings.ASYNC_FINDING_IMPORT:
                importer_utils.chunk_endpoints_and_disperse(finding, test, item.unsaved_endpoints)
            else:
                importer_utils.add_endpoints_to_unsaved_finding(finding, test, item.unsaved_endpoints, sync=True)

            if endpoints_to_add:
                if settings.ASYNC_FINDING_IMPORT:
                    importer_utils.chunk_endpoints_and_disperse(finding, test, endpoints_to_add)
                else:
                    importer_utils.add_endpoints_to_unsaved_finding(finding, test, endpoints_to_add, sync=True)

            if item.unsaved_tags:
                finding.tags = item.unsaved_tags

            if item.unsaved_files:
                for unsaved_file in item.unsaved_files:
                    data = base64.b64decode(unsaved_file.get('data'))
                    title = unsaved_file.get('title', '<No title>')
                    file_upload, file_upload_created = FileUpload.objects.get_or_create(title=title,)
                    file_upload.file.save(title, ContentFile(data))
                    file_upload.save()
                    finding.files.add(file_upload)

            # existing findings may be from before we had component_name/version fields
            finding.component_name = finding.component_name if finding.component_name else component_name
            finding.component_version = finding.component_version if finding.component_version else component_version

            # finding = new finding or existing finding still in the upload report
            # to avoid pushing a finding group multiple times, we push those outside of the loop
            if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                finding.save()
            else:
                finding.save(push_to_jira=push_to_jira)

    to_mitigate = set(original_items) - set(reactivated_items) - set(unchanged_items)
    untouched = set(unchanged_items) - set(to_mitigate)

    # push each affected finding group exactly once
    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in set([finding.finding_group for finding in reactivated_items + unchanged_items + new_items if finding.finding_group is not None]):
            jira_helper.push_to_jira(finding_group)

    # when called as an async task (sync falsy), return JSON-serializable results
    sync = kwargs.get('sync', False)
    if not sync:
        serialized_new_items = [serializers.serialize('json', [finding, ]) for finding in new_items]
        serialized_reactivated_items = [serializers.serialize('json', [finding, ]) for finding in reactivated_items]
        serialized_to_mitigate = [serializers.serialize('json', [finding, ]) for finding in to_mitigate]
        serialized_untouched = [serializers.serialize('json', [finding, ]) for finding in untouched]
        return serialized_new_items, serialized_reactivated_items, serialized_to_mitigate, serialized_untouched

    return new_items, reactivated_items, to_mitigate, untouched
def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified, minimum_severity=None,
                            endpoints_to_add=None, push_to_jira=None, group_by=None, now=timezone.now()):
    """Re-import parsed findings into *test*, matching them to existing findings.

    Matched findings that were mitigated are reactivated (including their
    endpoint statuses); matched active findings are left in place; unmatched
    items become new findings. Findings present before but absent from this
    report are returned in *to_mitigate* for the caller to close.

    :param test: the Test being re-imported
    :param parsed_findings: iterable of unsaved Finding objects from a parser
    :param scan_type: used for dedupe-algorithm lookup and reactivation notes
    :param user: reporter of new findings and reviewer metadata
    :param active: active flag applied to newly created findings
    :param verified: verified flag applied to new and reactivated findings
    :param minimum_severity: drop items strictly below this severity
    :param endpoints_to_add: extra endpoints to attach to every finding
    :param push_to_jira: forwarded to Finding.save() for non-grouped findings
    :param group_by: finding-group key for auto-grouping new findings only
    :param now: accepted for interface compatibility — not read in this body
                (note: the default is evaluated once at import time)
    :return: (new_items, reactivated_items, to_mitigate, untouched)
    """
    items = parsed_findings
    original_items = list(test.finding_set.all())
    new_items = []
    # NOTE(review): mitigated_count is never updated or read below
    mitigated_count = 0
    finding_count = 0
    finding_added_count = 0
    reactivated_count = 0
    reactivated_items = []
    unchanged_count = 0
    unchanged_items = []

    logger.debug('starting reimport of %i items.', len(items) if items else 0)
    # local import to avoid circular imports
    from dojo.importers.reimporter.utils import (get_deduplication_algorithm_from_conf,
                                                 match_new_finding_to_existing_finding,
                                                 update_endpoint_status)
    deduplication_algorithm = get_deduplication_algorithm_from_conf(scan_type)

    # NOTE(review): i is never incremented, so the per-item log prefix is always 0
    i = 0
    logger.debug('STEP 1: looping over findings from the reimported report and trying to match them to existing findings')
    deduplicationLogger.debug('Algorithm used for matching new findings to existing findings: %s', deduplication_algorithm)
    for item in items:
        # FIXME hack to remove when all parsers have unit tests for this attribute
        if item.severity.lower().startswith('info') and item.severity != 'Info':
            item.severity = 'Info'

        item.numerical_severity = Finding.get_numerical_severity(item.severity)

        if minimum_severity and (Finding.SEVERITIES[item.severity] > Finding.SEVERITIES[minimum_severity]):
            # finding's severity is below the configured threshold : ignoring the finding
            continue

        # existing findings may be from before we had component_name/version fields
        component_name = item.component_name if hasattr(item, 'component_name') else None
        component_version = item.component_version if hasattr(item, 'component_version') else None

        if not hasattr(item, 'test'):
            item.test = test

        item.hash_code = item.compute_hash_code()
        deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

        findings = match_new_finding_to_existing_finding(item, test, deduplication_algorithm, scan_type)

        deduplicationLogger.debug('found %i findings matching with current new finding', len(findings))

        if findings:
            # existing finding found
            finding = findings[0]
            if finding.false_p or finding.out_of_scope or finding.risk_accepted:
                # leave triaged-away findings untouched
                logger.debug('%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s',
                             i, finding.false_p, finding.out_of_scope, finding.risk_accepted, finding.id, finding, finding.component_name, finding.component_version)
            elif finding.mitigated or finding.is_mitigated:
                # the finding reappeared in the report: reactivate it
                logger.debug('%i: reactivating: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                finding.mitigated = None
                finding.is_mitigated = False
                finding.mitigated_by = None
                finding.active = True
                finding.verified = verified
                # existing findings may be from before we had component_name/version fields
                finding.component_name = finding.component_name if finding.component_name else component_name
                finding.component_version = finding.component_version if finding.component_version else component_version
                # don't dedupe before endpoints are added
                finding.save(dedupe_option=False)
                note = Notes(entry="Re-activated by %s re-upload." % scan_type, author=user)
                note.save()
                # un-mitigate the endpoint statuses as well
                endpoint_status = finding.endpoint_status.all()
                for status in endpoint_status:
                    status.mitigated_by = None
                    status.mitigated_time = None
                    status.mitigated = False
                    status.last_modified = timezone.now()
                    status.save()
                finding.notes.add(note)
                reactivated_items.append(finding)
                reactivated_count += 1
            else:
                # existing findings may be from before we had component_name/version fields
                logger.debug('%i: updating existing finding: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version)
                if not finding.component_name or not finding.component_version:
                    finding.component_name = finding.component_name if finding.component_name else component_name
                    finding.component_version = finding.component_version if finding.component_version else component_version
                    finding.save(dedupe_option=False)

                unchanged_items.append(finding)
                unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug("Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints")
                    update_endpoint_status(finding, item, user)
        else:
            # no existing finding found
            item.reporter = user
            item.last_reviewed = timezone.now()
            item.last_reviewed_by = user
            item.verified = verified
            item.active = active
            # Save it. Don't dedupe before endpoints are added.
            item.save(dedupe_option=False)
            logger.debug('%i: reimport created new finding as no existing finding match: %i:%s:%s:%s',
                         i, item.id, item, item.component_name, item.component_version)

            # only new items get auto grouped to avoid confusion around already existing items that are already grouped
            if settings.FEATURE_FINDING_GROUPS and group_by:
                finding_helper.add_finding_to_auto_group(item, group_by)

            finding_added_count += 1
            new_items.append(item)
            finding = item

            if hasattr(item, 'unsaved_req_resp'):
                for req_resp in item.unsaved_req_resp:
                    burp_rr = BurpRawRequestResponse(
                        finding=finding,
                        burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
                        burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
                    burp_rr.clean()
                    burp_rr.save()

            if item.unsaved_request and item.unsaved_response:
                burp_rr = BurpRawRequestResponse(
                    finding=finding,
                    burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
                    burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
                burp_rr.clean()
                burp_rr.save()

        # for existing findings: make sure endpoints are present or created
        if finding:
            finding_count += 1
            for endpoint in item.unsaved_endpoints:
                try:
                    endpoint.clean()
                except ValidationError as e:
                    logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                                   "{}".format(e))
                # NOTE(review): if MultipleObjectsReturned is raised below, ep/eps
                # keep their previous (or no) value and the add() calls operate on
                # a stale or unbound name — confirm intended
                try:
                    ep, created = endpoint_get_or_create(
                        protocol=endpoint.protocol,
                        userinfo=endpoint.userinfo,
                        host=endpoint.host,
                        port=endpoint.port,
                        path=endpoint.path,
                        query=endpoint.query,
                        fragment=endpoint.fragment,
                        product=test.engagement.product)
                except (MultipleObjectsReturned):
                    pass
                try:
                    eps, created = Endpoint_Status.objects.get_or_create(
                        finding=finding,
                        endpoint=ep)
                except (MultipleObjectsReturned):
                    pass

                ep.endpoint_status.add(eps)
                finding.endpoints.add(ep)
                finding.endpoint_status.add(eps)

            if endpoints_to_add:
                for endpoint in endpoints_to_add:
                    # TODO Not sure what happens here, we get an endpoint model and try to create it again?
                    try:
                        endpoint.clean()
                    except ValidationError as e:
                        logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
                                       "{}".format(e))
                    try:
                        ep, created = endpoint_get_or_create(
                            protocol=endpoint.protocol,
                            userinfo=endpoint.userinfo,
                            host=endpoint.host,
                            port=endpoint.port,
                            path=endpoint.path,
                            query=endpoint.query,
                            fragment=endpoint.fragment,
                            product=test.engagement.product)
                    except (MultipleObjectsReturned):
                        pass
                    try:
                        eps, created = Endpoint_Status.objects.get_or_create(
                            finding=finding,
                            endpoint=ep)
                    except (MultipleObjectsReturned):
                        pass

                    ep.endpoint_status.add(eps)
                    finding.endpoints.add(ep)
                    finding.endpoint_status.add(eps)

            if item.unsaved_tags:
                finding.tags = item.unsaved_tags

            # existing findings may be from before we had component_name/version fields
            finding.component_name = finding.component_name if finding.component_name else component_name
            finding.component_version = finding.component_version if finding.component_version else component_version

            # finding = new finding or existing finding still in the upload report
            # to avoid pushing a finding group multiple times, we push those outside of the loop
            if settings.FEATURE_FINDING_GROUPS and finding.finding_group:
                finding.save()
            else:
                finding.save(push_to_jira=push_to_jira)

    to_mitigate = set(original_items) - set(reactivated_items) - set(unchanged_items)
    untouched = set(unchanged_items) - set(to_mitigate)

    # push each affected finding group exactly once
    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in set([finding.finding_group for finding in reactivated_items + unchanged_items + new_items if finding.finding_group is not None]):
            jira_helper.push_to_jira(finding_group)

    return new_items, reactivated_items, to_mitigate, untouched
def process_parsed_findings(self, test, parsed_findings, scan_type, user, active, verified,
                            minimum_severity=None, endpoints_to_add=None, push_to_jira=None,
                            group_by=None, now=None, service=None, scan_date=None, **kwargs):
    """Persist the findings produced by a scan parser into ``test``.

    For each parsed item this normalizes severity, applies the
    active/verified flags from the API/GUI, stores Burp request/response
    pairs, endpoints, tags and attached files, then saves the finding
    (optionally pushing it — or its finding group — to JIRA).

    :param test: the Test the findings belong to.
    :param parsed_findings: iterable of unsaved Finding instances from a parser.
    :param scan_type: scan type name (unchanged, kept for interface compatibility).
    :param user: user performing the import; falls back to the current user.
    :param active: active flag from GUI/API; combined with the parser value (see below).
    :param verified: verified flag from GUI/API; combined with the parser value.
    :param minimum_severity: findings strictly below this severity are skipped.
    :param endpoints_to_add: extra endpoints to attach to every finding.
    :param push_to_jira: whether to push findings (or groups) to JIRA after saving.
    :param group_by: strategy for auto-grouping newly created findings.
    :param now: timestamp used for ``last_reviewed``; defaults to the current
        time at call time. (BUG FIX: the old default ``now=timezone.now()``
        was evaluated once at import time, so long-running processes stamped
        stale timestamps.)
    :param service: service name stored on each finding.
    :param scan_date: overrides the finding date supplied by the parser.
    :return: the saved findings, JSON-serialized unless ``sync=True`` is
        passed in ``kwargs``.
    """
    if now is None:
        now = timezone.now()
    logger.debug('endpoints_to_add: %s', endpoints_to_add)
    new_findings = []
    items = parsed_findings
    logger.debug('starting import of %i items.', len(items) if items else 0)
    for item in items:
        # FIXME hack to remove when all parsers have unit tests for this attribute
        if item.severity.lower().startswith('info') and item.severity != 'Info':
            item.severity = 'Info'

        item.numerical_severity = Finding.get_numerical_severity(item.severity)

        if minimum_severity and (Finding.SEVERITIES[item.severity] >
                                 Finding.SEVERITIES[minimum_severity]):
            # finding's severity is below the configured threshold : ignoring the finding
            continue

        item.test = test
        # BUG FIX: get_current_user was referenced without calling it, which
        # assigned the function object itself as reporter / last_reviewed_by.
        item.reporter = user if user else get_current_user()
        item.last_reviewed = now
        item.last_reviewed_by = user if user else get_current_user()

        logger.debug('process_parsed_findings: active from report: %s, verified from report: %s',
                     item.active, item.verified)
        # active, verified parameters = parameters from the gui or api call.
        # item.active, item.verified = values from the report / the parser
        # if either value of active (from the parser or from the api/gui) is false, final status is inactive
        # else final status is active
        # if either value of verified (from the parser or from the api/gui) is false, final status is not verified
        # else final status is verified
        # Note that:
        # - the API (active/verified parameters) values default to True if not specified
        # - the parser values default to true if not set by the parser (as per the default value in models.py)
        # - there is no "not specified" in the GUI (not ticked means not active/not verified)
        if item.active:
            item.active = active
        if item.verified:
            item.verified = verified

        # if scan_date was provided, override value from parser
        if scan_date:
            item.date = scan_date

        item.service = service
        # dedupe is deferred; the final save below triggers it
        item.save(dedupe_option=False)

        if settings.FEATURE_FINDING_GROUPS and group_by:
            finding_helper.add_finding_to_auto_group(item, group_by)

        # store raw request/response pairs supplied by the parser
        if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
            for req_resp in item.unsaved_req_resp:
                burp_rr = BurpRawRequestResponse(
                    finding=item,
                    burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")),
                    burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8")))
                burp_rr.clean()
                burp_rr.save()

        if item.unsaved_request is not None and item.unsaved_response is not None:
            burp_rr = BurpRawRequestResponse(
                finding=item,
                burpRequestBase64=base64.b64encode(item.unsaved_request.encode()),
                burpResponseBase64=base64.b64encode(item.unsaved_response.encode()))
            burp_rr.clean()
            burp_rr.save()

        # endpoints coming from the parser itself
        if settings.ASYNC_FINDING_IMPORT:
            importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints)
        else:
            importer_utils.add_endpoints_to_unsaved_finding(item, test, item.unsaved_endpoints, sync=True)

        # endpoints explicitly supplied with the import request
        if endpoints_to_add:
            if settings.ASYNC_FINDING_IMPORT:
                importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add)
            else:
                importer_utils.add_endpoints_to_unsaved_finding(item, test, endpoints_to_add, sync=True)

        if item.unsaved_tags:
            item.tags = item.unsaved_tags

        if item.unsaved_files:
            for unsaved_file in item.unsaved_files:
                data = base64.b64decode(unsaved_file.get('data'))
                title = unsaved_file.get('title', '<No title>')
                # NOTE(review): looked up by title only, so two different files
                # with the same title would share one FileUpload — confirm intended.
                file_upload, _ = FileUpload.objects.get_or_create(title=title)
                file_upload.file.save(title, ContentFile(data))
                file_upload.save()
                item.files.add(file_upload)

        new_findings.append(item)
        # to avoid pushing a finding group multiple times, we push those outside of the loop
        if settings.FEATURE_FINDING_GROUPS and item.finding_group:
            item.save()
        else:
            item.save(push_to_jira=push_to_jira)

    if settings.FEATURE_FINDING_GROUPS and push_to_jira:
        for finding_group in {finding.finding_group for finding in new_findings
                              if finding.finding_group is not None}:
            jira_helper.push_to_jira(finding_group)

    sync = kwargs.get('sync', False)
    if not sync:
        return [serializers.serialize('json', [finding, ]) for finding in new_findings]
    return new_findings
def add_temp_finding(request, tid, fid):
    """Create a new Finding on test ``tid`` from Finding_Template ``fid``.

    GET renders the add-finding form pre-filled from the template; POST
    validates the submission, saves the finding (endpoints, tags, optional
    JIRA push), optionally creates a new template, and redirects back to
    the test view.
    """
    test = get_object_or_404(Test, id=tid)
    finding = get_object_or_404(Finding_Template, id=fid)
    findings = Finding_Template.objects.all()
    # NOTE(review): push-all is looked up on the template here while the form
    # below uses the test — presumably these should agree; confirm intended.
    push_all_jira_issues = jira_helper.is_push_all_issues(finding)

    if jira_helper.get_jira_project(test):
        jform = JIRAFindingForm(
            push_all=jira_helper.is_push_all_issues(test),
            prefix='jiraform',
            jira_project=jira_helper.get_jira_project(test))
    else:
        jform = None

    if request.method == 'POST':
        form = FindingForm(request.POST, template=True, req_resp=None)
        # closing a finding (inactive or false positive) requires all mandatory
        # note types to be present, unless it is being marked as a duplicate
        if (form['active'].value() is False or form['false_p'].value()) \
                and form['duplicate'].value() is False:
            closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count()
            if closing_disabled != 0:
                error_inactive = ValidationError(
                    'Can not set a finding as inactive without adding all mandatory notes',
                    code='not_active_or_false_p_true')
                error_false_p = ValidationError(
                    'Can not set a finding as false positive without adding all mandatory notes',
                    code='not_active_or_false_p_true')
                if form['active'].value() is False:
                    form.add_error('active', error_inactive)
                if form['false_p'].value():
                    form.add_error('false_p', error_false_p)
                messages.add_message(
                    request,
                    messages.ERROR,
                    'Can not set a finding as inactive or false positive without adding all mandatory notes',
                    extra_tags='alert-danger')
        if form.is_valid():
            # record that the template was used
            finding.last_used = timezone.now()
            finding.save()
            new_finding = form.save(commit=False)
            new_finding.test = test
            new_finding.reporter = request.user
            new_finding.numerical_severity = Finding.get_numerical_severity(new_finding.severity)
            # NOTE(review): datetime.today() returns a datetime, not a date —
            # confirm the model field accepts/coerces this.
            new_finding.date = datetime.today()
            if new_finding.false_p or new_finding.active is False:
                new_finding.mitigated = timezone.now()
                new_finding.mitigated_by = request.user
                # BUG FIX: was 'is_Mitigated' (wrong capitalization), which set
                # a stray attribute instead of the model flag used elsewhere in
                # this file (see close_old_findings).
                new_finding.is_mitigated = True

            create_template = new_finding.is_template
            # is template always False now in favor of new model Finding_Template
            # no further action needed here since this is already adding from template.
            new_finding.is_template = False
            new_finding.save(dedupe_option=False, false_history=False)
            for ep in form.cleaned_data['endpoints']:
                eps, created = Endpoint_Status.objects.get_or_create(
                    finding=new_finding, endpoint=ep)
                ep.endpoint_status.add(eps)
                new_finding.endpoints.add(ep)
                new_finding.endpoint_status.add(eps)
            new_finding.save(false_history=True)
            tags = request.POST.getlist('tags')
            t = ", ".join('"{0}"'.format(w) for w in tags)
            new_finding.tags = t
            if 'jiraform-push_to_jira' in request.POST:
                jform = JIRAFindingForm(
                    request.POST,
                    prefix='jiraform',
                    push_all=push_all_jira_issues,
                    jira_project=jira_helper.get_jira_project(test))
                if jform.is_valid():
                    if jform.cleaned_data.get('push_to_jira'):
                        jira_helper.push_to_jira(new_finding)
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Finding from template added successfully.',
                                 extra_tags='alert-success')

            if create_template:
                templates = Finding_Template.objects.filter(title=new_finding.title)
                if len(templates) > 0:
                    messages.add_message(
                        request,
                        messages.ERROR,
                        'A finding template was not created. A template with this title already '
                        'exists.',
                        extra_tags='alert-danger')
                else:
                    template = Finding_Template(
                        title=new_finding.title,
                        cwe=new_finding.cwe,
                        severity=new_finding.severity,
                        description=new_finding.description,
                        mitigation=new_finding.mitigation,
                        impact=new_finding.impact,
                        references=new_finding.references,
                        numerical_severity=new_finding.numerical_severity)
                    template.save()
                    messages.add_message(
                        request,
                        messages.SUCCESS,
                        'A finding template was also created.',
                        extra_tags='alert-success')
            return HttpResponseRedirect(reverse('view_test', args=(test.id, )))
        else:
            messages.add_message(
                request,
                messages.ERROR,
                'The form has errors, please correct them below.',
                extra_tags='alert-danger')
    else:
        # GET: pre-fill the form from the template, defaulting to an
        # inactive/unverified finding dated today
        form = FindingForm(
            template=True,
            req_resp=None,
            initial={
                'active': False,
                'date': timezone.now().date(),
                'verified': False,
                'false_p': False,
                'duplicate': False,
                'out_of_scope': False,
                'title': finding.title,
                'description': finding.description,
                'cwe': finding.cwe,
                'severity': finding.severity,
                'mitigation': finding.mitigation,
                'impact': finding.impact,
                'references': finding.references,
                'numerical_severity': finding.numerical_severity,
                'tags': [tag.name for tag in finding.tags]
            })

    product_tab = Product_Tab(test.engagement.product.id, title="Add Finding", tab="engagements")
    product_tab.setEngagement(test.engagement)
    return render(
        request, 'dojo/add_findings.html', {
            'form': form,
            'product_tab': product_tab,
            'jform': jform,
            'findings': findings,
            'temp': True,
            'fid': finding.id,
            'tid': test.id,
            'test': test,
        })
def edit_engagement(request, eid):
    """Edit engagement ``eid``: core fields, JIRA project config, epic push.

    GET renders the edit form (plus JIRA forms when JIRA is enabled);
    POST saves the engagement, its JIRA project configuration, and
    optionally pushes the engagement as an epic to JIRA.
    """
    # use get_object_or_404 for consistency with the other views (was a bare
    # .get(), which 500s on an unknown id instead of returning 404)
    engagement = get_object_or_404(Engagement, pk=eid)
    is_ci_cd = engagement.engagement_type == "CI/CD"
    jira_epic_form = None
    jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
    jira_error = False

    if request.method == 'POST':
        form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd,
                       product=engagement.product.id, user=request.user)
        jira_project_form = JIRAProjectForm(request.POST, prefix='jira-project-form',
                                            instance=jira_project, target='engagement')
        jira_epic_form = JIRAEngagementForm(request.POST, prefix='jira-epic-form',
                                            instance=engagement)

        if (form.is_valid()
                and (jira_project_form is None or jira_project_form.is_valid())
                and (jira_epic_form is None or jira_epic_form.is_valid())):
            # first save engagement details
            new_status = form.cleaned_data.get('status')
            engagement = form.save(commit=False)
            # cancelled/completed engagements are no longer active
            engagement.active = new_status not in ("Cancelled", "Completed")
            engagement.save()
            tags = request.POST.getlist('tags')
            t = ", ".join('"{0}"'.format(w) for w in tags)
            engagement.tags = t

            # save jira project config
            jira_project = jira_project_form.save(commit=False)
            jira_project.engagement = engagement
            # only check jira project if form is sufficiently populated
            if jira_project.jira_instance and jira_project.project_key:
                jira_error = not jira_helper.is_jira_project_valid(jira_project)
                if not jira_error:
                    jira_project.save()
                    messages.add_message(
                        request,
                        messages.SUCCESS,
                        'JIRA Project config added successfully.',
                        extra_tags='alert-success')

            # push epic
            if jira_epic_form.cleaned_data.get('push_to_jira'):
                if jira_helper.push_to_jira(engagement):
                    # typo fix in user-facing message: 'succesfully' -> 'successfully'
                    messages.add_message(
                        request,
                        messages.SUCCESS,
                        'Push to JIRA for Epic queued successfully, check alerts on the top right for errors',
                        extra_tags='alert-success')
                else:
                    jira_error = True
                    # BUG FIX: the failure path used messages.SUCCESS even
                    # though the message and alert-danger tag report an error
                    messages.add_message(
                        request,
                        messages.ERROR,
                        'Push to JIRA for Epic failed, check alerts on the top right for errors',
                        extra_tags='alert-danger')

            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Engagement updated successfully.',
                                 extra_tags='alert-success')

            # only leave the page when everything JIRA-related succeeded
            if not jira_error:
                if '_Add Tests' in request.POST:
                    return HttpResponseRedirect(
                        reverse('add_tests', args=(engagement.id, )))
                else:
                    return HttpResponseRedirect(
                        reverse('view_engagement', args=(engagement.id, )))
        else:
            # if forms invalid, page will just reload and show errors
            if jira_project_form.errors or jira_epic_form.errors:
                messages.add_message(request,
                                     messages.ERROR,
                                     'Errors in JIRA forms, see below',
                                     extra_tags='alert-danger')
    else:
        form = EngForm(initial={'product': engagement.product},
                       instance=engagement, cicd=is_ci_cd,
                       product=engagement.product, user=request.user)
        jira_project_form = None
        jira_epic_form = None
        if get_system_setting('enable_jira'):
            jira_project_form = JIRAProjectForm(prefix='jira-project-form',
                                                instance=jira_project,
                                                target='engagement',
                                                product=engagement.product)
            if jira_project:
                logger.debug('showing jira-epic-form')
                jira_epic_form = JIRAEngagementForm(prefix='jira-epic-form',
                                                    instance=engagement)

    form.initial['tags'] = [tag.name for tag in engagement.tags]
    title = ' CI/CD' if is_ci_cd else ''
    product_tab = Product_Tab(engagement.product.id,
                              title="Edit" + title + " Engagement",
                              tab="engagements")
    product_tab.setEngagement(engagement)
    return render(
        request, 'dojo/new_eng.html', {
            'product_tab': product_tab,
            'form': form,
            'edit': True,
            'jira_epic_form': jira_epic_form,
            'jira_project_form': jira_project_form,
        })